diff --git a/.clang-tidy b/.clang-tidy index 0400b500e5c..ca84a4834e5 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -210,3 +210,6 @@ CheckOptions: value: false - key: performance-move-const-arg.CheckTriviallyCopyableMove value: false + # Workaround clang-tidy bug: https://github.com/llvm/llvm-project/issues/46097 + - key: readability-identifier-naming.TypeTemplateParameterIgnoredRegexp + value: expr-type diff --git a/.github/ISSUE_TEMPLATE/10_question.md b/.github/ISSUE_TEMPLATE/10_question.md index a112b9599d5..5b3d00a3180 100644 --- a/.github/ISSUE_TEMPLATE/10_question.md +++ b/.github/ISSUE_TEMPLATE/10_question.md @@ -7,6 +7,6 @@ assignees: '' --- -> Make sure to check documentation https://clickhouse.yandex/docs/en/ first. If the question is concise and probably has a short answer, asking it in Telegram chat https://telegram.me/clickhouse_en is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse +> Make sure to check documentation https://clickhouse.com/docs/en/ first. If the question is concise and probably has a short answer, asking it in Telegram chat https://telegram.me/clickhouse_en is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse > If you still prefer GitHub issues, remove all this text and ask your question here. diff --git a/.github/ISSUE_TEMPLATE/50_build-issue.md b/.github/ISSUE_TEMPLATE/50_build-issue.md index a358575cd7c..9b05fbbdd13 100644 --- a/.github/ISSUE_TEMPLATE/50_build-issue.md +++ b/.github/ISSUE_TEMPLATE/50_build-issue.md @@ -7,7 +7,7 @@ assignees: '' --- -> Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.yandex/docs/en/development/build/ +> Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. 
Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/ **Operating system** diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index a172947b2fc..5b47f94a324 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -7,6 +7,7 @@ env: "on": schedule: - cron: '13 3 * * *' + workflow_dispatch: jobs: DockerHubPushAarch64: diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 4a3880543c4..d50a2151f2f 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -1733,6 +1733,51 @@ jobs: docker kill "$(docker ps -q)" ||: docker rm -f "$(docker ps -a -q)" ||: sudo rm -fr "$TEMP_PATH" + TestsBugfixCheck: + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/tests_bugfix_check + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Tests bugfix validate check (actions) + KILL_TIMEOUT=3600 + REPO_COPY=${{runner.temp}}/tests_bugfix_check/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Bugfix test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + + TEMP_PATH="${TEMP_PATH}/integration" \ + REPORTS_PATH="${REPORTS_PATH}/integration" \ + python3 integration_test_check.py "Integration tests bugfix validate check" \ + --validate-bugfix --post-commit-status=file || echo 'ignore exit code' + + TEMP_PATH="${TEMP_PATH}/stateless" \ + REPORTS_PATH="${REPORTS_PATH}/stateless" \ + python3 functional_test_check.py "Stateless tests bugfix validate check" "$KILL_TIMEOUT" \ + --validate-bugfix --post-commit-status=file || echo 'ignore exit code' + + python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/post_commit_status.tsv" "${TEMP_PATH}/integration/post_commit_status.tsv" + - name: Cleanup + if: always() + run: | + docker kill "$(docker ps -q)" ||: + docker rm -f "$(docker ps -a -q)" ||: + sudo rm -fr "$TEMP_PATH" ############################################################################################## ############################ FUNCTIONAl STATEFUL TESTS ####################################### ############################################################################################## diff --git a/CHANGELOG.md b/CHANGELOG.md index 61724ab2d0c..100b03ab92b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ * Make `arrayCompact` function behave as other higher-order functions: perform compaction not of lambda function results but on the original array. If you're using nontrivial lambda functions in arrayCompact you may restore old behaviour by wrapping `arrayCompact` arguments into `arrayMap`. Closes [#34010](https://github.com/ClickHouse/ClickHouse/issues/34010) [#18535](https://github.com/ClickHouse/ClickHouse/issues/18535) [#14778](https://github.com/ClickHouse/ClickHouse/issues/14778). [#34795](https://github.com/ClickHouse/ClickHouse/pull/34795) ([Alexandre Snarskii](https://github.com/snar)). * Change implementation specific behavior on overflow of function `toDatetime`. It will be saturated to the nearest min/max supported instant of datetime instead of wraparound. 
This change is highlighted as "backward incompatible" because someone may unintentionally rely on the old behavior. [#32898](https://github.com/ClickHouse/ClickHouse/pull/32898) ([HaiBo Li](https://github.com/marising)). +* Make function `cast(value, 'IPv4')`, `cast(value, 'IPv6')` behave same as `toIPv4`, `toIPv6` functions. Changed behavior of incorrect IP address passed into functions `toIPv4`,` toIPv6`, now if invalid IP address passes into this functions exception will be raised, before this function return default value. Added functions `IPv4StringToNumOrDefault`, `IPv4StringToNumOrNull`, `IPv6StringToNumOrDefault`, `IPv6StringOrNull` `toIPv4OrDefault`, `toIPv4OrNull`, `toIPv6OrDefault`, `toIPv6OrNull`. Functions `IPv4StringToNumOrDefault `, `toIPv4OrDefault `, `toIPv6OrDefault ` should be used if previous logic relied on `IPv4StringToNum`, `toIPv4`, `toIPv6` returning default value for invalid address. Added setting `cast_ipv4_ipv6_default_on_conversion_error`, if this setting enabled, then IP address conversion functions will behave as before. Closes [#22825](https://github.com/ClickHouse/ClickHouse/issues/22825). Closes [#5799](https://github.com/ClickHouse/ClickHouse/issues/5799). Closes [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#35240](https://github.com/ClickHouse/ClickHouse/pull/35240) ([Maksim Kita](https://github.com/kitaisreal)). #### New Feature @@ -366,7 +367,7 @@ #### Improvement -* Now date time conversion functions that generates time before `1970-01-01 00:00:00` will be saturated to zero instead of overflow. [#29953](https://github.com/ClickHouse/ClickHouse/pull/29953) ([Amos Bird](https://github.com/amosbird)). It also fixes a bug in index analysis if date truncation function would yield result before the Unix epoch. +* Now date time conversion functions that generates time before `1970-01-01 00:00:00` will be saturated to zero instead of overflow. [#29953](https://github.com/ClickHouse/ClickHouse/pull/29953) ([Amos Bird](https://github.com/amosbird)). It also fixes a bug in index analysis if date truncation function would yield result before the Unix epoch. * Always display resource usage (total CPU usage, total RAM usage and max RAM usage per host) in client. [#33271](https://github.com/ClickHouse/ClickHouse/pull/33271) ([alexey-milovidov](https://github.com/alexey-milovidov)). * Improve `Bool` type serialization and deserialization, check the range of values. [#32984](https://github.com/ClickHouse/ClickHouse/pull/32984) ([Kruglov Pavel](https://github.com/Avogar)). * If an invalid setting is defined using the `SET` query or using the query parameters in the HTTP request, error message will contain suggestions that are similar to the invalid setting string (if any exists). [#32946](https://github.com/ClickHouse/ClickHouse/pull/32946) ([Antonio Andelic](https://github.com/antonio2368)). diff --git a/CMakeLists.txt b/CMakeLists.txt index 9649fc32d74..5157f0f9903 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -261,13 +261,16 @@ endif () # Add a section with the hash of the compiled machine code for integrity checks. # Only for official builds, because adding a section can be time consuming (rewrite of several GB). 
# And cross compiled binaries are not supported (since you cannot execute clickhouse hash-binary) -if (OBJCOPY_PATH AND YANDEX_OFFICIAL_BUILD AND (NOT CMAKE_TOOLCHAIN_FILE)) +if (OBJCOPY_PATH AND CLICKHOUSE_OFFICIAL_BUILD AND (NOT CMAKE_TOOLCHAIN_FILE)) set (USE_BINARY_HASH 1) endif () # Allows to build stripped binary in a separate directory -if (OBJCOPY_PATH AND READELF_PATH) - set(BUILD_STRIPPED_BINARIES_PREFIX "" CACHE STRING "Build stripped binaries with debug info in separate directory") +if (OBJCOPY_PATH AND STRIP_PATH) + option(INSTALL_STRIPPED_BINARIES "Build stripped binaries with debug info in separate directory" OFF) + if (INSTALL_STRIPPED_BINARIES) + set(STRIPPED_BINARIES_OUTPUT "stripped" CACHE STRING "A separate directory for stripped information") + endif() endif() cmake_host_system_information(RESULT AVAILABLE_PHYSICAL_MEMORY QUERY AVAILABLE_PHYSICAL_MEMORY) # Not available under freebsd diff --git a/base/glibc-compatibility/CMakeLists.txt b/base/glibc-compatibility/CMakeLists.txt index ddec09121e1..ef7ec6d7fc0 100644 --- a/base/glibc-compatibility/CMakeLists.txt +++ b/base/glibc-compatibility/CMakeLists.txt @@ -51,6 +51,6 @@ if (GLIBC_COMPATIBILITY) message (STATUS "Some symbols from glibc will be replaced for compatibility") -elseif (YANDEX_OFFICIAL_BUILD) +elseif (CLICKHOUSE_OFFICIAL_BUILD) message (WARNING "Option GLIBC_COMPATIBILITY must be turned on for production builds.") endif () diff --git a/benchmark/greenplum/result_parser.py b/benchmark/greenplum/result_parser.py index 8af20d265a0..4ed1aa5c4a5 100755 --- a/benchmark/greenplum/result_parser.py +++ b/benchmark/greenplum/result_parser.py @@ -4,11 +4,12 @@ import sys import json + def parse_block(block=[], options=[]): - #print('block is here', block) - #show_query = False - #show_query = options.show_query + # print('block is here', block) + # show_query = False + # show_query = options.show_query result = [] query = block[0].strip() if len(block) > 4: @@ -20,9 +21,9 @@ def parse_block(block=[], options=[]): timing2 = block[2].strip().split()[1] timing3 = block[3].strip().split()[1] if options.show_queries: - result.append( query ) + result.append(query) if not options.show_first_timings: - result += [ timing1 , timing2, timing3 ] + result += [timing1, timing2, timing3] else: result.append(timing1) return result @@ -37,12 +38,12 @@ def read_stats_file(options, fname): for line in f.readlines(): - if 'SELECT' in line: + if "SELECT" in line: if len(block) > 1: - result.append( parse_block(block, options) ) - block = [ line ] - elif 'Time:' in line: - block.append( line ) + result.append(parse_block(block, options)) + block = [line] + elif "Time:" in line: + block.append(line) return result @@ -50,7 +51,7 @@ def read_stats_file(options, fname): def compare_stats_files(options, arguments): result = [] file_output = [] - pyplot_colors = ['y', 'b', 'g', 'r'] + pyplot_colors = ["y", "b", "g", "r"] for fname in arguments[1:]: file_output.append((read_stats_file(options, fname))) if len(file_output[0]) > 0: @@ -58,65 +59,92 @@ def compare_stats_files(options, arguments): for idx, data_set in enumerate(file_output): int_result = [] for timing in data_set: - int_result.append(float(timing[0])) #y values - result.append([[x for x in range(0, len(int_result)) ], int_result, -pyplot_colors[idx] + '^' ] ) -# result.append([x for x in range(1, len(int_result)) ]) #x values -# result.append( pyplot_colors[idx] + '^' ) + int_result.append(float(timing[0])) # y values + result.append( + [ + [x for x in range(0, len(int_result))], + 
int_result, + pyplot_colors[idx] + "^", + ] + ) + # result.append([x for x in range(1, len(int_result)) ]) #x values + # result.append( pyplot_colors[idx] + '^' ) return result + def parse_args(): from optparse import OptionParser - parser = OptionParser(usage='usage: %prog [options] [result_file_path]..') - parser.add_option("-q", "--show-queries", help="Show statements along with timings", action="store_true", dest="show_queries") - parser.add_option("-f", "--show-first-timings", help="Show only first tries timings", action="store_true", dest="show_first_timings") - parser.add_option("-c", "--compare-mode", help="Prepare output for pyplot comparing result files.", action="store", dest="compare_mode") + + parser = OptionParser(usage="usage: %prog [options] [result_file_path]..") + parser.add_option( + "-q", + "--show-queries", + help="Show statements along with timings", + action="store_true", + dest="show_queries", + ) + parser.add_option( + "-f", + "--show-first-timings", + help="Show only first tries timings", + action="store_true", + dest="show_first_timings", + ) + parser.add_option( + "-c", + "--compare-mode", + help="Prepare output for pyplot comparing result files.", + action="store", + dest="compare_mode", + ) (options, arguments) = parser.parse_args(sys.argv) if len(arguments) < 2: parser.print_usage() sys.exit(1) - return ( options, arguments ) + return (options, arguments) + def gen_pyplot_code(options, arguments): - result = '' + result = "" data_sets = compare_stats_files(options, arguments) for idx, data_set in enumerate(data_sets, start=0): x_values, y_values, line_style = data_set - result += '\nplt.plot(' - result += '%s, %s, \'%s\'' % ( x_values, y_values, line_style ) - result += ', label=\'%s try\')' % idx - print('import matplotlib.pyplot as plt') + result += "\nplt.plot(" + result += "%s, %s, '%s'" % (x_values, y_values, line_style) + result += ", label='%s try')" % idx + print("import matplotlib.pyplot as plt") print(result) - print( 'plt.xlabel(\'Try number\')' ) - print( 'plt.ylabel(\'Timing\')' ) - print( 'plt.title(\'Benchmark query timings\')' ) - print('plt.legend()') - print('plt.show()') + print("plt.xlabel('Try number')") + print("plt.ylabel('Timing')") + print("plt.title('Benchmark query timings')") + print("plt.legend()") + print("plt.show()") def gen_html_json(options, arguments): tuples = read_stats_file(options, arguments[1]) - print('{') + print("{") print('"system: GreenPlum(x2),') - print(('"version": "%s",' % '4.3.9.1')) + print(('"version": "%s",' % "4.3.9.1")) print('"data_size": 10000000,') print('"time": "",') print('"comments": "",') print('"result":') - print('[') + print("[") for s in tuples: print(s) - print(']') - print('}') + print("]") + print("}") def main(): - ( options, arguments ) = parse_args() + (options, arguments) = parse_args() if len(arguments) > 2: gen_pyplot_code(options, arguments) else: gen_html_json(options, arguments) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/cmake/strip.sh b/cmake/strip.sh deleted file mode 100755 index de596887159..00000000000 --- a/cmake/strip.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash - -BINARY_PATH=$1 -BINARY_NAME=$(basename $BINARY_PATH) -DESTINATION_STRIPPED_DIR=$2 -OBJCOPY_PATH=${3:objcopy} -READELF_PATH=${4:readelf} - -BUILD_ID=$($READELF_PATH -n $1 | sed -n '/Build ID/ { s/.*: //p; q; }') -BUILD_ID_PREFIX=${BUILD_ID:0:2} -BUILD_ID_SUFFIX=${BUILD_ID:2} -TEMP_BINARY_PATH="${BINARY_PATH}_temp" - 
-DESTINATION_DEBUG_INFO_DIR="$DESTINATION_STRIPPED_DIR/lib/debug/.build-id" -DESTINATION_STRIP_BINARY_DIR="$DESTINATION_STRIPPED_DIR/bin" - -mkdir -p "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX" -mkdir -p "$DESTINATION_STRIP_BINARY_DIR" - -$OBJCOPY_PATH --only-keep-debug "$BINARY_PATH" "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX/$BUILD_ID_SUFFIX.debug" - -touch "$TEMP_BINARY_PATH" -$OBJCOPY_PATH --add-gnu-debuglink "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX/$BUILD_ID_SUFFIX.debug" "$BINARY_PATH" "$TEMP_BINARY_PATH" -$OBJCOPY_PATH --strip-all "$TEMP_BINARY_PATH" "$DESTINATION_STRIP_BINARY_DIR/$BINARY_NAME" -rm -f "$TEMP_BINARY_PATH" diff --git a/cmake/strip_binary.cmake b/cmake/strip_binary.cmake index e430807772d..1f24790a159 100644 --- a/cmake/strip_binary.cmake +++ b/cmake/strip_binary.cmake @@ -11,16 +11,43 @@ macro(clickhouse_strip_binary) message(FATAL_ERROR "A binary path name must be provided for stripping binary") endif() - if (NOT DEFINED STRIP_DESTINATION_DIR) message(FATAL_ERROR "Destination directory for stripped binary must be provided") endif() add_custom_command(TARGET ${STRIP_TARGET} POST_BUILD - COMMAND bash ${ClickHouse_SOURCE_DIR}/cmake/strip.sh ${STRIP_BINARY_PATH} ${STRIP_DESTINATION_DIR} ${OBJCOPY_PATH} ${READELF_PATH} - COMMENT "Stripping clickhouse binary" VERBATIM + COMMAND mkdir -p "${STRIP_DESTINATION_DIR}/lib/debug/bin" + COMMAND mkdir -p "${STRIP_DESTINATION_DIR}/bin" + COMMAND cp "${STRIP_BINARY_PATH}" "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}" + COMMAND "${OBJCOPY_PATH}" --only-keep-debug --compress-debug-sections "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}" "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug" + COMMAND chmod 0644 "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug" + COMMAND "${STRIP_PATH}" --remove-section=.comment --remove-section=.note "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}" + COMMAND "${OBJCOPY_PATH}" --add-gnu-debuglink "${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug" "${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET}" + COMMENT "Stripping clickhouse binary" VERBATIM ) install(PROGRAMS ${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET} DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - install(DIRECTORY ${STRIP_DESTINATION_DIR}/lib/debug DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse) + install(FILES ${STRIP_DESTINATION_DIR}/lib/debug/bin/${STRIP_TARGET}.debug DESTINATION ${CMAKE_INSTALL_LIBDIR}/debug/${CMAKE_INSTALL_FULL_BINDIR}/${STRIP_TARGET}.debug COMPONENT clickhouse) +endmacro() + + +macro(clickhouse_make_empty_debug_info_for_nfpm) + set(oneValueArgs TARGET DESTINATION_DIR) + cmake_parse_arguments(EMPTY_DEBUG "" "${oneValueArgs}" "" ${ARGN}) + + if (NOT DEFINED EMPTY_DEBUG_TARGET) + message(FATAL_ERROR "A target name must be provided for stripping binary") + endif() + + if (NOT DEFINED EMPTY_DEBUG_DESTINATION_DIR) + message(FATAL_ERROR "Destination directory for empty debug must be provided") + endif() + + add_custom_command(TARGET ${EMPTY_DEBUG_TARGET} POST_BUILD + COMMAND mkdir -p "${EMPTY_DEBUG_DESTINATION_DIR}/lib/debug" + COMMAND touch "${EMPTY_DEBUG_DESTINATION_DIR}/lib/debug/${EMPTY_DEBUG_TARGET}.debug" + COMMENT "Addiding empty debug info for NFPM" VERBATIM + ) + + install(FILES "${EMPTY_DEBUG_DESTINATION_DIR}/lib/debug/${EMPTY_DEBUG_TARGET}.debug" DESTINATION "${CMAKE_INSTALL_LIBDIR}/debug/${CMAKE_INSTALL_FULL_BINDIR}" COMPONENT clickhouse) endmacro() diff --git a/cmake/tools.cmake b/cmake/tools.cmake index d6fddd0509e..d571a46ad26 100644 --- 
a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -170,32 +170,32 @@ else () message (FATAL_ERROR "Cannot find objcopy.") endif () -# Readelf (FIXME copypaste) +# Strip (FIXME copypaste) if (COMPILER_GCC) - find_program (READELF_PATH NAMES "llvm-readelf" "llvm-readelf-13" "llvm-readelf-12" "llvm-readelf-11" "readelf") + find_program (STRIP_PATH NAMES "llvm-strip" "llvm-strip-13" "llvm-strip-12" "llvm-strip-11" "strip") else () - find_program (READELF_PATH NAMES "llvm-readelf-${COMPILER_VERSION_MAJOR}" "llvm-readelf" "readelf") + find_program (STRIP_PATH NAMES "llvm-strip-${COMPILER_VERSION_MAJOR}" "llvm-strip" "strip") endif () -if (NOT READELF_PATH AND OS_DARWIN) +if (NOT STRIP_PATH AND OS_DARWIN) find_program (BREW_PATH NAMES "brew") if (BREW_PATH) execute_process (COMMAND ${BREW_PATH} --prefix llvm ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE LLVM_PREFIX) if (LLVM_PREFIX) - find_program (READELF_PATH NAMES "llvm-readelf" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH) + find_program (STRIP_PATH NAMES "llvm-strip" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH) endif () - if (NOT READELF_PATH) + if (NOT STRIP_PATH) execute_process (COMMAND ${BREW_PATH} --prefix binutils ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE BINUTILS_PREFIX) if (BINUTILS_PREFIX) - find_program (READELF_PATH NAMES "readelf" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH) + find_program (STRIP_PATH NAMES "strip" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH) endif () endif () endif () endif () -if (READELF_PATH) - message (STATUS "Using readelf: ${READELF_PATH}") +if (STRIP_PATH) + message (STATUS "Using strip: ${STRIP_PATH}") else () - message (FATAL_ERROR "Cannot find readelf.") + message (FATAL_ERROR "Cannot find strip.") endif () diff --git a/cmake/version.cmake b/cmake/version.cmake index 963f291c0f3..acaa772ff2f 100644 --- a/cmake/version.cmake +++ b/cmake/version.cmake @@ -18,6 +18,6 @@ set (VERSION_STRING_SHORT "${VERSION_MAJOR}.${VERSION_MINOR}") math (EXPR VERSION_INTEGER "${VERSION_PATCH} + ${VERSION_MINOR}*1000 + ${VERSION_MAJOR}*1000000") -if(YANDEX_OFFICIAL_BUILD) +if(CLICKHOUSE_OFFICIAL_BUILD) set(VERSION_OFFICIAL " (official build)") endif() diff --git a/contrib/hyperscan b/contrib/hyperscan index e9f08df0213..5edc68c5ac6 160000 --- a/contrib/hyperscan +++ b/contrib/hyperscan @@ -1 +1 @@ -Subproject commit e9f08df0213fc637aac0a5bbde9beeaeba2fe9fa +Subproject commit 5edc68c5ac68d2d4f876159e9ee84def6d3dc87c diff --git a/contrib/libcxx b/contrib/libcxx index 61e60294b1d..172b2ae074f 160000 --- a/contrib/libcxx +++ b/contrib/libcxx @@ -1 +1 @@ -Subproject commit 61e60294b1de01483caa9f5d00f437c99b674de6 +Subproject commit 172b2ae074f6755145b91c53a95c8540c1468239 diff --git a/contrib/libcxx-cmake/CMakeLists.txt b/contrib/libcxx-cmake/CMakeLists.txt index 332fb0411cd..dc9df48b2c1 100644 --- a/contrib/libcxx-cmake/CMakeLists.txt +++ b/contrib/libcxx-cmake/CMakeLists.txt @@ -18,12 +18,14 @@ set(SRCS "${LIBCXX_SOURCE_DIR}/src/filesystem/directory_iterator.cpp" "${LIBCXX_SOURCE_DIR}/src/filesystem/int128_builtins.cpp" "${LIBCXX_SOURCE_DIR}/src/filesystem/operations.cpp" +"${LIBCXX_SOURCE_DIR}/src/format.cpp" "${LIBCXX_SOURCE_DIR}/src/functional.cpp" "${LIBCXX_SOURCE_DIR}/src/future.cpp" "${LIBCXX_SOURCE_DIR}/src/hash.cpp" "${LIBCXX_SOURCE_DIR}/src/ios.cpp" "${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp" "${LIBCXX_SOURCE_DIR}/src/iostream.cpp" +"${LIBCXX_SOURCE_DIR}/src/legacy_pointer_safety.cpp" "${LIBCXX_SOURCE_DIR}/src/locale.cpp" "${LIBCXX_SOURCE_DIR}/src/memory.cpp" 
"${LIBCXX_SOURCE_DIR}/src/mutex.cpp" @@ -33,6 +35,9 @@ set(SRCS "${LIBCXX_SOURCE_DIR}/src/random.cpp" "${LIBCXX_SOURCE_DIR}/src/random_shuffle.cpp" "${LIBCXX_SOURCE_DIR}/src/regex.cpp" +"${LIBCXX_SOURCE_DIR}/src/ryu/d2fixed.cpp" +"${LIBCXX_SOURCE_DIR}/src/ryu/d2s.cpp" +"${LIBCXX_SOURCE_DIR}/src/ryu/f2s.cpp" "${LIBCXX_SOURCE_DIR}/src/shared_mutex.cpp" "${LIBCXX_SOURCE_DIR}/src/stdexcept.cpp" "${LIBCXX_SOURCE_DIR}/src/string.cpp" @@ -49,7 +54,9 @@ set(SRCS add_library(cxx ${SRCS}) set_target_properties(cxx PROPERTIES FOLDER "contrib/libcxx-cmake") -target_include_directories(cxx SYSTEM BEFORE PUBLIC $) +target_include_directories(cxx SYSTEM BEFORE PUBLIC + $ + $/src) target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI) # Enable capturing stack traces for all exceptions. diff --git a/contrib/libcxxabi b/contrib/libcxxabi index df8f1e727db..6eb7cc7a7bd 160000 --- a/contrib/libcxxabi +++ b/contrib/libcxxabi @@ -1 +1 @@ -Subproject commit df8f1e727dbc9e2bedf2282096fa189dc3fe0076 +Subproject commit 6eb7cc7a7bdd779e6734d1b9fb451df2274462d7 diff --git a/contrib/libcxxabi-cmake/CMakeLists.txt b/contrib/libcxxabi-cmake/CMakeLists.txt index 425111d9b26..bf1ede8a60e 100644 --- a/contrib/libcxxabi-cmake/CMakeLists.txt +++ b/contrib/libcxxabi-cmake/CMakeLists.txt @@ -1,24 +1,24 @@ set(LIBCXXABI_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libcxxabi") set(SRCS -"${LIBCXXABI_SOURCE_DIR}/src/stdlib_stdexcept.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/cxa_virtual.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/fallback_malloc.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/cxa_default_handlers.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/stdlib_exception.cpp" "${LIBCXXABI_SOURCE_DIR}/src/abort_message.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_aux_runtime.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_default_handlers.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_demangle.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_exception.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/private_typeinfo.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/stdlib_typeinfo.cpp" -"${LIBCXXABI_SOURCE_DIR}/src/cxa_aux_runtime.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp" "${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/cxa_virtual.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/fallback_malloc.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/private_typeinfo.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/stdlib_exception.cpp" "${LIBCXXABI_SOURCE_DIR}/src/stdlib_new_delete.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/stdlib_stdexcept.cpp" +"${LIBCXXABI_SOURCE_DIR}/src/stdlib_typeinfo.cpp" ) add_library(cxxabi ${SRCS}) @@ -30,6 +30,7 @@ target_compile_options(cxxabi PRIVATE -w) target_include_directories(cxxabi SYSTEM BEFORE PUBLIC $ PRIVATE $ + PRIVATE $ ) target_compile_definitions(cxxabi PRIVATE -D_LIBCPP_BUILDING_LIBRARY) target_compile_options(cxxabi PRIVATE -nostdinc++ -fno-sanitize=undefined -Wno-macro-redefined) # If we don't disable UBSan, infinite recursion happens in dynamic_cast. 
diff --git a/contrib/libxml2 b/contrib/libxml2 index 18890f471c4..a075d256fd9 160000 --- a/contrib/libxml2 +++ b/contrib/libxml2 @@ -1 +1 @@ -Subproject commit 18890f471c420411aa3c989e104d090966ec9dbf +Subproject commit a075d256fd9ff15590b86d981b75a50ead124fca diff --git a/contrib/replxx b/contrib/replxx index 9460e5e0fc1..6f0b6f151ae 160000 --- a/contrib/replxx +++ b/contrib/replxx @@ -1 +1 @@ -Subproject commit 9460e5e0fc10f78f460af26a6bd928798cac864d +Subproject commit 6f0b6f151ae2a044625ae93acd19ca365fcea64d diff --git a/docker/docs/check/Dockerfile b/docker/docs/check/Dockerfile index 174be123eed..4eb03a91e7a 100644 --- a/docker/docs/check/Dockerfile +++ b/docker/docs/check/Dockerfile @@ -1,4 +1,3 @@ -# rebuild in #33610 # docker build -t clickhouse/docs-check . ARG FROM_TAG=latest FROM clickhouse/docs-builder:$FROM_TAG diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index e3e2e689b17..a57a734e3df 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -95,6 +95,14 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \ && apt-get install gcc-11 g++-11 --yes \ && apt-get clean +# Architecture of the image when BuildKit/buildx is used +ARG TARGETARCH +ARG NFPM_VERSION=2.15.0 + +RUN arch=${TARGETARCH:-amd64} \ + && curl -Lo /tmp/nfpm.deb "https://github.com/goreleaser/nfpm/releases/download/v${NFPM_VERSION}/nfpm_${arch}.deb" \ + && dpkg -i /tmp/nfpm.deb \ + && rm /tmp/nfpm.deb COPY build.sh / -CMD ["bash", "-c", "/build.sh 2>&1 | ts"] +CMD ["bash", "-c", "/build.sh 2>&1"] diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 2f18b07ffe1..31416e1a0ee 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -1,7 +1,13 @@ #!/usr/bin/env bash +exec &> >(ts) set -x -e +cache_status () { + ccache --show-config ||: + ccache --show-stats ||: +} + mkdir -p build/cmake/toolchain/darwin-x86_64 tar xJf MacOSX11.0.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1 ln -sf darwin-x86_64 build/cmake/toolchain/darwin-aarch64 @@ -19,15 +25,23 @@ read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}" env cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA "-DCMAKE_BUILD_TYPE=$BUILD_TYPE" "-DSANITIZE=$SANITIZER" -DENABLE_CHECK_HEAVY_BUILDS=1 "${CMAKE_FLAGS[@]}" .. -ccache --show-config ||: -ccache --show-stats ||: +cache_status +# clear cache stats ccache --zero-stats ||: -# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty. +# No quotes because I want it to expand to nothing if empty. +# shellcheck disable=SC2086 ninja $NINJA_FLAGS clickhouse-bundle -ccache --show-config ||: -ccache --show-stats ||: +cache_status + +if [ -n "$MAKE_DEB" ]; then + rm -rf /build/packages/root + # No quotes because I want it to expand to nothing if empty. 
+ # shellcheck disable=SC2086 + DESTDIR=/build/packages/root ninja $NINJA_FLAGS install + bash -x /build/packages/build +fi mv ./programs/clickhouse* /output mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds @@ -84,8 +98,7 @@ fi # ../docker/packager/other/fuzzer.sh # fi -ccache --show-config ||: -ccache --show-stats ||: +cache_status if [ "${CCACHE_DEBUG:-}" == "1" ] then diff --git a/docker/packager/packager b/docker/packager/packager index 05b2e02df96..f82d402d613 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -#-*- coding: utf-8 -*- +# -*- coding: utf-8 -*- import subprocess import os import argparse @@ -8,36 +8,39 @@ import sys SCRIPT_PATH = os.path.realpath(__file__) -IMAGE_MAP = { - "deb": "clickhouse/deb-builder", - "binary": "clickhouse/binary-builder", -} def check_image_exists_locally(image_name): try: - output = subprocess.check_output("docker images -q {} 2> /dev/null".format(image_name), shell=True) + output = subprocess.check_output( + f"docker images -q {image_name} 2> /dev/null", shell=True + ) return output != "" - except subprocess.CalledProcessError as ex: + except subprocess.CalledProcessError: return False + def pull_image(image_name): try: - subprocess.check_call("docker pull {}".format(image_name), shell=True) + subprocess.check_call(f"docker pull {image_name}", shell=True) return True - except subprocess.CalledProcessError as ex: - logging.info("Cannot pull image {}".format(image_name)) + except subprocess.CalledProcessError: + logging.info(f"Cannot pull image {image_name}".format()) return False + def build_image(image_name, filepath): context = os.path.dirname(filepath) - build_cmd = "docker build --network=host -t {} -f {} {}".format(image_name, filepath, context) - logging.info("Will build image with cmd: '{}'".format(build_cmd)) + build_cmd = f"docker build --network=host -t {image_name} -f {filepath} {context}" + logging.info("Will build image with cmd: '%s'", build_cmd) subprocess.check_call( build_cmd, shell=True, ) -def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache_dir, docker_image_version): + +def run_docker_image_with_env( + image_name, output, env_variables, ch_root, ccache_dir, docker_image_version +): env_part = " -e ".join(env_variables) if env_part: env_part = " -e " + env_part @@ -47,28 +50,52 @@ def run_docker_image_with_env(image_name, output, env_variables, ch_root, ccache else: interactive = "" - cmd = "docker run --network=host --rm --volume={output_path}:/output --volume={ch_root}:/build --volume={ccache_dir}:/ccache {env} {interactive} {img_name}".format( - output_path=output, - ch_root=ch_root, - ccache_dir=ccache_dir, - env=env_part, - img_name=image_name + ":" + docker_image_version, - interactive=interactive + cmd = ( + f"docker run --network=host --rm --volume={output}:/output " + f"--volume={ch_root}:/build --volume={ccache_dir}:/ccache {env_part} " + f"{interactive} {image_name}:{docker_image_version}" ) - logging.info("Will build ClickHouse pkg with cmd: '{}'".format(cmd)) + logging.info("Will build ClickHouse pkg with cmd: '%s'", cmd) subprocess.check_call(cmd, shell=True) -def parse_env_variables(build_type, compiler, sanitizer, package_type, image_type, cache, distcc_hosts, split_binary, clang_tidy, version, author, official, alien_pkgs, with_coverage, with_binaries): + +def is_release_build(build_type, package_type, sanitizer, split_binary): + return ( + build_type == "" + and package_type == "deb" + and sanitizer 
== "" + and not split_binary + ) + + +def parse_env_variables( + build_type, + compiler, + sanitizer, + package_type, + image_type, + cache, + distcc_hosts, + split_binary, + clang_tidy, + version, + author, + official, + additional_pkgs, + with_coverage, + with_binaries, +): DARWIN_SUFFIX = "-darwin" DARWIN_ARM_SUFFIX = "-darwin-aarch64" ARM_SUFFIX = "-aarch64" FREEBSD_SUFFIX = "-freebsd" - PPC_SUFFIX = '-ppc64le' + PPC_SUFFIX = "-ppc64le" result = [] - cmake_flags = ['$CMAKE_FLAGS'] + result.append("OUTPUT_DIR=/output") + cmake_flags = ["$CMAKE_FLAGS"] is_cross_darwin = compiler.endswith(DARWIN_SUFFIX) is_cross_darwin_arm = compiler.endswith(DARWIN_ARM_SUFFIX) @@ -77,46 +104,73 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX) if is_cross_darwin: - cc = compiler[:-len(DARWIN_SUFFIX)] + cc = compiler[: -len(DARWIN_SUFFIX)] cmake_flags.append("-DCMAKE_AR:FILEPATH=/cctools/bin/x86_64-apple-darwin-ar") - cmake_flags.append("-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/x86_64-apple-darwin-install_name_tool") - cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/x86_64-apple-darwin-ranlib") + cmake_flags.append( + "-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/" + "x86_64-apple-darwin-install_name_tool" + ) + cmake_flags.append( + "-DCMAKE_RANLIB:FILEPATH=/cctools/bin/x86_64-apple-darwin-ranlib" + ) cmake_flags.append("-DLINKER_NAME=/cctools/bin/x86_64-apple-darwin-ld") - cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-x86_64.cmake") + cmake_flags.append( + "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-x86_64.cmake" + ) elif is_cross_darwin_arm: - cc = compiler[:-len(DARWIN_ARM_SUFFIX)] + cc = compiler[: -len(DARWIN_ARM_SUFFIX)] cmake_flags.append("-DCMAKE_AR:FILEPATH=/cctools/bin/aarch64-apple-darwin-ar") - cmake_flags.append("-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/aarch64-apple-darwin-install_name_tool") - cmake_flags.append("-DCMAKE_RANLIB:FILEPATH=/cctools/bin/aarch64-apple-darwin-ranlib") + cmake_flags.append( + "-DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/" + "aarch64-apple-darwin-install_name_tool" + ) + cmake_flags.append( + "-DCMAKE_RANLIB:FILEPATH=/cctools/bin/aarch64-apple-darwin-ranlib" + ) cmake_flags.append("-DLINKER_NAME=/cctools/bin/aarch64-apple-darwin-ld") - cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-aarch64.cmake") + cmake_flags.append( + "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/darwin/toolchain-aarch64.cmake" + ) elif is_cross_arm: - cc = compiler[:-len(ARM_SUFFIX)] - cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake") - result.append("DEB_ARCH_FLAG=-aarm64") + cc = compiler[: -len(ARM_SUFFIX)] + cmake_flags.append( + "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-aarch64.cmake" + ) + result.append("DEB_ARCH=arm64") elif is_cross_freebsd: - cc = compiler[:-len(FREEBSD_SUFFIX)] - cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake") + cc = compiler[: -len(FREEBSD_SUFFIX)] + cmake_flags.append( + "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/freebsd/toolchain-x86_64.cmake" + ) elif is_cross_ppc: - cc = compiler[:-len(PPC_SUFFIX)] - cmake_flags.append("-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake") + cc = compiler[: -len(PPC_SUFFIX)] + cmake_flags.append( + "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake" + ) else: cc = compiler - result.append("DEB_ARCH_FLAG=-aamd64") + result.append("DEB_ARCH=amd64") - cxx = cc.replace('gcc', 
'g++').replace('clang', 'clang++') + cxx = cc.replace("gcc", "g++").replace("clang", "clang++") if image_type == "deb": - result.append("DEB_CC={}".format(cc)) - result.append("DEB_CXX={}".format(cxx)) - # For building fuzzers - result.append("CC={}".format(cc)) - result.append("CXX={}".format(cxx)) - elif image_type == "binary": - result.append("CC={}".format(cc)) - result.append("CXX={}".format(cxx)) - cmake_flags.append('-DCMAKE_C_COMPILER=`which {}`'.format(cc)) - cmake_flags.append('-DCMAKE_CXX_COMPILER=`which {}`'.format(cxx)) + result.append("MAKE_DEB=true") + cmake_flags.append("-DENABLE_TESTS=0") + cmake_flags.append("-DENABLE_UTILS=0") + cmake_flags.append("-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON") + cmake_flags.append("-DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON") + cmake_flags.append("-DCMAKE_AUTOGEN_VERBOSE=ON") + cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr") + cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc") + cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var") + cmake_flags.append("-DBUILD_STANDALONE_KEEPER=ON") + if is_release_build(build_type, package_type, sanitizer, split_binary): + cmake_flags.append("-DINSTALL_STRIPPED_BINARIES=ON") + + result.append(f"CC={cc}") + result.append(f"CXX={cxx}") + cmake_flags.append(f"-DCMAKE_C_COMPILER={cc}") + cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}") # Create combined output archive for split build and for performance tests. if package_type == "performance": @@ -126,12 +180,14 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ result.append("COMBINED_OUTPUT=shared_build") if sanitizer: - result.append("SANITIZER={}".format(sanitizer)) + result.append(f"SANITIZER={sanitizer}") if build_type: - result.append("BUILD_TYPE={}".format(build_type)) + result.append(f"BUILD_TYPE={build_type.capitalize()}") + else: + result.append("BUILD_TYPE=None") - if cache == 'distcc': - result.append("CCACHE_PREFIX={}".format(cache)) + if cache == "distcc": + result.append(f"CCACHE_PREFIX={cache}") if cache: result.append("CCACHE_DIR=/ccache") @@ -142,109 +198,188 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ # result.append("CCACHE_UMASK=777") if distcc_hosts: - hosts_with_params = ["{}/24,lzo".format(host) for host in distcc_hosts] + ["localhost/`nproc`"] - result.append('DISTCC_HOSTS="{}"'.format(" ".join(hosts_with_params))) + hosts_with_params = [f"{host}/24,lzo" for host in distcc_hosts] + [ + "localhost/`nproc`" + ] + result.append('DISTCC_HOSTS="' + " ".join(hosts_with_params) + '"') elif cache == "distcc": - result.append('DISTCC_HOSTS="{}"'.format("localhost/`nproc`")) + result.append('DISTCC_HOSTS="localhost/`nproc`"') - if alien_pkgs: - result.append("ALIEN_PKGS='" + ' '.join(['--' + pkg for pkg in alien_pkgs]) + "'") + if additional_pkgs: + result.append("MAKE_APK=true") + result.append("MAKE_RPM=true") + result.append("MAKE_TGZ=true") if with_binaries == "programs": - result.append('BINARY_OUTPUT=programs') + result.append("BINARY_OUTPUT=programs") elif with_binaries == "tests": - result.append('ENABLE_TESTS=1') - result.append('BINARY_OUTPUT=tests') - cmake_flags.append('-DENABLE_TESTS=1') + result.append("ENABLE_TESTS=1") + result.append("BINARY_OUTPUT=tests") + cmake_flags.append("-DENABLE_TESTS=1") if split_binary: - cmake_flags.append('-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1') + cmake_flags.append( + "-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 " + "-DCLICKHOUSE_SPLIT_BINARY=1" + ) # We can't always 
build utils because it requires too much space, but # we have to build them at least in some way in CI. The split build is # probably the least heavy disk-wise. - cmake_flags.append('-DENABLE_UTILS=1') + cmake_flags.append("-DENABLE_UTILS=1") if clang_tidy: - cmake_flags.append('-DENABLE_CLANG_TIDY=1') - cmake_flags.append('-DENABLE_UTILS=1') - cmake_flags.append('-DENABLE_TESTS=1') - cmake_flags.append('-DENABLE_EXAMPLES=1') + cmake_flags.append("-DENABLE_CLANG_TIDY=1") + cmake_flags.append("-DENABLE_UTILS=1") + cmake_flags.append("-DENABLE_TESTS=1") + cmake_flags.append("-DENABLE_EXAMPLES=1") # Don't stop on first error to find more clang-tidy errors in one run. - result.append('NINJA_FLAGS=-k0') + result.append("NINJA_FLAGS=-k0") if with_coverage: - cmake_flags.append('-DWITH_COVERAGE=1') + cmake_flags.append("-DWITH_COVERAGE=1") if version: - result.append("VERSION_STRING='{}'".format(version)) + result.append(f"VERSION_STRING='{version}'") if author: - result.append("AUTHOR='{}'".format(author)) + result.append(f"AUTHOR='{author}'") if official: - cmake_flags.append('-DYANDEX_OFFICIAL_BUILD=1') + cmake_flags.append("-DCLICKHOUSE_OFFICIAL_BUILD=1") - result.append('CMAKE_FLAGS="' + ' '.join(cmake_flags) + '"') + result.append('CMAKE_FLAGS="' + " ".join(cmake_flags) + '"') return result + if __name__ == "__main__": - logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') - parser = argparse.ArgumentParser(description="ClickHouse building script using prebuilt Docker image") - # 'performance' creates a combined .tgz with server and configs to be used for performance test. - parser.add_argument("--package-type", choices=['deb', 'binary', 'performance'], required=True) - parser.add_argument("--clickhouse-repo-path", default=os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir)) + logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s") + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description="ClickHouse building script using prebuilt Docker image", + ) + # 'performance' creates a combined .tgz with server + # and configs to be used for performance test. 
+ parser.add_argument( + "--package-type", + choices=("deb", "binary", "performance"), + required=True, + help="a build type", + ) + parser.add_argument( + "--clickhouse-repo-path", + default=os.path.join( + os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir + ), + help="ClickHouse git repository", + ) parser.add_argument("--output-dir", required=True) parser.add_argument("--build-type", choices=("debug", ""), default="") - parser.add_argument("--compiler", choices=("clang-11", "clang-11-darwin", "clang-11-darwin-aarch64", "clang-11-aarch64", - "clang-12", "clang-12-darwin", "clang-12-darwin-aarch64", "clang-12-aarch64", - "clang-13", "clang-13-darwin", "clang-13-darwin-aarch64", "clang-13-aarch64", "clang-13-ppc64le", - "clang-11-freebsd", "clang-12-freebsd", "clang-13-freebsd", "gcc-11"), default="clang-13") - parser.add_argument("--sanitizer", choices=("address", "thread", "memory", "undefined", ""), default="") + parser.add_argument( + "--compiler", + choices=( + "clang-11", + "clang-11-darwin", + "clang-11-darwin-aarch64", + "clang-11-aarch64", + "clang-12", + "clang-12-darwin", + "clang-12-darwin-aarch64", + "clang-12-aarch64", + "clang-13", + "clang-13-darwin", + "clang-13-darwin-aarch64", + "clang-13-aarch64", + "clang-13-ppc64le", + "clang-11-freebsd", + "clang-12-freebsd", + "clang-13-freebsd", + "gcc-11", + ), + default="clang-13", + help="a compiler to use", + ) + parser.add_argument( + "--sanitizer", + choices=("address", "thread", "memory", "undefined", ""), + default="", + ) parser.add_argument("--split-binary", action="store_true") parser.add_argument("--clang-tidy", action="store_true") - parser.add_argument("--cache", choices=("", "ccache", "distcc"), default="") - parser.add_argument("--ccache_dir", default= os.getenv("HOME", "") + '/.ccache') + parser.add_argument("--cache", choices=("ccache", "distcc", ""), default="") + parser.add_argument( + "--ccache_dir", + default=os.getenv("HOME", "") + "/.ccache", + help="a directory with ccache", + ) parser.add_argument("--distcc-hosts", nargs="+") parser.add_argument("--force-build-image", action="store_true") parser.add_argument("--version") - parser.add_argument("--author", default="clickhouse") + parser.add_argument("--author", default="clickhouse", help="a package author") parser.add_argument("--official", action="store_true") - parser.add_argument("--alien-pkgs", nargs='+', default=[]) + parser.add_argument("--additional-pkgs", action="store_true") parser.add_argument("--with-coverage", action="store_true") - parser.add_argument("--with-binaries", choices=("programs", "tests", ""), default="") - parser.add_argument("--docker-image-version", default="latest") + parser.add_argument( + "--with-binaries", choices=("programs", "tests", ""), default="" + ) + parser.add_argument( + "--docker-image-version", default="latest", help="docker image tag to use" + ) args = parser.parse_args() if not os.path.isabs(args.output_dir): args.output_dir = os.path.abspath(os.path.join(os.getcwd(), args.output_dir)) - image_type = 'binary' if args.package_type == 'performance' else args.package_type - image_name = IMAGE_MAP[image_type] + image_type = "binary" if args.package_type == "performance" else args.package_type + image_name = "clickhouse/binary-builder" if not os.path.isabs(args.clickhouse_repo_path): ch_root = os.path.abspath(os.path.join(os.getcwd(), args.clickhouse_repo_path)) else: ch_root = args.clickhouse_repo_path - if args.alien_pkgs and not image_type == "deb": - raise Exception("Can add alien packages only in deb 
build") + if args.additional_pkgs and image_type != "deb": + raise Exception("Can build additional packages only in deb build") - if args.with_binaries != "" and not image_type == "deb": + if args.with_binaries != "" and image_type != "deb": raise Exception("Can add additional binaries only in deb build") if args.with_binaries != "" and image_type == "deb": - logging.info("Should place {} to output".format(args.with_binaries)) + logging.info("Should place %s to output", args.with_binaries) dockerfile = os.path.join(ch_root, "docker/packager", image_type, "Dockerfile") image_with_version = image_name + ":" + args.docker_image_version - if image_type != "freebsd" and not check_image_exists_locally(image_name) or args.force_build_image: + if ( + image_type != "freebsd" + and not check_image_exists_locally(image_name) + or args.force_build_image + ): if not pull_image(image_with_version) or args.force_build_image: build_image(image_with_version, dockerfile) env_prepared = parse_env_variables( - args.build_type, args.compiler, args.sanitizer, args.package_type, image_type, - args.cache, args.distcc_hosts, args.split_binary, args.clang_tidy, - args.version, args.author, args.official, args.alien_pkgs, args.with_coverage, args.with_binaries) + args.build_type, + args.compiler, + args.sanitizer, + args.package_type, + image_type, + args.cache, + args.distcc_hosts, + args.split_binary, + args.clang_tidy, + args.version, + args.author, + args.official, + args.additional_pkgs, + args.with_coverage, + args.with_binaries, + ) - run_docker_image_with_env(image_name, args.output_dir, env_prepared, ch_root, args.ccache_dir, args.docker_image_version) - logging.info("Output placed into {}".format(args.output_dir)) + run_docker_image_with_env( + image_name, + args.output_dir, + env_prepared, + ch_root, + args.ccache_dir, + args.docker_image_version, + ) + logging.info("Output placed into %s", args.output_dir) diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index bd1e0292636..079d2872204 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -267,6 +267,7 @@ function run_tests local test_opts=( --hung-check --fast-tests-only + --no-random-settings --no-long --testname --shard diff --git a/docker/test/fuzzer/generate-test-j2.py b/docker/test/fuzzer/generate-test-j2.py index bcc1bf6bc84..11525163ed8 100755 --- a/docker/test/fuzzer/generate-test-j2.py +++ b/docker/test/fuzzer/generate-test-j2.py @@ -11,7 +11,7 @@ def removesuffix(text, suffix): https://www.python.org/dev/peps/pep-0616/ """ if suffix and text.endswith(suffix): - return text[:-len(suffix)] + return text[: -len(suffix)] else: return text[:] diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index e18c07bf2c1..74711f476f8 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -13,7 +13,7 @@ script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" echo "$script_dir" repo_dir=ch BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-13_debug_none_bundled_unsplitted_disable_False_binary"} -BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"} +BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"} function clone { diff --git a/docker/test/integration/hive_server/http_api_server.py 
b/docker/test/integration/hive_server/http_api_server.py index 4818b785c89..8a9d3da4846 100644 --- a/docker/test/integration/hive_server/http_api_server.py +++ b/docker/test/integration/hive_server/http_api_server.py @@ -3,55 +3,55 @@ import subprocess import datetime from flask import Flask, flash, request, redirect, url_for + def run_command(command, wait=False): print("{} - execute shell command:{}".format(datetime.datetime.now(), command)) lines = [] - p = subprocess.Popen(command, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True) + p = subprocess.Popen( + command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True + ) if wait: - for l in iter(p.stdout.readline, b''): + for l in iter(p.stdout.readline, b""): lines.append(l) p.poll() return (lines, p.returncode) else: - return(iter(p.stdout.readline, b''), 0) + return (iter(p.stdout.readline, b""), 0) -UPLOAD_FOLDER = './' -ALLOWED_EXTENSIONS = {'txt', 'sh'} +UPLOAD_FOLDER = "./" +ALLOWED_EXTENSIONS = {"txt", "sh"} app = Flask(__name__) -app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER +app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER -@app.route('/') + +@app.route("/") def hello_world(): - return 'Hello World' + return "Hello World" def allowed_file(filename): - return '.' in filename and \ - filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS + return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS -@app.route('/upload', methods=['GET', 'POST']) +@app.route("/upload", methods=["GET", "POST"]) def upload_file(): - if request.method == 'POST': + if request.method == "POST": # check if the post request has the file part - if 'file' not in request.files: - flash('No file part') + if "file" not in request.files: + flash("No file part") return redirect(request.url) - file = request.files['file'] + file = request.files["file"] # If the user does not select a file, the browser submits an # empty file without a filename. - if file.filename == '': - flash('No selected file') + if file.filename == "": + flash("No selected file") return redirect(request.url) if file and allowed_file(file.filename): filename = file.filename - file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) - return redirect(url_for('upload_file', name=filename)) - return ''' + file.save(os.path.join(app.config["UPLOAD_FOLDER"], filename)) + return redirect(url_for("upload_file", name=filename)) + return """ Upload new File

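For context on how this helper is used (the changes in this file are formatting only): the Flask app serves `/upload` for multipart file uploads and, in the next hunk, `/run` for executing a posted command, listening on port 5011. A hypothetical client sketch, assuming the `requests` package; the uploaded script name and contents are made up:

```python
"""Hypothetical client for the hive_server helper API (illustrative only)."""
import requests

BASE = "http://localhost:5011"  # the port the Flask app binds in __main__

# Upload a script via the multipart 'file' field; only .txt/.sh pass allowed_file().
requests.post(
    f"{BASE}/upload",
    files={"file": ("setup.sh", b"#!/bin/bash\necho prepared\n")},
)

# The raw body posted to /run is handed to run_command(..., wait=True) on the server.
resp = requests.post(f"{BASE}/run", data=b"bash ./setup.sh")
assert resp.text == "Ok"
```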

@@ -59,12 +59,15 @@ def upload_file(): - ''' -@app.route('/run', methods=['GET', 'POST']) + """ + + +@app.route("/run", methods=["GET", "POST"]) def parse_request(): data = request.data # data is empty run_command(data, wait=True) - return 'Ok' + return "Ok" -if __name__ == '__main__': - app.run(port=5011) + +if __name__ == "__main__": + app.run(port=5011) diff --git a/docker/test/keeper-jepsen/run.sh b/docker/test/keeper-jepsen/run.sh index d7534270e2c..4dec82234bc 100644 --- a/docker/test/keeper-jepsen/run.sh +++ b/docker/test/keeper-jepsen/run.sh @@ -2,7 +2,7 @@ set -euo pipefail -CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-13_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"} +CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-13_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"} CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""} @@ -10,7 +10,7 @@ if [ -z "$CLICKHOUSE_REPO_PATH" ]; then CLICKHOUSE_REPO_PATH=ch rm -rf ch ||: mkdir ch ||: - wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz" + wget -nv -nd -c "https://clickhouse-test-reports.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz" tar -C ch --strip-components=1 -xf clickhouse_no_subs.tar.gz ls -lath ||: fi diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 16ac304d7fb..54f71ce05bb 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -1294,15 +1294,15 @@ create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv') select '' test_name, '$(sed -n 's/.*/\1/p' report.html)' test_status, 0 test_duration_ms, - 'https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#fail1' report_url + 'https://clickhouse-test-reports.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#fail1' report_url union all select test || ' #' || toString(query_index), 'slower' test_status, 0 test_duration_ms, - 'https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#changes-in-performance.' + 'https://clickhouse-test-reports.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#changes-in-performance.' || test || '.' || toString(query_index) report_url from queries where changed_fail != 0 and diff > 0 union all select test || ' #' || toString(query_index), 'unstable' test_status, 0 test_duration_ms, - 'https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#unstable-queries.' + 'https://clickhouse-test-reports.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/performance_comparison/report.html#unstable-queries.' || test || '.' 
|| toString(query_index) report_url from queries where unstable_fail != 0 ) diff --git a/docker/test/performance-comparison/download.sh b/docker/test/performance-comparison/download.sh index 8fa6eb5ec83..ae9e677713f 100755 --- a/docker/test/performance-comparison/download.sh +++ b/docker/test/performance-comparison/download.sh @@ -16,26 +16,17 @@ right_sha=$4 datasets=${CHPC_DATASETS-"hits1 hits10 hits100 values"} declare -A dataset_paths -if [[ $S3_URL == *"s3.amazonaws.com"* ]]; then - dataset_paths["hits10"]="https://clickhouse-private-datasets.s3.amazonaws.com/hits_10m_single/partitions/hits_10m_single.tar" - dataset_paths["hits100"]="https://clickhouse-private-datasets.s3.amazonaws.com/hits_100m_single/partitions/hits_100m_single.tar" - dataset_paths["hits1"]="https://clickhouse-datasets.s3.amazonaws.com/hits/partitions/hits_v1.tar" - dataset_paths["values"]="https://clickhouse-datasets.s3.amazonaws.com/values_with_expressions/partitions/test_values.tar" -else - dataset_paths["hits10"]="https://s3.mds.yandex.net/clickhouse-private-datasets/hits_10m_single/partitions/hits_10m_single.tar" - dataset_paths["hits100"]="https://s3.mds.yandex.net/clickhouse-private-datasets/hits_100m_single/partitions/hits_100m_single.tar" - dataset_paths["hits1"]="https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_v1.tar" - dataset_paths["values"]="https://clickhouse-datasets.s3.yandex.net/values_with_expressions/partitions/test_values.tar" -fi +dataset_paths["hits10"]="https://clickhouse-private-datasets.s3.amazonaws.com/hits_10m_single/partitions/hits_10m_single.tar" +dataset_paths["hits100"]="https://clickhouse-private-datasets.s3.amazonaws.com/hits_100m_single/partitions/hits_100m_single.tar" +dataset_paths["hits1"]="https://clickhouse-datasets.s3.amazonaws.com/hits/partitions/hits_v1.tar" +dataset_paths["values"]="https://clickhouse-datasets.s3.amazonaws.com/values_with_expressions/partitions/test_values.tar" function download { # Historically there were various paths for the performance test package. # Test all of them. - declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/$left_pr/$left_sha/performance/performance.tgz" - "https://clickhouse-builds.s3.yandex.net/$left_pr/$left_sha/clickhouse_build_check/performance/performance.tgz" - ) + declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/$left_pr/$left_sha/performance/performance.tgz") for path in "${urls_to_try[@]}" do diff --git a/docker/test/performance-comparison/entrypoint.sh b/docker/test/performance-comparison/entrypoint.sh index 3d37a6c0e92..767807d008b 100755 --- a/docker/test/performance-comparison/entrypoint.sh +++ b/docker/test/performance-comparison/entrypoint.sh @@ -4,7 +4,7 @@ set -ex CHPC_CHECK_START_TIMESTAMP="$(date +%s)" export CHPC_CHECK_START_TIMESTAMP -S3_URL=${S3_URL:="https://clickhouse-builds.s3.yandex.net"} +S3_URL=${S3_URL:="https://clickhouse-builds.s3.amazonaws.com"} COMMON_BUILD_PREFIX="/clickhouse_build_check" if [[ $S3_URL == *"s3.amazonaws.com"* ]]; then @@ -64,9 +64,7 @@ function find_reference_sha # Historically there were various path for the performance test package, # test all of them. 
unset found - declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/0/$REF_SHA/performance/performance.tgz" - "https://clickhouse-builds.s3.yandex.net/0/$REF_SHA/clickhouse_build_check/performance/performance.tgz" - ) + declare -a urls_to_try=("https://s3.amazonaws.com/clickhouse-builds/0/$REF_SHA/performance/performance.tgz") for path in "${urls_to_try[@]}" do if curl_with_retry "$path" diff --git a/docker/test/performance-comparison/perf.py b/docker/test/performance-comparison/perf.py index 61987d34299..2266641397b 100755 --- a/docker/test/performance-comparison/perf.py +++ b/docker/test/performance-comparison/perf.py @@ -19,58 +19,126 @@ import xml.etree.ElementTree as et from threading import Thread from scipy import stats -logging.basicConfig(format='%(asctime)s: %(levelname)s: %(module)s: %(message)s', level='WARNING') +logging.basicConfig( + format="%(asctime)s: %(levelname)s: %(module)s: %(message)s", level="WARNING" +) total_start_seconds = time.perf_counter() stage_start_seconds = total_start_seconds + def reportStageEnd(stage): global stage_start_seconds, total_start_seconds current = time.perf_counter() - print(f'stage\t{stage}\t{current - stage_start_seconds:.3f}\t{current - total_start_seconds:.3f}') + print( + f"stage\t{stage}\t{current - stage_start_seconds:.3f}\t{current - total_start_seconds:.3f}" + ) stage_start_seconds = current def tsv_escape(s): - return s.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n').replace('\r','') + return ( + s.replace("\\", "\\\\") + .replace("\t", "\\t") + .replace("\n", "\\n") + .replace("\r", "") + ) -parser = argparse.ArgumentParser(description='Run performance test.') +parser = argparse.ArgumentParser(description="Run performance test.") # Explicitly decode files as UTF-8 because sometimes we have Russian characters in queries, and LANG=C is set. -parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs=1, help='test description file') -parser.add_argument('--host', nargs='*', default=['localhost'], help="Space-separated list of server hostname(s). Corresponds to '--port' options.") -parser.add_argument('--port', nargs='*', default=[9000], help="Space-separated list of server port(s). Corresponds to '--host' options.") -parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.') -parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.') -parser.add_argument('--queries-to-run', nargs='*', type=int, default=None, help='Space-separated list of indexes of queries to test.') -parser.add_argument('--max-query-seconds', type=int, default=15, help='For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.') -parser.add_argument('--prewarm-max-query-seconds', type=int, default=180, help='For how many seconds at most a prewarm (cold storage) query is allowed to run. 
The script finishes with error if this time is exceeded.') -parser.add_argument('--profile-seconds', type=int, default=0, help='For how many seconds to profile a query for which the performance has changed.') -parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.') -parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.') -parser.add_argument('--print-settings', action='store_true', help='Print test settings and exit.') -parser.add_argument('--keep-created-tables', action='store_true', help="Don't drop the created tables after the test.") -parser.add_argument('--use-existing-tables', action='store_true', help="Don't create or drop the tables, use the existing ones instead.") +parser.add_argument( + "file", + metavar="FILE", + type=argparse.FileType("r", encoding="utf-8"), + nargs=1, + help="test description file", +) +parser.add_argument( + "--host", + nargs="*", + default=["localhost"], + help="Space-separated list of server hostname(s). Corresponds to '--port' options.", +) +parser.add_argument( + "--port", + nargs="*", + default=[9000], + help="Space-separated list of server port(s). Corresponds to '--host' options.", +) +parser.add_argument( + "--runs", type=int, default=1, help="Number of query runs per server." +) +parser.add_argument( + "--max-queries", + type=int, + default=None, + help="Test no more than this number of queries, chosen at random.", +) +parser.add_argument( + "--queries-to-run", + nargs="*", + type=int, + default=None, + help="Space-separated list of indexes of queries to test.", +) +parser.add_argument( + "--max-query-seconds", + type=int, + default=15, + help="For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.", +) +parser.add_argument( + "--prewarm-max-query-seconds", + type=int, + default=180, + help="For how many seconds at most a prewarm (cold storage) query is allowed to run. The script finishes with error if this time is exceeded.", +) +parser.add_argument( + "--profile-seconds", + type=int, + default=0, + help="For how many seconds to profile a query for which the performance has changed.", +) +parser.add_argument( + "--long", action="store_true", help="Do not skip the tests tagged as long." +) +parser.add_argument( + "--print-queries", action="store_true", help="Print test queries and exit." +) +parser.add_argument( + "--print-settings", action="store_true", help="Print test settings and exit." +) +parser.add_argument( + "--keep-created-tables", + action="store_true", + help="Don't drop the created tables after the test.", +) +parser.add_argument( + "--use-existing-tables", + action="store_true", + help="Don't create or drop the tables, use the existing ones instead.", +) args = parser.parse_args() -reportStageEnd('start') +reportStageEnd("start") test_name = os.path.splitext(os.path.basename(args.file[0].name))[0] tree = et.parse(args.file[0]) root = tree.getroot() -reportStageEnd('parse') +reportStageEnd("parse") # Process query parameters -subst_elems = root.findall('substitutions/substitution') -available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... } +subst_elems = root.findall("substitutions/substitution") +available_parameters = {} # { 'table': ['hits_10m', 'hits_100m'], ... 
} for e in subst_elems: - name = e.find('name').text - values = [v.text for v in e.findall('values/value')] + name = e.find("name").text + values = [v.text for v in e.findall("values/value")] if not values: - raise Exception(f'No values given for substitution {{{name}}}') + raise Exception(f"No values given for substitution {{{name}}}") available_parameters[name] = values @@ -78,7 +146,7 @@ for e in subst_elems: # parameters. The set of parameters is determined based on the first list. # Note: keep the order of queries -- sometimes we have DROP IF EXISTS # followed by CREATE in create queries section, so the order matters. -def substitute_parameters(query_templates, other_templates = []): +def substitute_parameters(query_templates, other_templates=[]): query_results = [] other_results = [[]] * (len(other_templates)) for i, q in enumerate(query_templates): @@ -103,17 +171,21 @@ def substitute_parameters(query_templates, other_templates = []): # and reporting the queries marked as short. test_queries = [] is_short = [] -for e in root.findall('query'): - new_queries, [new_is_short] = substitute_parameters([e.text], [[e.attrib.get('short', '0')]]) +for e in root.findall("query"): + new_queries, [new_is_short] = substitute_parameters( + [e.text], [[e.attrib.get("short", "0")]] + ) test_queries += new_queries is_short += [eval(s) for s in new_is_short] -assert(len(test_queries) == len(is_short)) +assert len(test_queries) == len(is_short) # If we're given a list of queries to run, check that it makes sense. for i in args.queries_to_run or []: if i < 0 or i >= len(test_queries): - print(f'There is no query no. {i} in this test, only [{0}-{len(test_queries) - 1}] are present') + print( + f"There is no query no. {i} in this test, only [{0}-{len(test_queries) - 1}] are present" + ) exit(1) # If we're only asked to print the queries, do that and exit. @@ -125,60 +197,65 @@ if args.print_queries: # Print short queries for i, s in enumerate(is_short): if s: - print(f'short\t{i}') + print(f"short\t{i}") # If we're only asked to print the settings, do that and exit. These are settings # for clickhouse-benchmark, so we print them as command line arguments, e.g. # '--max_memory_usage=10000000'. if args.print_settings: - for s in root.findall('settings/*'): - print(f'--{s.tag}={s.text}') + for s in root.findall("settings/*"): + print(f"--{s.tag}={s.text}") exit(0) # Skip long tests if not args.long: - for tag in root.findall('.//tag'): - if tag.text == 'long': - print('skipped\tTest is tagged as long.') + for tag in root.findall(".//tag"): + if tag.text == "long": + print("skipped\tTest is tagged as long.") sys.exit(0) # Print report threshold for the test if it is set. ignored_relative_change = 0.05 -if 'max_ignored_relative_change' in root.attrib: +if "max_ignored_relative_change" in root.attrib: ignored_relative_change = float(root.attrib["max_ignored_relative_change"]) - print(f'report-threshold\t{ignored_relative_change}') + print(f"report-threshold\t{ignored_relative_change}") -reportStageEnd('before-connect') +reportStageEnd("before-connect") # Open connections -servers = [{'host': host or args.host[0], 'port': port or args.port[0]} for (host, port) in itertools.zip_longest(args.host, args.port)] +servers = [ + {"host": host or args.host[0], "port": port or args.port[0]} + for (host, port) in itertools.zip_longest(args.host, args.port) +] # Force settings_is_important to fail queries on unknown settings. 
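The `servers` list built just above pairs the `--host` and `--port` arguments with `itertools.zip_longest`, falling back to the first host or port when one of the lists is shorter. A self-contained sketch of that pairing behaviour:

```python
# Standalone illustration of the host/port pairing above; no ClickHouse connection is made.
import itertools

hosts = ["localhost", "127.0.0.2"]
ports = [9000]  # shorter list: missing entries fall back to ports[0]

servers = [
    {"host": host or hosts[0], "port": port or ports[0]}
    for host, port in itertools.zip_longest(hosts, ports)
]
# zip_longest pads the shorter list with None, and the `or` fallback substitutes
# the first host/port, so every server dict ends up fully populated.
print(servers)
# [{'host': 'localhost', 'port': 9000}, {'host': '127.0.0.2', 'port': 9000}]
```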
-all_connections = [clickhouse_driver.Client(**server, settings_is_important=True) for server in servers] +all_connections = [ + clickhouse_driver.Client(**server, settings_is_important=True) for server in servers +] for i, s in enumerate(servers): print(f'server\t{i}\t{s["host"]}\t{s["port"]}') -reportStageEnd('connect') +reportStageEnd("connect") if not args.use_existing_tables: # Run drop queries, ignoring errors. Do this before all other activity, # because clickhouse_driver disconnects on error (this is not configurable), # and the new connection loses the changes in settings. - drop_query_templates = [q.text for q in root.findall('drop_query')] + drop_query_templates = [q.text for q in root.findall("drop_query")] drop_queries = substitute_parameters(drop_query_templates) for conn_index, c in enumerate(all_connections): for q in drop_queries: try: c.execute(q) - print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') + print(f"drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}") except: pass - reportStageEnd('drop-1') + reportStageEnd("drop-1") # Apply settings. -settings = root.findall('settings/*') +settings = root.findall("settings/*") for conn_index, c in enumerate(all_connections): for s in settings: # requires clickhouse-driver >= 1.1.5 to accept arbitrary new settings @@ -189,48 +266,52 @@ for conn_index, c in enumerate(all_connections): # the test, which is wrong. c.execute("select 1") -reportStageEnd('settings') +reportStageEnd("settings") # Check tables that should exist. If they don't exist, just skip this test. -tables = [e.text for e in root.findall('preconditions/table_exists')] +tables = [e.text for e in root.findall("preconditions/table_exists")] for t in tables: for c in all_connections: try: res = c.execute("select 1 from {} limit 1".format(t)) except: exception_message = traceback.format_exception_only(*sys.exc_info()[:2])[-1] - skipped_message = ' '.join(exception_message.split('\n')[:2]) - print(f'skipped\t{tsv_escape(skipped_message)}') + skipped_message = " ".join(exception_message.split("\n")[:2]) + print(f"skipped\t{tsv_escape(skipped_message)}") sys.exit(0) -reportStageEnd('preconditions') +reportStageEnd("preconditions") if not args.use_existing_tables: # Run create and fill queries. We will run them simultaneously for both # servers, to save time. The weird XML search + filter is because we want to # keep the relative order of elements, and etree doesn't support the # appropriate xpath query. - create_query_templates = [q.text for q in root.findall('./*') - if q.tag in ('create_query', 'fill_query')] + create_query_templates = [ + q.text for q in root.findall("./*") if q.tag in ("create_query", "fill_query") + ] create_queries = substitute_parameters(create_query_templates) # Disallow temporary tables, because the clickhouse_driver reconnects on # errors, and temporary tables are destroyed. We want to be able to continue # after some errors. 
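The `substitute_parameters` helper used above for the drop, create and fill query templates expands each template over every combination of substitution values. A simplified, hypothetical sketch of that expansion (not the script's exact implementation):

```python
# Simplified sketch of cartesian-product parameter substitution; the parameter
# names and the query template below are invented for illustration.
import itertools

available_parameters = {"table": ["hits_10m", "hits_100m"], "func": ["sum", "avg"]}
template = "SELECT {func}(1) FROM {table}"

names = list(available_parameters)
queries = [
    template.format(**dict(zip(names, combination)))
    for combination in itertools.product(*(available_parameters[n] for n in names))
]
# One query per combination of {table} x {func}, four in total here.
print(queries)
```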
for q in create_queries: - if re.search('create temporary table', q, flags=re.IGNORECASE): - print(f"Temporary tables are not allowed in performance tests: '{q}'", - file = sys.stderr) + if re.search("create temporary table", q, flags=re.IGNORECASE): + print( + f"Temporary tables are not allowed in performance tests: '{q}'", + file=sys.stderr, + ) sys.exit(1) def do_create(connection, index, queries): for q in queries: connection.execute(q) - print(f'create\t{index}\t{connection.last_query.elapsed}\t{tsv_escape(q)}') + print(f"create\t{index}\t{connection.last_query.elapsed}\t{tsv_escape(q)}") threads = [ - Thread(target = do_create, args = (connection, index, create_queries)) - for index, connection in enumerate(all_connections)] + Thread(target=do_create, args=(connection, index, create_queries)) + for index, connection in enumerate(all_connections) + ] for t in threads: t.start() @@ -238,14 +319,16 @@ if not args.use_existing_tables: for t in threads: t.join() - reportStageEnd('create') + reportStageEnd("create") # By default, test all queries. queries_to_run = range(0, len(test_queries)) if args.max_queries: # If specified, test a limited number of queries chosen at random. - queries_to_run = random.sample(range(0, len(test_queries)), min(len(test_queries), args.max_queries)) + queries_to_run = random.sample( + range(0, len(test_queries)), min(len(test_queries), args.max_queries) + ) if args.queries_to_run: # Run the specified queries. @@ -255,16 +338,16 @@ if args.queries_to_run: profile_total_seconds = 0 for query_index in queries_to_run: q = test_queries[query_index] - query_prefix = f'{test_name}.query{query_index}' + query_prefix = f"{test_name}.query{query_index}" # We have some crazy long queries (about 100kB), so trim them to a sane # length. This means we can't use query text as an identifier and have to # use the test name + the test-wide query index. query_display_name = q if len(query_display_name) > 1000: - query_display_name = f'{query_display_name[:1000]}...({query_index})' + query_display_name = f"{query_display_name[:1000]}...({query_index})" - print(f'display-name\t{query_index}\t{tsv_escape(query_display_name)}') + print(f"display-name\t{query_index}\t{tsv_escape(query_display_name)}") # Prewarm: run once on both servers. Helps to bring the data into memory, # precompile the queries, etc. @@ -272,10 +355,10 @@ for query_index in queries_to_run: # new one. We want to run them on the new server only, so that the PR author # can ensure that the test works properly. Remember the errors we had on # each server. - query_error_on_connection = [None] * len(all_connections); + query_error_on_connection = [None] * len(all_connections) for conn_index, c in enumerate(all_connections): try: - prewarm_id = f'{query_prefix}.prewarm0' + prewarm_id = f"{query_prefix}.prewarm0" try: # During the warmup runs, we will also: @@ -283,25 +366,30 @@ for query_index in queries_to_run: # * collect profiler traces, which might be helpful for analyzing # test coverage. We disable profiler for normal runs because # it makes the results unstable. 
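The `do_create` block above replays the same setup queries on every server in parallel threads. A self-contained sketch of that pattern, with a stand-in `FakeConnection` class instead of `clickhouse_driver.Client`:

```python
# FakeConnection is an illustrative stand-in for clickhouse_driver.Client.
from threading import Thread

class FakeConnection:
    def __init__(self, name):
        self.name = name

    def execute(self, query):
        print(f"{self.name}: {query}")

def do_create(connection, index, queries):
    # Run the setup queries sequentially on one connection.
    for q in queries:
        connection.execute(q)

create_queries = ["CREATE TABLE t (x UInt64) ENGINE = Memory", "INSERT INTO t VALUES (1)"]
connections = [FakeConnection("old-server"), FakeConnection("new-server")]

# One thread per server, so both servers are prepared simultaneously.
threads = [
    Thread(target=do_create, args=(connection, index, create_queries))
    for index, connection in enumerate(connections)
]
for t in threads:
    t.start()
for t in threads:
    t.join()
```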
- res = c.execute(q, query_id = prewarm_id, - settings = { - 'max_execution_time': args.prewarm_max_query_seconds, - 'query_profiler_real_time_period_ns': 10000000, - 'memory_profiler_step': '4Mi', - }) + res = c.execute( + q, + query_id=prewarm_id, + settings={ + "max_execution_time": args.prewarm_max_query_seconds, + "query_profiler_real_time_period_ns": 10000000, + "memory_profiler_step": "4Mi", + }, + ) except clickhouse_driver.errors.Error as e: # Add query id to the exception to make debugging easier. e.args = (prewarm_id, *e.args) - e.message = prewarm_id + ': ' + e.message + e.message = prewarm_id + ": " + e.message raise - print(f'prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}') + print( + f"prewarm\t{query_index}\t{prewarm_id}\t{conn_index}\t{c.last_query.elapsed}" + ) except KeyboardInterrupt: raise except: # FIXME the driver reconnects on error and we lose settings, so this # might lead to further errors or unexpected behavior. - query_error_on_connection[conn_index] = traceback.format_exc(); + query_error_on_connection[conn_index] = traceback.format_exc() continue # Report all errors that ocurred during prewarm and decide what to do next. @@ -311,14 +399,14 @@ for query_index in queries_to_run: no_errors = [] for i, e in enumerate(query_error_on_connection): if e: - print(e, file = sys.stderr) + print(e, file=sys.stderr) else: no_errors.append(i) if len(no_errors) == 0: continue elif len(no_errors) < len(all_connections): - print(f'partial\t{query_index}\t{no_errors}') + print(f"partial\t{query_index}\t{no_errors}") this_query_connections = [all_connections[index] for index in no_errors] @@ -337,27 +425,34 @@ for query_index in queries_to_run: all_server_times.append([]) while True: - run_id = f'{query_prefix}.run{run}' + run_id = f"{query_prefix}.run{run}" for conn_index, c in enumerate(this_query_connections): try: - res = c.execute(q, query_id = run_id, settings = {'max_execution_time': args.max_query_seconds}) + res = c.execute( + q, + query_id=run_id, + settings={"max_execution_time": args.max_query_seconds}, + ) except clickhouse_driver.errors.Error as e: # Add query id to the exception to make debugging easier. e.args = (run_id, *e.args) - e.message = run_id + ': ' + e.message + e.message = run_id + ": " + e.message raise elapsed = c.last_query.elapsed all_server_times[conn_index].append(elapsed) server_seconds += elapsed - print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}') + print(f"query\t{query_index}\t{run_id}\t{conn_index}\t{elapsed}") if elapsed > args.max_query_seconds: # Do not stop processing pathologically slow queries, # since this may hide errors in other queries. - print(f'The query no. {query_index} is taking too long to run ({elapsed} s)', file=sys.stderr) + print( + f"The query no. {query_index} is taking too long to run ({elapsed} s)", + file=sys.stderr, + ) # Be careful with the counter, after this line it's the next iteration # already. @@ -386,7 +481,7 @@ for query_index in queries_to_run: break client_seconds = time.perf_counter() - start_seconds - print(f'client-time\t{query_index}\t{client_seconds}\t{server_seconds}') + print(f"client-time\t{query_index}\t{client_seconds}\t{server_seconds}") # Run additional profiling queries to collect profile data, but only if test times appeared to be different. 
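Both the prewarm and the measurement runs above attach the query id to any driver exception before re-raising it, so a failure can be matched to a specific run in the logs. A minimal stand-alone sketch of that idiom (`FakeDriverError` replaces `clickhouse_driver.errors.Error` purely for illustration):

```python
# FakeDriverError and execute_with_id are illustrative stand-ins, not repository code.
class FakeDriverError(Exception):
    def __init__(self, message):
        super().__init__(message)
        self.message = message

def execute_with_id(run_id, fn):
    try:
        return fn()
    except FakeDriverError as e:
        # Prepend the run id so the failing query is easy to find in the logs.
        e.args = (run_id, *e.args)
        e.message = run_id + ": " + e.message
        raise

def failing_query():
    raise FakeDriverError("timeout exceeded")

try:
    execute_with_id("test.query0.run0", failing_query)
except FakeDriverError as e:
    print(e.args)     # ('test.query0.run0', 'timeout exceeded')
    print(e.message)  # test.query0.run0: timeout exceeded
```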
# We have to do it after normal runs because otherwise it will affect test statistics too much @@ -397,13 +492,15 @@ for query_index in queries_to_run: # Don't fail if for some reason there are not enough measurements. continue - pvalue = stats.ttest_ind(all_server_times[0], all_server_times[1], equal_var = False).pvalue + pvalue = stats.ttest_ind( + all_server_times[0], all_server_times[1], equal_var=False + ).pvalue median = [statistics.median(t) for t in all_server_times] # Keep this consistent with the value used in report. Should eventually move # to (median[1] - median[0]) / min(median), which is compatible with "times" # difference we use in report (max(median) / min(median)). relative_diff = (median[1] - median[0]) / median[0] - print(f'diff\t{query_index}\t{median[0]}\t{median[1]}\t{relative_diff}\t{pvalue}') + print(f"diff\t{query_index}\t{median[0]}\t{median[1]}\t{relative_diff}\t{pvalue}") if abs(relative_diff) < ignored_relative_change or pvalue > 0.05: continue @@ -412,25 +509,31 @@ for query_index in queries_to_run: profile_start_seconds = time.perf_counter() run = 0 while time.perf_counter() - profile_start_seconds < args.profile_seconds: - run_id = f'{query_prefix}.profile{run}' + run_id = f"{query_prefix}.profile{run}" for conn_index, c in enumerate(this_query_connections): try: - res = c.execute(q, query_id = run_id, settings = {'query_profiler_real_time_period_ns': 10000000}) - print(f'profile\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}') + res = c.execute( + q, + query_id=run_id, + settings={"query_profiler_real_time_period_ns": 10000000}, + ) + print( + f"profile\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}" + ) except clickhouse_driver.errors.Error as e: # Add query id to the exception to make debugging easier. 
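The profiling trigger above combines Welch's t-test (`scipy.stats.ttest_ind` with `equal_var=False`) with a relative-difference threshold on the medians. A small sketch of that check with invented timings:

```python
# Sketch of the significance check above; the timing samples are made up.
import statistics
from scipy import stats

old_server_times = [0.101, 0.098, 0.103, 0.099, 0.102]
new_server_times = [0.121, 0.118, 0.124, 0.119, 0.122]

pvalue = stats.ttest_ind(old_server_times, new_server_times, equal_var=False).pvalue
median = [statistics.median(t) for t in (old_server_times, new_server_times)]
relative_diff = (median[1] - median[0]) / median[0]

ignored_relative_change = 0.05
if abs(relative_diff) >= ignored_relative_change and pvalue <= 0.05:
    print(f"significant change: {relative_diff:+.1%} (p={pvalue:.3g})")
else:
    print("difference is ignored as noise")
```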
e.args = (run_id, *e.args) - e.message = run_id + ': ' + e.message + e.message = run_id + ": " + e.message raise run += 1 profile_total_seconds += time.perf_counter() - profile_start_seconds -print(f'profile-total\t{profile_total_seconds}') +print(f"profile-total\t{profile_total_seconds}") -reportStageEnd('run') +reportStageEnd("run") # Run drop queries if not args.keep_created_tables and not args.use_existing_tables: @@ -438,6 +541,6 @@ if not args.keep_created_tables and not args.use_existing_tables: for conn_index, c in enumerate(all_connections): for q in drop_queries: c.execute(q) - print(f'drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}') + print(f"drop\t{conn_index}\t{c.last_query.elapsed}\t{tsv_escape(q)}") - reportStageEnd('drop-2') + reportStageEnd("drop-2") diff --git a/docker/test/performance-comparison/report.py b/docker/test/performance-comparison/report.py index 4cff6b41949..0cb8481ee6e 100755 --- a/docker/test/performance-comparison/report.py +++ b/docker/test/performance-comparison/report.py @@ -12,9 +12,13 @@ import pprint import sys import traceback -parser = argparse.ArgumentParser(description='Create performance test report') -parser.add_argument('--report', default='main', choices=['main', 'all-queries'], - help='Which report to build') +parser = argparse.ArgumentParser(description="Create performance test report") +parser.add_argument( + "--report", + default="main", + choices=["main", "all-queries"], + help="Which report to build", +) args = parser.parse_args() tables = [] @@ -31,8 +35,8 @@ unstable_partial_queries = 0 # max seconds to run one query by itself, not counting preparation allowed_single_run_time = 2 -color_bad='#ffb0c0' -color_good='#b0d050' +color_bad = "#ffb0c0" +color_good = "#b0d050" header_template = """ @@ -151,24 +155,29 @@ tr:nth-child(odd) td {{filter: brightness(90%);}} table_anchor = 0 row_anchor = 0 + def currentTableAnchor(): global table_anchor - return f'{table_anchor}' + return f"{table_anchor}" + def newTableAnchor(): global table_anchor table_anchor += 1 return currentTableAnchor() + def currentRowAnchor(): global row_anchor global table_anchor - return f'{table_anchor}.{row_anchor}' + return f"{table_anchor}.{row_anchor}" + def nextRowAnchor(): global row_anchor global table_anchor - return f'{table_anchor}.{row_anchor + 1}' + return f"{table_anchor}.{row_anchor + 1}" + def advanceRowAnchor(): global row_anchor @@ -178,43 +187,58 @@ def advanceRowAnchor(): def tr(x, anchor=None): - #return '{x}'.format(a=a, x=str(x)) + # return '{x}'.format(a=a, x=str(x)) anchor = anchor if anchor else advanceRowAnchor() - return f'{x}' + return f"{x}" -def td(value, cell_attributes = ''): - return '{value}'.format( - cell_attributes = cell_attributes, - value = value) -def th(value, cell_attributes = ''): - return '{value}'.format( - cell_attributes = cell_attributes, - value = value) +def td(value, cell_attributes=""): + return "{value}".format( + cell_attributes=cell_attributes, value=value + ) -def tableRow(cell_values, cell_attributes = [], anchor=None): + +def th(value, cell_attributes=""): + return "{value}".format( + cell_attributes=cell_attributes, value=value + ) + + +def tableRow(cell_values, cell_attributes=[], anchor=None): return tr( - ''.join([td(v, a) - for v, a in itertools.zip_longest( - cell_values, cell_attributes, - fillvalue = '') - if a is not None and v is not None]), - anchor) + "".join( + [ + td(v, a) + for v, a in itertools.zip_longest( + cell_values, cell_attributes, fillvalue="" + ) + if a is not None and v is 
not None + ] + ), + anchor, + ) -def tableHeader(cell_values, cell_attributes = []): + +def tableHeader(cell_values, cell_attributes=[]): return tr( - ''.join([th(v, a) - for v, a in itertools.zip_longest( - cell_values, cell_attributes, - fillvalue = '') - if a is not None and v is not None])) + "".join( + [ + th(v, a) + for v, a in itertools.zip_longest( + cell_values, cell_attributes, fillvalue="" + ) + if a is not None and v is not None + ] + ) + ) + def tableStart(title): - cls = '-'.join(title.lower().split(' ')[:3]); + cls = "-".join(title.lower().split(" ")[:3]) global table_anchor table_anchor = cls anchor = currentTableAnchor() - help_anchor = '-'.join(title.lower().split(' ')); + help_anchor = "-".join(title.lower().split(" ")) return f"""

{title} @@ -223,12 +247,14 @@ def tableStart(title): """ + def tableEnd(): - return '
' + return "" + def tsvRows(n): try: - with open(n, encoding='utf-8') as fd: + with open(n, encoding="utf-8") as fd: result = [] for row in csv.reader(fd, delimiter="\t", quoting=csv.QUOTE_NONE): new_row = [] @@ -237,27 +263,32 @@ def tsvRows(n): # The second one (encode('latin1').decode('utf-8')) fixes the changes with unicode vs utf-8 chars, so # 'Чем зÐ�нимаеÑ�ЬÑ�Ñ�' is transformed back into 'Чем зАнимаешЬся'. - new_row.append(e.encode('utf-8').decode('unicode-escape').encode('latin1').decode('utf-8')) + new_row.append( + e.encode("utf-8") + .decode("unicode-escape") + .encode("latin1") + .decode("utf-8") + ) result.append(new_row) return result except: - report_errors.append( - traceback.format_exception_only( - *sys.exc_info()[:2])[-1]) + report_errors.append(traceback.format_exception_only(*sys.exc_info()[:2])[-1]) pass return [] + def htmlRows(n): rawRows = tsvRows(n) - result = '' + result = "" for row in rawRows: result += tableRow(row) return result + def addSimpleTable(caption, columns, rows, pos=None): global tables - text = '' + text = "" if not rows: return @@ -268,51 +299,63 @@ def addSimpleTable(caption, columns, rows, pos=None): text += tableEnd() tables.insert(pos if pos else len(tables), text) + def add_tested_commits(): global report_errors try: - addSimpleTable('Tested Commits', ['Old', 'New'], - [['
{}
'.format(x) for x in - [open('left-commit.txt').read(), - open('right-commit.txt').read()]]]) + addSimpleTable( + "Tested Commits", + ["Old", "New"], + [ + [ + "
{}
".format(x) + for x in [ + open("left-commit.txt").read(), + open("right-commit.txt").read(), + ] + ] + ], + ) except: # Don't fail if no commit info -- maybe it's a manual run. - report_errors.append( - traceback.format_exception_only( - *sys.exc_info()[:2])[-1]) + report_errors.append(traceback.format_exception_only(*sys.exc_info()[:2])[-1]) pass + def add_report_errors(): global tables global report_errors # Add the errors reported by various steps of comparison script try: - report_errors += [l.strip() for l in open('report/errors.log')] + report_errors += [l.strip() for l in open("report/errors.log")] except: - report_errors.append( - traceback.format_exception_only( - *sys.exc_info()[:2])[-1]) + report_errors.append(traceback.format_exception_only(*sys.exc_info()[:2])[-1]) pass if not report_errors: return - text = tableStart('Errors while Building the Report') - text += tableHeader(['Error']) + text = tableStart("Errors while Building the Report") + text += tableHeader(["Error"]) for x in report_errors: text += tableRow([x]) text += tableEnd() # Insert after Tested Commits tables.insert(1, text) - errors_explained.append([f'There were some errors while building the report']); + errors_explained.append( + [ + f'There were some errors while building the report' + ] + ) + def add_errors_explained(): if not errors_explained: return text = '' - text += tableStart('Error Summary') - text += tableHeader(['Description']) + text += tableStart("Error Summary") + text += tableHeader(["Description"]) for row in errors_explained: text += tableRow(row) text += tableEnd() @@ -321,59 +364,81 @@ def add_errors_explained(): tables.insert(1, text) -if args.report == 'main': +if args.report == "main": print((header_template.format())) add_tested_commits() - - run_error_rows = tsvRows('run-errors.tsv') + run_error_rows = tsvRows("run-errors.tsv") error_tests += len(run_error_rows) - addSimpleTable('Run Errors', ['Test', 'Error'], run_error_rows) + addSimpleTable("Run Errors", ["Test", "Error"], run_error_rows) if run_error_rows: - errors_explained.append([f'There were some errors while running the tests']); + errors_explained.append( + [ + f'There were some errors while running the tests' + ] + ) - - slow_on_client_rows = tsvRows('report/slow-on-client.tsv') + slow_on_client_rows = tsvRows("report/slow-on-client.tsv") error_tests += len(slow_on_client_rows) - addSimpleTable('Slow on Client', - ['Client time, s', 'Server time, s', 'Ratio', 'Test', 'Query'], - slow_on_client_rows) + addSimpleTable( + "Slow on Client", + ["Client time, s", "Server time, s", "Ratio", "Test", "Query"], + slow_on_client_rows, + ) if slow_on_client_rows: - errors_explained.append([f'Some queries are taking noticeable time client-side (missing `FORMAT Null`?)']); + errors_explained.append( + [ + f'Some queries are taking noticeable time client-side (missing `FORMAT Null`?)' + ] + ) - unmarked_short_rows = tsvRows('report/unexpected-query-duration.tsv') + unmarked_short_rows = tsvRows("report/unexpected-query-duration.tsv") error_tests += len(unmarked_short_rows) - addSimpleTable('Unexpected Query Duration', - ['Problem', 'Marked as "short"?', 'Run time, s', 'Test', '#', 'Query'], - unmarked_short_rows) + addSimpleTable( + "Unexpected Query Duration", + ["Problem", 'Marked as "short"?', "Run time, s", "Test", "#", "Query"], + unmarked_short_rows, + ) if unmarked_short_rows: - errors_explained.append([f'Some queries have unexpected duration']); + errors_explained.append( + [ + f'Some queries have unexpected duration' + ] + ) def 
add_partial(): - rows = tsvRows('report/partial-queries-report.tsv') + rows = tsvRows("report/partial-queries-report.tsv") if not rows: return global unstable_partial_queries, slow_average_tests, tables - text = tableStart('Partial Queries') - columns = ['Median time, s', 'Relative time variance', 'Test', '#', 'Query'] + text = tableStart("Partial Queries") + columns = ["Median time, s", "Relative time variance", "Test", "#", "Query"] text += tableHeader(columns) - attrs = ['' for c in columns] + attrs = ["" for c in columns] for row in rows: - anchor = f'{currentTableAnchor()}.{row[2]}.{row[3]}' + anchor = f"{currentTableAnchor()}.{row[2]}.{row[3]}" if float(row[1]) > 0.10: attrs[1] = f'style="background: {color_bad}"' unstable_partial_queries += 1 - errors_explained.append([f'The query no. {row[3]} of test \'{row[2]}\' has excessive variance of run time. Keep it below 10%']) + errors_explained.append( + [ + f"The query no. {row[3]} of test '{row[2]}' has excessive variance of run time. Keep it below 10%" + ] + ) else: - attrs[1] = '' + attrs[1] = "" if float(row[0]) > allowed_single_run_time: attrs[0] = f'style="background: {color_bad}"' - errors_explained.append([f'The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run_time} seconds"']) + errors_explained.append( + [ + f'The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run_time} seconds"' + ] + ) slow_average_tests += 1 else: - attrs[0] = '' + attrs[0] = "" text += tableRow(row, attrs, anchor) text += tableEnd() tables.append(text) @@ -381,41 +446,45 @@ if args.report == 'main': add_partial() def add_changes(): - rows = tsvRows('report/changed-perf.tsv') + rows = tsvRows("report/changed-perf.tsv") if not rows: return global faster_queries, slower_queries, tables - text = tableStart('Changes in Performance') + text = tableStart("Changes in Performance") columns = [ - 'Old, s', # 0 - 'New, s', # 1 - 'Ratio of speedup (-) or slowdown (+)', # 2 - 'Relative difference (new − old) / old', # 3 - 'p < 0.01 threshold', # 4 - '', # Failed # 5 - 'Test', # 6 - '#', # 7 - 'Query', # 8 - ] - attrs = ['' for c in columns] + "Old, s", # 0 + "New, s", # 1 + "Ratio of speedup (-) or slowdown (+)", # 2 + "Relative difference (new − old) / old", # 3 + "p < 0.01 threshold", # 4 + "", # Failed # 5 + "Test", # 6 + "#", # 7 + "Query", # 8 + ] + attrs = ["" for c in columns] attrs[5] = None text += tableHeader(columns, attrs) for row in rows: - anchor = f'{currentTableAnchor()}.{row[6]}.{row[7]}' + anchor = f"{currentTableAnchor()}.{row[6]}.{row[7]}" if int(row[5]): - if float(row[3]) < 0.: + if float(row[3]) < 0.0: faster_queries += 1 attrs[2] = attrs[3] = f'style="background: {color_good}"' else: slower_queries += 1 attrs[2] = attrs[3] = f'style="background: {color_bad}"' - errors_explained.append([f'The query no. {row[7]} of test \'{row[6]}\' has slowed down']) + errors_explained.append( + [ + f"The query no. 
{row[7]} of test '{row[6]}' has slowed down" + ] + ) else: - attrs[2] = attrs[3] = '' + attrs[2] = attrs[3] = "" text += tableRow(row, attrs, anchor) @@ -427,35 +496,35 @@ if args.report == 'main': def add_unstable_queries(): global unstable_queries, very_unstable_queries, tables - unstable_rows = tsvRows('report/unstable-queries.tsv') + unstable_rows = tsvRows("report/unstable-queries.tsv") if not unstable_rows: return unstable_queries += len(unstable_rows) columns = [ - 'Old, s', #0 - 'New, s', #1 - 'Relative difference (new - old)/old', #2 - 'p < 0.01 threshold', #3 - '', # Failed #4 - 'Test', #5 - '#', #6 - 'Query' #7 + "Old, s", # 0 + "New, s", # 1 + "Relative difference (new - old)/old", # 2 + "p < 0.01 threshold", # 3 + "", # Failed #4 + "Test", # 5 + "#", # 6 + "Query", # 7 ] - attrs = ['' for c in columns] + attrs = ["" for c in columns] attrs[4] = None - text = tableStart('Unstable Queries') + text = tableStart("Unstable Queries") text += tableHeader(columns, attrs) for r in unstable_rows: - anchor = f'{currentTableAnchor()}.{r[5]}.{r[6]}' + anchor = f"{currentTableAnchor()}.{r[5]}.{r[6]}" if int(r[4]): very_unstable_queries += 1 attrs[3] = f'style="background: {color_bad}"' else: - attrs[3] = '' + attrs[3] = "" # Just don't add the slightly unstable queries we don't consider # errors. It's not clear what the user should do with them. continue @@ -470,53 +539,70 @@ if args.report == 'main': add_unstable_queries() - skipped_tests_rows = tsvRows('analyze/skipped-tests.tsv') - addSimpleTable('Skipped Tests', ['Test', 'Reason'], skipped_tests_rows) + skipped_tests_rows = tsvRows("analyze/skipped-tests.tsv") + addSimpleTable("Skipped Tests", ["Test", "Reason"], skipped_tests_rows) - addSimpleTable('Test Performance Changes', - ['Test', 'Ratio of speedup (-) or slowdown (+)', 'Queries', 'Total not OK', 'Changed perf', 'Unstable'], - tsvRows('report/test-perf-changes.tsv')) + addSimpleTable( + "Test Performance Changes", + [ + "Test", + "Ratio of speedup (-) or slowdown (+)", + "Queries", + "Total not OK", + "Changed perf", + "Unstable", + ], + tsvRows("report/test-perf-changes.tsv"), + ) def add_test_times(): global slow_average_tests, tables - rows = tsvRows('report/test-times.tsv') + rows = tsvRows("report/test-times.tsv") if not rows: return columns = [ - 'Test', #0 - 'Wall clock time, entire test, s', #1 - 'Total client time for measured query runs, s', #2 - 'Queries', #3 - 'Longest query, total for measured runs, s', #4 - 'Wall clock time per query, s', #5 - 'Shortest query, total for measured runs, s', #6 - '', # Runs #7 - ] - attrs = ['' for c in columns] + "Test", # 0 + "Wall clock time, entire test, s", # 1 + "Total client time for measured query runs, s", # 2 + "Queries", # 3 + "Longest query, total for measured runs, s", # 4 + "Wall clock time per query, s", # 5 + "Shortest query, total for measured runs, s", # 6 + "", # Runs #7 + ] + attrs = ["" for c in columns] attrs[7] = None - text = tableStart('Test Times') + text = tableStart("Test Times") text += tableHeader(columns, attrs) - allowed_average_run_time = 3.75 # 60 seconds per test at (7 + 1) * 2 runs + allowed_average_run_time = 3.75 # 60 seconds per test at (7 + 1) * 2 runs for r in rows: - anchor = f'{currentTableAnchor()}.{r[0]}' + anchor = f"{currentTableAnchor()}.{r[0]}" total_runs = (int(r[7]) + 1) * 2 # one prewarm run, two servers - if r[0] != 'Total' and float(r[5]) > allowed_average_run_time * total_runs: + if r[0] != "Total" and float(r[5]) > allowed_average_run_time * total_runs: # FIXME should be 15s max 
-- investigate parallel_insert slow_average_tests += 1 attrs[5] = f'style="background: {color_bad}"' - errors_explained.append([f'The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up']) + errors_explained.append( + [ + f"The test '{r[0]}' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up" + ] + ) else: - attrs[5] = '' + attrs[5] = "" - if r[0] != 'Total' and float(r[4]) > allowed_single_run_time * total_runs: + if r[0] != "Total" and float(r[4]) > allowed_single_run_time * total_runs: slow_average_tests += 1 attrs[4] = f'style="background: {color_bad}"' - errors_explained.append([f'Some query of the test \'{r[0]}\' is too slow to run. See the all queries report']) + errors_explained.append( + [ + f"Some query of the test '{r[0]}' is too slow to run. See the all queries report" + ] + ) else: - attrs[4] = '' + attrs[4] = "" text += tableRow(r, attrs, anchor) @@ -525,10 +611,17 @@ if args.report == 'main': add_test_times() - addSimpleTable('Metric Changes', - ['Metric', 'Old median value', 'New median value', - 'Relative difference', 'Times difference'], - tsvRows('metrics/changes.tsv')) + addSimpleTable( + "Metric Changes", + [ + "Metric", + "Old median value", + "New median value", + "Relative difference", + "Times difference", + ], + tsvRows("metrics/changes.tsv"), + ) add_report_errors() add_errors_explained() @@ -536,7 +629,8 @@ if args.report == 'main': for t in tables: print(t) - print(f""" + print( + f""" - """) + """ + ) - status = 'success' - message = 'See the report' + status = "success" + message = "See the report" message_array = [] if slow_average_tests: - status = 'failure' - message_array.append(str(slow_average_tests) + ' too long') + status = "failure" + message_array.append(str(slow_average_tests) + " too long") if faster_queries: - message_array.append(str(faster_queries) + ' faster') + message_array.append(str(faster_queries) + " faster") if slower_queries: if slower_queries > 3: - status = 'failure' - message_array.append(str(slower_queries) + ' slower') + status = "failure" + message_array.append(str(slower_queries) + " slower") if unstable_partial_queries: very_unstable_queries += unstable_partial_queries - status = 'failure' + status = "failure" # Don't show mildly unstable queries, only the very unstable ones we # treat as errors. if very_unstable_queries: if very_unstable_queries > 5: error_tests += very_unstable_queries - status = 'failure' - message_array.append(str(very_unstable_queries) + ' unstable') + status = "failure" + message_array.append(str(very_unstable_queries) + " unstable") error_tests += slow_average_tests if error_tests: - status = 'failure' - message_array.insert(0, str(error_tests) + ' errors') + status = "failure" + message_array.insert(0, str(error_tests) + " errors") if message_array: - message = ', '.join(message_array) + message = ", ".join(message_array) if report_errors: - status = 'failure' - message = 'Errors while building the report.' + status = "failure" + message = "Errors while building the report." 
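At the end of the main report the script folds the per-category counters into a single `(status, message)` pair for the CI check. A toy, slightly simplified illustration of that aggregation, with invented counter values:

```python
# Simplified toy version of the status/message aggregation above; it does not
# reproduce every branch or the exact ordering of the real script.
def summarize(error_tests, slower_queries, very_unstable_queries, faster_queries):
    status, message_array = "success", []
    if slower_queries:
        if slower_queries > 3:
            status = "failure"
        message_array.append(f"{slower_queries} slower")
    if very_unstable_queries > 5:
        error_tests += very_unstable_queries
        status = "failure"
        message_array.append(f"{very_unstable_queries} unstable")
    if faster_queries:
        message_array.append(f"{faster_queries} faster")
    if error_tests:
        status = "failure"
        message_array.insert(0, f"{error_tests} errors")
    return status, ", ".join(message_array) if message_array else "See the report"

print(summarize(error_tests=0, slower_queries=5, very_unstable_queries=0, faster_queries=2))
# ('failure', '5 slower, 2 faster')
```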
- print((""" + print( + ( + """ - """.format(status=status, message=message))) + """.format( + status=status, message=message + ) + ) + ) -elif args.report == 'all-queries': +elif args.report == "all-queries": print((header_template.format())) add_tested_commits() def add_all_queries(): - rows = tsvRows('report/all-queries.tsv') + rows = tsvRows("report/all-queries.tsv") if not rows: return columns = [ - '', # Changed #0 - '', # Unstable #1 - 'Old, s', #2 - 'New, s', #3 - 'Ratio of speedup (-) or slowdown (+)', #4 - 'Relative difference (new − old) / old', #5 - 'p < 0.01 threshold', #6 - 'Test', #7 - '#', #8 - 'Query', #9 - ] - attrs = ['' for c in columns] + "", # Changed #0 + "", # Unstable #1 + "Old, s", # 2 + "New, s", # 3 + "Ratio of speedup (-) or slowdown (+)", # 4 + "Relative difference (new − old) / old", # 5 + "p < 0.01 threshold", # 6 + "Test", # 7 + "#", # 8 + "Query", # 9 + ] + attrs = ["" for c in columns] attrs[0] = None attrs[1] = None - text = tableStart('All Query Times') + text = tableStart("All Query Times") text += tableHeader(columns, attrs) for r in rows: - anchor = f'{currentTableAnchor()}.{r[7]}.{r[8]}' + anchor = f"{currentTableAnchor()}.{r[7]}.{r[8]}" if int(r[1]): attrs[6] = f'style="background: {color_bad}"' else: - attrs[6] = '' + attrs[6] = "" if int(r[0]): - if float(r[5]) > 0.: + if float(r[5]) > 0.0: attrs[4] = attrs[5] = f'style="background: {color_bad}"' else: attrs[4] = attrs[5] = f'style="background: {color_good}"' else: - attrs[4] = attrs[5] = '' + attrs[4] = attrs[5] = "" if (float(r[2]) + float(r[3])) / 2 > allowed_single_run_time: attrs[2] = f'style="background: {color_bad}"' attrs[3] = f'style="background: {color_bad}"' else: - attrs[2] = '' - attrs[3] = '' + attrs[2] = "" + attrs[3] = "" text += tableRow(r, attrs, anchor) @@ -655,7 +756,8 @@ elif args.report == 'all-queries': for t in tables: print(t) - print(f""" + print( + f""" - """) + """ + ) diff --git a/docker/test/split_build_smoke_test/process_split_build_smoke_test_result.py b/docker/test/split_build_smoke_test/process_split_build_smoke_test_result.py index 58d6ba8c62a..b5bc82e6818 100755 --- a/docker/test/split_build_smoke_test/process_split_build_smoke_test_result.py +++ b/docker/test/split_build_smoke_test/process_split_build_smoke_test_result.py @@ -7,18 +7,19 @@ import csv RESULT_LOG_NAME = "run.log" + def process_result(result_folder): status = "success" - description = 'Server started and responded' + description = "Server started and responded" summary = [("Smoke test", "OK")] - with open(os.path.join(result_folder, RESULT_LOG_NAME), 'r') as run_log: - lines = run_log.read().split('\n') - if not lines or lines[0].strip() != 'OK': + with open(os.path.join(result_folder, RESULT_LOG_NAME), "r") as run_log: + lines = run_log.read().split("\n") + if not lines or lines[0].strip() != "OK": status = "failure" - logging.info("Lines is not ok: %s", str('\n'.join(lines))) + logging.info("Lines is not ok: %s", str("\n".join(lines))) summary = [("Smoke test", "FAIL")] - description = 'Server failed to respond, see result in logs' + description = "Server failed to respond, see result in logs" result_logs = [] server_log_path = os.path.join(result_folder, "clickhouse-server.log") @@ -38,20 +39,22 @@ def process_result(result_folder): def write_results(results_file, status_file, results, status): - with open(results_file, 'w') as f: - out = csv.writer(f, delimiter='\t') + with open(results_file, "w") as f: + out = csv.writer(f, delimiter="\t") out.writerows(results) - with open(status_file, 'w') 
as f: - out = csv.writer(f, delimiter='\t') + with open(status_file, "w") as f: + out = csv.writer(f, delimiter="\t") out.writerow(status) if __name__ == "__main__": - logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') - parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of split build smoke test") - parser.add_argument("--in-results-dir", default='/test_output/') - parser.add_argument("--out-results-file", default='/test_output/test_results.tsv') - parser.add_argument("--out-status-file", default='/test_output/check_status.tsv') + logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s") + parser = argparse.ArgumentParser( + description="ClickHouse script for parsing results of split build smoke test" + ) + parser.add_argument("--in-results-dir", default="/test_output/") + parser.add_argument("--out-results-file", default="/test_output/test_results.tsv") + parser.add_argument("--out-status-file", default="/test_output/check_status.tsv") args = parser.parse_args() state, description, test_results, logs = process_result(args.in_results_dir) diff --git a/docker/test/sqlancer/process_sqlancer_result.py b/docker/test/sqlancer/process_sqlancer_result.py index ede3cabc1c5..37b8f465498 100755 --- a/docker/test/sqlancer/process_sqlancer_result.py +++ b/docker/test/sqlancer/process_sqlancer_result.py @@ -10,11 +10,18 @@ def process_result(result_folder): status = "success" summary = [] paths = [] - tests = ["TLPWhere", "TLPGroupBy", "TLPHaving", "TLPWhereGroupBy", "TLPDistinct", "TLPAggregate"] + tests = [ + "TLPWhere", + "TLPGroupBy", + "TLPHaving", + "TLPWhereGroupBy", + "TLPDistinct", + "TLPAggregate", + ] for test in tests: - err_path = '{}/{}.err'.format(result_folder, test) - out_path = '{}/{}.out'.format(result_folder, test) + err_path = "{}/{}.err".format(result_folder, test) + out_path = "{}/{}.out".format(result_folder, test) if not os.path.exists(err_path): logging.info("No output err on path %s", err_path) summary.append((test, "SKIPPED")) @@ -23,24 +30,24 @@ def process_result(result_folder): else: paths.append(err_path) paths.append(out_path) - with open(err_path, 'r') as f: - if 'AssertionError' in f.read(): + with open(err_path, "r") as f: + if "AssertionError" in f.read(): summary.append((test, "FAIL")) - status = 'failure' + status = "failure" else: summary.append((test, "OK")) - logs_path = '{}/logs.tar.gz'.format(result_folder) + logs_path = "{}/logs.tar.gz".format(result_folder) if not os.path.exists(logs_path): logging.info("No logs tar on path %s", logs_path) else: paths.append(logs_path) - stdout_path = '{}/stdout.log'.format(result_folder) + stdout_path = "{}/stdout.log".format(result_folder) if not os.path.exists(stdout_path): logging.info("No stdout log on path %s", stdout_path) else: paths.append(stdout_path) - stderr_path = '{}/stderr.log'.format(result_folder) + stderr_path = "{}/stderr.log".format(result_folder) if not os.path.exists(stderr_path): logging.info("No stderr log on path %s", stderr_path) else: @@ -52,20 +59,22 @@ def process_result(result_folder): def write_results(results_file, status_file, results, status): - with open(results_file, 'w') as f: - out = csv.writer(f, delimiter='\t') + with open(results_file, "w") as f: + out = csv.writer(f, delimiter="\t") out.writerows(results) - with open(status_file, 'w') as f: - out = csv.writer(f, delimiter='\t') + with open(status_file, "w") as f: + out = csv.writer(f, delimiter="\t") out.writerow(status) if __name__ == "__main__": - 
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') - parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of sqlancer test") - parser.add_argument("--in-results-dir", default='/test_output/') - parser.add_argument("--out-results-file", default='/test_output/test_results.tsv') - parser.add_argument("--out-status-file", default='/test_output/check_status.tsv') + logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s") + parser = argparse.ArgumentParser( + description="ClickHouse script for parsing results of sqlancer test" + ) + parser.add_argument("--in-results-dir", default="/test_output/") + parser.add_argument("--out-results-file", default="/test_output/test_results.tsv") + parser.add_argument("--out-status-file", default="/test_output/check_status.tsv") args = parser.parse_args() state, description, test_results, logs = process_result(args.in_results_dir) diff --git a/docker/test/stateful/Dockerfile b/docker/test/stateful/Dockerfile index 93e7cebb857..543cf113b2b 100644 --- a/docker/test/stateful/Dockerfile +++ b/docker/test/stateful/Dockerfile @@ -11,7 +11,7 @@ RUN apt-get update -y \ COPY s3downloader /s3downloader -ENV S3_URL="https://clickhouse-datasets.s3.yandex.net" +ENV S3_URL="https://clickhouse-datasets.s3.amazonaws.com" ENV DATASETS="hits visits" ENV EXPORT_S3_STORAGE_POLICIES=1 diff --git a/docker/test/stateful/s3downloader b/docker/test/stateful/s3downloader index eb3b3cd9faf..b1302877d6a 100755 --- a/docker/test/stateful/s3downloader +++ b/docker/test/stateful/s3downloader @@ -10,7 +10,7 @@ import requests import tempfile -DEFAULT_URL = 'https://clickhouse-datasets.s3.yandex.net' +DEFAULT_URL = 'https://clickhouse-datasets.s3.amazonaws.com' AVAILABLE_DATASETS = { 'hits': 'hits_v1.tar', diff --git a/docker/test/stress/Dockerfile b/docker/test/stress/Dockerfile index 495c12f4f83..ba6daffc014 100644 --- a/docker/test/stress/Dockerfile +++ b/docker/test/stress/Dockerfile @@ -29,7 +29,7 @@ COPY ./download_previous_release /download_previous_release COPY run.sh / ENV DATASETS="hits visits" -ENV S3_URL="https://clickhouse-datasets.s3.yandex.net" +ENV S3_URL="https://clickhouse-datasets.s3.amazonaws.com" ENV EXPORT_S3_STORAGE_POLICIES=1 CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/stress/download_previous_release b/docker/test/stress/download_previous_release index 364c863423b..ea3d376ad90 100755 --- a/docker/test/stress/download_previous_release +++ b/docker/test/stress/download_previous_release @@ -4,6 +4,9 @@ import requests import re import os +from requests.adapters import HTTPAdapter +from requests.packages.urllib3.util.retry import Retry + CLICKHOUSE_TAGS_URL = "https://api.github.com/repos/ClickHouse/ClickHouse/tags" CLICKHOUSE_COMMON_STATIC_DOWNLOAD_URL = "https://github.com/ClickHouse/ClickHouse/releases/download/v{version}-{type}/clickhouse-common-static_{version}_amd64.deb" @@ -66,8 +69,18 @@ def get_previous_release(server_version): return previous_release -def download_packet(url, local_file_name): - response = requests.get(url) +def download_packet(url, local_file_name, retries=10, backoff_factor=0.3): + session = requests.Session() + retry = Retry( + total=retries, + read=retries, + connect=retries, + backoff_factor=backoff_factor, + ) + adapter = HTTPAdapter(max_retries=retry) + session.mount('http://', adapter) + session.mount('https://', adapter) + response = session.get(url) print(url) if response.ok: open(PACKETS_DIR + local_file_name, 'wb').write(response.content) diff --git 
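The `download_packet` change above replaces a bare `requests.get` with a session that retries transient network failures. A sketch of that retrying-session setup, assuming only `requests` and `urllib3` (the import path differs slightly from the diff, which goes through `requests.packages`):

```python
# Sketch of a retrying requests session in the spirit of download_previous_release.
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_retrying_session(retries=10, backoff_factor=0.3):
    session = requests.Session()
    retry = Retry(total=retries, read=retries, connect=retries, backoff_factor=backoff_factor)
    adapter = HTTPAdapter(max_retries=retry)
    # Mount the same adapter for both schemes so every request is retried.
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    return session

# With backoff_factor=0.3 the waits between retries grow roughly as
# 0.3 * 2**(attempt - 1) seconds, absorbing transient S3/GitHub hiccups.
session = make_retrying_session()
response = session.get("https://api.github.com/repos/ClickHouse/ClickHouse/tags", timeout=30)
print(response.status_code)
```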
a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 4af74d3ba54..3cef5b008db 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -131,9 +131,6 @@ function start() # use root to match with current uid clickhouse start --user root >/var/log/clickhouse-server/stdout.log 2>>/var/log/clickhouse-server/stderr.log sleep 0.5 - cat /var/log/clickhouse-server/stdout.log - tail -n200 /var/log/clickhouse-server/stderr.log - tail -n200 /var/log/clickhouse-server/clickhouse-server.log counter=$((counter + 1)) done @@ -211,14 +208,12 @@ stop start clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_output/test_results.tsv \ - || echo -e 'Server failed to start\tFAIL' >> /test_output/test_results.tsv + || (echo -e 'Server failed to start (see application_errors.txt)\tFAIL' >> /test_output/test_results.tsv \ + && grep -Fa ".*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt) [ -f /var/log/clickhouse-server/clickhouse-server.log ] || echo -e "Server log does not exist\tFAIL" [ -f /var/log/clickhouse-server/stderr.log ] || echo -e "Stderr log does not exist\tFAIL" -# Print Fatal log messages to stdout -zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.log* - # Grep logs for sanitizer asserts, crashes and other critical errors # Sanitizer asserts @@ -235,20 +230,26 @@ zgrep -Fa " Application: Child process was terminated by signal 9" /var/ || echo -e 'No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv # Logical errors -zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \ - && echo -e 'Logical error thrown (see clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \ +zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log* > /test_output/logical_errors.txt \ + && echo -e 'Logical error thrown (see clickhouse-server.log or logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'No logical errors\tOK' >> /test_output/test_results.tsv +# Remove file logical_errors.txt if it's empty +[ -s /test_output/logical_errors.txt ] || rm /test_output/logical_errors.txt + # Crash zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \ && echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Not crashed\tOK' >> /test_output/test_results.tsv # It also checks for crash without stacktrace (printed by watchdog) -zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.log* > /dev/null \ - && echo -e 'Fatal message in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \ +zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.log* > /test_output/fatal_messages.txt \ + && echo -e 'Fatal message in clickhouse-server.log (see fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv +# Remove file fatal_messages.txt if it's empty +[ -s /test_output/fatal_messages.txt ] || rm /test_output/fatal_messages.txt + zgrep -Fa "########################################" /test_output/* > /dev/null \ && echo -e 'Killed by signal (output files)\tFAIL' >> /test_output/test_results.tsv @@ -259,12 +260,12 @@ echo -e "Backward compatibility check\n" echo "Download previous release server" mkdir previous_release_package_folder -clickhouse-client 
--query="SELECT version()" | ./download_previous_release && echo -e 'Download script exit code\tOK' >> /test_output/backward_compatibility_check_results.tsv \ - || echo -e 'Download script failed\tFAIL' >> /test_output/backward_compatibility_check_results.tsv +clickhouse-client --query="SELECT version()" | ./download_previous_release && echo -e 'Download script exit code\tOK' >> /test_output/test_results.tsv \ + || echo -e 'Download script failed\tFAIL' >> /test_output/test_results.tsv if [ "$(ls -A previous_release_package_folder/clickhouse-common-static_*.deb && ls -A previous_release_package_folder/clickhouse-server_*.deb)" ] then - echo -e "Successfully downloaded previous release packets\tOK" >> /test_output/backward_compatibility_check_results.tsv + echo -e "Successfully downloaded previous release packets\tOK" >> /test_output/test_results.tsv stop # Uninstall current packages @@ -290,8 +291,8 @@ then mkdir tmp_stress_output ./stress --backward-compatibility-check --output-folder tmp_stress_output --global-time-limit=1200 \ - && echo -e 'Test script exit code\tOK' >> /test_output/backward_compatibility_check_results.tsv \ - || echo -e 'Test script failed\tFAIL' >> /test_output/backward_compatibility_check_results.tsv + && echo -e 'Backward compatibility check: Test script exit code\tOK' >> /test_output/test_results.tsv \ + || echo -e 'Backward compatibility check: Test script failed\tFAIL' >> /test_output/test_results.tsv rm -rf tmp_stress_output clickhouse-client --query="SELECT 'Tables count:', count() FROM system.tables" @@ -301,8 +302,9 @@ then # Start new server configure start 500 - clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_output/backward_compatibility_check_results.tsv \ - || echo -e 'Server failed to start\tFAIL' >> /test_output/backward_compatibility_check_results.tsv + clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \ + || (echo -e 'Backward compatibility check: Server failed to start\tFAIL' >> /test_output/test_results.tsv \ + && grep -Fa ".*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt) clickhouse-client --query="SELECT 'Server version: ', version()" @@ -312,10 +314,12 @@ then stop # Error messages (we should ignore some errors) + echo "Check for Error messages in server log:" zgrep -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \ -e "Code: 236. 
DB::Exception: Cancelled mutating parts" \ -e "REPLICA_IS_ALREADY_ACTIVE" \ -e "REPLICA_IS_ALREADY_EXIST" \ + -e "ALL_REPLICAS_LOST" \ -e "DDLWorker: Cannot parse DDL task query" \ -e "RaftInstance: failed to accept a rpc connection due to error 125" \ -e "UNKNOWN_DATABASE" \ @@ -328,47 +332,53 @@ then -e "Code: 1000, e.code() = 111, Connection refused" \ -e "UNFINISHED" \ -e "Renaming unexpected part" \ - /var/log/clickhouse-server/clickhouse-server.log | zgrep -Fa "" > /dev/null \ - && echo -e 'Error message in clickhouse-server.log\tFAIL' >> /test_output/backward_compatibility_check_results.tsv \ - || echo -e 'No Error messages in clickhouse-server.log\tOK' >> /test_output/backward_compatibility_check_results.tsv + /var/log/clickhouse-server/clickhouse-server.log | zgrep -Fa "" > /test_output/bc_check_error_messages.txt \ + && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ + || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv + + # Remove file bc_check_error_messages.txt if it's empty + [ -s /test_output/bc_check_error_messages.txt ] || rm /test_output/bc_check_error_messages.txt # Sanitizer asserts zgrep -Fa "==================" /var/log/clickhouse-server/stderr.log >> /test_output/tmp zgrep -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp zgrep -Fav "ASan doesn't fully support makecontext/swapcontext functions" /test_output/tmp > /dev/null \ - && echo -e 'Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/backward_compatibility_check_results.tsv \ - || echo -e 'No sanitizer asserts\tOK' >> /test_output/backward_compatibility_check_results.tsv + && echo -e 'Backward compatibility check: Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \ + || echo -e 'Backward compatibility check: No sanitizer asserts\tOK' >> /test_output/test_results.tsv rm -f /test_output/tmp # OOM zgrep -Fa " Application: Child process was terminated by signal 9" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ - && echo -e 'OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/backward_compatibility_check_results.tsv \ - || echo -e 'No OOM messages in clickhouse-server.log\tOK' >> /test_output/backward_compatibility_check_results.tsv + && echo -e 'Backward compatibility check: OOM killer (or signal 9) in clickhouse-server.log\tFAIL' >> /test_output/test_results.tsv \ + || echo -e 'Backward compatibility check: No OOM messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv # Logical errors - zgrep -Fa "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ - && echo -e 'Logical error thrown (see clickhouse-server.log)\tFAIL' >> /test_output/backward_compatibility_check_results.tsv \ - || echo -e 'No logical errors\tOK' >> /test_output/backward_compatibility_check_results.tsv + echo "Check for Logical errors in server log:" + zgrep -Fa -A20 "Code: 49, e.displayText() = DB::Exception:" /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_logical_errors.txt \ + && echo -e 'Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)\tFAIL' >> /test_output/test_results.tsv \ + || echo -e 'Backward compatibility check: No logical errors\tOK' >> /test_output/test_results.tsv + + # Remove file bc_check_logical_errors.txt if it's 
empty + [ -s /test_output/bc_check_logical_errors.txt ] || rm /test_output/bc_check_logical_errors.txt # Crash zgrep -Fa "########################################" /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ - && echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/backward_compatibility_check_results.tsv \ - || echo -e 'Not crashed\tOK' >> /test_output/backward_compatibility_check_results.tsv + && echo -e 'Backward compatibility check: Killed by signal (in clickhouse-server.log)\tFAIL' >> /test_output/test_results.tsv \ + || echo -e 'Backward compatibility check: Not crashed\tOK' >> /test_output/test_results.tsv # It also checks for crash without stacktrace (printed by watchdog) - zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.log > /dev/null \ - && echo -e 'Fatal message in clickhouse-server.log\tFAIL' >> /test_output/backward_compatibility_check_results.tsv \ - || echo -e 'No fatal messages in clickhouse-server.log\tOK' >> /test_output/backward_compatibility_check_results.tsv + echo "Check for Fatal message in server log:" + zgrep -Fa " " /var/log/clickhouse-server/clickhouse-server.log > /test_output/bc_check_fatal_messages.txt \ + && echo -e 'Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ + || echo -e 'Backward compatibility check: No fatal messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv + + # Remove file bc_check_fatal_messages.txt if it's empty + [ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt else - echo -e "Failed to download previous release packets\tFAIL" >> /test_output/backward_compatibility_check_results.tsv + echo -e "Backward compatibility check: Failed to download previous release packets\tFAIL" >> /test_output/test_results.tsv fi -zgrep -Fa "FAIL" /test_output/backward_compatibility_check_results.tsv > /dev/null \ - && echo -e 'Backward compatibility check\tFAIL' >> /test_output/test_results.tsv \ - || echo -e 'Backward compatibility check\tOK' >> /test_output/test_results.tsv - - # Put logs into /test_output/ for log_file in /var/log/clickhouse-server/clickhouse-server.log* do diff --git a/docker/test/style/Dockerfile b/docker/test/style/Dockerfile index 85c751edfbe..3101ab84c40 100644 --- a/docker/test/style/Dockerfile +++ b/docker/test/style/Dockerfile @@ -16,7 +16,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ python3-pip \ shellcheck \ yamllint \ - && pip3 install codespell PyGithub boto3 unidiff dohq-artifactory + && pip3 install black boto3 codespell dohq-artifactory PyGithub unidiff # Architecture of the image when BuildKit/buildx is used ARG TARGETARCH diff --git a/docker/test/style/process_style_check_result.py b/docker/test/style/process_style_check_result.py index 655b7d70243..6472ff21f5e 100755 --- a/docker/test/style/process_style_check_result.py +++ b/docker/test/style/process_style_check_result.py @@ -14,6 +14,7 @@ def process_result(result_folder): ("header duplicates", "duplicate_output.txt"), ("shellcheck", "shellcheck_output.txt"), ("style", "style_output.txt"), + ("black", "black_output.txt"), ("typos", "typos_output.txt"), ("whitespaces", "whitespaces_output.txt"), ("workflows", "workflows_output.txt"), diff --git a/docker/test/style/run.sh b/docker/test/style/run.sh index ce3ea4e50a6..651883511e8 100755 --- a/docker/test/style/run.sh +++ b/docker/test/style/run.sh @@ -7,11 +7,13 @@ echo 
"Check duplicates" | ts ./check-duplicate-includes.sh |& tee /test_output/duplicate_output.txt echo "Check style" | ts ./check-style -n |& tee /test_output/style_output.txt +echo "Check python formatting with black" | ts +./check-black -n |& tee /test_output/black_output.txt echo "Check typos" | ts ./check-typos |& tee /test_output/typos_output.txt echo "Check whitespaces" | ts ./check-whitespaces -n |& tee /test_output/whitespaces_output.txt -echo "Check sorkflows" | ts +echo "Check workflows" | ts ./check-workflows |& tee /test_output/workflows_output.txt echo "Check shell scripts with shellcheck" | ts ./shellcheck-run.sh |& tee /test_output/shellcheck_output.txt diff --git a/docker/test/testflows/runner/process_testflows_result.py b/docker/test/testflows/runner/process_testflows_result.py index 37d0b6a69d1..8bfc4ac0b0f 100755 --- a/docker/test/testflows/runner/process_testflows_result.py +++ b/docker/test/testflows/runner/process_testflows_result.py @@ -22,9 +22,9 @@ def process_result(result_folder): total_other = 0 test_results = [] for test in results["tests"]: - test_name = test['test']['test_name'] - test_result = test['result']['result_type'].upper() - test_time = str(test['result']['message_rtime']) + test_name = test["test"]["test_name"] + test_result = test["result"]["result_type"].upper() + test_time = str(test["result"]["message_rtime"]) total_tests += 1 if test_result == "OK": total_ok += 1 @@ -39,24 +39,29 @@ def process_result(result_folder): else: status = "success" - description = "failed: {}, passed: {}, other: {}".format(total_fail, total_ok, total_other) + description = "failed: {}, passed: {}, other: {}".format( + total_fail, total_ok, total_other + ) return status, description, test_results, [json_path, test_binary_log] def write_results(results_file, status_file, results, status): - with open(results_file, 'w') as f: - out = csv.writer(f, delimiter='\t') + with open(results_file, "w") as f: + out = csv.writer(f, delimiter="\t") out.writerows(results) - with open(status_file, 'w') as f: - out = csv.writer(f, delimiter='\t') + with open(status_file, "w") as f: + out = csv.writer(f, delimiter="\t") out.writerow(status) + if __name__ == "__main__": - logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') - parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of Testflows tests") - parser.add_argument("--in-results-dir", default='./') - parser.add_argument("--out-results-file", default='./test_results.tsv') - parser.add_argument("--out-status-file", default='./check_status.tsv') + logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s") + parser = argparse.ArgumentParser( + description="ClickHouse script for parsing results of Testflows tests" + ) + parser.add_argument("--in-results-dir", default="./") + parser.add_argument("--out-results-file", default="./test_results.tsv") + parser.add_argument("--out-status-file", default="./check_status.tsv") args = parser.parse_args() state, description, test_results, logs = process_result(args.in_results_dir) @@ -64,4 +69,3 @@ if __name__ == "__main__": status = (state, description) write_results(args.out_results_file, args.out_status_file, test_results, status) logging.info("Result written") - diff --git a/docker/test/unit/process_unit_tests_result.py b/docker/test/unit/process_unit_tests_result.py index 7219aa13b82..0550edc7c25 100755 --- a/docker/test/unit/process_unit_tests_result.py +++ b/docker/test/unit/process_unit_tests_result.py @@ -5,24 +5,26 @@ import 
logging import argparse import csv -OK_SIGN = 'OK ]' -FAILED_SIGN = 'FAILED ]' -SEGFAULT = 'Segmentation fault' -SIGNAL = 'received signal SIG' -PASSED = 'PASSED' +OK_SIGN = "OK ]" +FAILED_SIGN = "FAILED ]" +SEGFAULT = "Segmentation fault" +SIGNAL = "received signal SIG" +PASSED = "PASSED" + def get_test_name(line): - elements = reversed(line.split(' ')) + elements = reversed(line.split(" ")) for element in elements: - if '(' not in element and ')' not in element: + if "(" not in element and ")" not in element: return element raise Exception("No test name in line '{}'".format(line)) + def process_result(result_folder): summary = [] total_counter = 0 failed_counter = 0 - result_log_path = '{}/test_result.txt'.format(result_folder) + result_log_path = "{}/test_result.txt".format(result_folder) if not os.path.exists(result_log_path): logging.info("No output log on path %s", result_log_path) return "exception", "No output log", [] @@ -30,7 +32,7 @@ def process_result(result_folder): status = "success" description = "" passed = False - with open(result_log_path, 'r') as test_result: + with open(result_log_path, "r") as test_result: for line in test_result: if OK_SIGN in line: logging.info("Found ok line: '%s'", line) @@ -38,7 +40,7 @@ def process_result(result_folder): logging.info("Test name: '%s'", test_name) summary.append((test_name, "OK")) total_counter += 1 - elif FAILED_SIGN in line and 'listed below' not in line and 'ms)' in line: + elif FAILED_SIGN in line and "listed below" not in line and "ms)" in line: logging.info("Found fail line: '%s'", line) test_name = get_test_name(line.strip()) logging.info("Test name: '%s'", test_name) @@ -67,25 +69,30 @@ def process_result(result_folder): status = "failure" if not description: - description += "fail: {}, passed: {}".format(failed_counter, total_counter - failed_counter) + description += "fail: {}, passed: {}".format( + failed_counter, total_counter - failed_counter + ) return status, description, summary def write_results(results_file, status_file, results, status): - with open(results_file, 'w') as f: - out = csv.writer(f, delimiter='\t') + with open(results_file, "w") as f: + out = csv.writer(f, delimiter="\t") out.writerows(results) - with open(status_file, 'w') as f: - out = csv.writer(f, delimiter='\t') + with open(status_file, "w") as f: + out = csv.writer(f, delimiter="\t") out.writerow(status) + if __name__ == "__main__": - logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') - parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of unit tests") - parser.add_argument("--in-results-dir", default='/test_output/') - parser.add_argument("--out-results-file", default='/test_output/test_results.tsv') - parser.add_argument("--out-status-file", default='/test_output/check_status.tsv') + logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s") + parser = argparse.ArgumentParser( + description="ClickHouse script for parsing results of unit tests" + ) + parser.add_argument("--in-results-dir", default="/test_output/") + parser.add_argument("--out-results-file", default="/test_output/test_results.tsv") + parser.add_argument("--out-status-file", default="/test_output/check_status.tsv") args = parser.parse_args() state, description, test_results = process_result(args.in_results_dir) @@ -93,4 +100,3 @@ if __name__ == "__main__": status = (state, description) write_results(args.out_results_file, args.out_status_file, test_results, status) logging.info("Result written") - diff --git 
a/docker/test/util/process_functional_tests_result.py b/docker/test/util/process_functional_tests_result.py index 82df170686d..dadda55c830 100755 --- a/docker/test/util/process_functional_tests_result.py +++ b/docker/test/util/process_functional_tests_result.py @@ -16,6 +16,7 @@ NO_TASK_TIMEOUT_SIGNS = ["All tests have finished", "No tests were run"] RETRIES_SIGN = "Some tests were restarted" + def process_test_log(log_path): total = 0 skipped = 0 @@ -26,7 +27,7 @@ def process_test_log(log_path): retries = False task_timeout = True test_results = [] - with open(log_path, 'r') as test_file: + with open(log_path, "r") as test_file: for line in test_file: original_line = line line = line.strip() @@ -36,12 +37,15 @@ def process_test_log(log_path): hung = True if RETRIES_SIGN in line: retries = True - if any(sign in line for sign in (OK_SIGN, FAIL_SIGN, UNKNOWN_SIGN, SKIPPED_SIGN)): - test_name = line.split(' ')[2].split(':')[0] + if any( + sign in line + for sign in (OK_SIGN, FAIL_SIGN, UNKNOWN_SIGN, SKIPPED_SIGN) + ): + test_name = line.split(" ")[2].split(":")[0] - test_time = '' + test_time = "" try: - time_token = line.split(']')[1].strip().split()[0] + time_token = line.split("]")[1].strip().split()[0] float(time_token) test_time = time_token except: @@ -66,9 +70,22 @@ def process_test_log(log_path): elif len(test_results) > 0 and test_results[-1][1] == "FAIL": test_results[-1][3].append(original_line) - test_results = [(test[0], test[1], test[2], ''.join(test[3])) for test in test_results] + test_results = [ + (test[0], test[1], test[2], "".join(test[3])) for test in test_results + ] + + return ( + total, + skipped, + unknown, + failed, + success, + hung, + task_timeout, + retries, + test_results, + ) - return total, skipped, unknown, failed, success, hung, task_timeout, retries, test_results def process_result(result_path): test_results = [] @@ -76,16 +93,26 @@ def process_result(result_path): description = "" files = os.listdir(result_path) if files: - logging.info("Find files in result folder %s", ','.join(files)) - result_path = os.path.join(result_path, 'test_result.txt') + logging.info("Find files in result folder %s", ",".join(files)) + result_path = os.path.join(result_path, "test_result.txt") else: result_path = None description = "No output log" state = "error" if result_path and os.path.exists(result_path): - total, skipped, unknown, failed, success, hung, task_timeout, retries, test_results = process_test_log(result_path) - is_flacky_check = 1 < int(os.environ.get('NUM_TRIES', 1)) + ( + total, + skipped, + unknown, + failed, + success, + hung, + task_timeout, + retries, + test_results, + ) = process_test_log(result_path) + is_flacky_check = 1 < int(os.environ.get("NUM_TRIES", 1)) logging.info("Is flacky check: %s", is_flacky_check) # If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately) # But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped. 
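The Python diffs in this part of the patch (here and in the surrounding `docker/test/*` and `docs/tools/*` scripts) are mechanical reformattings made to satisfy the new `black` check wired into the style-check image and `run.sh` above: single quotes become double quotes and long call argument lists are wrapped. As a rough sketch, not part of the patch itself and assuming `black` is installed locally, a contributor could preview the same reformatting before pushing:

``` bash
# Illustrative only: install black and show, without modifying any files,
# the diff it would apply to the docs tooling scripts.
pip3 install black
black --check --diff docs/tools
```

`--check` makes black exit with a non-zero status when files would be reformatted, which is the behaviour a CI gate needs.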
@@ -120,20 +147,22 @@ def process_result(result_path): def write_results(results_file, status_file, results, status): - with open(results_file, 'w') as f: - out = csv.writer(f, delimiter='\t') + with open(results_file, "w") as f: + out = csv.writer(f, delimiter="\t") out.writerows(results) - with open(status_file, 'w') as f: - out = csv.writer(f, delimiter='\t') + with open(status_file, "w") as f: + out = csv.writer(f, delimiter="\t") out.writerow(status) if __name__ == "__main__": - logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') - parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of functional tests") - parser.add_argument("--in-results-dir", default='/test_output/') - parser.add_argument("--out-results-file", default='/test_output/test_results.tsv') - parser.add_argument("--out-status-file", default='/test_output/check_status.tsv') + logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s") + parser = argparse.ArgumentParser( + description="ClickHouse script for parsing results of functional tests" + ) + parser.add_argument("--in-results-dir", default="/test_output/") + parser.add_argument("--out-results-file", default="/test_output/test_results.tsv") + parser.add_argument("--out-status-file", default="/test_output/check_status.tsv") args = parser.parse_args() state, description, test_results = process_result(args.in_results_dir) diff --git a/docs/en/development/continuous-integration.md b/docs/en/development/continuous-integration.md index 81887eb8b8e..f9dfebff3f9 100644 --- a/docs/en/development/continuous-integration.md +++ b/docs/en/development/continuous-integration.md @@ -71,6 +71,8 @@ This check means that the CI system started to process the pull request. When it Performs some simple regex-based checks of code style, using the [`utils/check-style/check-style`](https://github.com/ClickHouse/ClickHouse/blob/master/utils/check-style/check-style) binary (note that it can be run locally). If it fails, fix the style errors following the [code style guide](style.md). +Python code is checked with [black](https://github.com/psf/black/). + ### Report Details - [Status page example](https://clickhouse-test-reports.s3.yandex.net/12550/659c78c7abb56141723af6a81bfae39335aa8cb2/style_check.html) - `output.txt` contains the check resulting errors (invalid tabulation etc), blank page means no errors. [Successful result example](https://clickhouse-test-reports.s3.yandex.net/12550/659c78c7abb56141723af6a81bfae39335aa8cb2/style_check/output.txt). diff --git a/docs/en/engines/table-engines/integrations/hive.md b/docs/en/engines/table-engines/integrations/hive.md index b804b9c2279..61147467690 100644 --- a/docs/en/engines/table-engines/integrations/hive.md +++ b/docs/en/engines/table-engines/integrations/hive.md @@ -137,7 +137,7 @@ CREATE TABLE test.test_orc `f_array_array_float` Array(Array(Float32)), `day` String ) -ENGINE = Hive('thrift://202.168.117.26:9083', 'test', 'test_orc') +ENGINE = Hive('thrift://localhost:9083', 'test', 'test_orc') PARTITION BY day ``` diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 84865221711..a7066fca087 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -402,7 +402,7 @@ Parsing allows the presence of the additional field `tskv` without the equal sig Comma Separated Values format ([RFC](https://tools.ietf.org/html/rfc4180)). -When formatting, rows are enclosed in double-quotes. 
A double quote inside a string is output as two double quotes in a row. There are no other rules for escaping characters. Date and date-time are enclosed in double-quotes. Numbers are output without quotes. Values are separated by a delimiter character, which is `,` by default. The delimiter character is defined in the setting [format_csv_delimiter](../operations/settings/settings.md#settings-format_csv_delimiter). Rows are separated using the Unix line feed (LF). Arrays are serialized in CSV as follows: first, the array is serialized to a string as in TabSeparated format, and then the resulting string is output to CSV in double-quotes. Tuples in CSV format are serialized as separate columns (that is, their nesting in the tuple is lost). +When formatting, strings are enclosed in double-quotes. A double quote inside a string is output as two double quotes in a row. There are no other rules for escaping characters. Date and date-time are enclosed in double-quotes. Numbers are output without quotes. Values are separated by a delimiter character, which is `,` by default. The delimiter character is defined in the setting [format_csv_delimiter](../operations/settings/settings.md#settings-format_csv_delimiter). Rows are separated using the Unix line feed (LF). Arrays are serialized in CSV as follows: first, the array is serialized to a string as in TabSeparated format, and then the resulting string is output to CSV in double-quotes. Tuples in CSV format are serialized as separate columns (that is, their nesting in the tuple is lost). ``` bash $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv @@ -410,7 +410,7 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR \*By default, the delimiter is `,`. See the [format_csv_delimiter](../operations/settings/settings.md#settings-format_csv_delimiter) setting for more information. -When parsing, all values can be parsed either with or without quotes. Both double and single quotes are supported. Rows can also be arranged without quotes. In this case, they are parsed up to the delimiter character or line feed (CR or LF). In violation of the RFC, when parsing rows without quotes, the leading and trailing spaces and tabs are ignored. For the line feed, Unix (LF), Windows (CR LF) and Mac OS Classic (CR LF) types are all supported. +When parsing, all values can be parsed either with or without quotes. Both double and single quotes are supported. Strings can also be arranged without quotes. In this case, they are parsed up to the delimiter character or line feed (CR or LF). In violation of the RFC, when parsing strings without quotes, the leading and trailing spaces and tabs are ignored. For the line feed, Unix (LF), Windows (CR LF) and Mac OS Classic (CR LF) types are all supported. If setting [input_format_csv_empty_as_default](../operations/settings/settings.md#settings-input_format_csv_empty_as_default) is enabled, empty unquoted input values are replaced with default values. For complex default expressions [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#settings-input_format_defaults_for_omitted_fields) must be enabled too. 
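To make the CSV quoting rules above concrete, here is an illustrative invocation (not part of the patch; the values are made up) showing that strings and dates are double-quoted, inner quotes are doubled, and numbers stay unquoted:

``` bash
$ clickhouse-client --query="SELECT 'Hello, \"world\"', 123, toDate('2022-01-31') FORMAT CSV"
"Hello, ""world""",123,"2022-01-31"
```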
diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index 9c7fab7424d..ad199ce452e 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -195,5 +195,6 @@ toc_title: Adopters | ООО «МПЗ Богородский» | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) | | ДомКлик | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) | | АС "Стрела" | Transportation | — | — | — | [Job posting, Jan 2022](https://vk.com/topic-111905078_35689124?post=3553) | +| Piwik PRO | Web Analytics | — | — | — | [Official website, Dec 2018](https://piwik.pro/blog/piwik-pro-clickhouse-faster-efficient-reports/) | [Original article](https://clickhouse.com/docs/en/introduction/adopters/) diff --git a/docs/en/operations/caches.md b/docs/en/operations/caches.md index 279204a8af1..9aa6419d89c 100644 --- a/docs/en/operations/caches.md +++ b/docs/en/operations/caches.md @@ -5,7 +5,7 @@ toc_title: Caches # Cache Types {#cache-types} -When performing queries, ClichHouse uses different caches. +When performing queries, ClickHouse uses different caches. Main cache types: diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 266abadb087..88c43c9c3c2 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1616,3 +1616,14 @@ Possible values: Default value: `10000`. +## global_memory_usage_overcommit_max_wait_microseconds {#global_memory_usage_overcommit_max_wait_microseconds} + +Sets the maximum waiting time for the global overcommit tracker. + +Possible values: + +- Positive integer. + +Default value: `0`. + + diff --git a/docs/en/operations/settings/memory-overcommit.md b/docs/en/operations/settings/memory-overcommit.md new file mode 100644 index 00000000000..3f99382b826 --- /dev/null +++ b/docs/en/operations/settings/memory-overcommit.md @@ -0,0 +1,31 @@ +# Memory overcommit + +Memory overcommit is an experimental technique intended to allow setting more flexible memory limits for queries. + +The idea of this technique is to introduce settings which represent the guaranteed amount of memory a query can use. +When memory overcommit is enabled and the memory limit is reached, ClickHouse selects the most overcommitted query and tries to free memory by killing it. + +When the memory limit is reached, a query waits for some time while attempting to allocate new memory. +If memory is freed within the timeout, the query continues execution. Otherwise, an exception is thrown and the query is killed. + +The query to stop or kill is selected by either the global or the user overcommit tracker, depending on which memory limit was reached. + +## User overcommit tracker + +The user overcommit tracker finds the query with the biggest overcommit ratio in the user's query list. +The overcommit ratio is computed as the number of allocated bytes divided by the value of the `max_guaranteed_memory_usage` setting. + +The waiting timeout is set by the `memory_usage_overcommit_max_wait_microseconds` setting. + +**Example** + +```sql +SELECT number FROM numbers(1000) GROUP BY number SETTINGS max_guaranteed_memory_usage=4000, memory_usage_overcommit_max_wait_microseconds=500 +``` + +## Global overcommit tracker + +The global overcommit tracker finds the query with the biggest overcommit ratio in the list of all queries. 
+In this case the overcommit ratio is computed as the number of allocated bytes divided by the value of the `max_guaranteed_memory_usage_for_user` setting. + +The waiting timeout is set by the `global_memory_usage_overcommit_max_wait_microseconds` parameter in the configuration file. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 93075284cfc..91bf0812de4 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -4220,10 +4220,36 @@ Possible values: - 0 — Disabled. - 1 — Enabled. The wait time equal shutdown_wait_unfinished config. -Default value: 0. +Default value: `0`. ## shutdown_wait_unfinished The waiting time in seconds for currently handled connections when shutdown server. -Default Value: 5. +Default Value: `5`. + +## max_guaranteed_memory_usage + +Maximum guaranteed memory usage for processing a single query. +It represents a soft limit in case the hard limit is reached on the user level. +Zero means unlimited. +Read more about [memory overcommit](memory-overcommit.md). + +Default value: `0`. + +## memory_usage_overcommit_max_wait_microseconds + +Maximum time a thread will wait for memory to be freed in the case of memory overcommit on a user level. +If the timeout is reached and memory is not freed, an exception is thrown. +Read more about [memory overcommit](memory-overcommit.md). + +Default value: `0`. + +## max_guaranteed_memory_usage_for_user + +Maximum guaranteed memory usage for processing all concurrently running queries for the user. +It represents a soft limit in case the hard limit is reached on the global level. +Zero means unlimited. +Read more about [memory overcommit](memory-overcommit.md). + +Default value: `0`. diff --git a/docs/tools/amp.py b/docs/tools/amp.py index 22417407946..584a40c4bba 100644 --- a/docs/tools/amp.py +++ b/docs/tools/amp.py @@ -15,24 +15,24 @@ import website def prepare_amp_html(lang, args, root, site_temp, main_site_dir): src_path = root - src_index = os.path.join(src_path, 'index.html') + src_index = os.path.join(src_path, "index.html") rel_path = os.path.relpath(src_path, site_temp) - dst_path = os.path.join(main_site_dir, rel_path, 'amp') - dst_index = os.path.join(dst_path, 'index.html') + dst_path = os.path.join(main_site_dir, rel_path, "amp") + dst_index = os.path.join(dst_path, "index.html") - logging.debug(f'Generating AMP version for {rel_path} ({lang})') + logging.debug(f"Generating AMP version for {rel_path} ({lang})") os.makedirs(dst_path) - with open(src_index, 'r') as f: + with open(src_index, "r") as f: content = f.read() - css_in = ' '.join(website.get_css_in(args)) + css_in = " ".join(website.get_css_in(args)) command = f"purifycss --min {css_in} '{src_index}'" logging.debug(command) - inline_css = subprocess.check_output(command, shell=True).decode('utf-8') - inline_css = inline_css.replace('!important', '').replace('/*!', '/*') + inline_css = subprocess.check_output(command, shell=True).decode("utf-8") + inline_css = inline_css.replace("!important", "").replace("/*!", "/*") inline_css = cssmin.cssmin(inline_css) - content = content.replace('CUSTOM_CSS_PLACEHOLDER', inline_css) + content = content.replace("CUSTOM_CSS_PLACEHOLDER", inline_css) - with open(dst_index, 'w') as f: + with open(dst_index, "w") as f: f.write(content) return dst_index @@ -40,15 +40,12 @@ def prepare_amp_html(lang, args, root, site_temp, main_site_dir): def build_amp(lang, args, cfg): # AMP docs: https://amp.dev/documentation/ - logging.info(f'Building AMP version for {lang}') + 
logging.info(f"Building AMP version for {lang}") with util.temp_dir() as site_temp: - extra = cfg.data['extra'] - main_site_dir = cfg.data['site_dir'] - extra['is_amp'] = True - cfg.load_dict({ - 'site_dir': site_temp, - 'extra': extra - }) + extra = cfg.data["extra"] + main_site_dir = cfg.data["site_dir"] + extra["is_amp"] = True + cfg.load_dict({"site_dir": site_temp, "extra": extra}) try: mkdocs.commands.build.build(cfg) @@ -60,50 +57,49 @@ def build_amp(lang, args, cfg): paths = [] for root, _, filenames in os.walk(site_temp): - if 'index.html' in filenames: - paths.append(prepare_amp_html(lang, args, root, site_temp, main_site_dir)) - logging.info(f'Finished building AMP version for {lang}') + if "index.html" in filenames: + paths.append( + prepare_amp_html(lang, args, root, site_temp, main_site_dir) + ) + logging.info(f"Finished building AMP version for {lang}") def html_to_amp(content): - soup = bs4.BeautifulSoup( - content, - features='html.parser' - ) + soup = bs4.BeautifulSoup(content, features="html.parser") for tag in soup.find_all(): - if tag.attrs.get('id') == 'tostring': - tag.attrs['id'] = '_tostring' - if tag.name == 'img': - tag.name = 'amp-img' - tag.attrs['layout'] = 'responsive' - src = tag.attrs['src'] - if not (src.startswith('/') or src.startswith('http')): - tag.attrs['src'] = f'../{src}' - if not tag.attrs.get('width'): - tag.attrs['width'] = '640' - if not tag.attrs.get('height'): - tag.attrs['height'] = '320' - if tag.name == 'iframe': - tag.name = 'amp-iframe' - tag.attrs['layout'] = 'responsive' - del tag.attrs['alt'] - del tag.attrs['allowfullscreen'] - if not tag.attrs.get('width'): - tag.attrs['width'] = '640' - if not tag.attrs.get('height'): - tag.attrs['height'] = '320' - elif tag.name == 'a': - href = tag.attrs.get('href') + if tag.attrs.get("id") == "tostring": + tag.attrs["id"] = "_tostring" + if tag.name == "img": + tag.name = "amp-img" + tag.attrs["layout"] = "responsive" + src = tag.attrs["src"] + if not (src.startswith("/") or src.startswith("http")): + tag.attrs["src"] = f"../{src}" + if not tag.attrs.get("width"): + tag.attrs["width"] = "640" + if not tag.attrs.get("height"): + tag.attrs["height"] = "320" + if tag.name == "iframe": + tag.name = "amp-iframe" + tag.attrs["layout"] = "responsive" + del tag.attrs["alt"] + del tag.attrs["allowfullscreen"] + if not tag.attrs.get("width"): + tag.attrs["width"] = "640" + if not tag.attrs.get("height"): + tag.attrs["height"] = "320" + elif tag.name == "a": + href = tag.attrs.get("href") if href: - if not (href.startswith('/') or href.startswith('http')): - if '#' in href: - href, anchor = href.split('#') + if not (href.startswith("/") or href.startswith("http")): + if "#" in href: + href, anchor = href.split("#") else: anchor = None - href = f'../{href}amp/' + href = f"../{href}amp/" if anchor: - href = f'{href}#{anchor}' - tag.attrs['href'] = href + href = f"{href}#{anchor}" + tag.attrs["href"] = href content = str(soup) return website.minify_html(content) diff --git a/docs/tools/blog.py b/docs/tools/blog.py index b58523504a3..d1fc540d8bf 100644 --- a/docs/tools/blog.py +++ b/docs/tools/blog.py @@ -17,54 +17,52 @@ import util def build_for_lang(lang, args): - logging.info(f'Building {lang} blog') + logging.info(f"Building {lang} blog") try: theme_cfg = { - 'name': None, - 'custom_dir': os.path.join(os.path.dirname(__file__), '..', args.theme_dir), - 'language': lang, - 'direction': 'ltr', - 'static_templates': ['404.html'], - 'extra': { - 'now': int(time.mktime(datetime.datetime.now().timetuple())) # 
TODO better way to avoid caching - } + "name": None, + "custom_dir": os.path.join(os.path.dirname(__file__), "..", args.theme_dir), + "language": lang, + "direction": "ltr", + "static_templates": ["404.html"], + "extra": { + "now": int( + time.mktime(datetime.datetime.now().timetuple()) + ) # TODO better way to avoid caching + }, } # the following list of languages is sorted according to # https://en.wikipedia.org/wiki/List_of_languages_by_total_number_of_speakers - languages = { - 'en': 'English' - } + languages = {"en": "English"} - site_names = { - 'en': 'ClickHouse Blog' - } + site_names = {"en": "ClickHouse Blog"} assert len(site_names) == len(languages) site_dir = os.path.join(args.blog_output_dir, lang) - plugins = ['macros'] + plugins = ["macros"] if args.htmlproofer: - plugins.append('htmlproofer') + plugins.append("htmlproofer") - website_url = 'https://clickhouse.com' - site_name = site_names.get(lang, site_names['en']) + website_url = "https://clickhouse.com" + site_name = site_names.get(lang, site_names["en"]) blog_nav, post_meta = nav.build_blog_nav(lang, args) raw_config = dict( site_name=site_name, - site_url=f'{website_url}/blog/{lang}/', + site_url=f"{website_url}/blog/{lang}/", docs_dir=os.path.join(args.blog_dir, lang), site_dir=site_dir, strict=True, theme=theme_cfg, nav=blog_nav, - copyright='©2016–2022 ClickHouse, Inc.', + copyright="©2016–2022 ClickHouse, Inc.", use_directory_urls=True, - repo_name='ClickHouse/ClickHouse', - repo_url='https://github.com/ClickHouse/ClickHouse/', - edit_uri=f'edit/master/website/blog/{lang}', + repo_name="ClickHouse/ClickHouse", + repo_url="https://github.com/ClickHouse/ClickHouse/", + edit_uri=f"edit/master/website/blog/{lang}", markdown_extensions=mdx_clickhouse.MARKDOWN_EXTENSIONS, plugins=plugins, extra=dict( @@ -75,12 +73,12 @@ def build_for_lang(lang, args): website_url=website_url, events=args.events, languages=languages, - includes_dir=os.path.join(os.path.dirname(__file__), '..', '_includes'), + includes_dir=os.path.join(os.path.dirname(__file__), "..", "_includes"), is_amp=False, is_blog=True, post_meta=post_meta, - today=datetime.date.today().isoformat() - ) + today=datetime.date.today().isoformat(), + ), ) cfg = config.load_config(**raw_config) @@ -89,21 +87,28 @@ def build_for_lang(lang, args): redirects.build_blog_redirects(args) env = util.init_jinja2_env(args) - with open(os.path.join(args.website_dir, 'templates', 'blog', 'rss.xml'), 'rb') as f: - rss_template_string = f.read().decode('utf-8').strip() + with open( + os.path.join(args.website_dir, "templates", "blog", "rss.xml"), "rb" + ) as f: + rss_template_string = f.read().decode("utf-8").strip() rss_template = env.from_string(rss_template_string) - with open(os.path.join(args.blog_output_dir, lang, 'rss.xml'), 'w') as f: - f.write(rss_template.render({'config': raw_config})) + with open(os.path.join(args.blog_output_dir, lang, "rss.xml"), "w") as f: + f.write(rss_template.render({"config": raw_config})) - logging.info(f'Finished building {lang} blog') + logging.info(f"Finished building {lang} blog") except exceptions.ConfigurationError as e: - raise SystemExit('\n' + str(e)) + raise SystemExit("\n" + str(e)) def build_blog(args): tasks = [] - for lang in args.blog_lang.split(','): + for lang in args.blog_lang.split(","): if lang: - tasks.append((lang, args,)) + tasks.append( + ( + lang, + args, + ) + ) util.run_function_in_parallel(build_for_lang, tasks, threads=False) diff --git a/docs/tools/build.py b/docs/tools/build.py index e4f6718699a..612be0229d3 100755 --- 
a/docs/tools/build.py +++ b/docs/tools/build.py @@ -30,76 +30,76 @@ import website from cmake_in_clickhouse_generator import generate_cmake_flags_files + class ClickHouseMarkdown(markdown.extensions.Extension): class ClickHousePreprocessor(markdown.util.Processor): def run(self, lines): for line in lines: - if '' not in line: + if "" not in line: yield line def extendMarkdown(self, md): - md.preprocessors.register(self.ClickHousePreprocessor(), 'clickhouse_preprocessor', 31) + md.preprocessors.register( + self.ClickHousePreprocessor(), "clickhouse_preprocessor", 31 + ) markdown.extensions.ClickHouseMarkdown = ClickHouseMarkdown def build_for_lang(lang, args): - logging.info(f'Building {lang} docs') - os.environ['SINGLE_PAGE'] = '0' + logging.info(f"Building {lang} docs") + os.environ["SINGLE_PAGE"] = "0" try: theme_cfg = { - 'name': None, - 'custom_dir': os.path.join(os.path.dirname(__file__), '..', args.theme_dir), - 'language': lang, - 'direction': 'rtl' if lang == 'fa' else 'ltr', - 'static_templates': ['404.html'], - 'extra': { - 'now': int(time.mktime(datetime.datetime.now().timetuple())) # TODO better way to avoid caching - } + "name": None, + "custom_dir": os.path.join(os.path.dirname(__file__), "..", args.theme_dir), + "language": lang, + "direction": "rtl" if lang == "fa" else "ltr", + "static_templates": ["404.html"], + "extra": { + "now": int( + time.mktime(datetime.datetime.now().timetuple()) + ) # TODO better way to avoid caching + }, } # the following list of languages is sorted according to # https://en.wikipedia.org/wiki/List_of_languages_by_total_number_of_speakers - languages = { - 'en': 'English', - 'zh': '中文', - 'ru': 'Русский', - 'ja': '日本語' - } + languages = {"en": "English", "zh": "中文", "ru": "Русский", "ja": "日本語"} site_names = { - 'en': 'ClickHouse %s Documentation', - 'zh': 'ClickHouse文档 %s', - 'ru': 'Документация ClickHouse %s', - 'ja': 'ClickHouseドキュメント %s' + "en": "ClickHouse %s Documentation", + "zh": "ClickHouse文档 %s", + "ru": "Документация ClickHouse %s", + "ja": "ClickHouseドキュメント %s", } assert len(site_names) == len(languages) site_dir = os.path.join(args.docs_output_dir, lang) - plugins = ['macros'] + plugins = ["macros"] if args.htmlproofer: - plugins.append('htmlproofer') + plugins.append("htmlproofer") - website_url = 'https://clickhouse.com' - site_name = site_names.get(lang, site_names['en']) % '' - site_name = site_name.replace(' ', ' ') + website_url = "https://clickhouse.com" + site_name = site_names.get(lang, site_names["en"]) % "" + site_name = site_name.replace(" ", " ") raw_config = dict( site_name=site_name, - site_url=f'{website_url}/docs/{lang}/', + site_url=f"{website_url}/docs/{lang}/", docs_dir=os.path.join(args.docs_dir, lang), site_dir=site_dir, strict=True, theme=theme_cfg, - copyright='©2016–2022 ClickHouse, Inc.', + copyright="©2016–2022 ClickHouse, Inc.", use_directory_urls=True, - repo_name='ClickHouse/ClickHouse', - repo_url='https://github.com/ClickHouse/ClickHouse/', - edit_uri=f'edit/master/docs/{lang}', + repo_name="ClickHouse/ClickHouse", + repo_url="https://github.com/ClickHouse/ClickHouse/", + edit_uri=f"edit/master/docs/{lang}", markdown_extensions=mdx_clickhouse.MARKDOWN_EXTENSIONS, plugins=plugins, extra=dict( @@ -111,16 +111,16 @@ def build_for_lang(lang, args): website_url=website_url, events=args.events, languages=languages, - includes_dir=os.path.join(os.path.dirname(__file__), '..', '_includes'), + includes_dir=os.path.join(os.path.dirname(__file__), "..", "_includes"), is_amp=False, - is_blog=False - ) + 
is_blog=False, + ), ) # Clean to be safe if last build finished abnormally single_page.remove_temporary_files(lang, args) - raw_config['nav'] = nav.build_docs_nav(lang, args) + raw_config["nav"] = nav.build_docs_nav(lang, args) cfg = config.load_config(**raw_config) @@ -131,21 +131,28 @@ def build_for_lang(lang, args): amp.build_amp(lang, args, cfg) if not args.skip_single_page: - single_page.build_single_page_version(lang, args, raw_config.get('nav'), cfg) + single_page.build_single_page_version( + lang, args, raw_config.get("nav"), cfg + ) mdx_clickhouse.PatchedMacrosPlugin.disabled = False - logging.info(f'Finished building {lang} docs') + logging.info(f"Finished building {lang} docs") except exceptions.ConfigurationError as e: - raise SystemExit('\n' + str(e)) + raise SystemExit("\n" + str(e)) def build_docs(args): tasks = [] - for lang in args.lang.split(','): + for lang in args.lang.split(","): if lang: - tasks.append((lang, args,)) + tasks.append( + ( + lang, + args, + ) + ) util.run_function_in_parallel(build_for_lang, tasks, threads=False) redirects.build_docs_redirects(args) @@ -171,56 +178,64 @@ def build(args): redirects.build_static_redirects(args) -if __name__ == '__main__': - os.chdir(os.path.join(os.path.dirname(__file__), '..')) +if __name__ == "__main__": + os.chdir(os.path.join(os.path.dirname(__file__), "..")) # A root path to ClickHouse source code. - src_dir = '..' + src_dir = ".." - website_dir = os.path.join(src_dir, 'website') + website_dir = os.path.join(src_dir, "website") arg_parser = argparse.ArgumentParser() - arg_parser.add_argument('--lang', default='en,ru,zh,ja') - arg_parser.add_argument('--blog-lang', default='en') - arg_parser.add_argument('--docs-dir', default='.') - arg_parser.add_argument('--theme-dir', default=website_dir) - arg_parser.add_argument('--website-dir', default=website_dir) - arg_parser.add_argument('--src-dir', default=src_dir) - arg_parser.add_argument('--blog-dir', default=os.path.join(website_dir, 'blog')) - arg_parser.add_argument('--output-dir', default='build') - arg_parser.add_argument('--nav-limit', type=int, default='0') - arg_parser.add_argument('--skip-multi-page', action='store_true') - arg_parser.add_argument('--skip-single-page', action='store_true') - arg_parser.add_argument('--skip-amp', action='store_true') - arg_parser.add_argument('--skip-website', action='store_true') - arg_parser.add_argument('--skip-blog', action='store_true') - arg_parser.add_argument('--skip-git-log', action='store_true') - arg_parser.add_argument('--skip-docs', action='store_true') - arg_parser.add_argument('--test-only', action='store_true') - arg_parser.add_argument('--minify', action='store_true') - arg_parser.add_argument('--htmlproofer', action='store_true') - arg_parser.add_argument('--no-docs-macros', action='store_true') - arg_parser.add_argument('--save-raw-single-page', type=str) - arg_parser.add_argument('--livereload', type=int, default='0') - arg_parser.add_argument('--verbose', action='store_true') + arg_parser.add_argument("--lang", default="en,ru,zh,ja") + arg_parser.add_argument("--blog-lang", default="en") + arg_parser.add_argument("--docs-dir", default=".") + arg_parser.add_argument("--theme-dir", default=website_dir) + arg_parser.add_argument("--website-dir", default=website_dir) + arg_parser.add_argument("--src-dir", default=src_dir) + arg_parser.add_argument("--blog-dir", default=os.path.join(website_dir, "blog")) + arg_parser.add_argument("--output-dir", default="build") + arg_parser.add_argument("--nav-limit", type=int, 
default="0") + arg_parser.add_argument("--skip-multi-page", action="store_true") + arg_parser.add_argument("--skip-single-page", action="store_true") + arg_parser.add_argument("--skip-amp", action="store_true") + arg_parser.add_argument("--skip-website", action="store_true") + arg_parser.add_argument("--skip-blog", action="store_true") + arg_parser.add_argument("--skip-git-log", action="store_true") + arg_parser.add_argument("--skip-docs", action="store_true") + arg_parser.add_argument("--test-only", action="store_true") + arg_parser.add_argument("--minify", action="store_true") + arg_parser.add_argument("--htmlproofer", action="store_true") + arg_parser.add_argument("--no-docs-macros", action="store_true") + arg_parser.add_argument("--save-raw-single-page", type=str) + arg_parser.add_argument("--livereload", type=int, default="0") + arg_parser.add_argument("--verbose", action="store_true") args = arg_parser.parse_args() args.minify = False # TODO remove logging.basicConfig( - level=logging.DEBUG if args.verbose else logging.INFO, - stream=sys.stderr + level=logging.DEBUG if args.verbose else logging.INFO, stream=sys.stderr ) - logging.getLogger('MARKDOWN').setLevel(logging.INFO) + logging.getLogger("MARKDOWN").setLevel(logging.INFO) - args.docs_output_dir = os.path.join(os.path.abspath(args.output_dir), 'docs') - args.blog_output_dir = os.path.join(os.path.abspath(args.output_dir), 'blog') + args.docs_output_dir = os.path.join(os.path.abspath(args.output_dir), "docs") + args.blog_output_dir = os.path.join(os.path.abspath(args.output_dir), "blog") from github import get_events - args.rev = subprocess.check_output('git rev-parse HEAD', shell=True).decode('utf-8').strip() - args.rev_short = subprocess.check_output('git rev-parse --short HEAD', shell=True).decode('utf-8').strip() - args.rev_url = f'https://github.com/ClickHouse/ClickHouse/commit/{args.rev}' + + args.rev = ( + subprocess.check_output("git rev-parse HEAD", shell=True) + .decode("utf-8") + .strip() + ) + args.rev_short = ( + subprocess.check_output("git rev-parse --short HEAD", shell=True) + .decode("utf-8") + .strip() + ) + args.rev_url = f"https://github.com/ClickHouse/ClickHouse/commit/{args.rev}" args.events = get_events(args) if args.test_only: @@ -233,18 +248,20 @@ if __name__ == '__main__': mdx_clickhouse.PatchedMacrosPlugin.skip_git_log = True from build import build + build(args) if args.livereload: - new_args = [arg for arg in sys.argv if not arg.startswith('--livereload')] - new_args = sys.executable + ' ' + ' '.join(new_args) + new_args = [arg for arg in sys.argv if not arg.startswith("--livereload")] + new_args = sys.executable + " " + " ".join(new_args) server = livereload.Server() - server.watch(args.docs_dir + '**/*', livereload.shell(new_args, cwd='tools', shell=True)) - server.watch(args.website_dir + '**/*', livereload.shell(new_args, cwd='tools', shell=True)) - server.serve( - root=args.output_dir, - host='0.0.0.0', - port=args.livereload + server.watch( + args.docs_dir + "**/*", livereload.shell(new_args, cwd="tools", shell=True) ) + server.watch( + args.website_dir + "**/*", + livereload.shell(new_args, cwd="tools", shell=True), + ) + server.serve(root=args.output_dir, host="0.0.0.0", port=args.livereload) sys.exit(0) diff --git a/docs/tools/cmake_in_clickhouse_generator.py b/docs/tools/cmake_in_clickhouse_generator.py index aa4cbbddd18..9bbc94fd206 100644 --- a/docs/tools/cmake_in_clickhouse_generator.py +++ b/docs/tools/cmake_in_clickhouse_generator.py @@ -6,11 +6,13 @@ from typing import TextIO, List, 
Tuple, Optional, Dict Entity = Tuple[str, str, str] # https://regex101.com/r/R6iogw/12 -cmake_option_regex: str = r"^\s*option\s*\(([A-Z_0-9${}]+)\s*(?:\"((?:.|\n)*?)\")?\s*(.*)?\).*$" +cmake_option_regex: str = ( + r"^\s*option\s*\(([A-Z_0-9${}]+)\s*(?:\"((?:.|\n)*?)\")?\s*(.*)?\).*$" +) ch_master_url: str = "https://github.com/clickhouse/clickhouse/blob/master/" -name_str: str = "[`{name}`](" + ch_master_url + "{path}#L{line})" +name_str: str = '[`{name}`](' + ch_master_url + "{path}#L{line})" default_anchor_str: str = "[`{name}`](#{anchor})" comment_var_regex: str = r"\${(.+)}" @@ -27,11 +29,15 @@ entities: Dict[str, Tuple[str, str]] = {} def make_anchor(t: str) -> str: - return "".join(["-" if i == "_" else i.lower() for i in t if i.isalpha() or i == "_"]) + return "".join( + ["-" if i == "_" else i.lower() for i in t if i.isalpha() or i == "_"] + ) + def process_comment(comment: str) -> str: return re.sub(comment_var_regex, comment_var_replace, comment, flags=re.MULTILINE) + def build_entity(path: str, entity: Entity, line_comment: Tuple[int, str]) -> None: (line, comment) = line_comment (name, description, default) = entity @@ -47,22 +53,22 @@ def build_entity(path: str, entity: Entity, line_comment: Tuple[int, str]) -> No formatted_default: str = "`" + default + "`" formatted_name: str = name_str.format( - anchor=make_anchor(name), - name=name, - path=path, - line=line) + anchor=make_anchor(name), name=name, path=path, line=line + ) formatted_description: str = "".join(description.split("\n")) formatted_comment: str = process_comment(comment) formatted_entity: str = "| {} | {} | {} | {} |".format( - formatted_name, formatted_default, formatted_description, formatted_comment) + formatted_name, formatted_default, formatted_description, formatted_comment + ) entities[name] = path, formatted_entity + def process_file(root_path: str, file_path: str, file_name: str) -> None: - with open(os.path.join(file_path, file_name), 'r') as cmake_file: + with open(os.path.join(file_path, file_name), "r") as cmake_file: contents: str = cmake_file.read() def get_line_and_comment(target: str) -> Tuple[int, str]: @@ -70,10 +76,10 @@ def process_file(root_path: str, file_path: str, file_name: str) -> None: comment: str = "" for n, line in enumerate(contents_list): - if 'option' not in line.lower() or target not in line: + if "option" not in line.lower() or target not in line: continue - for maybe_comment_line in contents_list[n - 1::-1]: + for maybe_comment_line in contents_list[n - 1 :: -1]: if not re.match("\s*#\s*", maybe_comment_line): break @@ -82,16 +88,22 @@ def process_file(root_path: str, file_path: str, file_name: str) -> None: # line numbering starts with 1 return n + 1, comment - matches: Optional[List[Entity]] = re.findall(cmake_option_regex, contents, re.MULTILINE) + matches: Optional[List[Entity]] = re.findall( + cmake_option_regex, contents, re.MULTILINE + ) - - file_rel_path_with_name: str = os.path.join(file_path[len(root_path):], file_name) - if file_rel_path_with_name.startswith('/'): + file_rel_path_with_name: str = os.path.join( + file_path[len(root_path) :], file_name + ) + if file_rel_path_with_name.startswith("/"): file_rel_path_with_name = file_rel_path_with_name[1:] if matches: for entity in matches: - build_entity(file_rel_path_with_name, entity, get_line_and_comment(entity[0])) + build_entity( + file_rel_path_with_name, entity, get_line_and_comment(entity[0]) + ) + def process_folder(root_path: str, name: str) -> None: for root, _, files in os.walk(os.path.join(root_path, 
name)): @@ -99,12 +111,19 @@ def process_folder(root_path: str, name: str) -> None: if f == "CMakeLists.txt" or ".cmake" in f: process_file(root_path, root, f) -def generate_cmake_flags_files() -> None: - root_path: str = os.path.join(os.path.dirname(__file__), '..', '..') - output_file_name: str = os.path.join(root_path, "docs/en/development/cmake-in-clickhouse.md") - header_file_name: str = os.path.join(root_path, "docs/_includes/cmake_in_clickhouse_header.md") - footer_file_name: str = os.path.join(root_path, "docs/_includes/cmake_in_clickhouse_footer.md") +def generate_cmake_flags_files() -> None: + root_path: str = os.path.join(os.path.dirname(__file__), "..", "..") + + output_file_name: str = os.path.join( + root_path, "docs/en/development/cmake-in-clickhouse.md" + ) + header_file_name: str = os.path.join( + root_path, "docs/_includes/cmake_in_clickhouse_header.md" + ) + footer_file_name: str = os.path.join( + root_path, "docs/_includes/cmake_in_clickhouse_footer.md" + ) process_file(root_path, root_path, "CMakeLists.txt") process_file(root_path, os.path.join(root_path, "programs"), "CMakeLists.txt") @@ -127,8 +146,10 @@ def generate_cmake_flags_files() -> None: f.write(entities[k][1] + "\n") ignored_keys.append(k) - f.write("\n### External libraries\nNote that ClickHouse uses forks of these libraries, see https://github.com/ClickHouse-Extras.\n" + - table_header) + f.write( + "\n### External libraries\nNote that ClickHouse uses forks of these libraries, see https://github.com/ClickHouse-Extras.\n" + + table_header + ) for k in sorted_keys: if k.startswith("ENABLE_") and ".cmake" in entities[k][0]: @@ -143,15 +164,18 @@ def generate_cmake_flags_files() -> None: with open(footer_file_name, "r") as footer: f.write(footer.read()) - other_languages = ["docs/ja/development/cmake-in-clickhouse.md", - "docs/zh/development/cmake-in-clickhouse.md", - "docs/ru/development/cmake-in-clickhouse.md"] + other_languages = [ + "docs/ja/development/cmake-in-clickhouse.md", + "docs/zh/development/cmake-in-clickhouse.md", + "docs/ru/development/cmake-in-clickhouse.md", + ] for lang in other_languages: other_file_name = os.path.join(root_path, lang) if os.path.exists(other_file_name): - os.unlink(other_file_name) + os.unlink(other_file_name) os.symlink(output_file_name, other_file_name) -if __name__ == '__main__': + +if __name__ == "__main__": generate_cmake_flags_files() diff --git a/docs/tools/easy_diff.py b/docs/tools/easy_diff.py index 22d305d3da3..14e3ca91776 100755 --- a/docs/tools/easy_diff.py +++ b/docs/tools/easy_diff.py @@ -8,7 +8,7 @@ import contextlib from git import cmd from tempfile import NamedTemporaryFile -SCRIPT_DESCRIPTION = ''' +SCRIPT_DESCRIPTION = """ usage: ./easy_diff.py language/document path Show the difference between a language document and an English document. 
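The usage text shown above can be illustrated with a hypothetical invocation (the document path is only an example):

``` bash
# Compare the Russian copy of a page with its English counterpart and print
# the difference to stdout instead of paging it through GIT_PAGER.
./easy_diff.py ru/index.md --no-pager
```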
@@ -53,16 +53,16 @@ SCRIPT_DESCRIPTION = ''' OPTIONS: -h, --help show this help message and exit --no-pager use stdout as difference result output -''' +""" SCRIPT_PATH = os.path.abspath(__file__) -CLICKHOUSE_REPO_HOME = os.path.join(os.path.dirname(SCRIPT_PATH), '..', '..') +CLICKHOUSE_REPO_HOME = os.path.join(os.path.dirname(SCRIPT_PATH), "..", "..") SCRIPT_COMMAND_EXECUTOR = cmd.Git(CLICKHOUSE_REPO_HOME) SCRIPT_COMMAND_PARSER = argparse.ArgumentParser(add_help=False) -SCRIPT_COMMAND_PARSER.add_argument('path', type=bytes, nargs='?', default=None) -SCRIPT_COMMAND_PARSER.add_argument('--no-pager', action='store_true', default=False) -SCRIPT_COMMAND_PARSER.add_argument('-h', '--help', action='store_true', default=False) +SCRIPT_COMMAND_PARSER.add_argument("path", type=bytes, nargs="?", default=None) +SCRIPT_COMMAND_PARSER.add_argument("--no-pager", action="store_true", default=False) +SCRIPT_COMMAND_PARSER.add_argument("-h", "--help", action="store_true", default=False) def execute(commands): @@ -70,19 +70,41 @@ def execute(commands): def get_hash(file_name): - return execute(['git', 'log', '-n', '1', '--pretty=format:"%H"', file_name]) + return execute(["git", "log", "-n", "1", '--pretty=format:"%H"', file_name]) def diff_file(reference_file, working_file, out): if not os.path.exists(reference_file): - raise RuntimeError('reference file [' + os.path.abspath(reference_file) + '] is not exists.') + raise RuntimeError( + "reference file [" + os.path.abspath(reference_file) + "] is not exists." + ) if os.path.islink(working_file): out.writelines(["Need translate document:" + os.path.abspath(reference_file)]) elif not os.path.exists(working_file): - out.writelines(['Need link document ' + os.path.abspath(reference_file) + ' to ' + os.path.abspath(working_file)]) + out.writelines( + [ + "Need link document " + + os.path.abspath(reference_file) + + " to " + + os.path.abspath(working_file) + ] + ) elif get_hash(working_file) != get_hash(reference_file): - out.writelines([(execute(['git', 'diff', get_hash(working_file).strip('"'), reference_file]).encode('utf-8'))]) + out.writelines( + [ + ( + execute( + [ + "git", + "diff", + get_hash(working_file).strip('"'), + reference_file, + ] + ).encode("utf-8") + ) + ] + ) return 0 @@ -94,20 +116,30 @@ def diff_directory(reference_directory, working_directory, out): for list_item in os.listdir(reference_directory): working_item = os.path.join(working_directory, list_item) reference_item = os.path.join(reference_directory, list_item) - if diff_file(reference_item, working_item, out) if os.path.isfile(reference_item) else diff_directory(reference_item, working_item, out) != 0: + if ( + diff_file(reference_item, working_item, out) + if os.path.isfile(reference_item) + else diff_directory(reference_item, working_item, out) != 0 + ): return 1 return 0 -def find_language_doc(custom_document, other_language='en', children=[]): +def find_language_doc(custom_document, other_language="en", children=[]): if len(custom_document) == 0: - raise RuntimeError('The ' + os.path.join(custom_document, *children) + " is not in docs directory.") + raise RuntimeError( + "The " + + os.path.join(custom_document, *children) + + " is not in docs directory." 
+ ) - if os.path.samefile(os.path.join(CLICKHOUSE_REPO_HOME, 'docs'), custom_document): - return os.path.join(CLICKHOUSE_REPO_HOME, 'docs', other_language, *children[1:]) + if os.path.samefile(os.path.join(CLICKHOUSE_REPO_HOME, "docs"), custom_document): + return os.path.join(CLICKHOUSE_REPO_HOME, "docs", other_language, *children[1:]) children.insert(0, os.path.split(custom_document)[1]) - return find_language_doc(os.path.split(custom_document)[0], other_language, children) + return find_language_doc( + os.path.split(custom_document)[0], other_language, children + ) class ToPager: @@ -119,7 +151,7 @@ class ToPager: def close(self): self.temp_named_file.flush() - git_pager = execute(['git', 'var', 'GIT_PAGER']) + git_pager = execute(["git", "var", "GIT_PAGER"]) subprocess.check_call([git_pager, self.temp_named_file.name]) self.temp_named_file.close() @@ -135,12 +167,20 @@ class ToStdOut: self.system_stdout_stream = system_stdout_stream -if __name__ == '__main__': +if __name__ == "__main__": arguments = SCRIPT_COMMAND_PARSER.parse_args() if arguments.help or not arguments.path: sys.stdout.write(SCRIPT_DESCRIPTION) sys.exit(0) - working_language = os.path.join(CLICKHOUSE_REPO_HOME, 'docs', arguments.path) - with contextlib.closing(ToStdOut(sys.stdout) if arguments.no_pager else ToPager(NamedTemporaryFile('r+'))) as writer: - exit(diff_directory(find_language_doc(working_language), working_language, writer)) + working_language = os.path.join(CLICKHOUSE_REPO_HOME, "docs", arguments.path) + with contextlib.closing( + ToStdOut(sys.stdout) + if arguments.no_pager + else ToPager(NamedTemporaryFile("r+")) + ) as writer: + exit( + diff_directory( + find_language_doc(working_language), working_language, writer + ) + ) diff --git a/docs/tools/github.py b/docs/tools/github.py index 465695d1512..3a6f155e25d 100644 --- a/docs/tools/github.py +++ b/docs/tools/github.py @@ -16,27 +16,26 @@ import util def get_events(args): events = [] skip = True - with open(os.path.join(args.docs_dir, '..', 'README.md')) as f: + with open(os.path.join(args.docs_dir, "..", "README.md")) as f: for line in f: if skip: - if 'Upcoming Events' in line: + if "Upcoming Events" in line: skip = False else: if not line: continue - line = line.strip().split('](') + line = line.strip().split("](") if len(line) == 2: - tail = line[1].split(') ') - events.append({ - 'signup_link': tail[0], - 'event_name': line[0].replace('* [', ''), - 'event_date': tail[1].replace('on ', '').replace('.', '') - }) + tail = line[1].split(") ") + events.append( + { + "signup_link": tail[0], + "event_name": line[0].replace("* [", ""), + "event_date": tail[1].replace("on ", "").replace(".", ""), + } + ) return events -if __name__ == '__main__': - logging.basicConfig( - level=logging.DEBUG, - stream=sys.stderr - ) +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG, stream=sys.stderr) diff --git a/docs/tools/mdx_clickhouse.py b/docs/tools/mdx_clickhouse.py index 18ecc890b6e..6b5a5bb5813 100755 --- a/docs/tools/mdx_clickhouse.py +++ b/docs/tools/mdx_clickhouse.py @@ -16,74 +16,79 @@ import slugify as slugify_impl def slugify(value, separator): - return slugify_impl.slugify(value, separator=separator, word_boundary=True, save_order=True) + return slugify_impl.slugify( + value, separator=separator, word_boundary=True, save_order=True + ) MARKDOWN_EXTENSIONS = [ - 'mdx_clickhouse', - 'admonition', - 'attr_list', - 'def_list', - 'codehilite', - 'nl2br', - 'sane_lists', - 'pymdownx.details', - 'pymdownx.magiclink', - 'pymdownx.superfences', - 
'extra', - { - 'toc': { - 'permalink': True, - 'slugify': slugify - } - } + "mdx_clickhouse", + "admonition", + "attr_list", + "def_list", + "codehilite", + "nl2br", + "sane_lists", + "pymdownx.details", + "pymdownx.magiclink", + "pymdownx.superfences", + "extra", + {"toc": {"permalink": True, "slugify": slugify}}, ] class ClickHouseLinkMixin(object): - def handleMatch(self, m, data): - single_page = (os.environ.get('SINGLE_PAGE') == '1') + single_page = os.environ.get("SINGLE_PAGE") == "1" try: el, start, end = super(ClickHouseLinkMixin, self).handleMatch(m, data) except IndexError: return if el is not None: - href = el.get('href') or '' - is_external = href.startswith('http:') or href.startswith('https:') + href = el.get("href") or "" + is_external = href.startswith("http:") or href.startswith("https:") if is_external: - if not href.startswith('https://clickhouse.com'): - el.set('rel', 'external nofollow noreferrer') + if not href.startswith("https://clickhouse.com"): + el.set("rel", "external nofollow noreferrer") elif single_page: - if '#' in href: - el.set('href', '#' + href.split('#', 1)[1]) + if "#" in href: + el.set("href", "#" + href.split("#", 1)[1]) else: - el.set('href', '#' + href.replace('/index.md', '/').replace('.md', '/')) + el.set( + "href", "#" + href.replace("/index.md", "/").replace(".md", "/") + ) return el, start, end -class ClickHouseAutolinkPattern(ClickHouseLinkMixin, markdown.inlinepatterns.AutolinkInlineProcessor): +class ClickHouseAutolinkPattern( + ClickHouseLinkMixin, markdown.inlinepatterns.AutolinkInlineProcessor +): pass -class ClickHouseLinkPattern(ClickHouseLinkMixin, markdown.inlinepatterns.LinkInlineProcessor): +class ClickHouseLinkPattern( + ClickHouseLinkMixin, markdown.inlinepatterns.LinkInlineProcessor +): pass class ClickHousePreprocessor(markdown.util.Processor): def run(self, lines): for line in lines: - if '' not in line: + if "" not in line: yield line class ClickHouseMarkdown(markdown.extensions.Extension): - def extendMarkdown(self, md, md_globals): - md.preprocessors['clickhouse'] = ClickHousePreprocessor() - md.inlinePatterns['link'] = ClickHouseLinkPattern(markdown.inlinepatterns.LINK_RE, md) - md.inlinePatterns['autolink'] = ClickHouseAutolinkPattern(markdown.inlinepatterns.AUTOLINK_RE, md) + md.preprocessors["clickhouse"] = ClickHousePreprocessor() + md.inlinePatterns["link"] = ClickHouseLinkPattern( + markdown.inlinepatterns.LINK_RE, md + ) + md.inlinePatterns["autolink"] = ClickHouseAutolinkPattern( + markdown.inlinepatterns.AUTOLINK_RE, md + ) def makeExtension(**kwargs): @@ -92,10 +97,8 @@ def makeExtension(**kwargs): def get_translations(dirname, lang): import babel.support - return babel.support.Translations.load( - dirname=dirname, - locales=[lang, 'en'] - ) + + return babel.support.Translations.load(dirname=dirname, locales=[lang, "en"]) class PatchedMacrosPlugin(macros.plugin.MacrosPlugin): @@ -104,22 +107,22 @@ class PatchedMacrosPlugin(macros.plugin.MacrosPlugin): def on_config(self, config): super(PatchedMacrosPlugin, self).on_config(config) - self.env.comment_start_string = '{##' - self.env.comment_end_string = '##}' - self.env.loader = jinja2.FileSystemLoader([ - os.path.join(config.data['site_dir']), - os.path.join(config.data['extra']['includes_dir']) - ]) + self.env.comment_start_string = "{##" + self.env.comment_end_string = "##}" + self.env.loader = jinja2.FileSystemLoader( + [ + os.path.join(config.data["site_dir"]), + os.path.join(config.data["extra"]["includes_dir"]), + ] + ) def on_env(self, env, config, files): 
import util - env.add_extension('jinja2.ext.i18n') - dirname = os.path.join(config.data['theme'].dirs[0], 'locale') - lang = config.data['theme']['language'] - env.install_gettext_translations( - get_translations(dirname, lang), - newstyle=True - ) + + env.add_extension("jinja2.ext.i18n") + dirname = os.path.join(config.data["theme"].dirs[0], "locale") + lang = config.data["theme"]["language"] + env.install_gettext_translations(get_translations(dirname, lang), newstyle=True) util.init_jinja2_filters(env) return env @@ -130,13 +133,17 @@ class PatchedMacrosPlugin(macros.plugin.MacrosPlugin): return markdown def on_page_markdown(self, markdown, page, config, files): - markdown = super(PatchedMacrosPlugin, self).on_page_markdown(markdown, page, config, files) + markdown = super(PatchedMacrosPlugin, self).on_page_markdown( + markdown, page, config, files + ) if os.path.islink(page.file.abs_src_path): - lang = config.data['theme']['language'] - page.canonical_url = page.canonical_url.replace(f'/{lang}/', '/en/', 1) + lang = config.data["theme"]["language"] + page.canonical_url = page.canonical_url.replace(f"/{lang}/", "/en/", 1) - if config.data['extra'].get('version_prefix') or config.data['extra'].get('single_page'): + if config.data["extra"].get("version_prefix") or config.data["extra"].get( + "single_page" + ): return markdown if self.skip_git_log: return markdown diff --git a/docs/tools/nav.py b/docs/tools/nav.py index db64d1ba404..e3df85bbe4e 100644 --- a/docs/tools/nav.py +++ b/docs/tools/nav.py @@ -10,57 +10,59 @@ import util def find_first_header(content): - for line in content.split('\n'): - if line.startswith('#'): - no_hash = line.lstrip('#') - return no_hash.split('{', 1)[0].strip() + for line in content.split("\n"): + if line.startswith("#"): + no_hash = line.lstrip("#") + return no_hash.split("{", 1)[0].strip() def build_nav_entry(root, args): - if root.endswith('images'): + if root.endswith("images"): return None, None, None result_items = [] - index_meta, index_content = util.read_md_file(os.path.join(root, 'index.md')) - current_title = index_meta.get('toc_folder_title', index_meta.get('toc_title')) - current_title = current_title or index_meta.get('title', find_first_header(index_content)) + index_meta, index_content = util.read_md_file(os.path.join(root, "index.md")) + current_title = index_meta.get("toc_folder_title", index_meta.get("toc_title")) + current_title = current_title or index_meta.get( + "title", find_first_header(index_content) + ) for filename in os.listdir(root): path = os.path.join(root, filename) if os.path.isdir(path): prio, title, payload = build_nav_entry(path, args) if title and payload: result_items.append((prio, title, payload)) - elif filename.endswith('.md'): + elif filename.endswith(".md"): path = os.path.join(root, filename) - meta = '' - content = '' + meta = "" + content = "" try: meta, content = util.read_md_file(path) except: - print('Error in file: {}'.format(path)) + print("Error in file: {}".format(path)) raise - path = path.split('/', 2)[-1] - title = meta.get('toc_title', find_first_header(content)) + path = path.split("/", 2)[-1] + title = meta.get("toc_title", find_first_header(content)) if title: - title = title.strip().rstrip('.') + title = title.strip().rstrip(".") else: - title = meta.get('toc_folder_title', 'hidden') - prio = meta.get('toc_priority', 9999) - logging.debug(f'Nav entry: {prio}, {title}, {path}') - if meta.get('toc_hidden') or not content.strip(): - title = 'hidden' - if title == 'hidden': - title = 'hidden-' + 
hashlib.sha1(content.encode('utf-8')).hexdigest() + title = meta.get("toc_folder_title", "hidden") + prio = meta.get("toc_priority", 9999) + logging.debug(f"Nav entry: {prio}, {title}, {path}") + if meta.get("toc_hidden") or not content.strip(): + title = "hidden" + if title == "hidden": + title = "hidden-" + hashlib.sha1(content.encode("utf-8")).hexdigest() if args.nav_limit and len(result_items) >= args.nav_limit: break result_items.append((prio, title, path)) result_items = sorted(result_items, key=lambda x: (x[0], x[1])) result = collections.OrderedDict([(item[1], item[2]) for item in result_items]) - if index_meta.get('toc_hidden_folder'): - current_title += '|hidden-folder' - return index_meta.get('toc_priority', 10000), current_title, result + if index_meta.get("toc_hidden_folder"): + current_title += "|hidden-folder" + return index_meta.get("toc_priority", 10000), current_title, result def build_docs_nav(lang, args): @@ -70,7 +72,7 @@ def build_docs_nav(lang, args): index_key = None for key, value in list(nav.items()): if key and value: - if value == 'index.md': + if value == "index.md": index_key = key continue result.append({key: value}) @@ -78,7 +80,7 @@ def build_docs_nav(lang, args): break if index_key: key = list(result[0].keys())[0] - result[0][key][index_key] = 'index.md' + result[0][key][index_key] = "index.md" result[0][key].move_to_end(index_key, last=False) return result @@ -86,7 +88,7 @@ def build_docs_nav(lang, args): def build_blog_nav(lang, args): blog_dir = os.path.join(args.blog_dir, lang) years = sorted(os.listdir(blog_dir), reverse=True) - result_nav = [{'hidden': 'index.md'}] + result_nav = [{"hidden": "index.md"}] post_meta = collections.OrderedDict() for year in years: year_dir = os.path.join(blog_dir, year) @@ -97,38 +99,53 @@ def build_blog_nav(lang, args): post_meta_items = [] for post in os.listdir(year_dir): post_path = os.path.join(year_dir, post) - if not post.endswith('.md'): - raise RuntimeError(f'Unexpected non-md file in posts folder: {post_path}') + if not post.endswith(".md"): + raise RuntimeError( + f"Unexpected non-md file in posts folder: {post_path}" + ) meta, _ = util.read_md_file(post_path) - post_date = meta['date'] - post_title = meta['title'] + post_date = meta["date"] + post_title = meta["title"] if datetime.date.fromisoformat(post_date) > datetime.date.today(): continue posts.append( - (post_date, post_title, os.path.join(year, post),) + ( + post_date, + post_title, + os.path.join(year, post), + ) ) if post_title in post_meta: - raise RuntimeError(f'Duplicate post title: {post_title}') - if not post_date.startswith(f'{year}-'): - raise RuntimeError(f'Post date {post_date} doesn\'t match the folder year {year}: {post_title}') - post_url_part = post.replace('.md', '') - post_meta_items.append((post_date, { - 'date': post_date, - 'title': post_title, - 'image': meta.get('image'), - 'url': f'/blog/{lang}/{year}/{post_url_part}/' - },)) + raise RuntimeError(f"Duplicate post title: {post_title}") + if not post_date.startswith(f"{year}-"): + raise RuntimeError( + f"Post date {post_date} doesn't match the folder year {year}: {post_title}" + ) + post_url_part = post.replace(".md", "") + post_meta_items.append( + ( + post_date, + { + "date": post_date, + "title": post_title, + "image": meta.get("image"), + "url": f"/blog/{lang}/{year}/{post_url_part}/", + }, + ) + ) for _, title, path in sorted(posts, reverse=True): result_nav[-1][year][title] = path - for _, post_meta_item in sorted(post_meta_items, - reverse=True, - key=lambda item: item[0]): 
- post_meta[post_meta_item['title']] = post_meta_item + for _, post_meta_item in sorted( + post_meta_items, reverse=True, key=lambda item: item[0] + ): + post_meta[post_meta_item["title"]] = post_meta_item return result_nav, post_meta def _custom_get_navigation(files, config): - nav_config = config['nav'] or mkdocs.structure.nav.nest_paths(f.src_path for f in files.documentation_pages()) + nav_config = config["nav"] or mkdocs.structure.nav.nest_paths( + f.src_path for f in files.documentation_pages() + ) items = mkdocs.structure.nav._data_to_navigation(nav_config, files, config) if not isinstance(items, list): items = [items] @@ -138,19 +155,25 @@ def _custom_get_navigation(files, config): mkdocs.structure.nav._add_previous_and_next_links(pages) mkdocs.structure.nav._add_parent_links(items) - missing_from_config = [file for file in files.documentation_pages() if file.page is None] + missing_from_config = [ + file for file in files.documentation_pages() if file.page is None + ] if missing_from_config: - files._files = [file for file in files._files if file not in missing_from_config] + files._files = [ + file for file in files._files if file not in missing_from_config + ] links = mkdocs.structure.nav._get_by_type(items, mkdocs.structure.nav.Link) for link in links: - scheme, netloc, path, params, query, fragment = mkdocs.structure.nav.urlparse(link.url) + scheme, netloc, path, params, query, fragment = mkdocs.structure.nav.urlparse( + link.url + ) if scheme or netloc: mkdocs.structure.nav.log.debug( "An external link to '{}' is included in " "the 'nav' configuration.".format(link.url) ) - elif link.url.startswith('/'): + elif link.url.startswith("/"): mkdocs.structure.nav.log.debug( "An absolute path to '{}' is included in the 'nav' configuration, " "which presumably points to an external resource.".format(link.url) diff --git a/docs/tools/redirects.py b/docs/tools/redirects.py index 1f0a3bb4b74..5d222376683 100644 --- a/docs/tools/redirects.py +++ b/docs/tools/redirects.py @@ -7,8 +7,9 @@ def write_redirect_html(out_path, to_url): os.makedirs(out_dir) except OSError: pass - with open(out_path, 'w') as f: - f.write(f''' + with open(out_path, "w") as f: + f.write( + f""" @@ -22,18 +23,20 @@ def write_redirect_html(out_path, to_url): If you are not redirected automatically, follow this link. 
-''') +""" + ) def build_redirect_html(args, base_prefix, lang, output_dir, from_path, to_path): out_path = os.path.join( - output_dir, lang, - from_path.replace('/index.md', '/index.html').replace('.md', '/index.html') + output_dir, + lang, + from_path.replace("/index.md", "/index.html").replace(".md", "/index.html"), ) - target_path = to_path.replace('/index.md', '/').replace('.md', '/') + target_path = to_path.replace("/index.md", "/").replace(".md", "/") - if target_path[0:7] != 'http://' and target_path[0:8] != 'https://': - to_url = f'/{base_prefix}/{lang}/{target_path}' + if target_path[0:7] != "http://" and target_path[0:8] != "https://": + to_url = f"/{base_prefix}/{lang}/{target_path}" else: to_url = target_path @@ -42,33 +45,48 @@ def build_redirect_html(args, base_prefix, lang, output_dir, from_path, to_path) def build_docs_redirects(args): - with open(os.path.join(args.docs_dir, 'redirects.txt'), 'r') as f: + with open(os.path.join(args.docs_dir, "redirects.txt"), "r") as f: for line in f: - for lang in args.lang.split(','): - from_path, to_path = line.split(' ', 1) - build_redirect_html(args, 'docs', lang, args.docs_output_dir, from_path, to_path) + for lang in args.lang.split(","): + from_path, to_path = line.split(" ", 1) + build_redirect_html( + args, "docs", lang, args.docs_output_dir, from_path, to_path + ) def build_blog_redirects(args): - for lang in args.blog_lang.split(','): - redirects_path = os.path.join(args.blog_dir, lang, 'redirects.txt') + for lang in args.blog_lang.split(","): + redirects_path = os.path.join(args.blog_dir, lang, "redirects.txt") if os.path.exists(redirects_path): - with open(redirects_path, 'r') as f: + with open(redirects_path, "r") as f: for line in f: - from_path, to_path = line.split(' ', 1) - build_redirect_html(args, 'blog', lang, args.blog_output_dir, from_path, to_path) + from_path, to_path = line.split(" ", 1) + build_redirect_html( + args, "blog", lang, args.blog_output_dir, from_path, to_path + ) def build_static_redirects(args): for static_redirect in [ - ('benchmark.html', '/benchmark/dbms/'), - ('benchmark_hardware.html', '/benchmark/hardware/'), - ('tutorial.html', '/docs/en/getting_started/tutorial/',), - ('reference_en.html', '/docs/en/single/', ), - ('reference_ru.html', '/docs/ru/single/',), - ('docs/index.html', '/docs/en/',), + ("benchmark.html", "/benchmark/dbms/"), + ("benchmark_hardware.html", "/benchmark/hardware/"), + ( + "tutorial.html", + "/docs/en/getting_started/tutorial/", + ), + ( + "reference_en.html", + "/docs/en/single/", + ), + ( + "reference_ru.html", + "/docs/ru/single/", + ), + ( + "docs/index.html", + "/docs/en/", + ), ]: write_redirect_html( - os.path.join(args.output_dir, static_redirect[0]), - static_redirect[1] + os.path.join(args.output_dir, static_redirect[0]), static_redirect[1] ) diff --git a/docs/tools/requirements.txt b/docs/tools/requirements.txt index 8bf1a5f477c..c48a70b0909 100644 --- a/docs/tools/requirements.txt +++ b/docs/tools/requirements.txt @@ -10,7 +10,7 @@ cssmin==0.2.0 future==0.18.2 htmlmin==0.1.12 idna==2.10 -Jinja2>=3.0.3 +Jinja2==3.0.3 jinja2-highlight==0.6.1 jsmin==3.0.0 livereload==2.6.3 diff --git a/docs/tools/single_page.py b/docs/tools/single_page.py index 3d32ba30a21..ed285fce9f8 100644 --- a/docs/tools/single_page.py +++ b/docs/tools/single_page.py @@ -12,7 +12,8 @@ import test import util import website -TEMPORARY_FILE_NAME = 'single.md' +TEMPORARY_FILE_NAME = "single.md" + def recursive_values(item): if isinstance(item, dict): @@ -25,11 +26,14 @@ def 
recursive_values(item): yield item -anchor_not_allowed_chars = re.compile(r'[^\w\-]') -def generate_anchor_from_path(path): - return re.sub(anchor_not_allowed_chars, '-', path) +anchor_not_allowed_chars = re.compile(r"[^\w\-]") -absolute_link = re.compile(r'^https?://') + +def generate_anchor_from_path(path): + return re.sub(anchor_not_allowed_chars, "-", path) + + +absolute_link = re.compile(r"^https?://") def replace_link(match, path): @@ -40,46 +44,55 @@ def replace_link(match, path): if re.search(absolute_link, link): return match.group(0) - if link.endswith('/'): - link = link[0:-1] + '.md' + if link.endswith("/"): + link = link[0:-1] + ".md" - return '{}(#{})'.format(title, generate_anchor_from_path(os.path.normpath(os.path.join(os.path.dirname(path), link)))) + return "{}(#{})".format( + title, + generate_anchor_from_path( + os.path.normpath(os.path.join(os.path.dirname(path), link)) + ), + ) # Concatenates Markdown files to a single file. def concatenate(lang, docs_path, single_page_file, nav): lang_path = os.path.join(docs_path, lang) - proj_config = f'{docs_path}/toc_{lang}.yml' + proj_config = f"{docs_path}/toc_{lang}.yml" if os.path.exists(proj_config): with open(proj_config) as cfg_file: - nav = yaml.full_load(cfg_file.read())['nav'] + nav = yaml.full_load(cfg_file.read())["nav"] files_to_concatenate = list(recursive_values(nav)) files_count = len(files_to_concatenate) - logging.info(f'{files_count} files will be concatenated into single md-file for {lang}.') - logging.debug('Concatenating: ' + ', '.join(files_to_concatenate)) - assert files_count > 0, f'Empty single-page for {lang}' + logging.info( + f"{files_count} files will be concatenated into single md-file for {lang}." + ) + logging.debug("Concatenating: " + ", ".join(files_to_concatenate)) + assert files_count > 0, f"Empty single-page for {lang}" - link_regexp = re.compile(r'(\[[^\]]+\])\(([^)#]+)(?:#[^\)]+)?\)') + link_regexp = re.compile(r"(\[[^\]]+\])\(([^)#]+)(?:#[^\)]+)?\)") for path in files_to_concatenate: try: with open(os.path.join(lang_path, path)) as f: # Insert a horizontal ruler. Then insert an anchor that we will link to. Its name will be a path to the .md file. - single_page_file.write('\n______\n\n' % generate_anchor_from_path(path)) + single_page_file.write( + '\n______\n\n' % generate_anchor_from_path(path) + ) in_metadata = False for line in f: # Skip YAML metadata. - if line == '---\n': + if line == "---\n": in_metadata = not in_metadata continue if not in_metadata: # Increase the level of headers. - if line.startswith('#'): - line = '#' + line + if line.startswith("#"): + line = "#" + line # Replace links within the docs. 
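# A minimal, runnable sketch (not part of the patch itself) of how the single-page
# build rewrites relative .md links into in-page anchors, based on link_regexp,
# generate_anchor_from_path() and replace_link() shown above. Pulling the title and
# link out of match.group(1)/group(2) is an assumption inferred from the regular
# expression; the real replace_link() in single_page.py may differ in detail.
import os
import re

link_regexp = re.compile(r"(\[[^\]]+\])\(([^)#]+)(?:#[^\)]+)?\)")
anchor_not_allowed_chars = re.compile(r"[^\w\-]")
absolute_link = re.compile(r"^https?://")


def generate_anchor_from_path(path):
    # Anything that is not a word character or '-' becomes '-'.
    return re.sub(anchor_not_allowed_chars, "-", path)


def replace_link(match, path):
    title, link = match.group(1), match.group(2)  # assumed group layout
    if re.search(absolute_link, link):
        return match.group(0)  # absolute links are left untouched
    if link.endswith("/"):
        link = link[0:-1] + ".md"
    target = os.path.normpath(os.path.join(os.path.dirname(path), link))
    return "{}(#{})".format(title, generate_anchor_from_path(target))


line = "See [settings](../operations/settings/) for details."
print(re.sub(link_regexp, lambda m: replace_link(m, "sql-reference/index.md"), line))
# prints: See [settings](#operations-settings-md) for details.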
@@ -87,14 +100,19 @@ def concatenate(lang, docs_path, single_page_file, nav): line = re.sub( link_regexp, lambda match: replace_link(match, path), - line) + line, + ) # If failed to replace the relative link, print to log # But with some exceptions: # - "../src/" -- for cmake-in-clickhouse.md (link to sources) # - "../usr/share" -- changelog entry that has "../usr/share/zoneinfo" - if '../' in line and (not '../usr/share' in line) and (not '../src/' in line): - logging.info('Failed to resolve relative link:') + if ( + "../" in line + and (not "../usr/share" in line) + and (not "../src/" in line) + ): + logging.info("Failed to resolve relative link:") logging.info(path) logging.info(line) @@ -105,9 +123,11 @@ def concatenate(lang, docs_path, single_page_file, nav): single_page_file.flush() + def get_temporary_file_name(lang, args): return os.path.join(args.docs_dir, lang, TEMPORARY_FILE_NAME) + def remove_temporary_files(lang, args): single_md_path = get_temporary_file_name(lang, args) if os.path.exists(single_md_path): @@ -115,14 +135,14 @@ def remove_temporary_files(lang, args): def build_single_page_version(lang, args, nav, cfg): - logging.info(f'Building single page version for {lang}') - os.environ['SINGLE_PAGE'] = '1' - extra = cfg.data['extra'] - extra['single_page'] = True - extra['is_amp'] = False + logging.info(f"Building single page version for {lang}") + os.environ["SINGLE_PAGE"] = "1" + extra = cfg.data["extra"] + extra["single_page"] = True + extra["is_amp"] = False single_md_path = get_temporary_file_name(lang, args) - with open(single_md_path, 'w') as single_md: + with open(single_md_path, "w") as single_md: concatenate(lang, args.docs_dir, single_md, nav) with util.temp_dir() as site_temp: @@ -132,72 +152,83 @@ def build_single_page_version(lang, args, nav, cfg): shutil.copytree(docs_src_lang, docs_temp_lang) for root, _, filenames in os.walk(docs_temp_lang): for filename in filenames: - if filename != 'single.md' and filename.endswith('.md'): + if filename != "single.md" and filename.endswith(".md"): os.unlink(os.path.join(root, filename)) - cfg.load_dict({ - 'docs_dir': docs_temp_lang, - 'site_dir': site_temp, - 'extra': extra, - 'nav': [ - {cfg.data.get('site_name'): 'single.md'} - ] - }) + cfg.load_dict( + { + "docs_dir": docs_temp_lang, + "site_dir": site_temp, + "extra": extra, + "nav": [{cfg.data.get("site_name"): "single.md"}], + } + ) if not args.test_only: mkdocs.commands.build.build(cfg) - single_page_output_path = os.path.join(args.docs_dir, args.docs_output_dir, lang, 'single') + single_page_output_path = os.path.join( + args.docs_dir, args.docs_output_dir, lang, "single" + ) if os.path.exists(single_page_output_path): shutil.rmtree(single_page_output_path) shutil.copytree( - os.path.join(site_temp, 'single'), - single_page_output_path + os.path.join(site_temp, "single"), single_page_output_path ) - single_page_index_html = os.path.join(single_page_output_path, 'index.html') - single_page_content_js = os.path.join(single_page_output_path, 'content.js') + single_page_index_html = os.path.join( + single_page_output_path, "index.html" + ) + single_page_content_js = os.path.join( + single_page_output_path, "content.js" + ) - with open(single_page_index_html, 'r') as f: - sp_prefix, sp_js, sp_suffix = f.read().split('') + with open(single_page_index_html, "r") as f: + sp_prefix, sp_js, sp_suffix = f.read().split("") - with open(single_page_index_html, 'w') as f: + with open(single_page_index_html, "w") as f: f.write(sp_prefix) f.write(sp_suffix) - with 
open(single_page_content_js, 'w') as f: + with open(single_page_content_js, "w") as f: if args.minify: import jsmin + sp_js = jsmin.jsmin(sp_js) f.write(sp_js) - logging.info(f'Re-building single page for {lang} pdf/test') + logging.info(f"Re-building single page for {lang} pdf/test") with util.temp_dir() as test_dir: - extra['single_page'] = False - cfg.load_dict({ - 'docs_dir': docs_temp_lang, - 'site_dir': test_dir, - 'extra': extra, - 'nav': [ - {cfg.data.get('site_name'): 'single.md'} - ] - }) + extra["single_page"] = False + cfg.load_dict( + { + "docs_dir": docs_temp_lang, + "site_dir": test_dir, + "extra": extra, + "nav": [{cfg.data.get("site_name"): "single.md"}], + } + ) mkdocs.commands.build.build(cfg) - css_in = ' '.join(website.get_css_in(args)) - js_in = ' '.join(website.get_js_in(args)) - subprocess.check_call(f'cat {css_in} > {test_dir}/css/base.css', shell=True) - subprocess.check_call(f'cat {js_in} > {test_dir}/js/base.js', shell=True) + css_in = " ".join(website.get_css_in(args)) + js_in = " ".join(website.get_js_in(args)) + subprocess.check_call( + f"cat {css_in} > {test_dir}/css/base.css", shell=True + ) + subprocess.check_call( + f"cat {js_in} > {test_dir}/js/base.js", shell=True + ) if args.save_raw_single_page: shutil.copytree(test_dir, args.save_raw_single_page) - logging.info(f'Running tests for {lang}') + logging.info(f"Running tests for {lang}") test.test_single_page( - os.path.join(test_dir, 'single', 'index.html'), lang) + os.path.join(test_dir, "single", "index.html"), lang + ) - logging.info(f'Finished building single page version for {lang}') + logging.info(f"Finished building single page version for {lang}") remove_temporary_files(lang, args) diff --git a/docs/tools/test.py b/docs/tools/test.py index 1ea07c45192..d0469d042ee 100755 --- a/docs/tools/test.py +++ b/docs/tools/test.py @@ -8,14 +8,11 @@ import subprocess def test_single_page(input_path, lang): - if not (lang == 'en'): + if not (lang == "en"): return with open(input_path) as f: - soup = bs4.BeautifulSoup( - f, - features='html.parser' - ) + soup = bs4.BeautifulSoup(f, features="html.parser") anchor_points = set() @@ -23,30 +20,27 @@ def test_single_page(input_path, lang): links_to_nowhere = 0 for tag in soup.find_all(): - for anchor_point in [tag.attrs.get('name'), tag.attrs.get('id')]: + for anchor_point in [tag.attrs.get("name"), tag.attrs.get("id")]: if anchor_point: anchor_points.add(anchor_point) for tag in soup.find_all(): - href = tag.attrs.get('href') - if href and href.startswith('#') and href != '#': + href = tag.attrs.get("href") + if href and href.startswith("#") and href != "#": if href[1:] not in anchor_points: links_to_nowhere += 1 logging.info("Tag %s", tag) - logging.info('Link to nowhere: %s' % href) + logging.info("Link to nowhere: %s" % href) if links_to_nowhere: - logging.error(f'Found {links_to_nowhere} links to nowhere in {lang}') + logging.error(f"Found {links_to_nowhere} links to nowhere in {lang}") sys.exit(1) if len(anchor_points) <= 10: - logging.error('Html parsing is probably broken') + logging.error("Html parsing is probably broken") sys.exit(1) -if __name__ == '__main__': - logging.basicConfig( - level=logging.DEBUG, - stream=sys.stderr - ) +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG, stream=sys.stderr) test_single_page(sys.argv[1], sys.argv[2]) diff --git a/docs/tools/util.py b/docs/tools/util.py index 25961561f99..fb2f135c85e 100644 --- a/docs/tools/util.py +++ b/docs/tools/util.py @@ -15,7 +15,7 @@ import yaml @contextlib.contextmanager 
def temp_dir(): - path = tempfile.mkdtemp(dir=os.environ.get('TEMP')) + path = tempfile.mkdtemp(dir=os.environ.get("TEMP")) try: yield path finally: @@ -34,7 +34,7 @@ def cd(new_cwd): def get_free_port(): with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: - s.bind(('', 0)) + s.bind(("", 0)) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) return s.getsockname()[1] @@ -61,12 +61,12 @@ def read_md_file(path): meta_text = [] content = [] if os.path.exists(path): - with open(path, 'r') as f: + with open(path, "r") as f: for line in f: - if line.startswith('---'): + if line.startswith("---"): if in_meta: in_meta = False - meta = yaml.full_load(''.join(meta_text)) + meta = yaml.full_load("".join(meta_text)) else: in_meta = True else: @@ -74,7 +74,7 @@ def read_md_file(path): meta_text.append(line) else: content.append(line) - return meta, ''.join(content) + return meta, "".join(content) def write_md_file(path, meta, content): @@ -82,13 +82,13 @@ def write_md_file(path, meta, content): if not os.path.exists(dirname): os.makedirs(dirname) - with open(path, 'w') as f: + with open(path, "w") as f: if meta: - print('---', file=f) + print("---", file=f) yaml.dump(meta, f) - print('---', file=f) - if not content.startswith('\n'): - print('', file=f) + print("---", file=f) + if not content.startswith("\n"): + print("", file=f) f.write(content) @@ -100,7 +100,7 @@ def represent_ordereddict(dumper, data): value.append((node_key, node_value)) - return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value) + return yaml.nodes.MappingNode("tag:yaml.org,2002:map", value) yaml.add_representer(collections.OrderedDict, represent_ordereddict) @@ -109,30 +109,31 @@ yaml.add_representer(collections.OrderedDict, represent_ordereddict) def init_jinja2_filters(env): import amp import website + chunk_size = 10240 - env.filters['chunks'] = lambda line: [line[i:i + chunk_size] for i in range(0, len(line), chunk_size)] - env.filters['html_to_amp'] = amp.html_to_amp - env.filters['adjust_markdown_html'] = website.adjust_markdown_html - env.filters['to_rfc882'] = lambda d: datetime.datetime.strptime(d, '%Y-%m-%d').strftime('%a, %d %b %Y %H:%M:%S GMT') + env.filters["chunks"] = lambda line: [ + line[i : i + chunk_size] for i in range(0, len(line), chunk_size) + ] + env.filters["html_to_amp"] = amp.html_to_amp + env.filters["adjust_markdown_html"] = website.adjust_markdown_html + env.filters["to_rfc882"] = lambda d: datetime.datetime.strptime( + d, "%Y-%m-%d" + ).strftime("%a, %d %b %Y %H:%M:%S GMT") def init_jinja2_env(args): import mdx_clickhouse + env = jinja2.Environment( - loader=jinja2.FileSystemLoader([ - args.website_dir, - os.path.join(args.docs_dir, '_includes') - ]), - extensions=[ - 'jinja2.ext.i18n', - 'jinja2_highlight.HighlightExtension' - ] + loader=jinja2.FileSystemLoader( + [args.website_dir, os.path.join(args.docs_dir, "_includes")] + ), + extensions=["jinja2.ext.i18n", "jinja2_highlight.HighlightExtension"], ) - env.extend(jinja2_highlight_cssclass='syntax p-3 my-3') - translations_dir = os.path.join(args.website_dir, 'locale') + env.extend(jinja2_highlight_cssclass="syntax p-3 my-3") + translations_dir = os.path.join(args.website_dir, "locale") env.install_gettext_translations( - mdx_clickhouse.get_translations(translations_dir, 'en'), - newstyle=True + mdx_clickhouse.get_translations(translations_dir, "en"), newstyle=True ) init_jinja2_filters(env) return env diff --git a/docs/tools/website.py b/docs/tools/website.py index de4cc14670c..2c748d96414 100644 --- 
a/docs/tools/website.py +++ b/docs/tools/website.py @@ -17,108 +17,112 @@ import util def handle_iframe(iframe, soup): - allowed_domains = ['https://www.youtube.com/', 'https://datalens.yandex/'] + allowed_domains = ["https://www.youtube.com/", "https://datalens.yandex/"] illegal_domain = True - iframe_src = iframe.attrs['src'] + iframe_src = iframe.attrs["src"] for domain in allowed_domains: if iframe_src.startswith(domain): illegal_domain = False break if illegal_domain: - raise RuntimeError(f'iframe from illegal domain: {iframe_src}') - wrapper = soup.new_tag('div') - wrapper.attrs['class'] = ['embed-responsive', 'embed-responsive-16by9'] + raise RuntimeError(f"iframe from illegal domain: {iframe_src}") + wrapper = soup.new_tag("div") + wrapper.attrs["class"] = ["embed-responsive", "embed-responsive-16by9"] iframe.insert_before(wrapper) iframe.extract() wrapper.insert(0, iframe) - if 'width' in iframe.attrs: - del iframe.attrs['width'] - if 'height' in iframe.attrs: - del iframe.attrs['height'] - iframe.attrs['allow'] = 'accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture' - iframe.attrs['class'] = 'embed-responsive-item' - iframe.attrs['frameborder'] = '0' - iframe.attrs['allowfullscreen'] = '1' + if "width" in iframe.attrs: + del iframe.attrs["width"] + if "height" in iframe.attrs: + del iframe.attrs["height"] + iframe.attrs[ + "allow" + ] = "accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" + iframe.attrs["class"] = "embed-responsive-item" + iframe.attrs["frameborder"] = "0" + iframe.attrs["allowfullscreen"] = "1" def adjust_markdown_html(content): - soup = bs4.BeautifulSoup( - content, - features='html.parser' - ) + soup = bs4.BeautifulSoup(content, features="html.parser") - for a in soup.find_all('a'): - a_class = a.attrs.get('class') - a_href = a.attrs.get('href') - if a_class and 'headerlink' in a_class: - a.string = '\xa0' - if a_href and a_href.startswith('http'): - a.attrs['target'] = '_blank' + for a in soup.find_all("a"): + a_class = a.attrs.get("class") + a_href = a.attrs.get("href") + if a_class and "headerlink" in a_class: + a.string = "\xa0" + if a_href and a_href.startswith("http"): + a.attrs["target"] = "_blank" - for code in soup.find_all('code'): - code_class = code.attrs.get('class') + for code in soup.find_all("code"): + code_class = code.attrs.get("class") if code_class: - code.attrs['class'] = code_class + ['syntax'] + code.attrs["class"] = code_class + ["syntax"] else: - code.attrs['class'] = 'syntax' + code.attrs["class"] = "syntax" - for iframe in soup.find_all('iframe'): + for iframe in soup.find_all("iframe"): handle_iframe(iframe, soup) - for img in soup.find_all('img'): - if img.attrs.get('alt') == 'iframe': - img.name = 'iframe' - img.string = '' + for img in soup.find_all("img"): + if img.attrs.get("alt") == "iframe": + img.name = "iframe" + img.string = "" handle_iframe(img, soup) continue - img_class = img.attrs.get('class') + img_class = img.attrs.get("class") if img_class: - img.attrs['class'] = img_class + ['img-fluid'] + img.attrs["class"] = img_class + ["img-fluid"] else: - img.attrs['class'] = 'img-fluid' + img.attrs["class"] = "img-fluid" - for details in soup.find_all('details'): - for summary in details.find_all('summary'): + for details in soup.find_all("details"): + for summary in details.find_all("summary"): if summary.parent != details: summary.extract() details.insert(0, summary) - for dd in soup.find_all('dd'): - dd_class = dd.attrs.get('class') + for dd in soup.find_all("dd"): + dd_class = 
dd.attrs.get("class") if dd_class: - dd.attrs['class'] = dd_class + ['pl-3'] + dd.attrs["class"] = dd_class + ["pl-3"] else: - dd.attrs['class'] = 'pl-3' + dd.attrs["class"] = "pl-3" - for div in soup.find_all('div'): - div_class = div.attrs.get('class') - is_admonition = div_class and 'admonition' in div.attrs.get('class') + for div in soup.find_all("div"): + div_class = div.attrs.get("class") + is_admonition = div_class and "admonition" in div.attrs.get("class") if is_admonition: - for a in div.find_all('a'): - a_class = a.attrs.get('class') + for a in div.find_all("a"): + a_class = a.attrs.get("class") if a_class: - a.attrs['class'] = a_class + ['alert-link'] + a.attrs["class"] = a_class + ["alert-link"] else: - a.attrs['class'] = 'alert-link' + a.attrs["class"] = "alert-link" - for p in div.find_all('p'): - p_class = p.attrs.get('class') - if is_admonition and p_class and ('admonition-title' in p_class): - p.attrs['class'] = p_class + ['alert-heading', 'display-4', 'text-reset', 'mb-2'] + for p in div.find_all("p"): + p_class = p.attrs.get("class") + if is_admonition and p_class and ("admonition-title" in p_class): + p.attrs["class"] = p_class + [ + "alert-heading", + "display-4", + "text-reset", + "mb-2", + ] if is_admonition: - div.attrs['role'] = 'alert' - if ('info' in div_class) or ('note' in div_class): - mode = 'alert-primary' - elif ('attention' in div_class) or ('warning' in div_class): - mode = 'alert-warning' - elif 'important' in div_class: - mode = 'alert-danger' - elif 'tip' in div_class: - mode = 'alert-info' + div.attrs["role"] = "alert" + if ("info" in div_class) or ("note" in div_class): + mode = "alert-primary" + elif ("attention" in div_class) or ("warning" in div_class): + mode = "alert-warning" + elif "important" in div_class: + mode = "alert-danger" + elif "tip" in div_class: + mode = "alert-info" else: - mode = 'alert-secondary' - div.attrs['class'] = div_class + ['alert', 'pb-0', 'mb-4', mode] + mode = "alert-secondary" + div.attrs["class"] = div_class + ["alert", "pb-0", "mb-4", mode] return str(soup) @@ -128,61 +132,63 @@ def minify_html(content): def build_website(args): - logging.info('Building website') + logging.info("Building website") env = util.init_jinja2_env(args) shutil.copytree( args.website_dir, args.output_dir, ignore=shutil.ignore_patterns( - '*.md', - '*.sh', - '*.css', - '*.json', - 'js/*.js', - 'build', - 'docs', - 'public', - 'node_modules', - 'src', - 'templates', - 'locale', - '.gitkeep' - ) + "*.md", + "*.sh", + "*.css", + "*.json", + "js/*.js", + "build", + "docs", + "public", + "node_modules", + "src", + "templates", + "locale", + ".gitkeep", + ), ) shutil.copytree( - os.path.join(args.website_dir, 'images'), - os.path.join(args.output_dir, 'docs', 'images') + os.path.join(args.website_dir, "images"), + os.path.join(args.output_dir, "docs", "images"), ) # This file can be requested to check for available ClickHouse releases. shutil.copy2( - os.path.join(args.src_dir, 'utils', 'list-versions', 'version_date.tsv'), - os.path.join(args.output_dir, 'data', 'version_date.tsv')) + os.path.join(args.src_dir, "utils", "list-versions", "version_date.tsv"), + os.path.join(args.output_dir, "data", "version_date.tsv"), + ) # This file can be requested to install ClickHouse. 
shutil.copy2( - os.path.join(args.src_dir, 'docs', '_includes', 'install', 'universal.sh'), - os.path.join(args.output_dir, 'data', 'install.sh')) + os.path.join(args.src_dir, "docs", "_includes", "install", "universal.sh"), + os.path.join(args.output_dir, "data", "install.sh"), + ) for root, _, filenames in os.walk(args.output_dir): for filename in filenames: - if filename == 'main.html': + if filename == "main.html": continue path = os.path.join(root, filename) - if not filename.endswith('.html'): + if not filename.endswith(".html"): continue - logging.info('Processing %s', path) - with open(path, 'rb') as f: - content = f.read().decode('utf-8') + logging.info("Processing %s", path) + with open(path, "rb") as f: + content = f.read().decode("utf-8") template = env.from_string(content) content = template.render(args.__dict__) - with open(path, 'wb') as f: - f.write(content.encode('utf-8')) + with open(path, "wb") as f: + f.write(content.encode("utf-8")) def get_css_in(args): @@ -193,7 +199,7 @@ def get_css_in(args): f"'{args.website_dir}/css/blog.css'", f"'{args.website_dir}/css/docs.css'", f"'{args.website_dir}/css/highlight.css'", - f"'{args.website_dir}/css/main.css'" + f"'{args.website_dir}/css/main.css'", ] @@ -207,42 +213,41 @@ def get_js_in(args): f"'{args.website_dir}/js/index.js'", f"'{args.website_dir}/js/docsearch.js'", f"'{args.website_dir}/js/docs.js'", - f"'{args.website_dir}/js/main.js'" + f"'{args.website_dir}/js/main.js'", ] def minify_file(path, css_digest, js_digest): - if not ( - path.endswith('.html') or - path.endswith('.css') - ): + if not (path.endswith(".html") or path.endswith(".css")): return - logging.info('Minifying %s', path) - with open(path, 'rb') as f: - content = f.read().decode('utf-8') - if path.endswith('.html'): + logging.info("Minifying %s", path) + with open(path, "rb") as f: + content = f.read().decode("utf-8") + if path.endswith(".html"): content = minify_html(content) - content = content.replace('base.css?css_digest', f'base.css?{css_digest}') - content = content.replace('base.js?js_digest', f'base.js?{js_digest}') -# TODO: restore cssmin -# elif path.endswith('.css'): -# content = cssmin.cssmin(content) -# TODO: restore jsmin -# elif path.endswith('.js'): -# content = jsmin.jsmin(content) - with open(path, 'wb') as f: - f.write(content.encode('utf-8')) + content = content.replace("base.css?css_digest", f"base.css?{css_digest}") + content = content.replace("base.js?js_digest", f"base.js?{js_digest}") + # TODO: restore cssmin + # elif path.endswith('.css'): + # content = cssmin.cssmin(content) + # TODO: restore jsmin + # elif path.endswith('.js'): + # content = jsmin.jsmin(content) + with open(path, "wb") as f: + f.write(content.encode("utf-8")) def minify_website(args): - css_in = ' '.join(get_css_in(args)) - css_out = f'{args.output_dir}/docs/css/base.css' - os.makedirs(f'{args.output_dir}/docs/css') + css_in = " ".join(get_css_in(args)) + css_out = f"{args.output_dir}/docs/css/base.css" + os.makedirs(f"{args.output_dir}/docs/css") if args.minify and False: # TODO: return closure - command = f"purifycss -w '*algolia*' --min {css_in} '{args.output_dir}/*.html' " \ + command = ( + f"purifycss -w '*algolia*' --min {css_in} '{args.output_dir}/*.html' " f"'{args.output_dir}/docs/en/**/*.html' '{args.website_dir}/js/**/*.js' > {css_out}" + ) logging.info(css_in) logging.info(command) output = subprocess.check_output(command, shell=True) @@ -251,51 +256,60 @@ def minify_website(args): else: command = f"cat {css_in}" output = 
subprocess.check_output(command, shell=True) - with open(css_out, 'wb+') as f: + with open(css_out, "wb+") as f: f.write(output) - with open(css_out, 'rb') as f: + with open(css_out, "rb") as f: css_digest = hashlib.sha3_224(f.read()).hexdigest()[0:8] - js_in = ' '.join(get_js_in(args)) - js_out = f'{args.output_dir}/docs/js/base.js' - os.makedirs(f'{args.output_dir}/docs/js') + js_in = " ".join(get_js_in(args)) + js_out = f"{args.output_dir}/docs/js/base.js" + os.makedirs(f"{args.output_dir}/docs/js") if args.minify and False: # TODO: return closure js_in = [js[1:-1] for js in js_in] closure_args = [ - '--js', *js_in, '--js_output_file', js_out, - '--compilation_level', 'SIMPLE', - '--dependency_mode', 'NONE', - '--third_party', '--use_types_for_optimization', - '--isolation_mode', 'IIFE' + "--js", + *js_in, + "--js_output_file", + js_out, + "--compilation_level", + "SIMPLE", + "--dependency_mode", + "NONE", + "--third_party", + "--use_types_for_optimization", + "--isolation_mode", + "IIFE", ] logging.info(closure_args) if closure.run(*closure_args): - raise RuntimeError('failed to run closure compiler') - with open(js_out, 'r') as f: + raise RuntimeError("failed to run closure compiler") + with open(js_out, "r") as f: js_content = jsmin.jsmin(f.read()) - with open(js_out, 'w') as f: + with open(js_out, "w") as f: f.write(js_content) else: command = f"cat {js_in}" output = subprocess.check_output(command, shell=True) - with open(js_out, 'wb+') as f: + with open(js_out, "wb+") as f: f.write(output) - with open(js_out, 'rb') as f: + with open(js_out, "rb") as f: js_digest = hashlib.sha3_224(f.read()).hexdigest()[0:8] logging.info(js_digest) if args.minify: - logging.info('Minifying website') + logging.info("Minifying website") with concurrent.futures.ThreadPoolExecutor() as executor: futures = [] for root, _, filenames in os.walk(args.output_dir): for filename in filenames: path = os.path.join(root, filename) - futures.append(executor.submit(minify_file, path, css_digest, js_digest)) + futures.append( + executor.submit(minify_file, path, css_digest, js_digest) + ) for future in futures: exc = future.exception() if exc: @@ -304,24 +318,28 @@ def minify_website(args): def process_benchmark_results(args): - benchmark_root = os.path.join(args.website_dir, 'benchmark') + benchmark_root = os.path.join(args.website_dir, "benchmark") required_keys = { - 'dbms': ['result'], - 'hardware': ['result', 'system', 'system_full', 'kind'] + "dbms": ["result"], + "hardware": ["result", "system", "system_full", "kind"], } - for benchmark_kind in ['dbms', 'hardware']: + for benchmark_kind in ["dbms", "hardware"]: results = [] - results_root = os.path.join(benchmark_root, benchmark_kind, 'results') + results_root = os.path.join(benchmark_root, benchmark_kind, "results") for result in sorted(os.listdir(results_root)): result_file = os.path.join(results_root, result) - logging.debug(f'Reading benchmark result from {result_file}') - with open(result_file, 'r') as f: + logging.debug(f"Reading benchmark result from {result_file}") + with open(result_file, "r") as f: result = json.loads(f.read()) for item in result: for required_key in required_keys[benchmark_kind]: - assert required_key in item, f'No "{required_key}" in {result_file}' + assert ( + required_key in item + ), f'No "{required_key}" in {result_file}' results += result - results_js = os.path.join(args.output_dir, 'benchmark', benchmark_kind, 'results.js') - with open(results_js, 'w') as f: + results_js = os.path.join( + args.output_dir, "benchmark", 
benchmark_kind, "results.js" + ) + with open(results_js, "w") as f: data = json.dumps(results) - f.write(f'var results = {data};') + f.write(f"var results = {data};") diff --git a/docs/zh/development/continuous-integration.md b/docs/zh/development/continuous-integration.md index 4f37b6f88c7..5bebb3aec2a 100644 --- a/docs/zh/development/continuous-integration.md +++ b/docs/zh/development/continuous-integration.md @@ -42,6 +42,8 @@ git push 使用`utils/check-style/check-style`二进制文件执行一些简单的基于正则表达式的代码样式检查(注意, 它可以在本地运行). 如果失败, 按照[代码样式指南](./style.md)修复样式错误. +使用 [black](https://github.com/psf/black/) 檢查 python 代碼. + ### 报告详情 {#report-details} - [状态页示例](https://clickhouse-test-reports.s3.yandex.net/12550/659c78c7abb56141723af6a81bfae39335aa8cb2/style_check.html) - `docs_output.txt`记录了查结果错误(无效表格等), 空白页表示没有错误. [成功结果案例](https://clickhouse-test-reports.s3.yandex.net/12550/659c78c7abb56141723af6a81bfae39335aa8cb2/style_check/output.txt) diff --git a/docs/zh/engines/table-engines/integrations/hive.md b/docs/zh/engines/table-engines/integrations/hive.md index aa2c82d902a..24e0834d2fc 100644 --- a/docs/zh/engines/table-engines/integrations/hive.md +++ b/docs/zh/engines/table-engines/integrations/hive.md @@ -140,7 +140,7 @@ CREATE TABLE test.test_orc `f_array_array_float` Array(Array(Float32)), `day` String ) -ENGINE = Hive('thrift://202.168.117.26:9083', 'test', 'test_orc') +ENGINE = Hive('thrift://localhost:9083', 'test', 'test_orc') PARTITION BY day ``` diff --git a/docs/zh/operations/system-tables/functions.md b/docs/zh/operations/system-tables/functions.md index 695c7b7fee1..75df1f65c1f 100644 --- a/docs/zh/operations/system-tables/functions.md +++ b/docs/zh/operations/system-tables/functions.md @@ -15,7 +15,7 @@ ``` ┌─name─────────────────────┬─is_aggregate─┬─case_insensitive─┬─alias_to─┐ │ sumburConsistentHash │ 0 │ 0 │ │ -│ yandexConsistentHash │ 0 │ 0 │ │ +│ kostikConsistentHash │ 0 │ 0 │ │ │ demangle │ 0 │ 0 │ │ │ addressToLine │ 0 │ 0 │ │ │ JSONExtractRaw │ 0 │ 0 │ │ diff --git a/packages/.gitignore b/packages/.gitignore new file mode 100644 index 00000000000..355164c1265 --- /dev/null +++ b/packages/.gitignore @@ -0,0 +1 @@ +*/ diff --git a/packages/build b/packages/build new file mode 100755 index 00000000000..53a7538f80e --- /dev/null +++ b/packages/build @@ -0,0 +1,156 @@ +#!/usr/bin/env bash + +set -e + +# Avoid dependency on locale +LC_ALL=C + +# Normalize output directory +if [ -n "$OUTPUT_DIR" ]; then + OUTPUT_DIR=$(realpath -m "$OUTPUT_DIR") +fi + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +cd "$CUR_DIR" + +ROOT_DIR=$(readlink -f "$(git rev-parse --show-cdup)") + +PKG_ROOT='root' + +DEB_ARCH=${DEB_ARCH:-amd64} +OUTPUT_DIR=${OUTPUT_DIR:-$ROOT_DIR} +[ -d "${OUTPUT_DIR}" ] || mkdir -p "${OUTPUT_DIR}" +SANITIZER=${SANITIZER:-""} +SOURCE=${SOURCE:-$PKG_ROOT} + +HELP="${0} [--test] [--rpm] [-h|--help] + --test - adds '+test' prefix to version + --apk - build APK packages + --rpm - build RPM packages + --tgz - build tarball package + --help - show this help and exit + +Used envs: + DEB_ARCH='${DEB_ARCH}' + OUTPUT_DIR='${OUTPUT_DIR}' - where the artifact will be placed + SANITIZER='${SANITIZER}' - if any sanitizer is used, affects version string + SOURCE='${SOURCE}' - directory with sources tree + VERSION_STRING='${VERSION_STRING}' - the package version to overwrite +" + +if [ -z "${VERSION_STRING}" ]; then + # Get CLICKHOUSE_VERSION_STRING from the current git repo + eval "$("$ROOT_DIR/tests/ci/version_helper.py" -e)" +else + CLICKHOUSE_VERSION_STRING=${VERSION_STRING} +fi +export 
CLICKHOUSE_VERSION_STRING + + + +while [[ $1 == --* ]] +do + case "$1" in + --test ) + VERSION_POSTFIX+='+test' + shift ;; + --apk ) + MAKE_APK=1 + shift ;; + --rpm ) + MAKE_RPM=1 + shift ;; + --tgz ) + MAKE_TGZ=1 + shift ;; + --help ) + echo "$HELP" + exit ;; + * ) + echo "Unknown option $1" + exit 2 ;; + esac +done + +function deb2tgz { + local FILE PKG_NAME PKG_DIR PKG_PATH TARBALL + FILE=$1 + PKG_NAME=${FILE##*/}; PKG_NAME=${PKG_NAME%%_*} + PKG_DIR="$PKG_NAME-$CLICKHOUSE_VERSION_STRING" + PKG_PATH="$OUTPUT_DIR/$PKG_NAME-$CLICKHOUSE_VERSION_STRING" + TARBALL="$OUTPUT_DIR/$PKG_NAME-$CLICKHOUSE_VERSION_STRING-$DEB_ARCH.tgz" + rm -rf "$PKG_PATH" + dpkg-deb -R "$FILE" "$PKG_PATH" + mkdir -p "$PKG_PATH/install" + cat > "$PKG_PATH/install/doinst.sh" << 'EOF' +#!/bin/sh +set -e + +SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" +for filepath in `find $SCRIPTPATH/.. -type f -or -type l | grep -v "\.\./install/"`; do + destpath=${filepath##$SCRIPTPATH/..} + mkdir -p $(dirname "$destpath") + cp -r "$filepath" "$destpath" +done +EOF + chmod +x "$PKG_PATH/install/doinst.sh" + if [ -f "$PKG_PATH/DEBIAN/postinst" ]; then + tail +2 "$PKG_PATH/DEBIAN/postinst" > "$PKG_PATH/install/doinst.sh" + fi + rm -rf "$PKG_PATH/DEBIAN" + if [ -f "/usr/bin/pigz" ]; then + tar --use-compress-program=pigz -cf "$TARBALL" -C "$OUTPUT_DIR" "$PKG_DIR" + else + tar -czf "$TARBALL" -C "$OUTPUT_DIR" "$PKG_DIR" + fi + + rm -r "$PKG_PATH" +} + +# Build options +if [ -n "$SANITIZER" ]; then + if [[ "$SANITIZER" == "address" ]]; then VERSION_POSTFIX+="+asan" + elif [[ "$SANITIZER" == "thread" ]]; then VERSION_POSTFIX+="+tsan" + elif [[ "$SANITIZER" == "memory" ]]; then VERSION_POSTFIX+="+msan" + elif [[ "$SANITIZER" == "undefined" ]]; then VERSION_POSTFIX+="+ubsan" + else + echo "Unknown value of SANITIZER variable: $SANITIZER" + exit 3 + fi +elif [[ $BUILD_TYPE == 'debug' ]]; then + VERSION_POSTFIX+="+debug" +fi + +if [[ "$PKG_ROOT" != "$SOURCE" ]]; then + # packages are built only from PKG_SOURCE + rm -rf "./$PKG_ROOT" + ln -sf "$SOURCE" "$PKG_SOURCE" +fi + +CLICKHOUSE_VERSION_STRING+=$VERSION_POSTFIX +echo -e "\nCurrent version is $CLICKHOUSE_VERSION_STRING" + +for config in clickhouse*.yaml; do + echo "Building deb package for $config" + + # Preserve package path + exec 9>&1 + PKG_PATH=$(nfpm package --target "$OUTPUT_DIR" --config "$config" --packager deb | tee /dev/fd/9) + PKG_PATH=${PKG_PATH##*created package: } + exec 9>&- + + if [ -n "$MAKE_APK" ]; then + echo "Building apk package for $config" + nfpm package --target "$OUTPUT_DIR" --config "$config" --packager apk + fi + if [ -n "$MAKE_RPM" ]; then + echo "Building rpm package for $config" + nfpm package --target "$OUTPUT_DIR" --config "$config" --packager rpm + fi + if [ -n "$MAKE_TGZ" ]; then + echo "Building tarball for $config" + deb2tgz "$PKG_PATH" + fi +done + +# vim: ts=4: sw=4: sts=4: expandtab diff --git a/packages/clickhouse-client.yaml b/packages/clickhouse-client.yaml new file mode 100644 index 00000000000..2a1389b6625 --- /dev/null +++ b/packages/clickhouse-client.yaml @@ -0,0 +1,57 @@ +# package sources should be placed in ${PWD}/root +# nfpm should run from the same directory with a config +name: "clickhouse-client" +arch: "all" +platform: "linux" +version: "${CLICKHOUSE_VERSION_STRING}" +vendor: "ClickHouse Inc." 
+homepage: "https://clickhouse.com" +license: "Apache" +section: "database" +priority: "optional" + +replaces: +- clickhouse-compressor +conflicts: +- clickhouse-compressor + +maintainer: "ClickHouse Dev Team " +description: | + Client binary for ClickHouse + ClickHouse is a column-oriented database management system + that allows generating analytical data reports in real time. + This package provides clickhouse-client , clickhouse-local and clickhouse-benchmark + +overrides: + deb: + depends: + - clickhouse-common-static (= ${CLICKHOUSE_VERSION_STRING}) + rpm: + depends: + - clickhouse-common-static = ${CLICKHOUSE_VERSION_STRING} + +contents: +- src: root/etc/clickhouse-client/config.xml + dst: /etc/clickhouse-client/config.xml + type: config +- src: root/usr/bin/clickhouse-benchmark + dst: /usr/bin/clickhouse-benchmark +- src: root/usr/bin/clickhouse-compressor + dst: /usr/bin/clickhouse-compressor +- src: root/usr/bin/clickhouse-format + dst: /usr/bin/clickhouse-format +- src: root/usr/bin/clickhouse-client + dst: /usr/bin/clickhouse-client +- src: root/usr/bin/clickhouse-local + dst: /usr/bin/clickhouse-local +- src: root/usr/bin/clickhouse-obfuscator + dst: /usr/bin/clickhouse-obfuscator +# docs +- src: ../AUTHORS + dst: /usr/share/doc/clickhouse-client/AUTHORS +- src: ../CHANGELOG.md + dst: /usr/share/doc/clickhouse-client/CHANGELOG.md +- src: ../LICENSE + dst: /usr/share/doc/clickhouse-client/LICENSE +- src: ../README.md + dst: /usr/share/doc/clickhouse-client/README.md diff --git a/packages/clickhouse-common-static-dbg.yaml b/packages/clickhouse-common-static-dbg.yaml new file mode 100644 index 00000000000..12a1594bd30 --- /dev/null +++ b/packages/clickhouse-common-static-dbg.yaml @@ -0,0 +1,38 @@ +# package sources should be placed in ${PWD}/root +# nfpm should run from the same directory with a config +name: "clickhouse-common-static-dbg" +arch: "${DEB_ARCH}" # amd64, arm64 +platform: "linux" +version: "${CLICKHOUSE_VERSION_STRING}" +vendor: "ClickHouse Inc." +homepage: "https://clickhouse.com" +license: "Apache" +section: "database" +priority: "optional" + +replaces: +- clickhouse-common-dbg +conflicts: +- clickhouse-common-dbg + +maintainer: "ClickHouse Dev Team " +description: | + debugging symbols for clickhouse-common-static + This package contains the debugging symbols for clickhouse-common. + +contents: +- src: root/usr/lib/debug/usr/bin/clickhouse.debug + dst: /usr/lib/debug/usr/bin/clickhouse.debug +- src: root/usr/lib/debug/usr/bin/clickhouse-odbc-bridge.debug + dst: /usr/lib/debug/usr/bin/clickhouse-odbc-bridge.debug +- src: root/usr/lib/debug/usr/bin/clickhouse-library-bridge.debug + dst: /usr/lib/debug/usr/bin/clickhouse-library-bridge.debug +# docs +- src: ../AUTHORS + dst: /usr/share/doc/clickhouse-common-static-dbg/AUTHORS +- src: ../CHANGELOG.md + dst: /usr/share/doc/clickhouse-common-static-dbg/CHANGELOG.md +- src: ../LICENSE + dst: /usr/share/doc/clickhouse-common-static-dbg/LICENSE +- src: ../README.md + dst: /usr/share/doc/clickhouse-common-static-dbg/README.md diff --git a/packages/clickhouse-common-static.yaml b/packages/clickhouse-common-static.yaml new file mode 100644 index 00000000000..269d4318e5e --- /dev/null +++ b/packages/clickhouse-common-static.yaml @@ -0,0 +1,48 @@ +# package sources should be placed in ${PWD}/root +# nfpm should run from the same directory with a config +name: "clickhouse-common-static" +arch: "${DEB_ARCH}" # amd64, arm64 +platform: "linux" +version: "${CLICKHOUSE_VERSION_STRING}" +vendor: "ClickHouse Inc." 
+homepage: "https://clickhouse.com" +license: "Apache" +section: "database" +priority: "optional" + +replaces: +- clickhouse-common +- clickhouse-server-base +provides: +- clickhouse-common +- clickhouse-server-base +suggests: +- clickhouse-common-static-dbg + +maintainer: "ClickHouse Dev Team " +description: | + Common files for ClickHouse + ClickHouse is a column-oriented database management system + that allows generating analytical data reports in real time. + This package provides common files for both clickhouse server and client + +contents: +- src: root/usr/bin/clickhouse + dst: /usr/bin/clickhouse +- src: root/usr/bin/clickhouse-odbc-bridge + dst: /usr/bin/clickhouse-odbc-bridge +- src: root/usr/bin/clickhouse-library-bridge + dst: /usr/bin/clickhouse-library-bridge +- src: root/usr/bin/clickhouse-extract-from-config + dst: /usr/bin/clickhouse-extract-from-config +- src: root/usr/share/bash-completion/completions + dst: /usr/share/bash-completion/completions +# docs +- src: ../AUTHORS + dst: /usr/share/doc/clickhouse-common-static/AUTHORS +- src: ../CHANGELOG.md + dst: /usr/share/doc/clickhouse-common-static/CHANGELOG.md +- src: ../LICENSE + dst: /usr/share/doc/clickhouse-common-static/LICENSE +- src: ../README.md + dst: /usr/share/doc/clickhouse-common-static/README.md diff --git a/packages/clickhouse-keeper-dbg.yaml b/packages/clickhouse-keeper-dbg.yaml new file mode 100644 index 00000000000..2c70b7ad4aa --- /dev/null +++ b/packages/clickhouse-keeper-dbg.yaml @@ -0,0 +1,28 @@ +# package sources should be placed in ${PWD}/root +# nfpm should run from the same directory with a config +name: "clickhouse-keeper-dbg" +arch: "${DEB_ARCH}" # amd64, arm64 +platform: "linux" +version: "${CLICKHOUSE_VERSION_STRING}" +vendor: "ClickHouse Inc." +homepage: "https://clickhouse.com" +license: "Apache" +section: "database" +priority: "optional" +maintainer: "ClickHouse Dev Team " +description: | + debugging symbols for clickhouse-keeper + This package contains the debugging symbols for clickhouse-keeper. + +contents: +- src: root/usr/lib/debug/usr/bin/clickhouse-keeper.debug + dst: /usr/lib/debug/usr/bin/clickhouse-keeper.debug +# docs +- src: ../AUTHORS + dst: /usr/share/doc/clickhouse-keeper-dbg/AUTHORS +- src: ../CHANGELOG.md + dst: /usr/share/doc/clickhouse-keeper-dbg/CHANGELOG.md +- src: ../LICENSE + dst: /usr/share/doc/clickhouse-keeper-dbg/LICENSE +- src: ../README.md + dst: /usr/share/doc/clickhouse-keeper-dbg/README.md diff --git a/packages/clickhouse-keeper.yaml b/packages/clickhouse-keeper.yaml new file mode 100644 index 00000000000..e99ac30f944 --- /dev/null +++ b/packages/clickhouse-keeper.yaml @@ -0,0 +1,40 @@ +# package sources should be placed in ${PWD}/root +# nfpm should run from the same directory with a config +name: "clickhouse-keeper" +arch: "${DEB_ARCH}" # amd64, arm64 +platform: "linux" +version: "${CLICKHOUSE_VERSION_STRING}" +vendor: "ClickHouse Inc." 
+homepage: "https://clickhouse.com" +license: "Apache" +section: "database" +priority: "optional" + +conflicts: +- clickhouse-server +depends: +- adduser +suggests: +- clickhouse-keeper-dbg + +maintainer: "ClickHouse Dev Team " +description: | + Static clickhouse-keeper binary + A stand-alone clickhouse-keeper package + + +contents: +- src: root/etc/clickhouse-keeper + dst: /etc/clickhouse-keeper + type: config +- src: root/usr/bin/clickhouse-keeper + dst: /usr/bin/clickhouse-keeper +# docs +- src: ../AUTHORS + dst: /usr/share/doc/clickhouse-keeper/AUTHORS +- src: ../CHANGELOG.md + dst: /usr/share/doc/clickhouse-keeper/CHANGELOG.md +- src: ../LICENSE + dst: /usr/share/doc/clickhouse-keeper/LICENSE +- src: ../README.md + dst: /usr/share/doc/clickhouse-keeper/README.md diff --git a/packages/clickhouse-server.init b/packages/clickhouse-server.init new file mode 100755 index 00000000000..1695f6286b8 --- /dev/null +++ b/packages/clickhouse-server.init @@ -0,0 +1,227 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: clickhouse-server +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Should-Start: $time $network +# Should-Stop: $network +# Short-Description: clickhouse-server daemon +### END INIT INFO +# +# NOTES: +# - Should-* -- script can start if the listed facilities are missing, unlike Required-* +# +# For the documentation [1]: +# +# [1]: https://wiki.debian.org/LSBInitScripts + +CLICKHOUSE_USER=clickhouse +CLICKHOUSE_GROUP=${CLICKHOUSE_USER} +SHELL=/bin/bash +PROGRAM=clickhouse-server +CLICKHOUSE_GENERIC_PROGRAM=clickhouse +CLICKHOUSE_PROGRAM_ENV="" +EXTRACT_FROM_CONFIG=${CLICKHOUSE_GENERIC_PROGRAM}-extract-from-config +CLICKHOUSE_CONFDIR=/etc/$PROGRAM +CLICKHOUSE_LOGDIR=/var/log/clickhouse-server +CLICKHOUSE_LOGDIR_USER=root +CLICKHOUSE_DATADIR=/var/lib/clickhouse +if [ -d "/var/lock" ]; then + LOCALSTATEDIR=/var/lock +else + LOCALSTATEDIR=/run/lock +fi + +if [ ! -d "$LOCALSTATEDIR" ]; then + mkdir -p "$LOCALSTATEDIR" +fi + +CLICKHOUSE_BINDIR=/usr/bin +CLICKHOUSE_CRONFILE=/etc/cron.d/clickhouse-server +CLICKHOUSE_CONFIG=$CLICKHOUSE_CONFDIR/config.xml +LOCKFILE=$LOCALSTATEDIR/$PROGRAM +CLICKHOUSE_PIDDIR=/var/run/$PROGRAM +CLICKHOUSE_PIDFILE="$CLICKHOUSE_PIDDIR/$PROGRAM.pid" +# CLICKHOUSE_STOP_TIMEOUT=60 # Disabled by default. Place to /etc/default/clickhouse if you need. + +# Some systems lack "flock" +command -v flock >/dev/null && FLOCK=flock + +# Override defaults from optional config file +test -f /etc/default/clickhouse && . /etc/default/clickhouse + + +die() +{ + echo $1 >&2 + exit 1 +} + + +# Check that configuration file is Ok. +check_config() +{ + if [ -x "$CLICKHOUSE_BINDIR/$EXTRACT_FROM_CONFIG" ]; then + su -s $SHELL ${CLICKHOUSE_USER} -c "$CLICKHOUSE_BINDIR/$EXTRACT_FROM_CONFIG --config-file=\"$CLICKHOUSE_CONFIG\" --key=path" >/dev/null || die "Configuration file ${CLICKHOUSE_CONFIG} doesn't parse successfully. Won't restart server. 
You may use forcerestart if you are sure."; + fi +} + + +initdb() +{ + ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" +} + + +start() +{ + ${CLICKHOUSE_GENERIC_PROGRAM} start --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" +} + + +stop() +{ + ${CLICKHOUSE_GENERIC_PROGRAM} stop --pid-path "${CLICKHOUSE_PIDDIR}" +} + + +restart() +{ + ${CLICKHOUSE_GENERIC_PROGRAM} restart --user "${CLICKHOUSE_USER}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" +} + + +forcestop() +{ + ${CLICKHOUSE_GENERIC_PROGRAM} stop --force --pid-path "${CLICKHOUSE_PIDDIR}" +} + + +service_or_func() +{ + if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then + systemctl $1 $PROGRAM + else + $1 + fi +} + +forcerestart() +{ + forcestop + # Should not use 'start' function if systemd active + service_or_func start +} + +use_cron() +{ + # 1. running systemd + if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then + return 1 + fi + # 2. disabled by config + if [ -z "$CLICKHOUSE_CRONFILE" ]; then + return 2 + fi + return 0 +} +# returns false if cron disabled (with systemd) +enable_cron() +{ + use_cron && sed -i 's/^#*//' "$CLICKHOUSE_CRONFILE" +} +# returns false if cron disabled (with systemd) +disable_cron() +{ + use_cron && sed -i 's/^#*/#/' "$CLICKHOUSE_CRONFILE" +} + + +is_cron_disabled() +{ + use_cron || return 0 + + # Assumes that either no lines are commented or all lines are commented. + # Also please note, that currently cron file for ClickHouse has only one line (but some time ago there was more). + grep -q -E '^#' "$CLICKHOUSE_CRONFILE"; +} + + +main() +{ + # See how we were called. + EXIT_STATUS=0 + case "$1" in + start) + service_or_func start && enable_cron + ;; + stop) + disable_cron + service_or_func stop + ;; + restart) + service_or_func restart && enable_cron + ;; + forcestop) + disable_cron + forcestop + ;; + forcerestart) + forcerestart && enable_cron + ;; + reload) + service_or_func restart + ;; + condstart) + service_or_func start + ;; + condstop) + service_or_func stop + ;; + condrestart) + service_or_func restart + ;; + condreload) + service_or_func restart + ;; + initdb) + initdb + ;; + enable_cron) + enable_cron + ;; + disable_cron) + disable_cron + ;; + *) + echo "Usage: $0 {start|stop|status|restart|forcestop|forcerestart|reload|condstart|condstop|condrestart|condreload|initdb}" + exit 2 + ;; + esac + + exit $EXIT_STATUS +} + + +status() +{ + ${CLICKHOUSE_GENERIC_PROGRAM} status --pid-path "${CLICKHOUSE_PIDDIR}" +} + + +# Running commands without need of locking +case "$1" in +status) + status + exit 0 + ;; +esac + + +( + if $FLOCK -n 9; then + main "$@" + else + echo "Init script is already running" && exit 1 + fi +) 9> $LOCKFILE diff --git a/packages/clickhouse-server.postinstall b/packages/clickhouse-server.postinstall new file mode 100644 index 00000000000..419c13e3daf --- /dev/null +++ b/packages/clickhouse-server.postinstall @@ -0,0 +1,47 @@ +#!/bin/sh +set -e +# set -x + +PROGRAM=clickhouse-server +CLICKHOUSE_USER=${CLICKHOUSE_USER:=clickhouse} +CLICKHOUSE_GROUP=${CLICKHOUSE_GROUP:=${CLICKHOUSE_USER}} +# Please note that we don't support paths with whitespaces. This is rather ignorant. 
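
Both the init script's initdb() above and the postinstall below hand the actual user, directory and config setup to the bundled "clickhouse install" subcommand. With the defaults defined in the script, that call amounts to roughly the following (a sketch; the packages run it automatically, and the postinstall additionally passes --log-path and --data-path):

    # Equivalent of initdb() with the script's default paths; run as root.
    clickhouse install \
        --user clickhouse \
        --pid-path /var/run/clickhouse-server \
        --config-path /etc/clickhouse-server \
        --binary-path /usr/bin
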
+CLICKHOUSE_CONFDIR=${CLICKHOUSE_CONFDIR:=/etc/clickhouse-server} +CLICKHOUSE_DATADIR=${CLICKHOUSE_DATADIR:=/var/lib/clickhouse} +CLICKHOUSE_LOGDIR=${CLICKHOUSE_LOGDIR:=/var/log/clickhouse-server} +CLICKHOUSE_BINDIR=${CLICKHOUSE_BINDIR:=/usr/bin} +CLICKHOUSE_GENERIC_PROGRAM=${CLICKHOUSE_GENERIC_PROGRAM:=clickhouse} +EXTRACT_FROM_CONFIG=${CLICKHOUSE_GENERIC_PROGRAM}-extract-from-config +CLICKHOUSE_CONFIG=$CLICKHOUSE_CONFDIR/config.xml +CLICKHOUSE_PIDDIR=/var/run/$PROGRAM + +[ -f /usr/share/debconf/confmodule ] && . /usr/share/debconf/confmodule +[ -f /etc/default/clickhouse ] && . /etc/default/clickhouse + +if [ ! -f "/etc/debian_version" ]; then + not_deb_os=1 +fi + +if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then + + ${CLICKHOUSE_GENERIC_PROGRAM} install --user "${CLICKHOUSE_USER}" --group "${CLICKHOUSE_GROUP}" --pid-path "${CLICKHOUSE_PIDDIR}" --config-path "${CLICKHOUSE_CONFDIR}" --binary-path "${CLICKHOUSE_BINDIR}" --log-path "${CLICKHOUSE_LOGDIR}" --data-path "${CLICKHOUSE_DATADIR}" + + if [ -x "/bin/systemctl" ] && [ -f /etc/systemd/system/clickhouse-server.service ] && [ -d /run/systemd/system ]; then + # if old rc.d service present - remove it + if [ -x "/etc/init.d/clickhouse-server" ] && [ -x "/usr/sbin/update-rc.d" ]; then + /usr/sbin/update-rc.d clickhouse-server remove + fi + + /bin/systemctl daemon-reload + /bin/systemctl enable clickhouse-server + else + # If you downgrading to version older than 1.1.54336 run: systemctl disable clickhouse-server + if [ -x "/etc/init.d/clickhouse-server" ]; then + if [ -x "/usr/sbin/update-rc.d" ]; then + /usr/sbin/update-rc.d clickhouse-server defaults 19 19 >/dev/null || exit $? + else + echo # Other OS + fi + fi + fi +fi diff --git a/packages/clickhouse-server.service b/packages/clickhouse-server.service new file mode 100644 index 00000000000..a9400b24270 --- /dev/null +++ b/packages/clickhouse-server.service @@ -0,0 +1,27 @@ +[Unit] +Description=ClickHouse Server (analytic DBMS for big data) +Requires=network-online.target +# NOTE: that After/Wants=time-sync.target is not enough, you need to ensure +# that the time was adjusted already, if you use systemd-timesyncd you are +# safe, but if you use ntp or some other daemon, you should configure it +# additionaly. +After=time-sync.target network-online.target +Wants=time-sync.target + +[Service] +Type=simple +User=clickhouse +Group=clickhouse +Restart=always +RestartSec=30 +RuntimeDirectory=clickhouse-server +ExecStart=/usr/bin/clickhouse-server --config=/etc/clickhouse-server/config.xml --pid-file=/run/clickhouse-server/clickhouse-server.pid +# Minus means that this file is optional. +EnvironmentFile=-/etc/default/clickhouse +LimitCORE=infinity +LimitNOFILE=500000 +CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE + +[Install] +# ClickHouse should not start from the rescue shell (rescue.target). +WantedBy=multi-user.target diff --git a/packages/clickhouse-server.yaml b/packages/clickhouse-server.yaml new file mode 100644 index 00000000000..ed56eb27e54 --- /dev/null +++ b/packages/clickhouse-server.yaml @@ -0,0 +1,68 @@ +# package sources should be placed in ${PWD}/root +# nfpm should run from the same directory with a config +name: "clickhouse-server" +arch: "all" +platform: "linux" +version: "${CLICKHOUSE_VERSION_STRING}" +vendor: "ClickHouse Inc." 
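
On systemd hosts the postinstall above removes any old rc.d entry, reloads systemd and enables the unit file shipped in this package; it does not start the server itself. Done by hand, that sequence plus a first start is:

    # What the postinstall effects on a systemd system (run as root), plus a first start.
    systemctl daemon-reload
    systemctl enable clickhouse-server
    systemctl start clickhouse-server    # Restart=always in the unit keeps it running afterwards
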
+homepage: "https://clickhouse.com" +license: "Apache" +section: "database" +priority: "optional" + +conflicts: +- clickhouse-keeper +depends: +- adduser +replaces: +- clickhouse-server-common +- clickhouse-server-base +provides: +- clickhouse-server-common +recommends: +- libcap2-bin + +maintainer: "ClickHouse Dev Team " +description: | + Server binary for ClickHouse + ClickHouse is a column-oriented database management system + that allows generating analytical data reports in real time. + This package provides clickhouse common configuration files + +overrides: + deb: + depends: + - clickhouse-common-static (= ${CLICKHOUSE_VERSION_STRING}) + rpm: + depends: + - clickhouse-common-static = ${CLICKHOUSE_VERSION_STRING} + +contents: +- src: root/etc/clickhouse-server + dst: /etc/clickhouse-server + type: config +- src: clickhouse-server.init + dst: /etc/init.d/clickhouse-server +- src: clickhouse-server.service + dst: /lib/systemd/system/clickhouse-server.service +- src: root/usr/bin/clickhouse-copier + dst: /usr/bin/clickhouse-copier +- src: clickhouse + dst: /usr/bin/clickhouse-keeper + type: symlink +- src: root/usr/bin/clickhouse-report + dst: /usr/bin/clickhouse-report +- src: root/usr/bin/clickhouse-server + dst: /usr/bin/clickhouse-server +# docs +- src: ../AUTHORS + dst: /usr/share/doc/clickhouse-server/AUTHORS +- src: ../CHANGELOG.md + dst: /usr/share/doc/clickhouse-server/CHANGELOG.md +- src: ../LICENSE + dst: /usr/share/doc/clickhouse-server/LICENSE +- src: ../README.md + dst: /usr/share/doc/clickhouse-server/README.md + +scripts: + postinstall: ./clickhouse-server.postinstall diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index 0890b9c95d3..cca7be97b61 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -460,10 +460,6 @@ else () list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper-converter) endif () - if (NOT BUILD_STRIPPED_BINARIES_PREFIX) - install (TARGETS clickhouse RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) - endif() - add_custom_target (clickhouse-bundle ALL DEPENDS ${CLICKHOUSE_BUNDLE}) if (USE_GDB_ADD_INDEX) @@ -474,13 +470,14 @@ else () add_custom_command(TARGET clickhouse POST_BUILD COMMAND ./clickhouse hash-binary > hash && ${OBJCOPY_PATH} --add-section .note.ClickHouse.hash=hash clickhouse COMMENT "Adding .note.ClickHouse.hash to clickhouse" VERBATIM) endif() - if (BUILD_STRIPPED_BINARIES_PREFIX) - clickhouse_strip_binary(TARGET clickhouse DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/${BUILD_STRIPPED_BINARIES_PREFIX} BINARY_PATH clickhouse) + if (INSTALL_STRIPPED_BINARIES) + clickhouse_strip_binary(TARGET clickhouse DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/${STRIPPED_BINARIES_OUTPUT} BINARY_PATH clickhouse) + else() + clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/${STRIPPED_BINARIES_OUTPUT}) + install (TARGETS clickhouse RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) endif() endif() - - if (ENABLE_TESTS) set (CLICKHOUSE_UNIT_TESTS_TARGETS unit_tests_dbms) add_custom_target (clickhouse-tests ALL DEPENDS ${CLICKHOUSE_UNIT_TESTS_TARGETS}) diff --git a/programs/install/Install.cpp b/programs/install/Install.cpp index f8df823ecb7..5dec09ea901 100644 --- a/programs/install/Install.cpp +++ b/programs/install/Install.cpp @@ -792,9 +792,9 @@ int mainEntryClickHouseInstall(int argc, char ** argv) fmt::print("Setting capabilities for clickhouse binary. 
This is optional.\n"); std::string command = fmt::format("command -v setcap >/dev/null" " && command -v capsh >/dev/null" - " && capsh --has-p=cap_net_admin,cap_ipc_lock,cap_sys_nice+ep >/dev/null 2>&1" - " && setcap 'cap_net_admin,cap_ipc_lock,cap_sys_nice+ep' {0}" - " || echo \"Cannot set 'net_admin' or 'ipc_lock' or 'sys_nice' capability for clickhouse binary." + " && capsh --has-p=cap_net_admin,cap_ipc_lock,cap_sys_nice,cap_net_bind_service+ep >/dev/null 2>&1" + " && setcap 'cap_net_admin,cap_ipc_lock,cap_sys_nice,cap_net_bind_service+ep' {0}" + " || echo \"Cannot set 'net_admin' or 'ipc_lock' or 'sys_nice' or 'net_bind_service' capability for clickhouse binary." " This is optional. Taskstats accounting will be disabled." " To enable taskstats accounting you may add the required capability later manually.\"", fs::canonical(main_bin_path).string()); diff --git a/programs/keeper/CMakeLists.txt b/programs/keeper/CMakeLists.txt index 92bb5dc45a3..b82b13d9607 100644 --- a/programs/keeper/CMakeLists.txt +++ b/programs/keeper/CMakeLists.txt @@ -71,17 +71,11 @@ if (BUILD_STANDALONE_KEEPER) ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressedReadBuffer.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressedReadBufferFromFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressedWriteBuffer.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressionCodecDelta.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressionCodecDoubleDelta.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressionCodecEncrypted.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressionCodecGorilla.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressionCodecLZ4.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressionCodecMultiple.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressionCodecNone.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressionCodecT64.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressionCodecZSTD.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CompressionFactory.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/getCompressionCodecForFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/ICompressionCodec.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/LZ4_decompress_faster.cpp @@ -137,5 +131,10 @@ if (BUILD_STANDALONE_KEEPER) add_dependencies(clickhouse-keeper clickhouse_keeper_configs) set_target_properties(clickhouse-keeper PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../) - install(TARGETS clickhouse-keeper RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + if (INSTALL_STRIPPED_BINARIES) + clickhouse_strip_binary(TARGET clickhouse-keeper DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT} BINARY_PATH ../clickhouse-keeper) + else() + clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-keeper DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT}) + install(TARGETS clickhouse-keeper RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) + endif() endif() diff --git a/programs/library-bridge/CMakeLists.txt b/programs/library-bridge/CMakeLists.txt index d7e104685c5..90ce3d8be7f 100644 --- a/programs/library-bridge/CMakeLists.txt +++ b/programs/library-bridge/CMakeLists.txt @@ -24,10 +24,9 @@ target_link_libraries(clickhouse-library-bridge PRIVATE set_target_properties(clickhouse-library-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..) 
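
The installer change above extends the optional capability grant with cap_net_bind_service, so the clickhouse binary can also bind ports below 1024 when started by an unprivileged user. If that step was skipped at install time, the same probe-and-grant can be repeated later by hand, mirroring the command Install.cpp builds (run as root):

    # Probe that the capability set is usable, then apply it to the binary. Optional;
    # without it taskstats accounting and low-port binding are simply unavailable.
    command -v setcap >/dev/null \
        && capsh --has-p=cap_net_admin,cap_ipc_lock,cap_sys_nice,cap_net_bind_service+ep >/dev/null 2>&1 \
        && setcap 'cap_net_admin,cap_ipc_lock,cap_sys_nice,cap_net_bind_service+ep' /usr/bin/clickhouse
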
-if (BUILD_STRIPPED_BINARIES_PREFIX) - clickhouse_strip_binary(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${BUILD_STRIPPED_BINARIES_PREFIX} BINARY_PATH ../clickhouse-library-bridge) -endif() - -if (NOT BUILD_STRIPPED_BINARIES_PREFIX) +if (INSTALL_STRIPPED_BINARIES) + clickhouse_strip_binary(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT} BINARY_PATH ../clickhouse-library-bridge) +else() + clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT}) install(TARGETS clickhouse-library-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) endif() diff --git a/programs/odbc-bridge/CMakeLists.txt b/programs/odbc-bridge/CMakeLists.txt index 44493d7ab8a..b530e08ca26 100644 --- a/programs/odbc-bridge/CMakeLists.txt +++ b/programs/odbc-bridge/CMakeLists.txt @@ -39,11 +39,10 @@ if (USE_GDB_ADD_INDEX) add_custom_command(TARGET clickhouse-odbc-bridge POST_BUILD COMMAND ${GDB_ADD_INDEX_EXE} ../clickhouse-odbc-bridge COMMENT "Adding .gdb-index to clickhouse-odbc-bridge" VERBATIM) endif() -if (BUILD_STRIPPED_BINARIES_PREFIX) - clickhouse_strip_binary(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${BUILD_STRIPPED_BINARIES_PREFIX} BINARY_PATH ../clickhouse-odbc-bridge) -endif() - -if (NOT BUILD_STRIPPED_BINARIES_PREFIX) +if (INSTALL_STRIPPED_BINARIES) + clickhouse_strip_binary(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT} BINARY_PATH ../clickhouse-odbc-bridge) +else() + clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT}) install(TARGETS clickhouse-odbc-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) endif() diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 1b11453dde4..c42496a546c 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -554,6 +555,10 @@ if (ThreadFuzzer::instance().isEffective()) config().getUInt("thread_pool_queue_size", 10000) ); + IOThreadPool::initialize( + config().getUInt("max_io_thread_pool_size", 100), + config().getUInt("max_io_thread_pool_free_size", 0), + config().getUInt("io_thread_pool_queue_size", 10000)); /// Initialize global local cache for remote filesystem. 
if (config().has("local_cache_for_remote_fs")) @@ -1022,8 +1027,8 @@ if (ThreadFuzzer::instance().isEffective()) std::make_unique( new KeeperTCPHandlerFactory( config_getter, global_context->getKeeperDispatcher(), - global_context->getSettingsRef().receive_timeout, - global_context->getSettingsRef().send_timeout, + global_context->getSettingsRef().receive_timeout.totalSeconds(), + global_context->getSettingsRef().send_timeout.totalSeconds(), false), server_pool, socket)); }); @@ -1045,8 +1050,8 @@ if (ThreadFuzzer::instance().isEffective()) std::make_unique( new KeeperTCPHandlerFactory( config_getter, global_context->getKeeperDispatcher(), - global_context->getSettingsRef().receive_timeout, - global_context->getSettingsRef().send_timeout, true), server_pool, socket)); + global_context->getSettingsRef().receive_timeout.totalSeconds(), + global_context->getSettingsRef().send_timeout.totalSeconds(), true), server_pool, socket)); #else UNUSED(port); throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.", diff --git a/src/Access/Common/QuotaDefs.h b/src/Access/Common/QuotaDefs.h index cfd8a07d9ff..7a69f811ea5 100644 --- a/src/Access/Common/QuotaDefs.h +++ b/src/Access/Common/QuotaDefs.h @@ -13,7 +13,7 @@ enum class QuotaType { QUERIES, /// Number of queries. QUERY_SELECTS, /// Number of select queries. - QUERY_INSERTS, /// Number of inserts queries. + QUERY_INSERTS, /// Number of insert queries. ERRORS, /// Number of queries with exceptions. RESULT_ROWS, /// Number of rows returned as result. RESULT_BYTES, /// Number of bytes returned as result. diff --git a/src/Backups/ArchiveBackup.cpp b/src/Backups/ArchiveBackup.cpp new file mode 100644 index 00000000000..5d4837fff2e --- /dev/null +++ b/src/Backups/ArchiveBackup.cpp @@ -0,0 +1,112 @@ +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + + +ArchiveBackup::ArchiveBackup( + const String & backup_name_, + const DiskPtr & disk_, + const String & path_, + const ContextPtr & context_, + const std::optional & base_backup_info_) + : BackupImpl(backup_name_, context_, base_backup_info_), disk(disk_), path(path_) +{ +} + +ArchiveBackup::~ArchiveBackup() +{ + close(); +} + +bool ArchiveBackup::backupExists() const +{ + return disk ? 
disk->exists(path) : fs::exists(path); +} + +void ArchiveBackup::openImpl(OpenMode open_mode_) +{ + /// mutex is already locked + if (open_mode_ == OpenMode::WRITE) + { + if (disk) + writer = createArchiveWriter(path, disk->writeFile(path)); + else + writer = createArchiveWriter(path); + + writer->setCompression(compression_method, compression_level); + writer->setPassword(password); + } + else if (open_mode_ == OpenMode::READ) + { + if (disk) + { + auto archive_read_function = [d = disk, p = path]() -> std::unique_ptr { return d->readFile(p); }; + size_t archive_size = disk->getFileSize(path); + reader = createArchiveReader(path, archive_read_function, archive_size); + } + else + reader = createArchiveReader(path); + + reader->setPassword(password); + } +} + +void ArchiveBackup::closeImpl(bool writing_finalized_) +{ + /// mutex is already locked + if (writer && writer->isWritingFile()) + throw Exception("There is some writing unfinished on close", ErrorCodes::LOGICAL_ERROR); + + writer.reset(); + reader.reset(); + + if ((getOpenModeNoLock() == OpenMode::WRITE) && !writing_finalized_) + fs::remove(path); +} + +std::unique_ptr ArchiveBackup::readFileImpl(const String & file_name) const +{ + /// mutex is already locked + return reader->readFile(file_name); +} + +std::unique_ptr ArchiveBackup::writeFileImpl(const String & file_name) +{ + /// mutex is already locked + return writer->writeFile(file_name); +} + +void ArchiveBackup::setCompression(const String & compression_method_, int compression_level_) +{ + std::lock_guard lock{mutex}; + compression_method = compression_method_; + compression_level = compression_level_; + if (writer) + writer->setCompression(compression_method, compression_level); +} + +void ArchiveBackup::setPassword(const String & password_) +{ + std::lock_guard lock{mutex}; + password = password_; + if (writer) + writer->setPassword(password); + if (reader) + reader->setPassword(password); +} + +} diff --git a/src/Backups/ArchiveBackup.h b/src/Backups/ArchiveBackup.h new file mode 100644 index 00000000000..9649c0c1843 --- /dev/null +++ b/src/Backups/ArchiveBackup.h @@ -0,0 +1,52 @@ +#pragma once + +#include + + +namespace DB +{ +class IDisk; +using DiskPtr = std::shared_ptr; +class IArchiveReader; +class IArchiveWriter; + +/// Stores a backup as a single .zip file. +class ArchiveBackup : public BackupImpl +{ +public: + /// `disk`_ is allowed to be nullptr and that means the `path_` is a path in the local filesystem. + ArchiveBackup( + const String & backup_name_, + const DiskPtr & disk_, + const String & path_, + const ContextPtr & context_, + const std::optional & base_backup_info_ = {}); + + ~ArchiveBackup() override; + + static constexpr const int kDefaultCompressionLevel = -1; + + /// Sets compression method and level. + void setCompression(const String & compression_method_, int compression_level_ = kDefaultCompressionLevel); + + /// Sets password. 
+ void setPassword(const String & password_); + +private: + bool backupExists() const override; + void openImpl(OpenMode open_mode_) override; + void closeImpl(bool writing_finalized_) override; + bool supportsWritingInMultipleThreads() const override { return false; } + std::unique_ptr readFileImpl(const String & file_name) const override; + std::unique_ptr writeFileImpl(const String & file_name) override; + + const DiskPtr disk; + const String path; + std::shared_ptr reader; + std::shared_ptr writer; + String compression_method; + int compression_level = kDefaultCompressionLevel; + String password; +}; + +} diff --git a/src/Backups/BackupEntryConcat.cpp b/src/Backups/BackupEntryConcat.cpp deleted file mode 100644 index 1075b8be2ec..00000000000 --- a/src/Backups/BackupEntryConcat.cpp +++ /dev/null @@ -1,28 +0,0 @@ -#include -#include - - -namespace DB -{ -BackupEntryConcat::BackupEntryConcat( - BackupEntryPtr first_source_, - BackupEntryPtr second_source_, - const std::optional & checksum_) - : first_source(std::move(first_source_)) - , second_source(std::move(second_source_)) - , checksum(checksum_) -{ -} - -UInt64 BackupEntryConcat::getSize() const -{ - if (!size) - size = first_source->getSize() + second_source->getSize(); - return *size; -} - -std::unique_ptr BackupEntryConcat::getReadBuffer() const -{ - return std::make_unique(*first_source->getReadBuffer(), *second_source->getReadBuffer()); -} -} diff --git a/src/Backups/BackupEntryConcat.h b/src/Backups/BackupEntryConcat.h deleted file mode 100644 index cb38fb9b163..00000000000 --- a/src/Backups/BackupEntryConcat.h +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once - -#include - - -namespace DB -{ - -/// Concatenates data of two backup entries. -class BackupEntryConcat : public IBackupEntry -{ -public: - /// The constructor is allowed to not set `checksum_`, in that case it will be calculated from the data. - BackupEntryConcat( - BackupEntryPtr first_source_, - BackupEntryPtr second_source_, - const std::optional & checksum_ = {}); - - UInt64 getSize() const override; - std::optional getChecksum() const override { return checksum; } - std::unique_ptr getReadBuffer() const override; - -private: - BackupEntryPtr first_source; - BackupEntryPtr second_source; - mutable std::optional size; - std::optional checksum; -}; - -} diff --git a/src/Backups/BackupEntryFromAppendOnlyFile.cpp b/src/Backups/BackupEntryFromAppendOnlyFile.cpp index 32d5713952f..d7f9d5624c8 100644 --- a/src/Backups/BackupEntryFromAppendOnlyFile.cpp +++ b/src/Backups/BackupEntryFromAppendOnlyFile.cpp @@ -29,7 +29,7 @@ BackupEntryFromAppendOnlyFile::BackupEntryFromAppendOnlyFile( std::unique_ptr BackupEntryFromAppendOnlyFile::getReadBuffer() const { auto buf = BackupEntryFromImmutableFile::getReadBuffer(); - return std::make_unique(std::move(buf), limit, true); + return std::make_unique(std::move(buf), limit, false); } } diff --git a/src/Backups/BackupEntryFromCallback.h b/src/Backups/BackupEntryFromCallback.h deleted file mode 100644 index e8df2d99f7c..00000000000 --- a/src/Backups/BackupEntryFromCallback.h +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once - -#include - - -namespace DB -{ - -/// Represents small preloaded data to be included in a backup. -class BackupEntryFromCallback : public IBackupEntry -{ -public: - using ReadBufferCreator = std::function()>; - - /// The constructor is allowed to not set `checksum_`, in that case it will be calculated from the data. 
- BackupEntryFromCallback(const ReadBufferCreator & callback_, size_t size_, const std::optional & checksum_ = {}) - : callback(callback_), size(size_), checksum(checksum_) - { - } - - UInt64 getSize() const override { return size; } - std::optional getChecksum() const override { return checksum; } - std::unique_ptr getReadBuffer() const override { return callback(); } - -private: - const ReadBufferCreator callback; - const size_t size; - const std::optional checksum; -}; - -} diff --git a/src/Backups/BackupFactory.cpp b/src/Backups/BackupFactory.cpp index 490bfb002db..d64c2bd0318 100644 --- a/src/Backups/BackupFactory.cpp +++ b/src/Backups/BackupFactory.cpp @@ -21,7 +21,9 @@ BackupMutablePtr BackupFactory::createBackup(const CreateParams & params) const auto it = creators.find(engine_name); if (it == creators.end()) throw Exception(ErrorCodes::BACKUP_ENGINE_NOT_FOUND, "Not found backup engine {}", engine_name); - return (it->second)(params); + BackupMutablePtr backup = (it->second)(params); + backup->open(params.open_mode); + return backup; } void BackupFactory::registerBackupEngine(const String & engine_name, const CreatorFn & creator_fn) @@ -31,7 +33,12 @@ void BackupFactory::registerBackupEngine(const String & engine_name, const Creat creators[engine_name] = creator_fn; } -void registerBackupEngines(BackupFactory & factory); +void registerBackupEnginesFileAndDisk(BackupFactory &); + +void registerBackupEngines(BackupFactory & factory) +{ + registerBackupEnginesFileAndDisk(factory); +} BackupFactory::BackupFactory() { diff --git a/src/Backups/BackupFactory.h b/src/Backups/BackupFactory.h index 51d70c61f54..d3ebcfe2369 100644 --- a/src/Backups/BackupFactory.h +++ b/src/Backups/BackupFactory.h @@ -26,6 +26,9 @@ public: OpenMode open_mode = OpenMode::WRITE; BackupInfo backup_info; std::optional base_backup_info; + String compression_method; + int compression_level = -1; + String password; ContextPtr context; }; diff --git a/src/Backups/BackupImpl.cpp b/src/Backups/BackupImpl.cpp index d8ed247d20e..e4fc894837a 100644 --- a/src/Backups/BackupImpl.cpp +++ b/src/Backups/BackupImpl.cpp @@ -1,13 +1,11 @@ #include #include -#include -#include #include #include #include #include -#include #include +#include #include #include #include @@ -47,34 +45,82 @@ namespace } } -BackupImpl::BackupImpl(const String & backup_name_, OpenMode open_mode_, const ContextPtr & context_, const std::optional & base_backup_info_) - : backup_name(backup_name_), open_mode(open_mode_), context(context_), base_backup_info(base_backup_info_) + +class BackupImpl::BackupEntryFromBackupImpl : public IBackupEntry +{ +public: + BackupEntryFromBackupImpl( + const std::shared_ptr & backup_, + const String & file_name_, + UInt64 size_, + const std::optional checksum_, + BackupEntryPtr base_backup_entry_ = {}) + : backup(backup_), file_name(file_name_), size(size_), checksum(checksum_), + base_backup_entry(std::move(base_backup_entry_)) + { + } + + std::unique_ptr getReadBuffer() const override + { + auto read_buffer = backup->readFileImpl(file_name); + if (base_backup_entry) + { + auto base_backup_read_buffer = base_backup_entry->getReadBuffer(); + read_buffer = std::make_unique(std::move(base_backup_read_buffer), std::move(read_buffer)); + } + return read_buffer; + } + + UInt64 getSize() const override { return size; } + std::optional getChecksum() const override { return checksum; } + +private: + const std::shared_ptr backup; + const String file_name; + const UInt64 size; + const std::optional checksum; + BackupEntryPtr 
base_backup_entry; +}; + + +BackupImpl::BackupImpl(const String & backup_name_, const ContextPtr & context_, const std::optional & base_backup_info_) + : backup_name(backup_name_), context(context_), base_backup_info_param(base_backup_info_) { } BackupImpl::~BackupImpl() = default; -void BackupImpl::open() +void BackupImpl::open(OpenMode open_mode_) { - if (open_mode == OpenMode::WRITE) + std::lock_guard lock{mutex}; + if (open_mode == open_mode_) + return; + + if (open_mode != OpenMode::NONE) + throw Exception("Backup is already opened", ErrorCodes::LOGICAL_ERROR); + + if (open_mode_ == OpenMode::WRITE) { if (backupExists()) throw Exception(ErrorCodes::BACKUP_ALREADY_EXISTS, "Backup {} already exists", getName()); timestamp = std::time(nullptr); uuid = UUIDHelpers::generateV4(); - - startWriting(); - writing_started = true; + writing_finalized = false; } - if (open_mode == OpenMode::READ) + if (open_mode_ == OpenMode::READ) { if (!backupExists()) throw Exception(ErrorCodes::BACKUP_NOT_FOUND, "Backup {} not found", getName()); - readBackupMetadata(); } + openImpl(open_mode_); + + base_backup_info = base_backup_info_param; + if (open_mode_ == OpenMode::READ) + readBackupMetadata(); + if (base_backup_info) { BackupFactory::CreateParams params; @@ -83,25 +129,43 @@ void BackupImpl::open() params.context = context; base_backup = BackupFactory::instance().createBackup(params); - if (open_mode == OpenMode::WRITE) + if (open_mode_ == OpenMode::WRITE) base_backup_uuid = base_backup->getUUID(); else if (base_backup_uuid != base_backup->getUUID()) throw Exception(ErrorCodes::WRONG_BASE_BACKUP, "Backup {}: The base backup {} has different UUID ({} != {})", getName(), base_backup->getName(), toString(base_backup->getUUID()), (base_backup_uuid ? toString(*base_backup_uuid) : "")); } + + open_mode = open_mode_; } void BackupImpl::close() { - if (open_mode == OpenMode::WRITE) - { - if (writing_started && !writing_finalized) - { - /// Creating of the backup wasn't finished correctly, - /// so the backup cannot be used and it's better to remove its files. 
- removeAllFilesAfterFailure(); - } - } + std::lock_guard lock{mutex}; + if (open_mode == OpenMode::NONE) + return; + + closeImpl(writing_finalized); + + uuid = UUIDHelpers::Nil; + timestamp = 0; + base_backup_info.reset(); + base_backup.reset(); + base_backup_uuid.reset(); + file_infos.clear(); + open_mode = OpenMode::NONE; +} + +IBackup::OpenMode BackupImpl::getOpenMode() const +{ + std::lock_guard lock{mutex}; + return open_mode; +} + +time_t BackupImpl::getTimestamp() const +{ + std::lock_guard lock{mutex}; + return timestamp; } void BackupImpl::writeBackupMetadata() @@ -112,9 +176,20 @@ void BackupImpl::writeBackupMetadata() config->setString("uuid", toString(uuid)); if (base_backup_info) - config->setString("base_backup", base_backup_info->toString()); - if (base_backup_uuid) - config->setString("base_backup_uuid", toString(*base_backup_uuid)); + { + bool base_backup_in_use = false; + for (const auto & [name, info] : file_infos) + { + if (info.base_size) + base_backup_in_use = true; + } + + if (base_backup_in_use) + { + config->setString("base_backup", base_backup_info->toString()); + config->setString("base_backup_uuid", toString(*base_backup_uuid)); + } + } size_t index = 0; for (const auto & [name, info] : file_infos) @@ -128,7 +203,7 @@ void BackupImpl::writeBackupMetadata() if (info.base_size) { config->setUInt(prefix + "base_size", info.base_size); - if (info.base_size != info.size) + if (info.base_checksum != info.checksum) config->setString(prefix + "base_checksum", getHexUIntLowercase(info.base_checksum)); } } @@ -138,7 +213,7 @@ void BackupImpl::writeBackupMetadata() std::ostringstream stream; // STYLE_CHECK_ALLOW_STD_STRING_STREAM config->save(stream); String str = stream.str(); - auto out = addFileImpl(".backup"); + auto out = writeFileImpl(".backup"); out->write(str.data(), str.size()); } @@ -161,7 +236,7 @@ void BackupImpl::readBackupMetadata() if (config->has("base_backup") && !base_backup_info) base_backup_info = BackupInfo::fromString(config->getString("base_backup")); - if (config->has("base_backup_uuid") && !base_backup_uuid) + if (config->has("base_backup_uuid")) base_backup_uuid = parse(config->getString("base_backup_uuid")); file_infos.clear(); @@ -173,20 +248,22 @@ void BackupImpl::readBackupMetadata() { String prefix = "contents." 
+ key + "."; String name = config->getString(prefix + "name"); - FileInfo & info = file_infos.emplace(name, FileInfo{}).first->second; + FileInfo info; info.size = config->getUInt(prefix + "size"); if (info.size) { info.checksum = unhexChecksum(config->getString(prefix + "checksum")); - if (config->has(prefix + "base_size")) + info.base_size = config->getUInt(prefix + "base_size", 0); + if (info.base_size) { - info.base_size = config->getUInt(prefix + "base_size"); - if (info.base_size == info.size) - info.base_checksum = info.checksum; - else + if (config->has(prefix + "base_checksum")) info.base_checksum = unhexChecksum(config->getString(prefix + "base_checksum")); + else + info.base_checksum = info.checksum; } } + file_infos.emplace(name, info); + file_checksums.emplace(info.checksum, name); } } } @@ -240,10 +317,22 @@ UInt128 BackupImpl::getFileChecksum(const String & file_name) const return it->second.checksum; } +std::optional BackupImpl::findFileByChecksum(const UInt128 & checksum) const +{ + std::lock_guard lock{mutex}; + auto it = file_checksums.find(checksum); + if (it == file_checksums.end()) + return std::nullopt; + return it->second; +} + BackupEntryPtr BackupImpl::readFile(const String & file_name) const { std::lock_guard lock{mutex}; + if (open_mode != OpenMode::READ) + throw Exception("Backup is not opened for reading", ErrorCodes::LOGICAL_ERROR); + auto it = file_infos.find(file_name); if (it == file_infos.end()) throw Exception( @@ -264,7 +353,8 @@ BackupEntryPtr BackupImpl::readFile(const String & file_name) const if (!info.base_size) { /// Data goes completely from this backup, the base backup isn't used. - return std::make_unique(read_callback, info.size, info.checksum); + return std::make_unique( + std::static_pointer_cast(shared_from_this()), file_name, info.size, info.checksum); } if (info.size < info.base_size) @@ -283,7 +373,8 @@ BackupEntryPtr BackupImpl::readFile(const String & file_name) const getName(), quoteString(file_name)); } - if (!base_backup->fileExists(file_name)) + auto base_file_name = base_backup->findFileByChecksum(info.base_checksum); + if (!base_file_name) { throw Exception( ErrorCodes::WRONG_BASE_BACKUP, @@ -291,7 +382,7 @@ BackupEntryPtr BackupImpl::readFile(const String & file_name) const getName(), quoteString(file_name)); } - auto base_entry = base_backup->readFile(file_name); + auto base_entry = base_backup->readFile(*base_file_name); auto base_size = base_entry->getSize(); if (base_size != info.base_size) { @@ -301,15 +392,6 @@ BackupEntryPtr BackupImpl::readFile(const String & file_name) const getName(), quoteString(file_name), base_backup->getName(), base_size, info.base_size); } - auto base_checksum = base_entry->getChecksum(); - if (base_checksum && (*base_checksum != info.base_checksum)) - { - throw Exception( - ErrorCodes::WRONG_BASE_BACKUP, - "Backup {}: Entry {} has unexpected checksum in the base backup {}", - getName(), quoteString(file_name), base_backup->getName()); - } - if (info.size == info.base_size) { /// Data goes completely from the base backup (nothing goes from this backup). @@ -318,18 +400,16 @@ BackupEntryPtr BackupImpl::readFile(const String & file_name) const /// The beginning of the data goes from the base backup, /// and the ending goes from this backup. 
- return std::make_unique( - std::move(base_entry), - std::make_unique(read_callback, info.size - info.base_size), - info.checksum); + return std::make_unique( + static_pointer_cast(shared_from_this()), file_name, info.size, info.checksum, std::move(base_entry)); } -void BackupImpl::addFile(const String & file_name, BackupEntryPtr entry) +void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry) { std::lock_guard lock{mutex}; if (open_mode != OpenMode::WRITE) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Illegal operation: Cannot write to a backup opened for reading"); + throw Exception("Backup is not opened for writing", ErrorCodes::LOGICAL_ERROR); if (file_infos.contains(file_name)) throw Exception( @@ -356,44 +436,60 @@ void BackupImpl::addFile(const String & file_name, BackupEntryPtr entry) } std::unique_ptr read_buffer; /// We'll set that later. - UInt64 read_pos = 0; /// Current position in read_buffer. + std::optional hashing_read_buffer; + UInt64 hashing_pos = 0; /// Current position in `hashing_read_buffer`. /// Determine whether it's possible to receive this entry's data from the base backup completely or partly. bool use_base = false; - if (base_exists && base_size) + if (base_exists && base_size && (size >= base_size)) { - if (size == base_size) + if (checksum && (size == base_size)) { /// The size is the same, we need to compare checksums to find out - /// if the entry's data has not been changed since the base backup. - if (!checksum) - { - read_buffer = entry->getReadBuffer(); - HashingReadBuffer hashing_read_buffer{*read_buffer}; - hashing_read_buffer.ignore(size); - read_pos = size; - checksum = hashing_read_buffer.getHash(); - } - if (checksum == base_checksum) - use_base = true; /// The data has not been changed. + /// if the entry's data has not changed since the base backup. + use_base = (*checksum == base_checksum); } - else if (size > base_size) + else { - /// The size has been increased, we need to calculate a partial checksum to find out - /// if the entry's data has been only appended since the base backup. + /// The size has increased, we need to calculate a partial checksum to find out + /// if the entry's data has only appended since the base backup. read_buffer = entry->getReadBuffer(); - HashingReadBuffer hashing_read_buffer{*read_buffer}; - hashing_read_buffer.ignore(base_size); - UInt128 partial_checksum = hashing_read_buffer.getHash(); - read_pos = base_size; - if (!checksum) - { - hashing_read_buffer.ignore(size - base_size); - checksum = hashing_read_buffer.getHash(); - read_pos = size; - } + hashing_read_buffer.emplace(*read_buffer); + hashing_read_buffer->ignore(base_size); + hashing_pos = base_size; + UInt128 partial_checksum = hashing_read_buffer->getHash(); + if (size == base_size) + checksum = partial_checksum; if (partial_checksum == base_checksum) - use_base = true; /// The data has been appended. + use_base = true; + } + } + + /// Finish calculating the checksum. + if (!checksum) + { + if (!read_buffer) + read_buffer = entry->getReadBuffer(); + if (!hashing_read_buffer) + hashing_read_buffer.emplace(*read_buffer); + hashing_read_buffer->ignore(size - hashing_pos); + checksum = hashing_read_buffer->getHash(); + } + hashing_read_buffer.reset(); + + /// Check if a entry with the same checksum exists in the base backup. 
+ if (base_backup && !use_base) + { + if (auto base_file_name = base_backup->findFileByChecksum(*checksum)) + { + if (size == base_backup->getFileSize(*base_file_name)) + { + /// The entry's data has not changed since the base backup, + /// but the entry itself has been moved or renamed. + base_size = size; + base_checksum = *checksum; + use_base = true; + } } } @@ -401,74 +497,61 @@ void BackupImpl::addFile(const String & file_name, BackupEntryPtr entry) { /// The entry's data has not been changed since the base backup. FileInfo info; - info.size = base_size; - info.checksum = base_checksum; + info.size = size; + info.checksum = *checksum; info.base_size = base_size; info.base_checksum = base_checksum; file_infos.emplace(file_name, info); + file_checksums.emplace(*checksum, file_name); return; } + /// Either the entry wasn't exist in the base backup + /// or the entry has data appended to the end of the data from the base backup. + /// In both those cases we have to copy data to this backup. + + /// Find out where the start position to copy data is. + auto copy_pos = use_base ? base_size : 0; + + /// Move the current read position to the start position to copy data. + /// If `read_buffer` is seekable it's easier, otherwise we can use ignore(). + if (auto * seekable_buffer = dynamic_cast(read_buffer.get())) { - /// Either the entry wasn't exist in the base backup - /// or the entry has data appended to the end of the data from the base backup. - /// In both those cases we have to copy data to this backup. - - /// Find out where the start position to copy data is. - auto copy_pos = use_base ? base_size : 0; - - /// Move the current read position to the start position to copy data. - /// If `read_buffer` is seekable it's easier, otherwise we can use ignore(). - if (auto * seekable_buffer = dynamic_cast(read_buffer.get())) - { - if (read_pos != copy_pos) - seekable_buffer->seek(copy_pos, SEEK_SET); - } - else - { - if (read_pos > copy_pos) - { - read_buffer.reset(); - read_pos = 0; - } - - if (!read_buffer) - read_buffer = entry->getReadBuffer(); - - if (read_pos < copy_pos) - read_buffer->ignore(copy_pos - read_pos); - } - - /// If we haven't received or calculated a checksum yet, calculate it now. - ReadBuffer * maybe_hashing_read_buffer = read_buffer.get(); - std::optional hashing_read_buffer; - if (!checksum) - maybe_hashing_read_buffer = &hashing_read_buffer.emplace(*read_buffer); - - /// Copy the entry's data after `copy_pos`. - auto out = addFileImpl(file_name); - copyData(*maybe_hashing_read_buffer, *out); - - if (hashing_read_buffer) - checksum = hashing_read_buffer->getHash(); - - /// Done! - FileInfo info; - info.size = size; - info.checksum = *checksum; - if (use_base) - { - info.base_size = base_size; - info.base_checksum = base_checksum; - } - file_infos.emplace(file_name, info); + seekable_buffer->seek(copy_pos, SEEK_SET); } + else + { + read_buffer = entry->getReadBuffer(); + read_buffer->ignore(copy_pos); + } + + /// Copy the entry's data after `copy_pos`. + auto out = writeFileImpl(file_name); + copyData(*read_buffer, *out); + + /// Done! 
+ FileInfo info; + info.size = size; + info.checksum = *checksum; + if (use_base) + { + info.base_size = base_size; + info.base_checksum = base_checksum; + } + file_infos.emplace(file_name, info); + file_checksums.emplace(*checksum, file_name); } + void BackupImpl::finalizeWriting() { + std::lock_guard lock{mutex}; + if (writing_finalized) + return; + if (open_mode != OpenMode::WRITE) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Illegal operation: Cannot write to a backup opened for reading"); + throw Exception("Backup is not opened for writing", ErrorCodes::LOGICAL_ERROR); + writeBackupMetadata(); writing_finalized = true; } diff --git a/src/Backups/BackupImpl.h b/src/Backups/BackupImpl.h index 5a0269cd662..d1fc3c3248c 100644 --- a/src/Backups/BackupImpl.h +++ b/src/Backups/BackupImpl.h @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB @@ -20,29 +21,33 @@ class BackupImpl : public IBackup public: BackupImpl( const String & backup_name_, - OpenMode open_mode_, const ContextPtr & context_, const std::optional & base_backup_info_ = {}); ~BackupImpl() override; const String & getName() const override { return backup_name; } - OpenMode getOpenMode() const override { return open_mode; } - time_t getTimestamp() const override { return timestamp; } + void open(OpenMode open_mode_) override; + OpenMode getOpenMode() const override; + void close() override; + time_t getTimestamp() const override; UUID getUUID() const override { return uuid; } Strings listFiles(const String & prefix, const String & terminator) const override; bool fileExists(const String & file_name) const override; size_t getFileSize(const String & file_name) const override; UInt128 getFileChecksum(const String & file_name) const override; + std::optional findFileByChecksum(const UInt128 & checksum) const override; BackupEntryPtr readFile(const String & file_name) const override; - void addFile(const String & file_name, BackupEntryPtr entry) override; + void writeFile(const String & file_name, BackupEntryPtr entry) override; void finalizeWriting() override; protected: - /// Should be called in the constructor of a derived class. - void open(); + /// Checks if this backup exists. + virtual bool backupExists() const = 0; - /// Should be called in the destructor of a derived class. - void close(); + virtual void openImpl(OpenMode open_mode_) = 0; + OpenMode getOpenModeNoLock() const { return open_mode; } + + virtual void closeImpl(bool writing_finalized_) = 0; /// Read a file from the backup. /// Low level: the function doesn't check base backup or checksums. @@ -50,18 +55,9 @@ protected: /// Add a file to the backup. /// Low level: the function doesn't check base backup or checksums. - virtual std::unique_ptr addFileImpl(const String & file_name) = 0; + virtual std::unique_ptr writeFileImpl(const String & file_name) = 0; - /// Checks if this backup exists. - virtual bool backupExists() const = 0; - - /// Starts writing of this backup, only used if `open_mode == OpenMode::WRITE`. - /// After calling this function `backupExists()` should return true. - virtual void startWriting() = 0; - - /// Removes all the backup files, called if something goes wrong while we're writing the backup. - /// This function is called by `close()` if `startWriting()` was called and `finalizeWriting()` wasn't. 
- virtual void removeAllFilesAfterFailure() = 0; + mutable std::mutex mutex; private: void writeBackupMetadata(); @@ -77,18 +73,20 @@ private: UInt128 base_checksum{0, 0}; }; + class BackupEntryFromBackupImpl; + const String backup_name; - const OpenMode open_mode; - UUID uuid; - time_t timestamp = 0; ContextPtr context; + const std::optional base_backup_info_param; + OpenMode open_mode = OpenMode::NONE; + UUID uuid = {}; + time_t timestamp = 0; std::optional base_backup_info; std::shared_ptr base_backup; std::optional base_backup_uuid; - std::map file_infos; - bool writing_started = false; + std::map file_infos; /// Should be ordered alphabetically, see listFiles(). + std::unordered_map file_checksums; bool writing_finalized = false; - mutable std::mutex mutex; }; } diff --git a/src/Backups/BackupInDirectory.cpp b/src/Backups/BackupInDirectory.cpp deleted file mode 100644 index f071a21618c..00000000000 --- a/src/Backups/BackupInDirectory.cpp +++ /dev/null @@ -1,160 +0,0 @@ -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; -} - -namespace -{ - /// Checks multiple keys "key", "key[1]", "key[2]", and so on in the configuration - /// and find out if some of them have matching value. - bool findConfigKeyWithMatchingValue(const Poco::Util::AbstractConfiguration & config, const String & key, const std::function & match_function) - { - String current_key = key; - size_t counter = 0; - while (config.has(current_key)) - { - if (match_function(config.getString(current_key))) - return true; - current_key = key + "[" + std::to_string(++counter) + "]"; - } - return false; - } - - bool isDiskAllowed(const String & disk_name, const Poco::Util::AbstractConfiguration & config) - { - return findConfigKeyWithMatchingValue(config, "backups.allowed_disk", [&](const String & value) { return value == disk_name; }); - } - - bool isPathAllowed(const String & path, const Poco::Util::AbstractConfiguration & config) - { - return findConfigKeyWithMatchingValue(config, "backups.allowed_path", [&](const String & value) { return path.starts_with(value); }); - } -} - - -BackupInDirectory::BackupInDirectory( - const String & backup_name_, - OpenMode open_mode_, - const DiskPtr & disk_, - const String & path_, - const ContextPtr & context_, - const std::optional & base_backup_info_) - : BackupImpl(backup_name_, open_mode_, context_, base_backup_info_) - , disk(disk_), path(path_) -{ - /// Path to backup must end with '/' - if (path.back() != '/') - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Backup {}: Path to backup must end with '/', but {} doesn't.", getName(), quoteString(path)); - dir_path = fs::path(path).parent_path(); /// get path without terminating slash - - /// If `disk` is not specified, we create an internal instance of `DiskLocal` here. 
- if (!disk) - { - auto fspath = fs::path{dir_path}; - if (!fspath.has_filename()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Backup {}: Path to a backup must be a directory path.", getName(), quoteString(path)); - path = fspath.filename() / ""; - dir_path = fs::path(path).parent_path(); /// get path without terminating slash - String disk_path = fspath.remove_filename(); - disk = std::make_shared(disk_path, disk_path, 0); - } - - open(); -} - - -BackupInDirectory::~BackupInDirectory() -{ - close(); -} - -bool BackupInDirectory::backupExists() const -{ - return disk->isDirectory(dir_path); -} - -void BackupInDirectory::startWriting() -{ - disk->createDirectories(dir_path); -} - -void BackupInDirectory::removeAllFilesAfterFailure() -{ - if (disk->isDirectory(dir_path)) - disk->removeRecursive(dir_path); -} - -std::unique_ptr BackupInDirectory::readFileImpl(const String & file_name) const -{ - String file_path = path + file_name; - return disk->readFile(file_path); -} - -std::unique_ptr BackupInDirectory::addFileImpl(const String & file_name) -{ - String file_path = path + file_name; - disk->createDirectories(fs::path(file_path).parent_path()); - return disk->writeFile(file_path); -} - - -void registerBackupEngineFile(BackupFactory & factory) -{ - auto creator_fn = [](const BackupFactory::CreateParams & params) - { - String backup_name = params.backup_info.toString(); - const String & engine_name = params.backup_info.backup_engine_name; - const auto & args = params.backup_info.args; - - DiskPtr disk; - String path; - if (engine_name == "File") - { - if (args.size() != 1) - { - throw Exception( - "Backup engine 'File' requires 1 argument (path)", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - } - - path = args[0].safeGet(); - - if (!isPathAllowed(path, params.context->getConfigRef())) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} is not allowed for backups", path); - } - else if (engine_name == "Disk") - { - if (args.size() != 2) - { - throw Exception( - "Backup engine 'Disk' requires 2 arguments (disk_name, path)", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - } - - String disk_name = args[0].safeGet(); - disk = params.context->getDisk(disk_name); - path = args[1].safeGet(); - - if (!isDiskAllowed(disk_name, params.context->getConfigRef())) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Disk {} is not allowed for backups", disk_name); - } - - return std::make_shared(backup_name, params.open_mode, disk, path, params.context, params.base_backup_info); - }; - - factory.registerBackupEngine("File", creator_fn); - factory.registerBackupEngine("Disk", creator_fn); -} - -} diff --git a/src/Backups/BackupRenamingConfig.cpp b/src/Backups/BackupRenamingConfig.cpp deleted file mode 100644 index ff510d82a32..00000000000 --- a/src/Backups/BackupRenamingConfig.cpp +++ /dev/null @@ -1,89 +0,0 @@ -#include -#include - - -namespace DB -{ -using Kind = ASTBackupQuery::Kind; -using ElementType = ASTBackupQuery::ElementType; - -void BackupRenamingConfig::setNewTableName(const DatabaseAndTableName & old_table_name, const DatabaseAndTableName & new_table_name) -{ - old_to_new_table_names[old_table_name] = new_table_name; -} - -void BackupRenamingConfig::setNewDatabaseName(const String & old_database_name, const String & new_database_name) -{ - old_to_new_database_names[old_database_name] = new_database_name; -} - -void BackupRenamingConfig::setNewTemporaryTableName(const String & old_temporary_table_name, const String & new_temporary_table_name) -{ - 
old_to_new_temporary_table_names[old_temporary_table_name] = new_temporary_table_name; -} - -void BackupRenamingConfig::setFromBackupQuery(const ASTBackupQuery & backup_query) -{ - setFromBackupQueryElements(backup_query.elements); -} - -void BackupRenamingConfig::setFromBackupQueryElements(const ASTBackupQuery::Elements & backup_query_elements) -{ - for (const auto & element : backup_query_elements) - { - switch (element.type) - { - case ElementType::TABLE: [[fallthrough]]; - case ElementType::DICTIONARY: - { - const auto & new_name = element.new_name.second.empty() ? element.name : element.new_name; - setNewTableName(element.name, new_name); - break; - } - - case ASTBackupQuery::DATABASE: - { - const auto & new_name = element.new_name.first.empty() ? element.name.first : element.new_name.first; - setNewDatabaseName(element.name.first, new_name); - break; - } - - case ASTBackupQuery::TEMPORARY_TABLE: - { - const auto & new_name = element.new_name.second.empty() ? element.name.second : element.new_name.second; - setNewTemporaryTableName(element.name.second, new_name); - break; - } - - case ASTBackupQuery::ALL_DATABASES: break; - case ASTBackupQuery::ALL_TEMPORARY_TABLES: break; - case ASTBackupQuery::EVERYTHING: break; - } - } -} - -DatabaseAndTableName BackupRenamingConfig::getNewTableName(const DatabaseAndTableName & old_table_name) const -{ - auto it = old_to_new_table_names.find(old_table_name); - if (it != old_to_new_table_names.end()) - return it->second; - return {getNewDatabaseName(old_table_name.first), old_table_name.second}; -} - -const String & BackupRenamingConfig::getNewDatabaseName(const String & old_database_name) const -{ - auto it = old_to_new_database_names.find(old_database_name); - if (it != old_to_new_database_names.end()) - return it->second; - return old_database_name; -} - -const String & BackupRenamingConfig::getNewTemporaryTableName(const String & old_temporary_table_name) const -{ - auto it = old_to_new_temporary_table_names.find(old_temporary_table_name); - if (it != old_to_new_temporary_table_names.end()) - return it->second; - return old_temporary_table_name; -} - -} diff --git a/src/Backups/BackupRenamingConfig.h b/src/Backups/BackupRenamingConfig.h deleted file mode 100644 index 740781c9c9f..00000000000 --- a/src/Backups/BackupRenamingConfig.h +++ /dev/null @@ -1,39 +0,0 @@ -#pragma once - -#include -#include -#include -#include - - -namespace DB -{ -using DatabaseAndTableName = std::pair; - -/// Keeps information about renamings of databases or tables being processed -/// while we're making a backup or while we're restoring from a backup. -class BackupRenamingConfig -{ -public: - BackupRenamingConfig() = default; - - void setNewTableName(const DatabaseAndTableName & old_table_name, const DatabaseAndTableName & new_table_name); - void setNewDatabaseName(const String & old_database_name, const String & new_database_name); - void setNewTemporaryTableName(const String & old_temporary_table_name, const String & new_temporary_table_name); - void setFromBackupQuery(const ASTBackupQuery & backup_query); - void setFromBackupQueryElements(const ASTBackupQuery::Elements & backup_query_elements); - - /// Changes names according to the renaming. 
- DatabaseAndTableName getNewTableName(const DatabaseAndTableName & old_table_name) const; - const String & getNewDatabaseName(const String & old_database_name) const; - const String & getNewTemporaryTableName(const String & old_temporary_table_name) const; - -private: - std::map old_to_new_table_names; - std::unordered_map old_to_new_database_names; - std::unordered_map old_to_new_temporary_table_names; -}; - -using BackupRenamingConfigPtr = std::shared_ptr; - -} diff --git a/src/Backups/BackupSettings.cpp b/src/Backups/BackupSettings.cpp index f383330f246..817e0e7e1a7 100644 --- a/src/Backups/BackupSettings.cpp +++ b/src/Backups/BackupSettings.cpp @@ -1,6 +1,43 @@ #include +#include +#include +#include +#include + namespace DB { -IMPLEMENT_SETTINGS_TRAITS(BackupSettingsTraits, LIST_OF_BACKUP_SETTINGS) +namespace ErrorCodes +{ + extern const int UNKNOWN_SETTING; +} + +BackupSettings BackupSettings::fromBackupQuery(const ASTBackupQuery & query) +{ + BackupSettings res; + + if (query.base_backup_name) + res.base_backup_info = BackupInfo::fromAST(*query.base_backup_name); + + if (query.settings) + { + const auto & settings = query.settings->as().changes; + for (const auto & setting : settings) + { + if (setting.name == "compression_method") + res.compression_method = SettingFieldString{setting.value}; + else if (setting.name == "compression_level") + res.compression_level = SettingFieldInt64{setting.value}; + else if (setting.name == "password") + res.password = SettingFieldString{setting.value}; + else if (setting.name == "structure_only") + res.structure_only = SettingFieldBool{setting.value}; + else + throw Exception(ErrorCodes::UNKNOWN_SETTING, "Unknown setting {}", setting.name); + } + } + + return res; +} + } diff --git a/src/Backups/BackupSettings.h b/src/Backups/BackupSettings.h index 05fbda2598e..ca95a08da8f 100644 --- a/src/Backups/BackupSettings.h +++ b/src/Backups/BackupSettings.h @@ -1,16 +1,31 @@ #pragma once -#include +#include +#include namespace DB { +class ASTBackupQuery; -#define LIST_OF_BACKUP_SETTINGS(M) \ - M(Bool, dummy, false, "", 0) \ +/// Settings specified in the "SETTINGS" clause of a BACKUP query. +struct BackupSettings +{ + /// Base backup, if it's set an incremental backup will be built. + std::optional base_backup_info; -DECLARE_SETTINGS_TRAITS_ALLOW_CUSTOM_SETTINGS(BackupSettingsTraits, LIST_OF_BACKUP_SETTINGS) + /// Compression method and level for writing the backup (when applicable). + String compression_method; /// "" means default method + int compression_level = -1; /// -1 means default level -struct BackupSettings : public BaseSettings {}; + /// Password used to encrypt the backup. + String password; + + /// If this is set to true then only create queries will be written to backup, + /// without the data of tables. 
+ bool structure_only = false; + + static BackupSettings fromBackupQuery(const ASTBackupQuery & query); +}; } diff --git a/src/Backups/BackupUtils.cpp b/src/Backups/BackupUtils.cpp index c26eec440e6..9e2da6f34f8 100644 --- a/src/Backups/BackupUtils.cpp +++ b/src/Backups/BackupUtils.cpp @@ -1,36 +1,27 @@ #include #include -#include +#include +#include +#include #include -#include -#include +#include #include +#include #include -#include #include -#include #include -#include -#include #include #include -#include -#include -#include -#include - -namespace fs = std::filesystem; namespace DB { namespace ErrorCodes { - extern const int BACKUP_ELEMENT_DUPLICATE; + extern const int CANNOT_BACKUP_TABLE; + extern const int CANNOT_BACKUP_DATABASE; extern const int BACKUP_IS_EMPTY; extern const int LOGICAL_ERROR; - extern const int TABLE_ALREADY_EXISTS; - extern const int CANNOT_RESTORE_TABLE; } namespace @@ -40,632 +31,286 @@ namespace using Elements = ASTBackupQuery::Elements; using ElementType = ASTBackupQuery::ElementType; - /// Replace elements of types DICTIONARY or EVERYTHING with elements of other types. - void replaceElementTypesWithBaseElementTypes(Elements & elements) + /// Makes backup entries to backup databases and tables according to the elements of ASTBackupQuery. + /// Keep this class consistent with RestoreTasksBuilder. + class BackupEntriesBuilder { - for (size_t i = 0; i != elements.size(); ++i) + public: + BackupEntriesBuilder(const ContextPtr & context_, const BackupSettings & backup_settings_) + : context(context_), backup_settings(backup_settings_) { - auto & element = elements[i]; - switch (element.type) - { - case ElementType::DICTIONARY: - { - element.type = ElementType::TABLE; - break; - } - - case ElementType::EVERYTHING: - { - element.type = ElementType::ALL_DATABASES; - auto & new_element = elements.emplace_back(); - new_element.type = ElementType::ALL_TEMPORARY_TABLES; - break; - } - - default: - break; - } } - } - /// Replaces an empty database with the current database. - void replaceEmptyDatabaseWithCurrentDatabase(Elements & elements, const String & current_database) - { - for (auto & element : elements) + /// Prepares internal structures for making backup entries. + void prepare(const ASTBackupQuery::Elements & elements) { - if (element.type == ElementType::TABLE) + String current_database = context->getCurrentDatabase(); + renaming_settings.setFromBackupQuery(elements, current_database); + + for (const auto & element : elements) { - if (element.name.first.empty() && !element.name.second.empty()) - element.name.first = current_database; - if (element.new_name.first.empty() && !element.new_name.second.empty()) - element.new_name.first = current_database; - } - } - } - - /// Replaces elements of types TEMPORARY_TABLE or ALL_TEMPORARY_TABLES with elements of type TABLE or DATABASE. - void replaceTemporaryTablesWithTemporaryDatabase(Elements & elements) - { - for (auto & element : elements) - { - switch (element.type) - { - case ElementType::TEMPORARY_TABLE: + switch (element.type) { - element.type = ElementType::TABLE; - element.name.first = DatabaseCatalog::TEMPORARY_DATABASE; - if (element.new_name.first.empty() && !element.new_name.second.empty()) - element.new_name.first = DatabaseCatalog::TEMPORARY_DATABASE; - break; - } - - case ElementType::ALL_TEMPORARY_TABLES: - { - element.type = ElementType::DATABASE; - element.name.first = DatabaseCatalog::TEMPORARY_DATABASE; - break; - } - - default: - break; - } - } - } - - /// Set new names if they are not specified. 
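Earlier in this diff, BackupSettings::fromBackupQuery copies each recognized entry of the SETTINGS clause into a plain struct field and rejects anything it does not know. A standalone sketch of that dispatch, with std::string values standing in for the Field and SettingField machinery (names are illustrative):

```cpp
#include <map>
#include <stdexcept>
#include <string>

struct Settings
{
    std::string compression_method;  /// "" means the default method.
    int compression_level = -1;      /// -1 means the default level.
    std::string password;
    bool structure_only = false;     /// Only CREATE queries, no table data.
};

Settings parseSettings(const std::map<std::string, std::string> & changes)
{
    Settings res;
    for (const auto & [name, value] : changes)
    {
        if (name == "compression_method")
            res.compression_method = value;
        else if (name == "compression_level")
            res.compression_level = std::stoi(value);
        else if (name == "password")
            res.password = value;
        else if (name == "structure_only")
            res.structure_only = (value == "1" || value == "true");
        else
            throw std::runtime_error("Unknown setting " + name);  /// Mirrors the UNKNOWN_SETTING error above.
    }
    return res;
}
```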
- void setNewNamesIfNotSet(Elements & elements) - { - for (auto & element : elements) - { - switch (element.type) - { - case ElementType::TABLE: - { - if (element.new_name.second.empty()) - element.new_name = element.name; - break; - } - - case ElementType::DATABASE: - { - if (element.new_name.first.empty()) - element.new_name = element.name; - break; - } - - default: - break; - } - } - } - - /// Removes duplications in the elements of a backup query by removing some excessive elements and by updating except_lists. - /// This function helps deduplicate elements in queries like "BACKUP ALL DATABASES, DATABASE xxx USING NAME yyy" - /// (we need a deduplication for that query because `ALL DATABASES` includes `xxx` however we don't want - /// to backup/restore the same database twice while executing the same query). - /// Also this function slightly reorders elements: it puts databases before tables and dictionaries they contain. - void deduplicateAndReorderElements(Elements & elements) - { - std::set skip_indices; /// Indices of elements which should be removed in the end of this function. - size_t index_all_databases = static_cast(-1); /// Index of the first element of type ALL_DATABASES or -1 if not found. - - struct DatabaseInfo - { - size_t index = static_cast(-1); - std::unordered_map tables; - }; - std::unordered_map databases; /// Found databases and tables. - - for (size_t i = 0; i != elements.size(); ++i) - { - auto & element = elements[i]; - switch (element.type) - { - case ElementType::TABLE: - { - auto & tables = databases.emplace(element.name.first, DatabaseInfo{}).first->second.tables; - auto it = tables.find(element.name.second); - if (it == tables.end()) + case ElementType::TABLE: { - tables.emplace(element.name.second, i); + const String & table_name = element.name.second; + String database_name = element.name.first; + if (database_name.empty()) + database_name = current_database; + prepareToBackupTable(DatabaseAndTableName{database_name, table_name}, element.partitions); + break; } - else + + case ElementType::DATABASE: { - size_t prev_index = it->second; - if ((elements[i].new_name == elements[prev_index].new_name) - && (elements[i].partitions.empty() == elements[prev_index].partitions.empty())) - { - insertAtEnd(elements[prev_index].partitions, elements[i].partitions); - skip_indices.emplace(i); - } - else - { - throw Exception( - "Table " + backQuote(element.name.first) + "." 
+ backQuote(element.name.second) + " was specified twice", - ErrorCodes::BACKUP_ELEMENT_DUPLICATE); - } + const String & database_name = element.name.first; + prepareToBackupDatabase(database_name, element.except_list); + break; + } + + case ElementType::ALL_DATABASES: + { + prepareToBackupAllDatabases(element.except_list); + break; } - break; } - - case ElementType::DATABASE: - { - auto it = databases.find(element.name.first); - if (it == databases.end()) - { - DatabaseInfo new_db_info; - new_db_info.index = i; - databases.emplace(element.name.first, new_db_info); - } - else if (it->second.index == static_cast(-1)) - { - it->second.index = i; - } - else - { - size_t prev_index = it->second.index; - if ((elements[i].new_name == elements[prev_index].new_name) - && (elements[i].except_list == elements[prev_index].except_list)) - { - skip_indices.emplace(i); - } - else - { - throw Exception("Database " + backQuote(element.name.first) + " was specified twice", ErrorCodes::BACKUP_ELEMENT_DUPLICATE); - } - - } - break; - } - - case ElementType::ALL_DATABASES: - { - if (index_all_databases == static_cast(-1)) - { - index_all_databases = i; - } - else - { - size_t prev_index = index_all_databases; - if (elements[i].except_list == elements[prev_index].except_list) - skip_indices.emplace(i); - else - throw Exception("The tag ALL DATABASES was specified twice", ErrorCodes::BACKUP_ELEMENT_DUPLICATE); - } - break; - } - - default: - /// replaceElementTypesWithBaseElementTypes() and replaceTemporaryTablesWithTemporaryDatabase() should have removed all other element types. - throw Exception("Unexpected element type: " + std::to_string(static_cast(element.type)), ErrorCodes::LOGICAL_ERROR); } } - if (index_all_databases != static_cast(-1)) + /// Makes backup entries, should be called after prepare(). + BackupEntries makeBackupEntries() const { - for (auto & [database_name, database] : databases) + /// Check that there are not `different_create_query`. (If it's set it means error.) + for (const auto & info : databases | boost::adaptors::map_values) { - elements[index_all_databases].except_list.emplace(database_name); - if (database.index == static_cast(-1)) + if (info.different_create_query) + throw Exception(ErrorCodes::CANNOT_BACKUP_DATABASE, + "Cannot backup a database because two different create queries were generated for it: {} and {}", + serializeAST(*info.create_query), serializeAST(*info.different_create_query)); + } + + BackupEntries res; + for (const auto & info : databases | boost::adaptors::map_values) + res.push_back(makeBackupEntryForMetadata(*info.create_query)); + + for (const auto & info : tables | boost::adaptors::map_values) + { + res.push_back(makeBackupEntryForMetadata(*info.create_query)); + if (info.has_data) { - auto & new_element = elements.emplace_back(); - new_element.type = ElementType::DATABASE; - new_element.name.first = database_name; - new_element.new_name = new_element.name; - database.index = elements.size() - 1; + auto data_backup = info.storage->backupData(context, info.partitions); + if (!data_backup.empty()) + { + String data_path = getDataPathInBackup(*info.create_query); + for (auto & [path_in_backup, backup_entry] : data_backup) + res.emplace_back(data_path + path_in_backup, std::move(backup_entry)); + } + } + } + + /// A backup cannot be empty. + if (res.empty()) + throw Exception("Backup must not be empty", ErrorCodes::BACKUP_IS_EMPTY); + + return res; + } + + private: + /// Prepares to backup a single table and probably its database's definition. 
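The makeBackupEntries() method above turns the infos collected during prepare() into a flat list of (path in backup, entry) pairs: one metadata entry per database and per table, followed by the table's data entries re-rooted under its data path. A compact standalone sketch of that collect-then-emit shape (std:: types only, illustrative names):

```cpp
#include <map>
#include <string>
#include <utility>
#include <vector>

using Entry = std::pair<std::string, std::string>;  /// (path inside the backup, contents)

struct TableInfo
{
    std::string create_query;  /// Serialized CREATE statement, written as metadata.
    std::string data_path;     /// For example "data/db/table/".
    std::vector<Entry> data;   /// Entries produced by the storage, paths relative to data_path.
};

std::vector<Entry> emitEntries(const std::map<std::string, TableInfo> & tables)
{
    std::vector<Entry> res;
    for (const auto & [metadata_path, info] : tables)
    {
        res.emplace_back(metadata_path, info.create_query);              /// Metadata first.
        for (const auto & [relative_path, contents] : info.data)
            res.emplace_back(info.data_path + relative_path, contents);  /// Then data, re-rooted under data_path.
    }
    return res;
}
```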
+ void prepareToBackupTable(const DatabaseAndTableName & table_name_, const ASTs & partitions_) + { + auto [database, storage] = DatabaseCatalog::instance().getDatabaseAndTable({table_name_.first, table_name_.second}, context); + prepareToBackupTable(table_name_, {database, storage}, partitions_); + } + + void prepareToBackupTable(const DatabaseAndTableName & table_name_, const DatabaseAndTable & table_, const ASTs & partitions_) + { + context->checkAccess(AccessType::SHOW_TABLES, table_name_.first, table_name_.second); + + const auto & database = table_.first; + const auto & storage = table_.second; + + if (!database->hasTablesToBackup()) + throw Exception( + ErrorCodes::CANNOT_BACKUP_TABLE, + "Cannot backup the {} because it's contained in a hollow database (engine: {})", + formatTableNameOrTemporaryTableName(table_name_), + database->getEngineName()); + + /// Check that we are not trying to backup the same table again. + DatabaseAndTableName new_table_name = renaming_settings.getNewTableName(table_name_); + if (tables.contains(new_table_name)) + throw Exception(ErrorCodes::CANNOT_BACKUP_TABLE, "Cannot backup the {} twice", formatTableNameOrTemporaryTableName(new_table_name)); + + /// Make a create query for this table. + auto create_query = prepareCreateQueryForBackup(database->getCreateTableQuery(table_name_.second, context)); + + bool has_data = storage->hasDataToBackup() && !backup_settings.structure_only; + if (has_data) + { + /// We check for SELECT privilege only if we're going to read data from the table. + context->checkAccess(AccessType::SELECT, table_name_.first, table_name_.second); + } + + CreateTableInfo info; + info.create_query = create_query; + info.storage = storage; + info.name_in_backup = new_table_name; + info.partitions = partitions_; + info.has_data = has_data; + tables[new_table_name] = std::move(info); + + /// If it's not system or temporary database then probably we need to backup the database's definition too. + if (!isSystemOrTemporaryDatabase(table_name_.first)) + { + if (!databases.contains(new_table_name.first)) + { + /// Add a create query to backup the database if we haven't done it yet. + auto create_db_query = prepareCreateQueryForBackup(database->getCreateDatabaseQuery()); + create_db_query->setDatabase(new_table_name.first); + + CreateDatabaseInfo info_db; + info_db.create_query = create_db_query; + info_db.original_name = table_name_.first; + info_db.is_explicit = false; + databases[new_table_name.first] = std::move(info_db); + } + else + { + /// We already have added a create query to backup the database, + /// set `different_create_query` if it's not the same. + auto & info_db = databases[new_table_name.first]; + if (!info_db.is_explicit && (info_db.original_name != table_name_.first) && !info_db.different_create_query) + { + auto create_db_query = prepareCreateQueryForBackup(table_.first->getCreateDatabaseQuery()); + create_db_query->setDatabase(new_table_name.first); + if (!areDatabaseDefinitionsSame(*info_db.create_query, *create_db_query)) + info_db.different_create_query = create_db_query; + } } } } - for (auto & [database_name, database] : databases) + /// Prepares to restore a database and all tables in it. 
+ void prepareToBackupDatabase(const String & database_name_, const std::set & except_list_) { - if (database.index == static_cast(-1)) - continue; - for (const auto & [table_name, table_index] : database.tables) - elements[database.index].except_list.emplace(table_name); + auto database = DatabaseCatalog::instance().getDatabase(database_name_, context); + prepareToBackupDatabase(database_name_, database, except_list_); } - /// Reorder the elements: databases should be before tables and dictionaries they contain. - for (auto & [database_name, database] : databases) + void prepareToBackupDatabase(const String & database_name_, const DatabasePtr & database_, const std::set & except_list_) { - if (database.index == static_cast(-1)) - continue; - size_t min_index = std::numeric_limits::max(); - auto min_index_it = database.tables.end(); - for (auto it = database.tables.begin(); it != database.tables.end(); ++it) + context->checkAccess(AccessType::SHOW_DATABASES, database_name_); + + /// Check that we are not trying to restore the same database again. + String new_database_name = renaming_settings.getNewDatabaseName(database_name_); + if (databases.contains(new_database_name) && databases[new_database_name].is_explicit) + throw Exception(ErrorCodes::CANNOT_BACKUP_DATABASE, "Cannot backup the database {} twice", backQuoteIfNeed(new_database_name)); + + /// Of course we're not going to backup the definition of the system or the temporary database. + if (!isSystemOrTemporaryDatabase(database_name_)) { - if (min_index > it->second) + /// Make a create query for this database. + auto create_db_query = prepareCreateQueryForBackup(database_->getCreateDatabaseQuery()); + + CreateDatabaseInfo info_db; + info_db.create_query = create_db_query; + info_db.original_name = database_name_; + info_db.is_explicit = true; + databases[new_database_name] = std::move(info_db); + } + + /// Backup tables in this database. + if (database_->hasTablesToBackup()) + { + for (auto it = database_->getTablesIterator(context); it->isValid(); it->next()) { - min_index = it->second; - min_index_it = it; + if (except_list_.contains(it->name())) + continue; + prepareToBackupTable({database_name_, it->name()}, {database_, it->table()}, {}); } } - if (database.index > min_index) + } + + /// Prepares to backup all the databases contained in the backup. 
+ void prepareToBackupAllDatabases(const std::set & except_list_) + { + for (const auto & [database_name, database] : DatabaseCatalog::instance().getDatabases()) { - std::swap(elements[database.index], elements[min_index]); - std::swap(database.index, min_index_it->second); + if (except_list_.contains(database_name)) + continue; + if (isSystemOrTemporaryDatabase(database_name)) + continue; + prepareToBackupDatabase(database_name, database, {}); } } - for (auto skip_index : skip_indices | boost::adaptors::reversed) - elements.erase(elements.begin() + skip_index); - } - - Elements adjustElements(const Elements & elements, const String & current_database) - { - auto res = elements; - replaceElementTypesWithBaseElementTypes(res); - replaceEmptyDatabaseWithCurrentDatabase(res, current_database); - replaceTemporaryTablesWithTemporaryDatabase(res); - setNewNamesIfNotSet(res); - deduplicateAndReorderElements(res); - return res; - } - - String getDataPathInBackup(const DatabaseAndTableName & table_name) - { - if (table_name.first.empty() || table_name.second.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name and table name must not be empty"); - assert(!table_name.first.empty() && !table_name.second.empty()); - return String{"data/"} + escapeForFileName(table_name.first) + "/" + escapeForFileName(table_name.second) + "/"; - } - - String getDataPathInBackup(const IAST & create_query) - { - const auto & create = create_query.as(); - if (!create.table) - return {}; - if (create.temporary) - return getDataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()}); - return getDataPathInBackup({create.getDatabase(), create.getTable()}); - } - - String getMetadataPathInBackup(const DatabaseAndTableName & table_name) - { - if (table_name.first.empty() || table_name.second.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name and table name must not be empty"); - return String{"metadata/"} + escapeForFileName(table_name.first) + "/" + escapeForFileName(table_name.second) + ".sql"; - } - - String getMetadataPathInBackup(const String & database_name) - { - if (database_name.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name must not be empty"); - return String{"metadata/"} + escapeForFileName(database_name) + ".sql"; - } - - String getMetadataPathInBackup(const IAST & create_query) - { - const auto & create = create_query.as(); - if (!create.table) - return getMetadataPathInBackup(create.getDatabase()); - if (create.temporary) - return getMetadataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()}); - return getMetadataPathInBackup({create.getDatabase(), create.getTable()}); - } - - void backupCreateQuery(const IAST & create_query, BackupEntries & backup_entries) - { - auto metadata_entry = std::make_unique(serializeAST(create_query)); - String metadata_path = getMetadataPathInBackup(create_query); - backup_entries.emplace_back(metadata_path, std::move(metadata_entry)); - } - - void backupTable( - const DatabaseAndTable & database_and_table, - const String & table_name, - const ASTs & partitions, - const ContextPtr & context, - const BackupRenamingConfigPtr & renaming_config, - BackupEntries & backup_entries) - { - const auto & database = database_and_table.first; - const auto & storage = database_and_table.second; - context->checkAccess(AccessType::SELECT, database->getDatabaseName(), table_name); - - auto create_query = database->getCreateTableQuery(table_name, context); - ASTPtr new_create_query = 
renameInCreateQuery(create_query, renaming_config, context); - backupCreateQuery(*new_create_query, backup_entries); - - auto data_backup = storage->backup(partitions, context); - if (!data_backup.empty()) + /// Do renaming in the create query according to the renaming config. + std::shared_ptr prepareCreateQueryForBackup(const ASTPtr & ast) const { - String data_path = getDataPathInBackup(*new_create_query); - for (auto & [path_in_backup, backup_entry] : data_backup) - backup_entries.emplace_back(data_path + path_in_backup, std::move(backup_entry)); + ASTPtr query = ast; + ::DB::renameInCreateQuery(query, context, renaming_settings); + auto create_query = typeid_cast>(query); + create_query->uuid = UUIDHelpers::Nil; + create_query->to_inner_uuid = UUIDHelpers::Nil; + return create_query; } - } - void backupDatabase( - const DatabasePtr & database, - const std::set & except_list, - const ContextPtr & context, - const BackupRenamingConfigPtr & renaming_config, - BackupEntries & backup_entries) - { - context->checkAccess(AccessType::SHOW_TABLES, database->getDatabaseName()); - - auto create_query = database->getCreateDatabaseQuery(); - ASTPtr new_create_query = renameInCreateQuery(create_query, renaming_config, context); - backupCreateQuery(*new_create_query, backup_entries); - - for (auto it = database->getTablesIteratorForBackup(context); it->isValid(); it->next()) + static bool isSystemOrTemporaryDatabase(const String & database_name) { - if (except_list.contains(it->name())) - continue; - backupTable({database, it->table()}, it->name(), {}, context, renaming_config, backup_entries); + return (database_name == DatabaseCatalog::SYSTEM_DATABASE) || (database_name == DatabaseCatalog::TEMPORARY_DATABASE); } - } - void backupAllDatabases( - const std::set & except_list, - const ContextPtr & context, - const BackupRenamingConfigPtr & renaming_config, - BackupEntries & backup_entries) - { - for (const auto & [database_name, database] : DatabaseCatalog::instance().getDatabases()) + static std::pair makeBackupEntryForMetadata(const IAST & create_query) { - if (except_list.contains(database_name)) - continue; - if (database_name == DatabaseCatalog::SYSTEM_DATABASE || database_name == DatabaseCatalog::TEMPORARY_DATABASE) - continue; - backupDatabase(database, {}, context, renaming_config, backup_entries); + auto metadata_entry = std::make_unique(serializeAST(create_query)); + String metadata_path = getMetadataPathInBackup(create_query); + return {metadata_path, std::move(metadata_entry)}; } - } - void makeDatabaseIfNotExists(const String & database_name, ContextMutablePtr context) - { - if (DatabaseCatalog::instance().isDatabaseExist(database_name)) - return; - - /// We create and execute `create` query for the database name. 
- auto create_query = std::make_shared(); - create_query->setDatabase(database_name); - create_query->if_not_exists = true; - InterpreterCreateQuery create_interpreter{create_query, context}; - create_interpreter.execute(); - } - - ASTPtr readCreateQueryFromBackup(const DatabaseAndTableName & table_name, const BackupPtr & backup) - { - String create_query_path = getMetadataPathInBackup(table_name); - auto read_buffer = backup->readFile(create_query_path)->getReadBuffer(); - String create_query_str; - readStringUntilEOF(create_query_str, *read_buffer); - read_buffer.reset(); - ParserCreateQuery create_parser; - return parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - } - - ASTPtr readCreateQueryFromBackup(const String & database_name, const BackupPtr & backup) - { - String create_query_path = getMetadataPathInBackup(database_name); - auto read_buffer = backup->readFile(create_query_path)->getReadBuffer(); - String create_query_str; - readStringUntilEOF(create_query_str, *read_buffer); - read_buffer.reset(); - ParserCreateQuery create_parser; - return parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - } - - void restoreTable( - const DatabaseAndTableName & table_name, - const ASTs & partitions, - ContextMutablePtr context, - const BackupPtr & backup, - const BackupRenamingConfigPtr & renaming_config, - RestoreObjectsTasks & restore_tasks) - { - ASTPtr create_query = readCreateQueryFromBackup(table_name, backup); - auto new_create_query = typeid_cast>(renameInCreateQuery(create_query, renaming_config, context)); - - restore_tasks.emplace_back([table_name, new_create_query, partitions, context, backup]() -> RestoreDataTasks + /// Information which is used to make an instance of RestoreTableFromBackupTask. + struct CreateTableInfo { - DatabaseAndTableName new_table_name{new_create_query->getDatabase(), new_create_query->getTable()}; - if (new_create_query->temporary) - new_table_name.first = DatabaseCatalog::TEMPORARY_DATABASE; - - context->checkAccess(AccessType::INSERT, new_table_name.first, new_table_name.second); - + ASTPtr create_query; StoragePtr storage; - for (size_t try_index = 0; try_index != 10; ++try_index) - { - if (DatabaseCatalog::instance().isTableExist({new_table_name.first, new_table_name.second}, context)) - { - DatabasePtr existing_database; - StoragePtr existing_storage; - std::tie(existing_database, existing_storage) = DatabaseCatalog::instance().tryGetDatabaseAndTable({new_table_name.first, new_table_name.second}, context); - if (existing_storage) - { - if (auto existing_table_create_query = existing_database->tryGetCreateTableQuery(new_table_name.second, context)) - { - if (hasCompatibleDataToRestoreTable(*new_create_query, existing_table_create_query->as())) - { - storage = existing_storage; - break; - } - else - { - String error_message = (new_table_name.first == DatabaseCatalog::TEMPORARY_DATABASE) - ? ("Temporary table " + backQuoteIfNeed(new_table_name.second) + " already exists") - : ("Table " + backQuoteIfNeed(new_table_name.first) + "." 
+ backQuoteIfNeed(new_table_name.second) - + " already exists"); - throw Exception(error_message, ErrorCodes::CANNOT_RESTORE_TABLE); - } - } - } - } + DatabaseAndTableName name_in_backup; + ASTs partitions; + bool has_data = false; + }; - makeDatabaseIfNotExists(new_table_name.first, context); - - try - { - InterpreterCreateQuery create_interpreter{new_create_query, context}; - create_interpreter.execute(); - } - catch (Exception & e) - { - if (e.code() != ErrorCodes::TABLE_ALREADY_EXISTS) - throw; - } - } - - if (!storage) - { - String error_message = (new_table_name.first == DatabaseCatalog::TEMPORARY_DATABASE) - ? ("Could not create temporary table " + backQuoteIfNeed(new_table_name.second) + " for restoring") - : ("Could not create table " + backQuoteIfNeed(new_table_name.first) + "." + backQuoteIfNeed(new_table_name.second) - + " for restoring"); - throw Exception(error_message, ErrorCodes::CANNOT_RESTORE_TABLE); - } - - String data_path_in_backup = getDataPathInBackup(table_name); - RestoreDataTasks restore_data_tasks = storage->restoreFromBackup(backup, data_path_in_backup, partitions, context); - - /// Keep `storage` alive while we're executing `restore_data_tasks`. - for (auto & restore_data_task : restore_data_tasks) - restore_data_task = [restore_data_task, storage]() { restore_data_task(); }; - - return restore_data_tasks; - }); - } - - void restoreDatabase(const String & database_name, const std::set & except_list, ContextMutablePtr context, const BackupPtr & backup, const BackupRenamingConfigPtr & renaming_config, RestoreObjectsTasks & restore_tasks) - { - ASTPtr create_query = readCreateQueryFromBackup(database_name, backup); - auto new_create_query = typeid_cast>(renameInCreateQuery(create_query, renaming_config, context)); - - restore_tasks.emplace_back([database_name, new_create_query, except_list, context, backup, renaming_config]() -> RestoreDataTasks + /// Information which is used to make an instance of RestoreDatabaseFromBackupTask. + struct CreateDatabaseInfo { - const String & new_database_name = new_create_query->getDatabase(); - context->checkAccess(AccessType::SHOW_TABLES, new_database_name); + ASTPtr create_query; + String original_name; - if (!DatabaseCatalog::instance().isDatabaseExist(new_database_name)) - { - /// We create and execute `create` query for the database name. - new_create_query->if_not_exists = true; - InterpreterCreateQuery create_interpreter{new_create_query, context}; - create_interpreter.execute(); - } + /// Whether the creation of this database is specified explicitly, via RESTORE DATABASE or + /// RESTORE ALL DATABASES. + /// It's false if the creation of this database is caused by creating a table contained in it. + bool is_explicit = false; - RestoreObjectsTasks restore_objects_tasks; - Strings table_metadata_filenames = backup->listFiles("metadata/" + escapeForFileName(database_name) + "/", "/"); - for (const String & table_metadata_filename : table_metadata_filenames) - { - String table_name = unescapeForFileName(fs::path{table_metadata_filename}.stem()); - if (except_list.contains(table_name)) - continue; - restoreTable({database_name, table_name}, {}, context, backup, renaming_config, restore_objects_tasks); - } + /// If this is set it means the following error: + /// it means that for implicitly created database there were two different create query + /// generated so we cannot restore the database. 
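The different_create_query member described above covers the case where two source databases are renamed onto the same target name: the first table to arrive records an implicit CREATE DATABASE, and a later table coming from a different original database flags a conflict if the generated definitions differ, so the final pass can refuse to build the backup. A standalone sketch of that bookkeeping (std:: types, illustrative names):

```cpp
#include <map>
#include <optional>
#include <stdexcept>
#include <string>

struct DatabaseInfo
{
    std::string create_query;                      /// Definition recorded for the target database.
    std::string original_name;                     /// Which source database produced it.
    std::optional<std::string> conflicting_query;  /// Set when a different definition shows up later.
};

void recordImplicitDatabase(std::map<std::string, DatabaseInfo> & dbs,
                            const std::string & target_name,
                            const std::string & original_name,
                            const std::string & create_query)
{
    auto [it, inserted] = dbs.try_emplace(target_name, DatabaseInfo{create_query, original_name, {}});
    if (inserted)
        return;
    auto & info = it->second;
    if (!info.conflicting_query && info.original_name != original_name && info.create_query != create_query)
        info.conflicting_query = create_query;  /// Remembered so the final pass can report both definitions.
}

void checkNoConflicts(const std::map<std::string, DatabaseInfo> & dbs)
{
    for (const auto & [name, info] : dbs)
        if (info.conflicting_query)
            throw std::runtime_error("Cannot backup database " + name + ": two different definitions were generated");
}
```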
+ ASTPtr different_create_query; + }; - RestoreDataTasks restore_data_tasks; - for (auto & restore_object_task : restore_objects_tasks) - insertAtEnd(restore_data_tasks, std::move(restore_object_task)()); - return restore_data_tasks; - }); - } - - void restoreAllDatabases(const std::set & except_list, ContextMutablePtr context, const BackupPtr & backup, const BackupRenamingConfigPtr & renaming_config, RestoreObjectsTasks & restore_tasks) - { - restore_tasks.emplace_back([except_list, context, backup, renaming_config]() -> RestoreDataTasks - { - RestoreObjectsTasks restore_objects_tasks; - Strings database_metadata_filenames = backup->listFiles("metadata/", "/"); - for (const String & database_metadata_filename : database_metadata_filenames) - { - String database_name = unescapeForFileName(fs::path{database_metadata_filename}.stem()); - if (except_list.contains(database_name)) - continue; - restoreDatabase(database_name, {}, context, backup, renaming_config, restore_objects_tasks); - } - - RestoreDataTasks restore_data_tasks; - for (auto & restore_object_task : restore_objects_tasks) - insertAtEnd(restore_data_tasks, std::move(restore_object_task)()); - return restore_data_tasks; - }); - } + ContextPtr context; + BackupSettings backup_settings; + DDLRenamingSettings renaming_settings; + std::map databases; + std::map tables; + }; } -BackupEntries makeBackupEntries(const Elements & elements, const ContextPtr & context) +BackupEntries makeBackupEntries(const ContextPtr & context, const Elements & elements, const BackupSettings & backup_settings) { - BackupEntries backup_entries; - - auto elements2 = adjustElements(elements, context->getCurrentDatabase()); - auto renaming_config = std::make_shared(); - renaming_config->setFromBackupQueryElements(elements2); - - for (const auto & element : elements2) - { - switch (element.type) - { - case ElementType::TABLE: - { - const String & database_name = element.name.first; - const String & table_name = element.name.second; - auto [database, storage] = DatabaseCatalog::instance().getDatabaseAndTable({database_name, table_name}, context); - backupTable({database, storage}, table_name, element.partitions, context, renaming_config, backup_entries); - break; - } - - case ElementType::DATABASE: - { - const String & database_name = element.name.first; - auto database = DatabaseCatalog::instance().getDatabase(database_name, context); - backupDatabase(database, element.except_list, context, renaming_config, backup_entries); - break; - } - - case ElementType::ALL_DATABASES: - { - backupAllDatabases(element.except_list, context, renaming_config, backup_entries); - break; - } - - default: - throw Exception("Unexpected element type", ErrorCodes::LOGICAL_ERROR); /// other element types have been removed in deduplicateElements() - } - } - - /// A backup cannot be empty. - if (backup_entries.empty()) - throw Exception("Backup must not be empty", ErrorCodes::BACKUP_IS_EMPTY); - - /// Check that all backup entries are unique. 
- ::sort( - backup_entries.begin(), - backup_entries.end(), - [](const std::pair> & lhs, const std::pair> & rhs) - { - return lhs.first < rhs.first; - }); - auto adjacent = std::adjacent_find(backup_entries.begin(), backup_entries.end()); - if (adjacent != backup_entries.end()) - throw Exception("Cannot write multiple entries with the same name " + quoteString(adjacent->first), ErrorCodes::BACKUP_ELEMENT_DUPLICATE); - - return backup_entries; + BackupEntriesBuilder builder{context, backup_settings}; + builder.prepare(elements); + return builder.makeBackupEntries(); } -UInt64 estimateBackupSize(const BackupEntries & backup_entries, const BackupPtr & base_backup) -{ - UInt64 total_size = 0; - for (const auto & [name, entry] : backup_entries) - { - UInt64 data_size = entry->getSize(); - if (base_backup) - { - if (base_backup->fileExists(name) && (data_size == base_backup->getFileSize(name))) - { - auto checksum = entry->getChecksum(); - if (checksum && (*checksum == base_backup->getFileChecksum(name))) - continue; - } - } - total_size += data_size; - } - return total_size; -} void writeBackupEntries(BackupMutablePtr backup, BackupEntries && backup_entries, size_t num_threads) { @@ -696,7 +341,7 @@ void writeBackupEntries(BackupMutablePtr backup, BackupEntries && backup_entries { try { - backup->addFile(name, std::move(entry)); + backup->writeFile(name, std::move(entry)); } catch (...) { @@ -729,105 +374,46 @@ void writeBackupEntries(BackupMutablePtr backup, BackupEntries && backup_entries } -RestoreObjectsTasks makeRestoreTasks(const Elements & elements, ContextMutablePtr context, const BackupPtr & backup) +String getDataPathInBackup(const DatabaseAndTableName & table_name) { - RestoreObjectsTasks restore_tasks; - - auto elements2 = adjustElements(elements, context->getCurrentDatabase()); - auto renaming_config = std::make_shared(); - renaming_config->setFromBackupQueryElements(elements2); - - for (const auto & element : elements2) - { - switch (element.type) - { - case ElementType::TABLE: - { - const String & database_name = element.name.first; - const String & table_name = element.name.second; - restoreTable({database_name, table_name}, element.partitions, context, backup, renaming_config, restore_tasks); - break; - } - - case ElementType::DATABASE: - { - const String & database_name = element.name.first; - restoreDatabase(database_name, element.except_list, context, backup, renaming_config, restore_tasks); - break; - } - - case ElementType::ALL_DATABASES: - { - restoreAllDatabases(element.except_list, context, backup, renaming_config, restore_tasks); - break; - } - - default: - throw Exception("Unexpected element type", ErrorCodes::LOGICAL_ERROR); /// other element types have been removed in deduplicateElements() - } - } - - return restore_tasks; + if (table_name.first.empty() || table_name.second.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name and table name must not be empty"); + assert(!table_name.first.empty() && !table_name.second.empty()); + return String{"data/"} + escapeForFileName(table_name.first) + "/" + escapeForFileName(table_name.second) + "/"; } - -void executeRestoreTasks(RestoreObjectsTasks && restore_tasks, size_t num_threads) +String getDataPathInBackup(const IAST & create_query) { - if (!num_threads) - num_threads = 1; + const auto & create = create_query.as(); + if (!create.table) + return {}; + if (create.temporary) + return getDataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()}); + return 
getDataPathInBackup({create.getDatabase(), create.getTable()}); +} - RestoreDataTasks restore_data_tasks; - for (auto & restore_object_task : restore_tasks) - insertAtEnd(restore_data_tasks, std::move(restore_object_task)()); - restore_tasks.clear(); +String getMetadataPathInBackup(const DatabaseAndTableName & table_name) +{ + if (table_name.first.empty() || table_name.second.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name and table name must not be empty"); + return String{"metadata/"} + escapeForFileName(table_name.first) + "/" + escapeForFileName(table_name.second) + ".sql"; +} - std::vector threads; - size_t num_active_threads = 0; - std::mutex mutex; - std::condition_variable cond; - std::exception_ptr exception; +String getMetadataPathInBackup(const String & database_name) +{ + if (database_name.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name must not be empty"); + return String{"metadata/"} + escapeForFileName(database_name) + ".sql"; +} - for (auto & restore_data_task : restore_data_tasks) - { - { - std::unique_lock lock{mutex}; - if (exception) - break; - cond.wait(lock, [&] { return num_active_threads < num_threads; }); - if (exception) - break; - ++num_active_threads; - } - - threads.emplace_back([&restore_data_task, &mutex, &cond, &num_active_threads, &exception]() mutable - { - try - { - restore_data_task(); - restore_data_task = {}; - } - catch (...) - { - std::lock_guard lock{mutex}; - if (!exception) - exception = std::current_exception(); - } - - { - std::lock_guard lock{mutex}; - --num_active_threads; - cond.notify_all(); - } - }); - } - - for (auto & thread : threads) - thread.join(); - - restore_data_tasks.clear(); - - if (exception) - std::rethrow_exception(exception); +String getMetadataPathInBackup(const IAST & create_query) +{ + const auto & create = create_query.as(); + if (!create.table) + return getMetadataPathInBackup(create.getDatabase()); + if (create.temporary) + return getMetadataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()}); + return getMetadataPathInBackup({create.getDatabase(), create.getTable()}); } } diff --git a/src/Backups/BackupUtils.h b/src/Backups/BackupUtils.h index 70f080cf6e9..d001d5a4bec 100644 --- a/src/Backups/BackupUtils.h +++ b/src/Backups/BackupUtils.h @@ -5,35 +5,28 @@ namespace DB { - class IBackup; -using BackupPtr = std::shared_ptr; using BackupMutablePtr = std::shared_ptr; class IBackupEntry; using BackupEntryPtr = std::unique_ptr; using BackupEntries = std::vector>; -using RestoreDataTask = std::function; -using RestoreDataTasks = std::vector; -using RestoreObjectTask = std::function; -using RestoreObjectsTasks = std::vector; +struct BackupSettings; class Context; using ContextPtr = std::shared_ptr; -using ContextMutablePtr = std::shared_ptr; - /// Prepares backup entries. -BackupEntries makeBackupEntries(const ASTBackupQuery::Elements & elements, const ContextPtr & context); - -/// Estimate total size of the backup which would be written from the specified entries. -UInt64 estimateBackupSize(const BackupEntries & backup_entries, const BackupPtr & base_backup); +BackupEntries makeBackupEntries(const ContextPtr & context, const ASTBackupQuery::Elements & elements, const BackupSettings & backup_settings); /// Write backup entries to an opened backup. void writeBackupEntries(BackupMutablePtr backup, BackupEntries && backup_entries, size_t num_threads); -/// Prepare restore tasks. 
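The removed executeRestoreTasks() shown above caps concurrency with a hand-rolled pattern: spawn a thread per task, but block before spawning until fewer than num_threads workers are active, and remember the first exception so it can be rethrown after all threads are joined. A standalone sketch of that pattern (illustrative names, std::function tasks instead of the backup-specific task types):

```cpp
#include <condition_variable>
#include <exception>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

/// Run tasks on ad-hoc threads, never more than num_threads at once,
/// and rethrow the first captured exception at the end.
void runThrottled(std::vector<std::function<void()>> tasks, size_t num_threads)
{
    if (!num_threads)
        num_threads = 1;

    std::vector<std::thread> threads;
    size_t num_active = 0;
    std::mutex mutex;
    std::condition_variable cond;
    std::exception_ptr exception;

    for (auto & task : tasks)
    {
        {
            std::unique_lock lock{mutex};
            cond.wait(lock, [&] { return num_active < num_threads; });  /// Wait for a free slot.
            if (exception)
                break;  /// Stop scheduling new work after the first failure.
            ++num_active;
        }

        threads.emplace_back([&, task = std::move(task)]
        {
            try
            {
                task();
            }
            catch (...)
            {
                std::lock_guard lock{mutex};
                if (!exception)
                    exception = std::current_exception();
            }
            {
                std::lock_guard lock{mutex};
                --num_active;
                cond.notify_all();  /// Free the slot for the scheduler loop.
            }
        });
    }

    for (auto & thread : threads)
        thread.join();

    if (exception)
        std::rethrow_exception(exception);
}
```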
-RestoreObjectsTasks makeRestoreTasks(const ASTBackupQuery::Elements & elements, ContextMutablePtr context, const BackupPtr & backup);
+/// Returns the path to metadata in backup.
+String getMetadataPathInBackup(const DatabaseAndTableName & table_name);
+String getMetadataPathInBackup(const String & database_name);
+String getMetadataPathInBackup(const IAST & create_query);
 
-/// Execute restore tasks.
-void executeRestoreTasks(RestoreObjectsTasks && restore_tasks, size_t num_threads);
+/// Returns the path to table's data in backup.
+String getDataPathInBackup(const DatabaseAndTableName & table_name);
+String getDataPathInBackup(const IAST & create_query);
 
 }
diff --git a/src/Backups/DDLCompareUtils.cpp b/src/Backups/DDLCompareUtils.cpp
new file mode 100644
index 00000000000..625a0befe63
--- /dev/null
+++ b/src/Backups/DDLCompareUtils.cpp
@@ -0,0 +1,87 @@
+#include
+#include
+#include
+
+
+namespace DB
+{
+namespace
+{
+    std::shared_ptr prepareDDLToCompare(const ASTCreateQuery & ast)
+    {
+        auto res = typeid_cast>(ast.shared_from_this());
+
+        std::shared_ptr clone;
+        auto get_clone = [&]
+        {
+            if (!clone)
+            {
+                clone = typeid_cast>(res->clone());
+                res = clone;
+            }
+            return clone;
+        };
+
+        /// Remove UUIDs.
+        if (res->uuid != UUIDHelpers::Nil)
+            get_clone()->uuid = UUIDHelpers::Nil;
+
+        if (res->to_inner_uuid != UUIDHelpers::Nil)
+            get_clone()->to_inner_uuid = UUIDHelpers::Nil;
+
+        /// Clear IF NOT EXISTS flag.
+        if (res->if_not_exists)
+            get_clone()->if_not_exists = false;
+
+        return res;
+    }
+}
+
+
+bool areTableDefinitionsSame(const IAST & table1, const IAST & table2)
+{
+    auto ast1 = typeid_cast>(table1.shared_from_this());
+    if (!ast1 || !ast1->table)
+        return false;
+
+    auto ast2 = typeid_cast>(table2.shared_from_this());
+    if (!ast2 || !ast2->table)
+        return false;
+
+    if ((ast1->uuid != ast2->uuid) || (ast1->to_inner_uuid != ast2->to_inner_uuid) ||
+        (ast1->if_not_exists != ast2->if_not_exists))
+    {
+        ast1 = prepareDDLToCompare(*ast1);
+        ast2 = prepareDDLToCompare(*ast2);
+    }
+
+    return serializeAST(*ast1) == serializeAST(*ast2);
+}
+
+
+bool areDatabaseDefinitionsSame(const IAST & database1, const IAST & database2)
+{
+    auto ast1 = typeid_cast>(database1.shared_from_this());
+    if (!ast1 || ast1->table || !ast1->database)
+        return false;
+
+    auto ast2 = typeid_cast>(database2.shared_from_this());
+    if (!ast2 || ast2->table || !ast2->database)
+        return false;
+
+    if ((ast1->uuid != ast2->uuid) || (ast1->if_not_exists != ast2->if_not_exists))
+    {
+        ast1 = prepareDDLToCompare(*ast1);
+        ast2 = prepareDDLToCompare(*ast2);
+    }
+
+    return serializeAST(*ast1) == serializeAST(*ast2);
+}
+
+
+bool areTableDataCompatible(const IAST & src_table, const IAST & dest_table)
+{
+    return areTableDefinitionsSame(src_table, dest_table);
+}
+
+}
diff --git a/src/Backups/DDLCompareUtils.h b/src/Backups/DDLCompareUtils.h
new file mode 100644
index 00000000000..acb99c243ea
--- /dev/null
+++ b/src/Backups/DDLCompareUtils.h
@@ -0,0 +1,17 @@
+#pragma once
+
+
+namespace DB
+{
+class IAST;
+
+/// Checks that two table definitions are actually the same.
+bool areTableDefinitionsSame(const IAST & table1, const IAST & table2);
+
+/// Checks that two database definitions are actually the same.
+bool areDatabaseDefinitionsSame(const IAST & database1, const IAST & database2);
+
+/// Whether the data from the first table can be attached to the second table.
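areTableDefinitionsSame and areDatabaseDefinitionsSame above consider two definitions equal when their serialized forms match once the volatile parts (UUIDs, the IF NOT EXISTS flag) have been cleared, cloning lazily so the original ASTs stay untouched. A standalone sketch of the same normalize-then-compare idea with a plain struct instead of an AST (illustrative names):

```cpp
#include <string>

struct CreateStmt
{
    std::string uuid;            /// Instance-specific, must not influence the comparison.
    bool if_not_exists = false;  /// Also irrelevant for equality.
    std::string body;            /// The part of the definition that actually matters.
};

static CreateStmt normalized(CreateStmt stmt)
{
    stmt.uuid.clear();  /// Clear the volatile fields before comparing.
    stmt.if_not_exists = false;
    return stmt;
}

static std::string serialize(const CreateStmt & stmt)
{
    std::string res;
    if (stmt.if_not_exists)
        res += "IF NOT EXISTS ";
    res += stmt.body;
    if (!stmt.uuid.empty())
        res += " UUID '" + stmt.uuid + "'";
    return res;
}

bool areDefinitionsSame(const CreateStmt & lhs, const CreateStmt & rhs)
{
    return serialize(normalized(lhs)) == serialize(normalized(rhs));
}
```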
+bool areTableDataCompatible(const IAST & src_table, const IAST & dest_table); + +} diff --git a/src/Backups/DDLRenamingVisitor.cpp b/src/Backups/DDLRenamingVisitor.cpp new file mode 100644 index 00000000000..bcdfb32c29f --- /dev/null +++ b/src/Backups/DDLRenamingVisitor.cpp @@ -0,0 +1,379 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int WRONG_DDL_RENAMING_SETTINGS; + extern const int LOGICAL_ERROR; +} + +namespace +{ + /// Replaces names of tables and databases used in a CREATE query, which can be either CREATE TABLE or + /// CREATE DICTIONARY or CREATE VIEW or CREATE TEMPORARY TABLE or CREATE DATABASE query. + void visitCreateQuery(ASTCreateQuery & create, const DDLRenamingVisitor::Data & data) + { + if (create.table) + { + DatabaseAndTableName table_name; + table_name.second = create.getTable(); + if (create.temporary) + table_name.first = DatabaseCatalog::TEMPORARY_DATABASE; + else if (create.database) + table_name.first = create.getDatabase(); + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE TABLE query must not be empty"); + + table_name = data.renaming_settings.getNewTableName(table_name); + + if (table_name.first == DatabaseCatalog::TEMPORARY_DATABASE) + { + create.temporary = true; + create.setDatabase(""); + } + else + { + create.temporary = false; + create.setDatabase(table_name.first); + } + create.setTable(table_name.second); + } + else if (create.database) + { + String database_name = create.getDatabase(); + database_name = data.renaming_settings.getNewDatabaseName(database_name); + create.setDatabase(database_name); + } + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE DATABASE query must not be empty"); + + if (!create.as_table.empty() && !create.as_database.empty()) + std::tie(create.as_database, create.as_table) = data.renaming_settings.getNewTableName({create.as_database, create.as_table}); + + if (!create.to_table_id.table_name.empty() && !create.to_table_id.database_name.empty()) + { + auto to_table = data.renaming_settings.getNewTableName({create.to_table_id.database_name, create.to_table_id.table_name}); + create.to_table_id = StorageID{to_table.first, to_table.second}; + } + } + + /// Replaces names of a database and a table in a expression like `db`.`table` + void visitTableExpression(ASTTableExpression & expr, const DDLRenamingVisitor::Data & data) + { + if (!expr.database_and_table_name) + return; + + ASTIdentifier * id = expr.database_and_table_name->as(); + if (!id) + return; + + auto table_id = id->createTable(); + if (!table_id) + return; + + const String & db_name = table_id->getDatabaseName(); + const String & table_name = table_id->shortName(); + if (db_name.empty() || table_name.empty()) + return; + + String new_db_name, new_table_name; + std::tie(new_db_name, new_table_name) = data.renaming_settings.getNewTableName({db_name, table_name}); + if ((new_db_name == db_name) && (new_table_name == table_name)) + return; + + expr.database_and_table_name = std::make_shared(Strings{new_db_name, new_table_name}); + expr.children.push_back(expr.database_and_table_name); + } + + /// Replaces a database's name passed via an argument of the function merge() or the table engine Merge. 
+ void visitFunctionMerge(ASTFunction & function, const DDLRenamingVisitor::Data & data) + { + if (!function.arguments) + return; + + /// The first argument is a database's name and we can rename it. + /// The second argument is a regular expression and we can do nothing about it. + auto & args = function.arguments->as().children; + size_t db_name_arg_index = 0; + if (args.size() <= db_name_arg_index) + return; + + String db_name = evaluateConstantExpressionForDatabaseName(args[db_name_arg_index], data.context)->as().value.safeGet(); + if (db_name.empty()) + return; + + String new_db_name = data.renaming_settings.getNewDatabaseName(db_name); + if (new_db_name == db_name) + return; + args[db_name_arg_index] = std::make_shared(new_db_name); + } + + /// Replaces names of a table and a database passed via arguments of the function remote() or cluster() or the table engine Distributed. + void visitFunctionRemote(ASTFunction & function, const DDLRenamingVisitor::Data & data) + { + if (!function.arguments) + return; + + /// The first argument is an address or cluster's name, so we skip it. + /// The second argument can be either 'db.name' or just 'db' followed by the third argument 'table'. + auto & args = function.arguments->as().children; + + const auto * second_arg_as_function = args[1]->as(); + if (second_arg_as_function && TableFunctionFactory::instance().isTableFunctionName(second_arg_as_function->name)) + return; + + size_t db_name_index = 1; + if (args.size() <= db_name_index) + return; + + String name = evaluateConstantExpressionForDatabaseName(args[db_name_index], data.context)->as().value.safeGet(); + + size_t table_name_index = static_cast(-1); + + QualifiedTableName qualified_name; + + if (function.name == "Distributed") + qualified_name.table = name; + else + qualified_name = QualifiedTableName::parseFromString(name); + + if (qualified_name.database.empty()) + { + std::swap(qualified_name.database, qualified_name.table); + table_name_index = 2; + if (args.size() <= table_name_index) + return; + qualified_name.table = evaluateConstantExpressionForDatabaseName(args[table_name_index], data.context)->as().value.safeGet(); + } + + const String & db_name = qualified_name.database; + const String & table_name = qualified_name.table; + + if (db_name.empty() || table_name.empty()) + return; + + String new_db_name, new_table_name; + std::tie(new_db_name, new_table_name) = data.renaming_settings.getNewTableName({db_name, table_name}); + if ((new_db_name == db_name) && (new_table_name == table_name)) + return; + + if (table_name_index != static_cast(-1)) + { + if (new_db_name != db_name) + args[db_name_index] = std::make_shared(new_db_name); + if (new_table_name != table_name) + args[table_name_index] = std::make_shared(new_table_name); + } + else + { + args[db_name_index] = std::make_shared(new_db_name); + args.insert(args.begin() + db_name_index + 1, std::make_shared(new_table_name)); + } + } + + /// Replaces names of tables and databases used in arguments of a table function or a table engine. 
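visitFunctionRemote above has to handle two argument shapes: remote('addr', 'db', 'table') with separate literals, and remote('addr', 'db.table') with both parts packed into one literal, in which case a rename may force the single argument to be split into two. A simplified standalone sketch of the qualified-name split (the real code goes through QualifiedTableName::parseFromString, which also understands quoting; names here are illustrative):

```cpp
#include <string>
#include <utility>

/// "db.table" -> {"db", "table"}; a plain "table" has an empty database part.
std::pair<std::string, std::string> splitQualifiedName(const std::string & name)
{
    auto dot = name.find('.');
    if (dot == std::string::npos)
        return {std::string{}, name};
    return {name.substr(0, dot), name.substr(dot + 1)};
}

/// remote('addr', 'db', 'hits')  -> database and table arrive as separate arguments.
/// remote('addr', 'db.hits')     -> both parts live in one argument; after renaming,
///                                  the visitor re-emits them as two separate literals.
```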
+ void visitFunction(ASTFunction & function, const DDLRenamingVisitor::Data & data) + { + if ((function.name == "merge") || (function.name == "Merge")) + { + visitFunctionMerge(function, data); + } + else if ((function.name == "remote") || (function.name == "remoteSecure") || (function.name == "cluster") || + (function.name == "clusterAllReplicas") || (function.name == "Distributed")) + { + visitFunctionRemote(function, data); + } + } + + /// Replaces names of a table and a database used in source parameters of a dictionary. + void visitDictionary(ASTDictionary & dictionary, const DDLRenamingVisitor::Data & data) + { + if (!dictionary.source || dictionary.source->name != "clickhouse" || !dictionary.source->elements) + return; + + auto & elements = dictionary.source->elements->as().children; + String db_name, table_name; + size_t db_name_index = static_cast(-1); + size_t table_name_index = static_cast(-1); + + for (size_t i = 0; i != elements.size(); ++i) + { + auto & pair = elements[i]->as(); + if (pair.first == "db") + { + if (db_name_index != static_cast(-1)) + return; + db_name = pair.second->as().value.safeGet(); + db_name_index = i; + } + else if (pair.first == "table") + { + if (table_name_index != static_cast(-1)) + return; + table_name = pair.second->as().value.safeGet(); + table_name_index = i; + } + } + + if (db_name.empty() || table_name.empty()) + return; + + String new_db_name, new_table_name; + std::tie(new_db_name, new_table_name) = data.renaming_settings.getNewTableName({db_name, table_name}); + if ((new_db_name == db_name) && (new_table_name == table_name)) + return; + + if (new_db_name != db_name) + { + auto & pair = elements[db_name_index]->as(); + pair.replace(pair.second, std::make_shared(new_db_name)); + } + if (new_table_name != table_name) + { + auto & pair = elements[table_name_index]->as(); + pair.replace(pair.second, std::make_shared(new_table_name)); + } + } +} + + +void DDLRenamingSettings::setNewTableName(const DatabaseAndTableName & old_table_name, const DatabaseAndTableName & new_table_name) +{ + auto it = old_to_new_table_names.find(old_table_name); + if ((it != old_to_new_table_names.end())) + { + if (it->second == new_table_name) + return; + throw Exception(ErrorCodes::WRONG_DDL_RENAMING_SETTINGS, "Wrong renaming: it's specified that table {}.{} should be renamed to {}.{} and to {}.{} at the same time", + backQuoteIfNeed(old_table_name.first), backQuoteIfNeed(old_table_name.second), + backQuoteIfNeed(it->second.first), backQuoteIfNeed(it->second.second), + backQuoteIfNeed(new_table_name.first), backQuoteIfNeed(new_table_name.second)); + } + old_to_new_table_names[old_table_name] = new_table_name; +} + +void DDLRenamingSettings::setNewDatabaseName(const String & old_database_name, const String & new_database_name) +{ + auto it = old_to_new_database_names.find(old_database_name); + if ((it != old_to_new_database_names.end())) + { + if (it->second == new_database_name) + return; + throw Exception(ErrorCodes::WRONG_DDL_RENAMING_SETTINGS, "Wrong renaming: it's specified that database {} should be renamed to {} and to {} at the same time", + backQuoteIfNeed(old_database_name), backQuoteIfNeed(it->second), backQuoteIfNeed(new_database_name)); + } + old_to_new_database_names[old_database_name] = new_database_name; +} + +void DDLRenamingSettings::setFromBackupQuery(const ASTBackupQuery & backup_query, const String & current_database) +{ + setFromBackupQuery(backup_query.elements, current_database); +} + +void DDLRenamingSettings::setFromBackupQuery(const 
ASTBackupQuery::Elements & backup_query_elements, const String & current_database) +{ + old_to_new_table_names.clear(); + old_to_new_database_names.clear(); + + using ElementType = ASTBackupQuery::ElementType; + + for (const auto & element : backup_query_elements) + { + switch (element.type) + { + case ElementType::TABLE: + { + const String & table_name = element.name.second; + String database_name = element.name.first; + if (element.name_is_in_temp_db) + database_name = DatabaseCatalog::TEMPORARY_DATABASE; + else if (database_name.empty()) + database_name = current_database; + + const String & new_table_name = element.new_name.second; + String new_database_name = element.new_name.first; + if (element.new_name_is_in_temp_db) + new_database_name = DatabaseCatalog::TEMPORARY_DATABASE; + else if (new_database_name.empty()) + new_database_name = current_database; + + setNewTableName({database_name, table_name}, {new_database_name, new_table_name}); + break; + } + + case ASTBackupQuery::DATABASE: + { + String database_name = element.name.first; + if (element.name_is_in_temp_db) + database_name = DatabaseCatalog::TEMPORARY_DATABASE; + + String new_database_name = element.new_name.first; + if (element.new_name_is_in_temp_db) + new_database_name = DatabaseCatalog::TEMPORARY_DATABASE; + + setNewDatabaseName(database_name, new_database_name); + break; + } + + case ASTBackupQuery::ALL_DATABASES: break; + } + } +} + +DatabaseAndTableName DDLRenamingSettings::getNewTableName(const DatabaseAndTableName & old_table_name) const +{ + auto it = old_to_new_table_names.find(old_table_name); + if (it != old_to_new_table_names.end()) + return it->second; + return {getNewDatabaseName(old_table_name.first), old_table_name.second}; +} + +const String & DDLRenamingSettings::getNewDatabaseName(const String & old_database_name) const +{ + auto it = old_to_new_database_names.find(old_database_name); + if (it != old_to_new_database_names.end()) + return it->second; + return old_database_name; +} + + +bool DDLRenamingVisitor::needChildVisit(ASTPtr &, const ASTPtr &) { return true; } + +void DDLRenamingVisitor::visit(ASTPtr & ast, const Data & data) +{ + if (auto * create = ast->as()) + visitCreateQuery(*create, data); + else if (auto * expr = ast->as()) + visitTableExpression(*expr, data); + else if (auto * function = ast->as()) + visitFunction(*function, data); + else if (auto * dictionary = ast->as()) + visitDictionary(*dictionary, data); +} + +void renameInCreateQuery(ASTPtr & ast, const ContextPtr & global_context, const DDLRenamingSettings & renaming_settings) +{ + try + { + DDLRenamingVisitor::Data data{renaming_settings, global_context}; + DDLRenamingVisitor::Visitor{data}.visit(ast); + } + catch (...) + { + tryLogCurrentException("Backup", "Error while renaming in AST"); + } +} + +} diff --git a/src/Backups/DDLRenamingVisitor.h b/src/Backups/DDLRenamingVisitor.h new file mode 100644 index 00000000000..b624fffc267 --- /dev/null +++ b/src/Backups/DDLRenamingVisitor.h @@ -0,0 +1,61 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + + +namespace DB +{ +using DatabaseAndTableName = std::pair; +class IAST; +using ASTPtr = std::shared_ptr; +class Context; +using ContextPtr = std::shared_ptr; + +/// Keeps information about renamings of databases or tables being processed +/// while we're making a backup or while we're restoring from a backup. 
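setFromBackupQuery() above fills the two maps from the query elements, applying the same defaulting rules to both the old and the new name: an element flagged as temporary goes to the dedicated temporary database, and an empty database name falls back to the session's current database. A tiny standalone sketch of that resolution step (the temporary database value below is an assumption standing in for DatabaseCatalog::TEMPORARY_DATABASE):

```cpp
#include <string>

/// Assumed placeholder for DatabaseCatalog::TEMPORARY_DATABASE.
const std::string TEMPORARY_DATABASE = "_temporary_and_external_tables";

std::string resolveDatabase(const std::string & database, bool is_in_temp_db, const std::string & current_database)
{
    if (is_in_temp_db)
        return TEMPORARY_DATABASE;  /// Temporary tables live in one dedicated database.
    if (database.empty())
        return current_database;    /// "BACKUP TABLE t" means a table of the current database.
    return database;
}
```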
+class DDLRenamingSettings +{ +public: + DDLRenamingSettings() = default; + + void setNewTableName(const DatabaseAndTableName & old_table_name, const DatabaseAndTableName & new_table_name); + void setNewDatabaseName(const String & old_database_name, const String & new_database_name); + + void setFromBackupQuery(const ASTBackupQuery & backup_query, const String & current_database); + void setFromBackupQuery(const ASTBackupQuery::Elements & backup_query_elements, const String & current_database); + + /// Changes names according to the renaming. + DatabaseAndTableName getNewTableName(const DatabaseAndTableName & old_table_name) const; + const String & getNewDatabaseName(const String & old_database_name) const; + +private: + std::map old_to_new_table_names; + std::unordered_map old_to_new_database_names; +}; + + +/// Changes names in AST according to the renaming settings. +void renameInCreateQuery(ASTPtr & ast, const ContextPtr & global_context, const DDLRenamingSettings & renaming_settings); + +/// Visits ASTCreateQuery and changes names of tables and databases according to passed DDLRenamingConfig. +class DDLRenamingVisitor +{ +public: + struct Data + { + const DDLRenamingSettings & renaming_settings; + ContextPtr context; + }; + + using Visitor = InDepthNodeVisitor; + + static bool needChildVisit(ASTPtr &, const ASTPtr &); + static void visit(ASTPtr & ast, const Data & data); +}; + +} diff --git a/src/Backups/DirectoryBackup.cpp b/src/Backups/DirectoryBackup.cpp new file mode 100644 index 00000000000..dc4d098dbe9 --- /dev/null +++ b/src/Backups/DirectoryBackup.cpp @@ -0,0 +1,82 @@ +#include +#include +#include +#include + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + + +DirectoryBackup::DirectoryBackup( + const String & backup_name_, + const DiskPtr & disk_, + const String & path_, + const ContextPtr & context_, + const std::optional & base_backup_info_) + : BackupImpl(backup_name_, context_, base_backup_info_) + , disk(disk_), path(path_) +{ + /// Path to backup must end with '/' + if (!path.ends_with("/")) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Backup {}: Path to backup must end with '/', but {} doesn't.", getName(), quoteString(path)); + dir_path = fs::path(path).parent_path(); /// get path without terminating slash + + /// If `disk` is not specified, we create an internal instance of `DiskLocal` here. + if (!disk) + { + auto fspath = fs::path{dir_path}; + if (!fspath.has_filename()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Backup {}: Path to a backup must be a directory path.", getName(), quoteString(path)); + path = fspath.filename() / ""; + dir_path = fs::path(path).parent_path(); /// get path without terminating slash + String disk_path = fspath.remove_filename(); + disk = std::make_shared(disk_path, disk_path, 0); + } +} + + +DirectoryBackup::~DirectoryBackup() +{ + close(); +} + +bool DirectoryBackup::backupExists() const +{ + return disk->isDirectory(dir_path); +} + +void DirectoryBackup::openImpl(OpenMode open_mode_) +{ + if (open_mode_ == OpenMode::WRITE) + disk->createDirectories(dir_path); +} + +void DirectoryBackup::closeImpl(bool writing_finalized_) +{ + if ((getOpenModeNoLock() == OpenMode::WRITE) && !writing_finalized_ && disk->isDirectory(dir_path)) + { + /// Creating of the backup wasn't finished correctly, + /// so the backup cannot be used and it's better to remove its files. 
+ disk->removeRecursive(dir_path); + } +} + +std::unique_ptr DirectoryBackup::readFileImpl(const String & file_name) const +{ + String file_path = path + file_name; + return disk->readFile(file_path); +} + +std::unique_ptr DirectoryBackup::writeFileImpl(const String & file_name) +{ + String file_path = path + file_name; + disk->createDirectories(fs::path(file_path).parent_path()); + return disk->writeFile(file_path); +} + +} diff --git a/src/Backups/BackupInDirectory.h b/src/Backups/DirectoryBackup.h similarity index 73% rename from src/Backups/BackupInDirectory.h rename to src/Backups/DirectoryBackup.h index dd6a39d019a..7d9b5cc4557 100644 --- a/src/Backups/BackupInDirectory.h +++ b/src/Backups/DirectoryBackup.h @@ -10,25 +10,24 @@ using DiskPtr = std::shared_ptr; /// Represents a backup stored on a disk. /// A backup is stored as a directory, each entry is stored as a file in that directory. -class BackupInDirectory : public BackupImpl +class DirectoryBackup : public BackupImpl { public: /// `disk`_ is allowed to be nullptr and that means the `path_` is a path in the local filesystem. - BackupInDirectory( + DirectoryBackup( const String & backup_name_, - OpenMode open_mode_, const DiskPtr & disk_, const String & path_, const ContextPtr & context_, const std::optional & base_backup_info_ = {}); - ~BackupInDirectory() override; + ~DirectoryBackup() override; private: bool backupExists() const override; - void startWriting() override; - void removeAllFilesAfterFailure() override; + void openImpl(OpenMode open_mode_) override; + void closeImpl(bool writing_finalized_) override; std::unique_ptr readFileImpl(const String & file_name) const override; - std::unique_ptr addFileImpl(const String & file_name) override; + std::unique_ptr writeFileImpl(const String & file_name) override; DiskPtr disk; String path; diff --git a/src/Backups/IBackup.h b/src/Backups/IBackup.h index 0fd94a6d53c..5e13ff88575 100644 --- a/src/Backups/IBackup.h +++ b/src/Backups/IBackup.h @@ -1,8 +1,8 @@ #pragma once #include -#include #include +#include namespace DB @@ -13,7 +13,7 @@ using BackupEntryPtr = std::unique_ptr; /// Represents a backup, i.e. a storage of BackupEntries which can be accessed by their names. /// A backup can be either incremental or non-incremental. An incremental backup doesn't store /// the data of the entries which are not changed compared to its base backup. -class IBackup : public std::enable_shared_from_this, public TypePromotion +class IBackup : public std::enable_shared_from_this { public: IBackup() = default; @@ -24,13 +24,18 @@ public: enum class OpenMode { + NONE, READ, WRITE, }; - /// A backup can be open either in READ or WRITE mode. + /// Opens the backup and start its reading or writing depending on `open_mode`. + virtual void open(OpenMode open_mode) = 0; virtual OpenMode getOpenMode() const = 0; + /// Closes the backup and ends its reading or writing. + virtual void close() = 0; + /// Returns the time point when this backup was created. virtual time_t getTimestamp() const = 0; @@ -57,17 +62,20 @@ public: /// This function does the same as `read(file_name)->getCheckum()` but faster. virtual UInt128 getFileChecksum(const String & file_name) const = 0; + /// Finds a file by its checksum, returns nullopt if not found. + virtual std::optional findFileByChecksum(const UInt128 & checksum) const = 0; + /// Reads an entry from the backup. virtual BackupEntryPtr readFile(const String & file_name) const = 0; /// Puts a new entry to the backup. 
- virtual void addFile(const String & file_name, BackupEntryPtr entry) = 0; - - /// Whether it's possible to add new entries to the backup in multiple threads. - virtual bool supportsWritingInMultipleThreads() const { return true; } + virtual void writeFile(const String & file_name, BackupEntryPtr entry) = 0; /// Finalizes writing the backup, should be called after all entries have been successfully written. virtual void finalizeWriting() = 0; + + /// Whether it's possible to add new entries to the backup in multiple threads. + virtual bool supportsWritingInMultipleThreads() const { return true; } }; using BackupPtr = std::shared_ptr; diff --git a/src/Backups/IBackupEntriesBatch.cpp b/src/Backups/IBackupEntriesBatch.cpp new file mode 100644 index 00000000000..bf6bc6cce83 --- /dev/null +++ b/src/Backups/IBackupEntriesBatch.cpp @@ -0,0 +1,37 @@ +#include +#include + + +namespace DB +{ + +class IBackupEntriesBatch::BackupEntryFromBatch : public IBackupEntry +{ +public: + BackupEntryFromBatch(const std::shared_ptr & generator_, size_t index_) : batch(generator_), index(index_) + { + assert(batch); + } + + UInt64 getSize() const override { return batch->getSize(index); } + std::optional getChecksum() const override { return batch->getChecksum(index); } + std::unique_ptr getReadBuffer() const override { return batch->getReadBuffer(index); } + +private: + const std::shared_ptr batch; + const size_t index; +}; + + +BackupEntries IBackupEntriesBatch::getBackupEntries() +{ + BackupEntries res; + res.reserve(entry_names.size()); + for (size_t i = 0; i != entry_names.size(); ++i) + { + res.emplace_back(entry_names[i], std::make_unique(shared_from_this(), i)); + } + return res; +} + +} diff --git a/src/Backups/IBackupEntriesBatch.h b/src/Backups/IBackupEntriesBatch.h new file mode 100644 index 00000000000..0d8c8d5aa26 --- /dev/null +++ b/src/Backups/IBackupEntriesBatch.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include + +namespace DB +{ + +/// Helper class designed to generate multiple backup entries from one source. +class IBackupEntriesBatch : public std::enable_shared_from_this +{ +public: + BackupEntries getBackupEntries(); + + virtual ~IBackupEntriesBatch() = default; + +protected: + IBackupEntriesBatch(const Strings & entry_names_) : entry_names(entry_names_) {} + + virtual std::unique_ptr getReadBuffer(size_t index) = 0; + virtual UInt64 getSize(size_t index) = 0; + virtual std::optional getChecksum(size_t) { return {}; } + +private: + class BackupEntryFromBatch; + const Strings entry_names; +}; + +} diff --git a/src/Backups/IRestoreTask.h b/src/Backups/IRestoreTask.h new file mode 100644 index 00000000000..10046ff1807 --- /dev/null +++ b/src/Backups/IRestoreTask.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +/// Represents a task of restoring something (database / table / table's part) from backup. +class IRestoreTask +{ +public: + IRestoreTask() = default; + virtual ~IRestoreTask() = default; + + /// Perform restoring, the function also can return a list of nested tasks that should be run later. + virtual std::vector> run() = 0; + + /// Is it necessary to run this task sequentially? + /// Sequential tasks are executed first and strictly in one thread. + virtual bool isSequential() const { return false; } + + /// Reverts the effect of run(). If that's not possible, the function does nothing. 
+ virtual void rollback() {} +}; + +using RestoreTaskPtr = std::unique_ptr; +using RestoreTasks = std::vector; + +} diff --git a/src/Backups/RestoreSettings.cpp b/src/Backups/RestoreSettings.cpp new file mode 100644 index 00000000000..485650e39f0 --- /dev/null +++ b/src/Backups/RestoreSettings.cpp @@ -0,0 +1,47 @@ +#include +#include +#include +#include +#include + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int UNKNOWN_SETTING; +} + +RestoreSettings RestoreSettings::fromRestoreQuery(const ASTBackupQuery & query) +{ + RestoreSettings res; + + if (query.base_backup_name) + res.base_backup_info = BackupInfo::fromAST(*query.base_backup_name); + + if (query.settings) + { + const auto & settings = query.settings->as().changes; + for (const auto & setting : settings) + { + if (setting.name == "password") + res.password = SettingFieldString{setting.value}; + else if (setting.name == "structure_only") + res.structure_only = SettingFieldBool{setting.value}; + else if (setting.name == "throw_if_database_exists") + res.throw_if_database_exists = SettingFieldBool{setting.value}; + else if (setting.name == "throw_if_table_exists") + res.throw_if_table_exists = SettingFieldBool{setting.value}; + else if (setting.name == "throw_if_database_def_differs") + res.throw_if_database_def_differs = SettingFieldBool{setting.value}; + else if (setting.name == "throw_if_table_def_differs") + res.throw_if_table_def_differs = SettingFieldBool{setting.value}; + else + throw Exception(ErrorCodes::UNKNOWN_SETTING, "Unknown setting {}", setting.name); + } + } + + return res; +} + +} diff --git a/src/Backups/RestoreSettings.h b/src/Backups/RestoreSettings.h new file mode 100644 index 00000000000..b129224943b --- /dev/null +++ b/src/Backups/RestoreSettings.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + + +namespace DB +{ +class ASTBackupQuery; + +struct StorageRestoreSettings +{ +}; + +/// Settings specified in the "SETTINGS" clause of a RESTORE query. +struct RestoreSettings : public StorageRestoreSettings +{ + /// Base backup, with this setting we can override the location of the base backup while restoring. + /// Any incremental backup keeps inside the information about its base backup, + /// so using this setting is optional. + std::optional base_backup_info; + + /// Password used to decrypt the backup. + String password; + + /// If this is set to true then only create queries will be read from backup, + /// without the data of tables. + bool structure_only = false; + + /// Whether RESTORE DATABASE must throw an exception if a destination database already exists. + bool throw_if_database_exists = true; + + /// Whether RESTORE TABLE must throw an exception if a destination table already exists. + bool throw_if_table_exists = true; + + /// Whether RESTORE DATABASE must throw an exception if a destination database has + /// a different definition comparing with the definition read from backup. + bool throw_if_database_def_differs = true; + + /// Whether RESTORE TABLE must throw an exception if a destination table has + /// a different definition comparing with the definition read from backup. 
+ bool throw_if_table_def_differs = true; + + static RestoreSettings fromRestoreQuery(const ASTBackupQuery & query); +}; + +} diff --git a/src/Backups/RestoreUtils.cpp b/src/Backups/RestoreUtils.cpp new file mode 100644 index 00000000000..8073b6d0818 --- /dev/null +++ b/src/Backups/RestoreUtils.cpp @@ -0,0 +1,685 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace fs = std::filesystem; + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int CANNOT_RESTORE_TABLE; + extern const int CANNOT_RESTORE_DATABASE; +} + +namespace +{ + using Kind = ASTBackupQuery::Kind; + using Element = ASTBackupQuery::Element; + using Elements = ASTBackupQuery::Elements; + using ElementType = ASTBackupQuery::ElementType; + using RestoreSettingsPtr = std::shared_ptr; + + + /// Restores a database (without tables inside), should be executed before executing + /// RestoreTableTask. + class RestoreDatabaseTask : public IRestoreTask + { + public: + RestoreDatabaseTask( + ContextMutablePtr context_, + const ASTPtr & create_query_, + const RestoreSettingsPtr & restore_settings_, + bool ignore_if_database_def_differs_) + : context(context_) + , create_query(typeid_cast>(create_query_)) + , restore_settings(restore_settings_) + , ignore_if_database_def_differs(ignore_if_database_def_differs_) + { + } + + RestoreTasks run() override + { + createDatabase(); + getDatabase(); + checkDatabaseCreateQuery(); + return {}; + } + + bool isSequential() const override { return true; } + + private: + void createDatabase() + { + /// We need to call clone() for `create_query` because the interpreter can decide + /// to change a passed AST a little bit. + InterpreterCreateQuery create_interpreter{create_query->clone(), context}; + create_interpreter.execute(); + } + + DatabasePtr getDatabase() + { + if (!database) + database = DatabaseCatalog::instance().getDatabase(create_query->getDatabase()); + return database; + } + + ASTPtr getDatabaseCreateQuery() + { + if (!database_create_query) + database_create_query = getDatabase()->getCreateDatabaseQuery(); + return database_create_query; + } + + void checkDatabaseCreateQuery() + { + if (ignore_if_database_def_differs || !restore_settings->throw_if_database_def_differs) + return; + + getDatabaseCreateQuery(); + if (areDatabaseDefinitionsSame(*create_query, *database_create_query)) + return; + + throw Exception( + ErrorCodes::CANNOT_RESTORE_DATABASE, + "The database {} already exists but has a different definition: {}, " + "compare to its definition in the backup: {}", + backQuoteIfNeed(create_query->getDatabase()), + serializeAST(*database_create_query), + serializeAST(*create_query)); + } + + ContextMutablePtr context; + std::shared_ptr create_query; + RestoreSettingsPtr restore_settings; + bool ignore_if_database_def_differs = false; + DatabasePtr database; + ASTPtr database_create_query; + }; + + + /// Restores a table and fills it with data. 
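Before the table-level task below, it helps to see how the new pieces are meant to fit together. The driver function here is hypothetical (the real call site is outside this hunk), but every call in it is introduced by this patch: RestoreSettings::fromRestoreQuery, makeRestoreTasks and executeRestoreTasks.

    // Hypothetical driver, for orientation only.
    void restoreFromBackup(ContextMutablePtr context, const BackupPtr & backup,
                           const ASTBackupQuery & query, size_t num_threads)
    {
        RestoreSettings settings = RestoreSettings::fromRestoreQuery(query);    // SETTINGS clause -> struct
        RestoreTasks tasks = makeRestoreTasks(context, backup, query.elements, settings);
        executeRestoreTasks(std::move(tasks), num_threads);                     // sequential tasks first, then parallel
    }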
+ class RestoreTableTask : public IRestoreTask + { + public: + RestoreTableTask( + ContextMutablePtr context_, + const ASTPtr & create_query_, + const ASTs & partitions_, + const BackupPtr & backup_, + const DatabaseAndTableName & table_name_in_backup_, + const RestoreSettingsPtr & restore_settings_) + : context(context_), create_query(typeid_cast>(create_query_)), + partitions(partitions_), backup(backup_), table_name_in_backup(table_name_in_backup_), + restore_settings(restore_settings_) + { + table_name = DatabaseAndTableName{create_query->getDatabase(), create_query->getTable()}; + if (create_query->temporary) + table_name.first = DatabaseCatalog::TEMPORARY_DATABASE; + } + + RestoreTasks run() override + { + createStorage(); + getStorage(); + checkStorageCreateQuery(); + RestoreTasks tasks; + if (auto task = insertData()) + tasks.push_back(std::move(task)); + return tasks; + } + + bool isSequential() const override { return true; } + + private: + void createStorage() + { + /// We need to call clone() for `create_query` because the interpreter can decide + /// to change a passed AST a little bit. + InterpreterCreateQuery create_interpreter{create_query->clone(), context}; + create_interpreter.execute(); + } + + StoragePtr getStorage() + { + if (!storage) + std::tie(database, storage) = DatabaseCatalog::instance().getDatabaseAndTable({table_name.first, table_name.second}, context); + return storage; + } + + ASTPtr getStorageCreateQuery() + { + if (!storage_create_query) + { + getStorage(); + storage_create_query = database->getCreateTableQuery(table_name.second, context); + } + return storage_create_query; + } + + void checkStorageCreateQuery() + { + if (!restore_settings->throw_if_table_def_differs) + return; + + getStorageCreateQuery(); + if (areTableDefinitionsSame(*create_query, *storage_create_query)) + return; + + throw Exception( + ErrorCodes::CANNOT_RESTORE_TABLE, + "The {} already exists but has a different definition: {}, " + "compare to its definition in the backup: {}", + formatTableNameOrTemporaryTableName(table_name), + serializeAST(*storage_create_query), + serializeAST(*create_query)); + } + + bool hasData() + { + if (has_data) + return *has_data; + + has_data = false; + if (restore_settings->structure_only) + return false; + + data_path_in_backup = getDataPathInBackup(table_name_in_backup); + if (backup->listFiles(data_path_in_backup).empty()) + return false; + + getStorageCreateQuery(); + if (!areTableDataCompatible(*create_query, *storage_create_query)) + throw Exception( + ErrorCodes::CANNOT_RESTORE_TABLE, + "Cannot attach data of the {} in the backup to the existing {} because of they are not compatible. " + "Here is the definition of the {} in the backup: {}, and here is the definition of the existing {}: {}", + formatTableNameOrTemporaryTableName(table_name_in_backup), + formatTableNameOrTemporaryTableName(table_name), + formatTableNameOrTemporaryTableName(table_name_in_backup), + serializeAST(*create_query), + formatTableNameOrTemporaryTableName(table_name), + serializeAST(*storage_create_query)); + + /// We check for INSERT privilege only if we're going to write into table. 
+ context->checkAccess(AccessType::INSERT, table_name.first, table_name.second); + + has_data = true; + return true; + } + + RestoreTaskPtr insertData() + { + if (!hasData()) + return {}; + return storage->restoreData(context, partitions, backup, data_path_in_backup, *restore_settings); + } + + ContextMutablePtr context; + std::shared_ptr create_query; + DatabaseAndTableName table_name; + ASTs partitions; + BackupPtr backup; + DatabaseAndTableName table_name_in_backup; + RestoreSettingsPtr restore_settings; + DatabasePtr database; + StoragePtr storage; + ASTPtr storage_create_query; + std::optional has_data; + String data_path_in_backup; + }; + + + /// Makes tasks for restoring databases and tables according to the elements of ASTBackupQuery. + /// Keep this class consistent with BackupEntriesBuilder. + class RestoreTasksBuilder + { + public: + RestoreTasksBuilder(ContextMutablePtr context_, const BackupPtr & backup_, const RestoreSettings & restore_settings_) + : context(context_), backup(backup_), restore_settings(restore_settings_) {} + + /// Prepares internal structures for making tasks for restoring. + void prepare(const ASTBackupQuery::Elements & elements) + { + String current_database = context->getCurrentDatabase(); + renaming_settings.setFromBackupQuery(elements, current_database); + + for (const auto & element : elements) + { + switch (element.type) + { + case ElementType::TABLE: + { + const String & table_name = element.name.second; + String database_name = element.name.first; + if (database_name.empty()) + database_name = current_database; + prepareToRestoreTable(DatabaseAndTableName{database_name, table_name}, element.partitions); + break; + } + + case ElementType::DATABASE: + { + const String & database_name = element.name.first; + prepareToRestoreDatabase(database_name, element.except_list); + break; + } + + case ElementType::ALL_DATABASES: + { + prepareToRestoreAllDatabases(element.except_list); + break; + } + } + } + } + + /// Makes tasks for restoring, should be called after prepare(). + RestoreTasks makeTasks() const + { + /// Check that there are not `different_create_query`. (If it's set it means error.) + for (const auto & info : databases | boost::adaptors::map_values) + { + if (info.different_create_query) + throw Exception(ErrorCodes::CANNOT_RESTORE_DATABASE, + "Cannot restore a database because two different create queries were generated for it: {} and {}", + serializeAST(*info.create_query), serializeAST(*info.different_create_query)); + } + + auto restore_settings_ptr = std::make_shared(restore_settings); + + RestoreTasks res; + for (const auto & info : databases | boost::adaptors::map_values) + res.push_back(std::make_unique(context, info.create_query, restore_settings_ptr, + /* ignore_if_database_def_differs = */ !info.is_explicit)); + + /// TODO: We need to restore tables according to their dependencies. + for (const auto & info : tables | boost::adaptors::map_values) + res.push_back(std::make_unique(context, info.create_query, info.partitions, backup, info.name_in_backup, restore_settings_ptr)); + + return res; + } + + private: + /// Prepares to restore a single table and probably its database's definition. + void prepareToRestoreTable(const DatabaseAndTableName & table_name_, const ASTs & partitions_) + { + /// Check that we are not trying to restore the same table again. 
+ DatabaseAndTableName new_table_name = renaming_settings.getNewTableName(table_name_); + if (tables.contains(new_table_name)) + throw Exception(ErrorCodes::CANNOT_RESTORE_TABLE, "Cannot restore the {} twice", formatTableNameOrTemporaryTableName(new_table_name)); + + /// Make a create query for this table. + auto create_query = renameInCreateQuery(readCreateQueryFromBackup(table_name_)); + create_query->if_not_exists = !restore_settings.throw_if_table_exists; + + CreateTableInfo info; + info.create_query = create_query; + info.name_in_backup = table_name_; + info.partitions = partitions_; + tables[new_table_name] = std::move(info); + + /// If it's not system or temporary database then probably we need to restore the database's definition too. + if (!isSystemOrTemporaryDatabase(new_table_name.first)) + { + if (!databases.contains(new_table_name.first)) + { + /// Add a create query for restoring the database if we haven't done it yet. + std::shared_ptr create_db_query; + String db_name_in_backup = table_name_.first; + if (hasCreateQueryInBackup(db_name_in_backup)) + { + create_db_query = renameInCreateQuery(readCreateQueryFromBackup(db_name_in_backup)); + } + else + { + create_db_query = std::make_shared(); + db_name_in_backup.clear(); + } + create_db_query->setDatabase(new_table_name.first); + create_db_query->if_not_exists = true; + + CreateDatabaseInfo info_db; + info_db.create_query = create_db_query; + info_db.name_in_backup = std::move(db_name_in_backup); + info_db.is_explicit = false; + databases[new_table_name.first] = std::move(info_db); + } + else + { + /// We already have added a create query for restoring the database, + /// set `different_create_query` if it's not the same. + auto & info_db = databases[new_table_name.first]; + if (!info_db.is_explicit && (info_db.name_in_backup != table_name_.first) && !info_db.different_create_query) + { + std::shared_ptr create_db_query; + if (hasCreateQueryInBackup(table_name_.first)) + create_db_query = renameInCreateQuery(readCreateQueryFromBackup(table_name_.first)); + else + create_db_query = std::make_shared(); + create_db_query->setDatabase(new_table_name.first); + create_db_query->if_not_exists = true; + if (!areDatabaseDefinitionsSame(*info_db.create_query, *create_db_query)) + info_db.different_create_query = create_db_query; + } + } + } + } + + /// Prepares to restore a database and all tables in it. + void prepareToRestoreDatabase(const String & database_name_, const std::set & except_list_) + { + /// Check that we are not trying to restore the same database again. + String new_database_name = renaming_settings.getNewDatabaseName(database_name_); + if (databases.contains(new_database_name) && databases[new_database_name].is_explicit) + throw Exception(ErrorCodes::CANNOT_RESTORE_DATABASE, "Cannot restore the database {} twice", backQuoteIfNeed(new_database_name)); + + Strings table_metadata_filenames = backup->listFiles("metadata/" + escapeForFileName(database_name_) + "/", "/"); + + bool throw_if_no_create_database_query = table_metadata_filenames.empty(); + if (throw_if_no_create_database_query && !hasCreateQueryInBackup(database_name_)) + throw Exception(ErrorCodes::CANNOT_RESTORE_DATABASE, "Cannot restore the database {} because there is no such database in the backup", backQuoteIfNeed(database_name_)); + + /// Of course we're not going to restore the definition of the system or the temporary database. + if (!isSystemOrTemporaryDatabase(new_database_name)) + { + /// Make a create query for this database. 
+ std::shared_ptr create_db_query; + String db_name_in_backup = database_name_; + if (hasCreateQueryInBackup(db_name_in_backup)) + { + create_db_query = renameInCreateQuery(readCreateQueryFromBackup(db_name_in_backup)); + } + else + { + create_db_query = std::make_shared(); + create_db_query->setDatabase(database_name_); + db_name_in_backup.clear(); + } + + create_db_query->if_not_exists = !restore_settings.throw_if_database_exists; + + CreateDatabaseInfo info_db; + info_db.create_query = create_db_query; + info_db.name_in_backup = std::move(db_name_in_backup); + info_db.is_explicit = true; + databases[new_database_name] = std::move(info_db); + } + + /// Restore tables in this database. + for (const String & table_metadata_filename : table_metadata_filenames) + { + String table_name = unescapeForFileName(fs::path{table_metadata_filename}.stem()); + if (except_list_.contains(table_name)) + continue; + prepareToRestoreTable(DatabaseAndTableName{database_name_, table_name}, ASTs{}); + } + } + + /// Prepares to restore all the databases contained in the backup. + void prepareToRestoreAllDatabases(const std::set & except_list_) + { + Strings database_metadata_filenames = backup->listFiles("metadata/", "/"); + for (const String & database_metadata_filename : database_metadata_filenames) + { + String database_name = unescapeForFileName(fs::path{database_metadata_filename}.stem()); + if (except_list_.contains(database_name)) + continue; + prepareToRestoreDatabase(database_name, std::set{}); + } + } + + /// Reads a create query for creating a specified table from the backup. + std::shared_ptr readCreateQueryFromBackup(const DatabaseAndTableName & table_name) const + { + String create_query_path = getMetadataPathInBackup(table_name); + if (!backup->fileExists(create_query_path)) + throw Exception(ErrorCodes::CANNOT_RESTORE_TABLE, "Cannot restore the {} because there is no such table in the backup", + formatTableNameOrTemporaryTableName(table_name)); + auto read_buffer = backup->readFile(create_query_path)->getReadBuffer(); + String create_query_str; + readStringUntilEOF(create_query_str, *read_buffer); + read_buffer.reset(); + ParserCreateQuery create_parser; + return typeid_cast>(parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH)); + } + + /// Reads a create query for creating a specified database from the backup. + std::shared_ptr readCreateQueryFromBackup(const String & database_name) const + { + String create_query_path = getMetadataPathInBackup(database_name); + if (!backup->fileExists(create_query_path)) + throw Exception(ErrorCodes::CANNOT_RESTORE_DATABASE, "Cannot restore the database {} because there is no such database in the backup", backQuoteIfNeed(database_name)); + auto read_buffer = backup->readFile(create_query_path)->getReadBuffer(); + String create_query_str; + readStringUntilEOF(create_query_str, *read_buffer); + read_buffer.reset(); + ParserCreateQuery create_parser; + return typeid_cast>(parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH)); + } + + /// Whether there is a create query for creating a specified database in the backup. + bool hasCreateQueryInBackup(const String & database_name) const + { + String create_query_path = getMetadataPathInBackup(database_name); + return backup->fileExists(create_query_path); + } + + /// Do renaming in the create query according to the renaming config. 
+ std::shared_ptr renameInCreateQuery(const ASTPtr & ast) const + { + ASTPtr query = ast; + ::DB::renameInCreateQuery(query, context, renaming_settings); + auto create_query = typeid_cast>(query); + return create_query; + } + + static bool isSystemOrTemporaryDatabase(const String & database_name) + { + return (database_name == DatabaseCatalog::SYSTEM_DATABASE) || (database_name == DatabaseCatalog::TEMPORARY_DATABASE); + } + + /// Information which is used to make an instance of RestoreTableTask. + struct CreateTableInfo + { + ASTPtr create_query; + DatabaseAndTableName name_in_backup; + ASTs partitions; + }; + + /// Information which is used to make an instance of RestoreDatabaseTask. + struct CreateDatabaseInfo + { + ASTPtr create_query; + String name_in_backup; + + /// Whether the creation of this database is specified explicitly, via RESTORE DATABASE or + /// RESTORE ALL DATABASES. + /// It's false if the creation of this database is caused by creating a table contained in it. + bool is_explicit = false; + + /// If this is set it means the following error: + /// it means that for implicitly created database there were two different create query + /// generated so we cannot restore the database. + ASTPtr different_create_query; + }; + + ContextMutablePtr context; + BackupPtr backup; + RestoreSettings restore_settings; + DDLRenamingSettings renaming_settings; + std::map databases; + std::map tables; + }; + + + /// Reverts completed restore tasks (in reversed order). + void rollbackRestoreTasks(RestoreTasks && restore_tasks) + { + for (auto & restore_task : restore_tasks | boost::adaptors::reversed) + { + try + { + std::move(restore_task)->rollback(); + } + catch (...) + { + tryLogCurrentException("Restore", "Couldn't rollback changes after failed RESTORE"); + } + } + } +} + + +RestoreTasks makeRestoreTasks(ContextMutablePtr context, const BackupPtr & backup, const Elements & elements, const RestoreSettings & restore_settings) +{ + RestoreTasksBuilder builder{context, backup, restore_settings}; + builder.prepare(elements); + return builder.makeTasks(); +} + + +void executeRestoreTasks(RestoreTasks && restore_tasks, size_t num_threads) +{ + if (!num_threads) + num_threads = 1; + + RestoreTasks completed_tasks; + bool need_rollback_completed_tasks = true; + + SCOPE_EXIT({ + if (need_rollback_completed_tasks) + rollbackRestoreTasks(std::move(completed_tasks)); + }); + + std::deque> sequential_tasks; + std::deque> enqueued_tasks; + + /// There are two kinds of restore tasks: sequential and non-sequential ones. + /// Sequential tasks are executed first and always in one thread. + for (auto & task : restore_tasks) + { + if (task->isSequential()) + sequential_tasks.push_back(std::move(task)); + else + enqueued_tasks.push_back(std::move(task)); + } + + /// Sequential tasks. + while (!sequential_tasks.empty()) + { + auto current_task = std::move(sequential_tasks.front()); + sequential_tasks.pop_front(); + + RestoreTasks new_tasks = current_task->run(); + + completed_tasks.push_back(std::move(current_task)); + for (auto & task : new_tasks) + { + if (task->isSequential()) + sequential_tasks.push_back(std::move(task)); + else + enqueued_tasks.push_back(std::move(task)); + } + } + + /// Non-sequential tasks. 
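The remainder of executeRestoreTasks below runs the non-sequential tasks on up to num_threads worker threads; a finished task may enqueue follow-up tasks, and the first exception stops further scheduling and rolls back what has already completed. A distilled, standalone version of the same bounded-concurrency pattern (simplified: no re-enqueueing and no rollback; nothing here is taken from the patch itself) looks like this:

    #include <condition_variable>
    #include <deque>
    #include <functional>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Runs `queue` with at most `num_threads` tasks in flight; returns once everything has finished.
    void runBounded(std::deque<std::function<void()>> queue, size_t num_threads)
    {
        if (num_threads == 0)
            num_threads = 1;    // mirrors the guard at the top of executeRestoreTasks

        std::mutex mutex;
        std::condition_variable cond;
        size_t running = 0;
        std::vector<std::thread> threads;

        while (true)
        {
            std::function<void()> task;
            {
                std::unique_lock lock{mutex};
                // Admit a new worker while the pool is not full; once the queue is empty, wait for drain.
                cond.wait(lock, [&] { return queue.empty() ? (running == 0) : (running < num_threads); });
                if (queue.empty())
                    break;
                task = std::move(queue.front());
                queue.pop_front();
                ++running;
            }
            threads.emplace_back([&mutex, &cond, &running, task = std::move(task)]
            {
                task();
                std::lock_guard lock{mutex};
                --running;
                cond.notify_all();
            });
        }

        for (auto & thread : threads)
            thread.join();
    }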
+ std::unordered_map> running_tasks; + std::vector threads; + std::mutex mutex; + std::condition_variable cond; + std::exception_ptr exception; + + while (true) + { + IRestoreTask * current_task = nullptr; + { + std::unique_lock lock{mutex}; + cond.wait(lock, [&] + { + if (exception) + return true; + if (enqueued_tasks.empty()) + return running_tasks.empty(); + return (running_tasks.size() < num_threads); + }); + + if (exception || enqueued_tasks.empty()) + break; + + auto current_task_ptr = std::move(enqueued_tasks.front()); + current_task = current_task_ptr.get(); + enqueued_tasks.pop_front(); + running_tasks[current_task] = std::move(current_task_ptr); + } + + assert(current_task); + threads.emplace_back([current_task, &mutex, &cond, &enqueued_tasks, &running_tasks, &completed_tasks, &exception]() mutable + { + { + std::lock_guard lock{mutex}; + if (exception) + return; + } + + RestoreTasks new_tasks; + std::exception_ptr new_exception; + try + { + new_tasks = current_task->run(); + } + catch (...) + { + new_exception = std::current_exception(); + } + + { + std::lock_guard lock{mutex}; + auto current_task_it = running_tasks.find(current_task); + auto current_task_ptr = std::move(current_task_it->second); + running_tasks.erase(current_task_it); + + if (!new_exception) + { + completed_tasks.push_back(std::move(current_task_ptr)); + enqueued_tasks.insert( + enqueued_tasks.end(), std::make_move_iterator(new_tasks.begin()), std::make_move_iterator(new_tasks.end())); + } + + if (!exception) + exception = new_exception; + + cond.notify_all(); + } + }); + } + + for (auto & thread : threads) + thread.join(); + + if (exception) + std::rethrow_exception(exception); + else + need_rollback_completed_tasks = false; +} + +} diff --git a/src/Backups/RestoreUtils.h b/src/Backups/RestoreUtils.h new file mode 100644 index 00000000000..33d2f7ff527 --- /dev/null +++ b/src/Backups/RestoreUtils.h @@ -0,0 +1,24 @@ +#pragma once + +#include + + +namespace DB +{ + +class IBackup; +using BackupPtr = std::shared_ptr; +class IRestoreTask; +using RestoreTaskPtr = std::unique_ptr; +using RestoreTasks = std::vector; +struct RestoreSettings; +class Context; +using ContextMutablePtr = std::shared_ptr; + +/// Prepares restore tasks. +RestoreTasks makeRestoreTasks(ContextMutablePtr context, const BackupPtr & backup, const ASTBackupQuery::Elements & elements, const RestoreSettings & restore_settings); + +/// Executes restore tasks. +void executeRestoreTasks(RestoreTasks && tasks, size_t num_threads); + +} diff --git a/src/Backups/formatTableNameOrTemporaryTableName.cpp b/src/Backups/formatTableNameOrTemporaryTableName.cpp new file mode 100644 index 00000000000..7338e1dab23 --- /dev/null +++ b/src/Backups/formatTableNameOrTemporaryTableName.cpp @@ -0,0 +1,17 @@ +#include +#include +#include + + +namespace DB +{ + +String formatTableNameOrTemporaryTableName(const DatabaseAndTableName & table_name) +{ + if (table_name.first == DatabaseCatalog::TEMPORARY_DATABASE) + return "temporary table " + backQuoteIfNeed(table_name.second); + else + return "table " + backQuoteIfNeed(table_name.first) + "." 
+ backQuoteIfNeed(table_name.second); +} + +} diff --git a/src/Backups/formatTableNameOrTemporaryTableName.h b/src/Backups/formatTableNameOrTemporaryTableName.h new file mode 100644 index 00000000000..a6b94cd4077 --- /dev/null +++ b/src/Backups/formatTableNameOrTemporaryTableName.h @@ -0,0 +1,13 @@ +#pragma once + +#include + + +namespace DB +{ +using DatabaseAndTableName = std::pair; + +/// Outputs either "table db_name.table_name" or "temporary table table_name". +String formatTableNameOrTemporaryTableName(const DatabaseAndTableName & table_name); + +} diff --git a/src/Backups/hasCompatibleDataToRestoreTable.cpp b/src/Backups/hasCompatibleDataToRestoreTable.cpp deleted file mode 100644 index 9c11d371bb0..00000000000 --- a/src/Backups/hasCompatibleDataToRestoreTable.cpp +++ /dev/null @@ -1,22 +0,0 @@ -#include -#include -#include - - -namespace DB -{ - -bool hasCompatibleDataToRestoreTable(const ASTCreateQuery & query1, const ASTCreateQuery & query2) -{ - /// TODO: Write more subtle condition here. - auto q1 = typeid_cast>(query1.clone()); - auto q2 = typeid_cast>(query2.clone()); - - /// Remove UUIDs. - q1->uuid = UUIDHelpers::Nil; - q2->uuid = UUIDHelpers::Nil; - - return serializeAST(*q1) == serializeAST(*q2); -} - -} diff --git a/src/Backups/hasCompatibleDataToRestoreTable.h b/src/Backups/hasCompatibleDataToRestoreTable.h deleted file mode 100644 index 92aab8b4579..00000000000 --- a/src/Backups/hasCompatibleDataToRestoreTable.h +++ /dev/null @@ -1,11 +0,0 @@ -#pragma once - - -namespace DB -{ -class ASTCreateQuery; - -/// Whether the data of the first table can be inserted to the second table. -bool hasCompatibleDataToRestoreTable(const ASTCreateQuery & query1, const ASTCreateQuery & query2); - -} diff --git a/src/Backups/registerBackupEngines.cpp b/src/Backups/registerBackupEngines.cpp deleted file mode 100644 index 33bc2c8fdd9..00000000000 --- a/src/Backups/registerBackupEngines.cpp +++ /dev/null @@ -1,14 +0,0 @@ - - -namespace DB -{ -class BackupFactory; - -void registerBackupEngineFile(BackupFactory &); - -void registerBackupEngines(BackupFactory & factory) -{ - registerBackupEngineFile(factory); -} - -} diff --git a/src/Backups/registerBackupEnginesFileAndDisk.cpp b/src/Backups/registerBackupEnginesFileAndDisk.cpp new file mode 100644 index 00000000000..6a34d67115d --- /dev/null +++ b/src/Backups/registerBackupEnginesFileAndDisk.cpp @@ -0,0 +1,168 @@ +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int INVALID_CONFIG_PARAMETER; +} + + +namespace +{ + namespace fs = std::filesystem; + + [[noreturn]] void throwDiskIsAllowed(const String & disk_name) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Disk {} is not allowed for backups", disk_name); + } + + [[noreturn]] void throwPathNotAllowed(const fs::path & path) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} is not allowed for backups", quoteString(String{path})); + } + + void checkAllowedPathInConfigIsValid(const String & key, const fs::path & value) + { + if (value.empty() || value.is_relative()) + throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Configuration parameter {} has a wrong value {}", key, String{value}); + } + + /// Checks that a disk name and a path specified as parameters of Disk() are valid. 
+ void checkDiskNameAndPath(const String & disk_name, fs::path & path, const Poco::Util::AbstractConfiguration & config) + { + String key = "backups.allowed_disk"; + bool disk_name_found = false; + size_t counter = 0; + while (config.has(key)) + { + if (config.getString(key) == disk_name) + { + disk_name_found = true; + break; + } + key = "backups.allowed_disk[" + std::to_string(++counter) + "]"; + } + + if (!disk_name_found) + throwDiskIsAllowed(disk_name); + + path = path.lexically_normal(); + if (!path.is_relative() || path.empty() || (*path.begin() == "..")) + throwPathNotAllowed(path); + } + + /// Checks that a path specified as a parameter of File() is valid. + void checkPath(fs::path & path, const Poco::Util::AbstractConfiguration & config) + { + String key = "backups.allowed_path"; + + path = path.lexically_normal(); + if (path.empty()) + throwPathNotAllowed(path); + + if (path.is_relative()) + { + if (*path.begin() == "..") + throwPathNotAllowed(path); + + auto base = fs::path(config.getString(key, "")); + checkAllowedPathInConfigIsValid(key, base); + path = base / path; + return; + } + + bool path_found_in_config = false; + size_t counter = 0; + while (config.has(key)) + { + auto base = fs::path(config.getString(key)); + checkAllowedPathInConfigIsValid(key, base); + auto rel = path.lexically_relative(base); + if (!rel.empty() && (*rel.begin() != "..")) + { + path_found_in_config = true; + break; + } + key = "backups.allowed_path[" + std::to_string(++counter) + "]"; + } + + if (!path_found_in_config) + throwPathNotAllowed(path); + } +} + + +void registerBackupEnginesFileAndDisk(BackupFactory & factory) +{ + auto creator_fn = [](const BackupFactory::CreateParams & params) -> std::unique_ptr + { + String backup_name = params.backup_info.toString(); + const String & engine_name = params.backup_info.backup_engine_name; + const auto & args = params.backup_info.args; + + DiskPtr disk; + fs::path path; + if (engine_name == "File") + { + if (args.size() != 1) + { + throw Exception( + "Backup engine 'File' requires 1 argument (path)", + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + } + + path = args[0].safeGet(); + checkPath(path, params.context->getConfigRef()); + } + else if (engine_name == "Disk") + { + if (args.size() != 2) + { + throw Exception( + "Backup engine 'Disk' requires 2 arguments (disk_name, path)", + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + } + + String disk_name = args[0].safeGet(); + path = args[1].safeGet(); + checkDiskNameAndPath(disk_name, path, params.context->getConfigRef()); + disk = params.context->getDisk(disk_name); + } + + std::unique_ptr backup; + + if (!path.has_filename() && !path.empty()) + { + if (!params.password.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Password is not applicable, backup cannot be encrypted"); + backup = std::make_unique(backup_name, disk, path, params.context, params.base_backup_info); + } + else if (hasRegisteredArchiveFileExtension(path)) + { + auto archive_backup = std::make_unique(backup_name, disk, path, params.context, params.base_backup_info); + archive_backup->setCompression(params.compression_method, params.compression_level); + archive_backup->setPassword(params.password); + backup = std::move(archive_backup); + } + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path to backup must be either a directory or a path to an archive"); + + return backup; + }; + + factory.registerBackupEngine("File", creator_fn); + factory.registerBackupEngine("Disk", creator_fn); +} + +} diff --git 
a/src/Backups/renameInCreateQuery.cpp b/src/Backups/renameInCreateQuery.cpp deleted file mode 100644 index 5b2492b3be4..00000000000 --- a/src/Backups/renameInCreateQuery.cpp +++ /dev/null @@ -1,282 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - -namespace -{ - class RenameInCreateQueryTransformMatcher - { - public: - struct Data - { - BackupRenamingConfigPtr renaming_config; - ContextPtr context; - }; - - static bool needChildVisit(ASTPtr &, const ASTPtr &) { return true; } - - static void visit(ASTPtr & ast, const Data & data) - { - if (auto * create = ast->as()) - visitCreateQuery(*create, data); - else if (auto * expr = ast->as()) - visitTableExpression(*expr, data); - else if (auto * function = ast->as()) - visitFunction(*function, data); - else if (auto * dictionary = ast->as()) - visitDictionary(*dictionary, data); - } - - private: - /// Replaces names of tables and databases used in a CREATE query, which can be either CREATE TABLE or - /// CREATE DICTIONARY or CREATE VIEW or CREATE TEMPORARY TABLE or CREATE DATABASE query. - static void visitCreateQuery(ASTCreateQuery & create, const Data & data) - { - if (create.temporary) - { - if (!create.table) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Table name specified in the CREATE TEMPORARY TABLE query must not be empty"); - create.setTable(data.renaming_config->getNewTemporaryTableName(create.getTable())); - } - else if (!create.table) - { - if (!create.database) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE DATABASE query must not be empty"); - create.setDatabase(data.renaming_config->getNewDatabaseName(create.getDatabase())); - } - else - { - if (!create.database) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE TABLE query must not be empty"); - auto table_and_database_name = data.renaming_config->getNewTableName({create.getDatabase(), create.getTable()}); - create.setDatabase(table_and_database_name.first); - create.setTable(table_and_database_name.second); - } - - create.uuid = UUIDHelpers::Nil; - - if (!create.as_table.empty() && !create.as_database.empty()) - std::tie(create.as_database, create.as_table) = data.renaming_config->getNewTableName({create.as_database, create.as_table}); - - if (!create.to_table_id.table_name.empty() && !create.to_table_id.database_name.empty()) - { - auto to_table = data.renaming_config->getNewTableName({create.to_table_id.database_name, create.to_table_id.table_name}); - create.to_table_id = StorageID{to_table.first, to_table.second}; - } - } - - /// Replaces names of a database and a table in a expression like `db`.`table` - static void visitTableExpression(ASTTableExpression & expr, const Data & data) - { - if (!expr.database_and_table_name) - return; - - ASTIdentifier * id = expr.database_and_table_name->as(); - if (!id) - return; - - auto table_id = id->createTable(); - if (!table_id) - return; - - const String & db_name = table_id->getDatabaseName(); - const String & table_name = table_id->shortName(); - if (db_name.empty() || table_name.empty()) - return; - - String new_db_name, new_table_name; - std::tie(new_db_name, new_table_name) = data.renaming_config->getNewTableName({db_name, table_name}); - if ((new_db_name == db_name) && (new_table_name == table_name)) - return; - - expr.database_and_table_name = std::make_shared(Strings{new_db_name, new_table_name}); - 
expr.children.push_back(expr.database_and_table_name); - } - - /// Replaces names of tables and databases used in arguments of a table function or a table engine. - static void visitFunction(ASTFunction & function, const Data & data) - { - if ((function.name == "merge") || (function.name == "Merge")) - { - visitFunctionMerge(function, data); - } - else if ((function.name == "remote") || (function.name == "remoteSecure") || (function.name == "cluster") || - (function.name == "clusterAllReplicas") || (function.name == "Distributed")) - { - visitFunctionRemote(function, data); - } - } - - /// Replaces a database's name passed via an argument of the function merge() or the table engine Merge. - static void visitFunctionMerge(ASTFunction & function, const Data & data) - { - if (!function.arguments) - return; - - /// The first argument is a database's name and we can rename it. - /// The second argument is a regular expression and we can do nothing about it. - auto & args = function.arguments->as().children; - size_t db_name_arg_index = 0; - if (args.size() <= db_name_arg_index) - return; - - String db_name = evaluateConstantExpressionForDatabaseName(args[db_name_arg_index], data.context)->as().value.safeGet(); - if (db_name.empty()) - return; - - String new_db_name = data.renaming_config->getNewDatabaseName(db_name); - if (new_db_name == db_name) - return; - args[db_name_arg_index] = std::make_shared(new_db_name); - } - - /// Replaces names of a table and a database passed via arguments of the function remote() or cluster() or the table engine Distributed. - static void visitFunctionRemote(ASTFunction & function, const Data & data) - { - if (!function.arguments) - return; - - /// The first argument is an address or cluster's name, so we skip it. - /// The second argument can be either 'db.name' or just 'db' followed by the third argument 'table'. 
- auto & args = function.arguments->as().children; - - const auto * second_arg_as_function = args[1]->as(); - if (second_arg_as_function && TableFunctionFactory::instance().isTableFunctionName(second_arg_as_function->name)) - return; - - size_t db_name_index = 1; - if (args.size() <= db_name_index) - return; - - String name = evaluateConstantExpressionForDatabaseName(args[db_name_index], data.context)->as().value.safeGet(); - - size_t table_name_index = static_cast(-1); - - QualifiedTableName qualified_name; - - if (function.name == "Distributed") - qualified_name.table = name; - else - qualified_name = QualifiedTableName::parseFromString(name); - - if (qualified_name.database.empty()) - { - std::swap(qualified_name.database, qualified_name.table); - table_name_index = 2; - if (args.size() <= table_name_index) - return; - qualified_name.table = evaluateConstantExpressionForDatabaseName(args[table_name_index], data.context)->as().value.safeGet(); - } - - const String & db_name = qualified_name.database; - const String & table_name = qualified_name.table; - - if (db_name.empty() || table_name.empty()) - return; - - String new_db_name, new_table_name; - std::tie(new_db_name, new_table_name) = data.renaming_config->getNewTableName({db_name, table_name}); - if ((new_db_name == db_name) && (new_table_name == table_name)) - return; - - if (table_name_index != static_cast(-1)) - { - if (new_db_name != db_name) - args[db_name_index] = std::make_shared(new_db_name); - if (new_table_name != table_name) - args[table_name_index] = std::make_shared(new_table_name); - } - else - { - args[db_name_index] = std::make_shared(new_db_name); - args.insert(args.begin() + db_name_index + 1, std::make_shared(new_table_name)); - } - } - - /// Replaces names of a table and a database used in source parameters of a dictionary. 
- static void visitDictionary(ASTDictionary & dictionary, const Data & data) - { - if (!dictionary.source || dictionary.source->name != "clickhouse" || !dictionary.source->elements) - return; - - auto & elements = dictionary.source->elements->as().children; - String db_name, table_name; - size_t db_name_index = static_cast(-1); - size_t table_name_index = static_cast(-1); - - for (size_t i = 0; i != elements.size(); ++i) - { - auto & pair = elements[i]->as(); - if (pair.first == "db") - { - if (db_name_index != static_cast(-1)) - return; - db_name = pair.second->as().value.safeGet(); - db_name_index = i; - } - else if (pair.first == "table") - { - if (table_name_index != static_cast(-1)) - return; - table_name = pair.second->as().value.safeGet(); - table_name_index = i; - } - } - - if (db_name.empty() || table_name.empty()) - return; - - String new_db_name, new_table_name; - std::tie(new_db_name, new_table_name) = data.renaming_config->getNewTableName({db_name, table_name}); - if ((new_db_name == db_name) && (new_table_name == table_name)) - return; - - if (new_db_name != db_name) - { - auto & pair = elements[db_name_index]->as(); - pair.replace(pair.second, std::make_shared(new_db_name)); - } - if (new_table_name != table_name) - { - auto & pair = elements[table_name_index]->as(); - pair.replace(pair.second, std::make_shared(new_table_name)); - } - } - }; - - using RenameInCreateQueryTransformVisitor = InDepthNodeVisitor; -} - - -ASTPtr renameInCreateQuery(const ASTPtr & ast, const BackupRenamingConfigPtr & renaming_config, const ContextPtr & context) -{ - auto new_ast = ast->clone(); - try - { - RenameInCreateQueryTransformVisitor::Data data{renaming_config, context}; - RenameInCreateQueryTransformVisitor{data}.visit(new_ast); - return new_ast; - } - catch (...) - { - tryLogCurrentException("Backup", "Error while renaming in AST"); - return ast; - } -} - -} diff --git a/src/Backups/renameInCreateQuery.h b/src/Backups/renameInCreateQuery.h deleted file mode 100644 index 9c62d07e5c6..00000000000 --- a/src/Backups/renameInCreateQuery.h +++ /dev/null @@ -1,16 +0,0 @@ -#pragma once - -#include - -namespace DB -{ -class IAST; -using ASTPtr = std::shared_ptr; -class Context; -using ContextPtr = std::shared_ptr; -class BackupRenamingConfig; -using BackupRenamingConfigPtr = std::shared_ptr; - -/// Changes names in AST according to the renaming settings. -ASTPtr renameInCreateQuery(const ASTPtr & ast, const BackupRenamingConfigPtr & renaming_config, const ContextPtr & context); -} diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 2937aeb4291..115d047e6e8 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1976,25 +1976,23 @@ void ClientBase::readArguments( for (int arg_num = 1; arg_num < argc; ++arg_num) { - const char * arg = argv[arg_num]; + std::string_view arg = argv[arg_num]; - if (arg == "--external"sv) + if (arg == "--external") { in_external_group = true; external_tables_arguments.emplace_back(Arguments{""}); } /// Options with value after equal sign. 
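This hunk and the ones below replace C-string helpers (strncmp, strchr, startsWith) with std::string_view members such as starts_with, substr and find_first_of. Reduced to a standalone illustration of the "--param_name=value" case handled further down (parseParam is a hypothetical helper, not part of ClientBase):

    #include <optional>
    #include <string>
    #include <string_view>
    #include <utility>

    // Illustrative helper: parse "--param_name=value" into {name, value} using std::string_view.
    std::optional<std::pair<std::string, std::string>> parseParam(std::string_view arg)
    {
        if (!arg.starts_with("--param_"))
            return std::nullopt;

        std::string_view rest = arg.substr(std::string_view{"--param_"}.size());
        size_t equal_pos = rest.find_first_of('=');

        if (equal_pos == std::string_view::npos)
            return std::nullopt;    // "--param_name value": the value arrives as the next argv element
        if (equal_pos == 0)
            return std::nullopt;    // "--param_=value": empty parameter name (ClientBase throws here)

        return std::make_pair(std::string(rest.substr(0, equal_pos)), std::string(rest.substr(equal_pos + 1)));
    }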
- else if (in_external_group - && (0 == strncmp(arg, "--file=", strlen("--file=")) || 0 == strncmp(arg, "--name=", strlen("--name=")) - || 0 == strncmp(arg, "--format=", strlen("--format=")) || 0 == strncmp(arg, "--structure=", strlen("--structure=")) - || 0 == strncmp(arg, "--types=", strlen("--types=")))) + else if ( + in_external_group + && (arg.starts_with("--file=") || arg.starts_with("--name=") || arg.starts_with("--format=") || arg.starts_with("--structure=") + || arg.starts_with("--types="))) { external_tables_arguments.back().emplace_back(arg); } /// Options with value after whitespace. - else if (in_external_group - && (arg == "--file"sv || arg == "--name"sv || arg == "--format"sv - || arg == "--structure"sv || arg == "--types"sv)) + else if (in_external_group && (arg == "--file" || arg == "--name" || arg == "--format" || arg == "--structure" || arg == "--types")) { if (arg_num + 1 < argc) { @@ -2011,20 +2009,12 @@ void ClientBase::readArguments( in_external_group = false; /// Parameter arg after underline. - if (startsWith(arg, "--param_")) + if (arg.starts_with("--param_")) { - const char * param_continuation = arg + strlen("--param_"); - const char * equal_pos = strchr(param_continuation, '='); + auto param_continuation = arg.substr(strlen("--param_")); + auto equal_pos = param_continuation.find_first_of('='); - if (equal_pos == param_continuation) - throw Exception("Parameter name cannot be empty", ErrorCodes::BAD_ARGUMENTS); - - if (equal_pos) - { - /// param_name=value - query_parameters.emplace(String(param_continuation, equal_pos), String(equal_pos + 1)); - } - else + if (equal_pos == std::string::npos) { /// param_name value ++arg_num; @@ -2033,12 +2023,20 @@ void ClientBase::readArguments( arg = argv[arg_num]; query_parameters.emplace(String(param_continuation), String(arg)); } + else + { + if (equal_pos == 0) + throw Exception("Parameter name cannot be empty", ErrorCodes::BAD_ARGUMENTS); + + /// param_name=value + query_parameters.emplace(param_continuation.substr(0, equal_pos), param_continuation.substr(equal_pos + 1)); + } } - else if (startsWith(arg, "--host") || startsWith(arg, "-h")) + else if (arg.starts_with("--host") || arg.starts_with("-h")) { std::string host_arg; /// --host host - if (arg == "--host"sv || arg == "-h"sv) + if (arg == "--host" || arg == "-h") { ++arg_num; if (arg_num >= argc) @@ -2065,11 +2063,11 @@ void ClientBase::readArguments( prev_host_arg = host_arg; } } - else if (startsWith(arg, "--port")) + else if (arg.starts_with("--port")) { - std::string port_arg = arg; + auto port_arg = String{arg}; /// --port port - if (arg == "--port"sv) + if (arg == "--port") { port_arg.push_back('='); ++arg_num; @@ -2094,7 +2092,7 @@ void ClientBase::readArguments( prev_port_arg = port_arg; } } - else if (arg == "--allow_repeated_settings"sv) + else if (arg == "--allow_repeated_settings") allow_repeated_settings = true; else common_arguments.emplace_back(arg); diff --git a/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp index aaffe85ae2e..13d39980e1c 100644 --- a/src/Client/ConnectionPoolWithFailover.cpp +++ b/src/Client/ConnectionPoolWithFailover.cpp @@ -29,15 +29,15 @@ ConnectionPoolWithFailover::ConnectionPoolWithFailover( time_t decrease_error_period_, size_t max_error_cap_) : Base(std::move(nested_pools_), decrease_error_period_, max_error_cap_, &Poco::Logger::get("ConnectionPoolWithFailover")) - , default_load_balancing(load_balancing) + , get_priority_load_balancing(load_balancing) { const std::string & 
local_hostname = getFQDNOrHostName(); - hostname_differences.resize(nested_pools.size()); + get_priority_load_balancing.hostname_differences.resize(nested_pools.size()); for (size_t i = 0; i < nested_pools.size(); ++i) { ConnectionPool & connection_pool = dynamic_cast(*nested_pools[i]); - hostname_differences[i] = getHostNameDifference(local_hostname, connection_pool.getHost()); + get_priority_load_balancing.hostname_differences[i] = getHostNameDifference(local_hostname, connection_pool.getHost()); } } @@ -51,36 +51,15 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts }; size_t offset = 0; + LoadBalancing load_balancing = get_priority_load_balancing.load_balancing; if (settings) - offset = settings->load_balancing_first_offset % nested_pools.size(); - GetPriorityFunc get_priority; - switch (settings ? LoadBalancing(settings->load_balancing) : default_load_balancing) { - case LoadBalancing::NEAREST_HOSTNAME: - get_priority = [&](size_t i) { return hostname_differences[i]; }; - break; - case LoadBalancing::IN_ORDER: - get_priority = [](size_t i) { return i; }; - break; - case LoadBalancing::RANDOM: - break; - case LoadBalancing::FIRST_OR_RANDOM: - get_priority = [offset](size_t i) -> size_t { return i != offset; }; - break; - case LoadBalancing::ROUND_ROBIN: - if (last_used >= nested_pools.size()) - last_used = 0; - ++last_used; - /* Consider nested_pools.size() equals to 5 - * last_used = 1 -> get_priority: 0 1 2 3 4 - * last_used = 2 -> get_priority: 4 0 1 2 3 - * last_used = 3 -> get_priority: 4 3 0 1 2 - * ... - * */ - get_priority = [&](size_t i) { ++i; return i < last_used ? nested_pools.size() - i : i - last_used; }; - break; + offset = settings->load_balancing_first_offset % nested_pools.size(); + load_balancing = LoadBalancing(settings->load_balancing); } + GetPriorityFunc get_priority = get_priority_load_balancing.getPriorityFunc(load_balancing, offset, nested_pools.size()); + UInt64 max_ignored_errors = settings ? settings->distributed_replica_max_ignored_errors.value : 0; bool fallback_to_stale_replicas = settings ? settings->fallback_to_stale_replicas_for_distributed_queries.value : true; @@ -173,38 +152,14 @@ std::vector ConnectionPoolWithFailover::g ConnectionPoolWithFailover::Base::GetPriorityFunc ConnectionPoolWithFailover::makeGetPriorityFunc(const Settings * settings) { size_t offset = 0; + LoadBalancing load_balancing = get_priority_load_balancing.load_balancing; if (settings) - offset = settings->load_balancing_first_offset % nested_pools.size(); - - GetPriorityFunc get_priority; - switch (settings ? LoadBalancing(settings->load_balancing) : default_load_balancing) { - case LoadBalancing::NEAREST_HOSTNAME: - get_priority = [&](size_t i) { return hostname_differences[i]; }; - break; - case LoadBalancing::IN_ORDER: - get_priority = [](size_t i) { return i; }; - break; - case LoadBalancing::RANDOM: - break; - case LoadBalancing::FIRST_OR_RANDOM: - get_priority = [offset](size_t i) -> size_t { return i != offset; }; - break; - case LoadBalancing::ROUND_ROBIN: - if (last_used >= nested_pools.size()) - last_used = 0; - ++last_used; - /* Consider nested_pools.size() equals to 5 - * last_used = 1 -> get_priority: 0 1 2 3 4 - * last_used = 2 -> get_priority: 5 0 1 2 3 - * last_used = 3 -> get_priority: 5 4 0 1 2 - * ... - * */ - get_priority = [&](size_t i) { ++i; return i < last_used ? 
nested_pools.size() - i : i - last_used; }; - break; + offset = settings->load_balancing_first_offset % nested_pools.size(); + load_balancing = LoadBalancing(settings->load_balancing); } - return get_priority; + return get_priority_load_balancing.getPriorityFunc(load_balancing, offset, nested_pools.size()); } std::vector ConnectionPoolWithFailover::getManyImpl( diff --git a/src/Client/ConnectionPoolWithFailover.h b/src/Client/ConnectionPoolWithFailover.h index 4e47905aae6..df7dd572ef3 100644 --- a/src/Client/ConnectionPoolWithFailover.h +++ b/src/Client/ConnectionPoolWithFailover.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include @@ -109,9 +110,7 @@ private: GetPriorityFunc makeGetPriorityFunc(const Settings * settings); - std::vector hostname_differences; /// Distances from name of this host to the names of hosts of pools. - size_t last_used = 0; /// Last used for round_robin policy. - LoadBalancing default_load_balancing; + GetPriorityForLoadBalancing get_priority_load_balancing; }; using ConnectionPoolWithFailoverPtr = std::shared_ptr; diff --git a/src/Columns/MaskOperations.cpp b/src/Columns/MaskOperations.cpp index 1641bdf5a4c..9e2d02253be 100644 --- a/src/Columns/MaskOperations.cpp +++ b/src/Columns/MaskOperations.cpp @@ -83,9 +83,20 @@ size_t extractMaskNumericImpl( const PaddedPODArray * null_bytemap, PaddedPODArray * nulls) { + if constexpr (!column_is_short) + { + if (data.size() != mask.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "The size of a full data column is not equal to the size of a mask"); + } + size_t ones_count = 0; size_t data_index = 0; - for (size_t i = 0; i != mask.size(); ++i) + + size_t mask_size = mask.size(); + size_t data_size = data.size(); + + size_t i = 0; + for (; i != mask_size && data_index != data_size; ++i) { // Change mask only where value is 1. 
if (!mask[i]) @@ -118,6 +129,13 @@ size_t extractMaskNumericImpl( mask[i] = value; } + + if constexpr (column_is_short) + { + if (data_index != data_size) + throw Exception(ErrorCodes::LOGICAL_ERROR, "The size of a short column is not equal to the number of ones in a mask"); + } + return ones_count; } diff --git a/src/Common/ArenaWithFreeLists.h b/src/Common/ArenaWithFreeLists.h index cd4c1bc9d8d..53a59c98299 100644 --- a/src/Common/ArenaWithFreeLists.h +++ b/src/Common/ArenaWithFreeLists.h @@ -113,5 +113,35 @@ public: } }; +class SynchronizedArenaWithFreeLists : private ArenaWithFreeLists +{ +public: + explicit SynchronizedArenaWithFreeLists( + const size_t initial_size = 4096, const size_t growth_factor = 2, + const size_t linear_growth_threshold = 128 * 1024 * 1024) + : ArenaWithFreeLists{initial_size, growth_factor, linear_growth_threshold} + {} + + char * alloc(const size_t size) + { + std::lock_guard lock{mutex}; + return ArenaWithFreeLists::alloc(size); + } + + void free(char * ptr, const size_t size) + { + std::lock_guard lock{mutex}; + return ArenaWithFreeLists::free(ptr, size); + } + + /// Size of the allocated pool in bytes + size_t size() const + { + std::lock_guard lock{mutex}; + return ArenaWithFreeLists::size(); + } +private: + mutable std::mutex mutex; +}; } diff --git a/src/Common/Dwarf.cpp b/src/Common/Dwarf.cpp index 6b952d0d093..8705cb1130c 100644 --- a/src/Common/Dwarf.cpp +++ b/src/Common/Dwarf.cpp @@ -25,7 +25,6 @@ #include #include - #define DW_CHILDREN_no 0 #define DW_FORM_addr 1 #define DW_FORM_block1 0x0a @@ -125,7 +124,7 @@ template requires std::is_trivial_v && std::is_standard_layout_v T read(std::string_view & sp) { - SAFE_CHECK(sp.size() >= sizeof(T), "underflow"); + SAFE_CHECK(sp.size() >= sizeof(T), fmt::format("underflow: expected bytes {}, got bytes {}", sizeof(T), sp.size())); T x; memcpy(&x, sp.data(), sizeof(T)); sp.remove_prefix(sizeof(T)); @@ -690,7 +689,7 @@ bool Dwarf::findDebugInfoOffset(uintptr_t address, std::string_view aranges, uin Dwarf::Die Dwarf::getDieAtOffset(const CompilationUnit & cu, uint64_t offset) const { - SAFE_CHECK(offset < info_.size(), "unexpected offset"); + SAFE_CHECK(offset < info_.size(), fmt::format("unexpected offset {}, info size {}", offset, info_.size())); Die die; std::string_view sp{info_.data() + offset, cu.offset + cu.size - offset}; die.offset = offset; @@ -708,19 +707,6 @@ Dwarf::Die Dwarf::getDieAtOffset(const CompilationUnit & cu, uint64_t offset) co return die; } -Dwarf::Die Dwarf::findDefinitionDie(const CompilationUnit & cu, const Die & die) const -{ - // Find the real definition instead of declaration. - // DW_AT_specification: Incomplete, non-defining, or separate declaration - // corresponding to a declaration - auto offset = getAttribute(cu, die, DW_AT_specification); - if (!offset) - { - return die; - } - return getDieAtOffset(cu, cu.offset + offset.value()); -} - /** * Find the @locationInfo for @address in the compilation unit represented * by the @sp .debug_info entry. 
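The `SynchronizedArenaWithFreeLists` added to `ArenaWithFreeLists.h` above is just the unsynchronized arena behind a mutex, via private inheritance. A hedged sketch of the same wrapper pattern, with a toy arena standing in for `ArenaWithFreeLists`:

```cpp
// Sketch of the wrapper pattern: private inheritance plus a mutable mutex,
// so every public call is serialized. ToyArena is a stand-in, not the real class.
#include <cstddef>
#include <mutex>
#include <new>

class ToyArena
{
public:
    char * alloc(size_t size) { total += size; return static_cast<char *>(::operator new(size)); }
    void free(char * ptr, size_t size) { total -= size; ::operator delete(ptr); }
    size_t size() const { return total; }
private:
    size_t total = 0;
};

class SynchronizedToyArena : private ToyArena
{
public:
    char * alloc(size_t size)
    {
        std::lock_guard lock{mutex};
        return ToyArena::alloc(size);
    }

    void free(char * ptr, size_t size)
    {
        std::lock_guard lock{mutex};
        return ToyArena::free(ptr, size);
    }

    size_t size() const
    {
        std::lock_guard lock{mutex};   // mutex is mutable, so const methods can lock too
        return ToyArena::size();
    }

private:
    mutable std::mutex mutex;
};
```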
@@ -861,7 +847,10 @@ bool Dwarf::findLocation( SymbolizedFrame inline_frame; inline_frame.found = true; inline_frame.addr = address; - inline_frame.name = call_location.name.data(); + if (!call_location.name.empty()) + inline_frame.name = call_location.name.data(); + else + inline_frame.name = nullptr; inline_frame.location.has_file_and_line = true; inline_frame.location.file = call_location.file; inline_frame.location.line = call_location.line; @@ -1034,17 +1023,54 @@ void Dwarf::findInlinedSubroutineDieForAddress( location.file = line_vm.getFullFileName(*call_file); location.line = *call_line; + /// Something wrong with receiving debug info about inline. + /// If set to true we stop parsing DWARF. + bool die_for_inline_broken = false; + auto get_function_name = [&](const CompilationUnit & srcu, uint64_t die_offset) { - auto decl_die = getDieAtOffset(srcu, die_offset); + Die decl_die = getDieAtOffset(srcu, die_offset); + auto & die_to_look_for_name = decl_die; + + Die def_die; // Jump to the actual function definition instead of declaration for name // and line info. - auto def_die = findDefinitionDie(srcu, decl_die); + // DW_AT_specification: Incomplete, non-defining, or separate declaration + // corresponding to a declaration + auto offset = getAttribute(srcu, decl_die, DW_AT_specification); + if (offset) + { + /// FIXME: actually it's a bug in our DWARF parser. + /// + /// Most of the times compilation unit offset (srcu.offset) is some big number inside .debug_info (like 434782255). + /// Offset of DIE definition is some small relative number to srcu.offset (like 3518). + /// However in some unknown cases offset looks like global, non relative number (like 434672579) and in this + /// case we obviously doing something wrong parsing DWARF. + /// + /// What is important -- this bug? reproduces only with -flto=thin in release mode. + /// Also llvm-dwarfdump --verify ./clickhouse says that our DWARF is ok, so it's another prove + /// that we just doing something wrong. + /// + /// FIXME: Currently we just give up parsing DWARF for inlines when we got into this situation. + if (srcu.offset + offset.value() >= info_.size()) + { + die_for_inline_broken = true; + } + else + { + def_die = getDieAtOffset(srcu, srcu.offset + offset.value()); + die_to_look_for_name = def_die; + } + } std::string_view name; + + if (die_for_inline_broken) + return name; + // The file and line will be set in the next inline subroutine based on // its DW_AT_call_file and DW_AT_call_line. - forEachAttribute(srcu, def_die, [&](const Attribute & attr) + forEachAttribute(srcu, die_to_look_for_name, [&](const Attribute & attr) { switch (attr.spec.name) { @@ -1083,6 +1109,10 @@ void Dwarf::findInlinedSubroutineDieForAddress( ? get_function_name(cu, cu.offset + *abstract_origin) : get_function_name(findCompilationUnit(info_, *abstract_origin), *abstract_origin); + /// FIXME: see comment above + if (die_for_inline_broken) + return false; + locations.push_back(location); findInlinedSubroutineDieForAddress(cu, child_die, line_vm, address, base_addr_cu, locations, max_size); diff --git a/src/Common/Dwarf.h b/src/Common/Dwarf.h index e57e58e438a..3d8a50236ff 100644 --- a/src/Common/Dwarf.h +++ b/src/Common/Dwarf.h @@ -260,11 +260,6 @@ private: /** cu must exist during the life cycle of created detail::Die. */ Die getDieAtOffset(const CompilationUnit & cu, uint64_t offset) const; - /** - * Find the actual definition DIE instead of declaration for the given die. 
- */ - Die findDefinitionDie(const CompilationUnit & cu, const Die & die) const; - bool findLocation( uintptr_t address, LocationInfoMode mode, diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 3ed13ac04ea..b6c67478b26 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -575,7 +575,7 @@ M(604, BACKUP_ENTRY_ALREADY_EXISTS) \ M(605, BACKUP_ENTRY_NOT_FOUND) \ M(606, BACKUP_IS_EMPTY) \ - M(607, BACKUP_ELEMENT_DUPLICATE) \ + M(607, CANNOT_RESTORE_DATABASE) \ M(608, CANNOT_RESTORE_TABLE) \ M(609, FUNCTION_ALREADY_EXISTS) \ M(610, CANNOT_DROP_FUNCTION) \ @@ -614,6 +614,9 @@ M(643, CANNOT_UNPACK_ARCHIVE) \ M(644, REMOTE_FS_OBJECT_CACHE_ERROR) \ M(645, NUMBER_OF_DIMENSIONS_MISMATHED) \ + M(646, CANNOT_BACKUP_DATABASE) \ + M(647, CANNOT_BACKUP_TABLE) \ + M(648, WRONG_DDL_RENAMING_SETTINGS) \ \ M(999, KEEPER_EXCEPTION) \ M(1000, POCO_EXCEPTION) \ diff --git a/src/Common/FiberStack.h b/src/Common/FiberStack.h index 29f84ee6d85..c55608311d0 100644 --- a/src/Common/FiberStack.h +++ b/src/Common/FiberStack.h @@ -31,8 +31,8 @@ public: /// probably it worth to try to increase stack size for coroutines. /// /// Current value is just enough for all tests in our CI. It's not selected in some special - /// way. We will have 40 pages with 4KB page size. - static constexpr size_t default_stack_size = 192 * 1024; /// 64KB was not enough for tests + /// way. We will have 80 pages with 4KB page size. + static constexpr size_t default_stack_size = 320 * 1024; /// 64KB was not enough for tests explicit FiberStack(size_t stack_size_ = default_stack_size) : stack_size(stack_size_) { diff --git a/src/Common/FileChecker.cpp b/src/Common/FileChecker.cpp index 4de5a92a1b8..cdc928ea11e 100644 --- a/src/Common/FileChecker.cpp +++ b/src/Common/FileChecker.cpp @@ -1,13 +1,15 @@ -#include +#include +#include +#include #include #include #include #include #include -#include +#include -#include +namespace fs = std::filesystem; namespace DB { @@ -19,6 +21,10 @@ namespace ErrorCodes } +FileChecker::FileChecker(const String & file_info_path_) : FileChecker(nullptr, file_info_path_) +{ +} + FileChecker::FileChecker(DiskPtr disk_, const String & file_info_path_) : disk(std::move(disk_)) { setPath(file_info_path_); @@ -45,8 +51,8 @@ String FileChecker::getPath() const void FileChecker::update(const String & full_file_path) { - bool exists = disk->exists(full_file_path); - auto real_size = exists ? disk->getFileSize(full_file_path) : 0; /// No race condition assuming no one else is working with these files. + bool exists = fileReallyExists(full_file_path); + auto real_size = exists ? getRealFileSize(full_file_path) : 0; /// No race condition assuming no one else is working with these files. map[fileName(full_file_path)] = real_size; } @@ -74,8 +80,8 @@ CheckResults FileChecker::check() const { const String & name = name_size.first; String path = parentPath(files_info_path) + name; - bool exists = disk->exists(path); - auto real_size = exists ? disk->getFileSize(path) : 0; /// No race condition assuming no one else is working with these files. + bool exists = fileReallyExists(path); + auto real_size = exists ? getRealFileSize(path) : 0; /// No race condition assuming no one else is working with these files. if (real_size != name_size.second) { @@ -99,8 +105,8 @@ void FileChecker::repair() const String & name = name_size.first; size_t expected_size = name_size.second; String path = parentPath(files_info_path) + name; - bool exists = disk->exists(path); - auto real_size = exists ? 
disk->getFileSize(path) : 0; /// No race condition assuming no one else is working with these files. + bool exists = fileReallyExists(path); + auto real_size = exists ? getRealFileSize(path) : 0; /// No race condition assuming no one else is working with these files. if (real_size < expected_size) throw Exception(ErrorCodes::UNEXPECTED_END_OF_FILE, "Size of {} is less than expected. Size is {} but should be {}.", @@ -119,7 +125,7 @@ void FileChecker::save() const std::string tmp_files_info_path = parentPath(files_info_path) + "tmp_" + fileName(files_info_path); { - std::unique_ptr out = disk->writeFile(tmp_files_info_path); + std::unique_ptr out = disk ? disk->writeFile(tmp_files_info_path) : std::make_unique(tmp_files_info_path); /// So complex JSON structure - for compatibility with the old format. writeCString("{\"clickhouse\":{", *out); @@ -141,17 +147,20 @@ void FileChecker::save() const out->next(); } - disk->replaceFile(tmp_files_info_path, files_info_path); + if (disk) + disk->replaceFile(tmp_files_info_path, files_info_path); + else + fs::rename(tmp_files_info_path, files_info_path); } void FileChecker::load() { map.clear(); - if (!disk->exists(files_info_path)) + if (!fileReallyExists(files_info_path)) return; - std::unique_ptr in = disk->readFile(files_info_path); + std::unique_ptr in = disk ? disk->readFile(files_info_path) : std::make_unique(files_info_path); WriteBufferFromOwnString out; /// The JSON library does not support whitespace. We delete them. Inefficient. @@ -169,4 +178,14 @@ void FileChecker::load() map[unescapeForFileName(file.getName())] = file.getValue()["size"].toUInt(); } +bool FileChecker::fileReallyExists(const String & path_) const +{ + return disk ? disk->exists(path_) : fs::exists(path_); +} + +size_t FileChecker::getRealFileSize(const String & path_) const +{ + return disk ? disk->getFileSize(path_) : fs::file_size(path_); +} + } diff --git a/src/Common/FileChecker.h b/src/Common/FileChecker.h index a0ea449393e..98e7837db75 100644 --- a/src/Common/FileChecker.h +++ b/src/Common/FileChecker.h @@ -2,16 +2,19 @@ #include #include -#include namespace DB { +class IDisk; +using DiskPtr = std::shared_ptr; + /// Stores the sizes of all columns, and can check whether the columns are corrupted. 
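The `FileChecker` changes above make the `IDisk` optional: when no disk is passed, the checker falls back to `std::filesystem` directly. A small sketch of that fallback pattern (the `FakeDisk`/`SizeChecker` names are illustrative only):

```cpp
// Sketch of the optional-disk pattern: a null disk pointer means
// "use the local filesystem". Not the real FileChecker implementation.
#include <cstddef>
#include <filesystem>
#include <memory>
#include <string>

namespace fs = std::filesystem;

struct FakeDisk                       // stand-in for DB::IDisk
{
    bool exists(const std::string & path) const { return fs::exists(path); }
    size_t getFileSize(const std::string & path) const { return fs::file_size(path); }
};

class SizeChecker
{
public:
    explicit SizeChecker(std::shared_ptr<FakeDisk> disk_ = nullptr) : disk(std::move(disk_)) {}

    bool fileReallyExists(const std::string & path) const
    {
        return disk ? disk->exists(path) : fs::exists(path);
    }

    size_t getRealFileSize(const std::string & path) const
    {
        return disk ? disk->getFileSize(path) : fs::file_size(path);
    }

private:
    std::shared_ptr<FakeDisk> disk;   // nullptr means "use the local filesystem"
};
```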
class FileChecker { public: + FileChecker(const String & file_info_path_); FileChecker(DiskPtr disk_, const String & file_info_path_); void setPath(const String & file_info_path_); @@ -36,6 +39,9 @@ public: private: void load(); + bool fileReallyExists(const String & path_) const; + size_t getRealFileSize(const String & path_) const; + const DiskPtr disk; const Poco::Logger * log = &Poco::Logger::get("FileChecker"); diff --git a/src/Common/GetPriorityForLoadBalancing.cpp b/src/Common/GetPriorityForLoadBalancing.cpp new file mode 100644 index 00000000000..d8e7566e891 --- /dev/null +++ b/src/Common/GetPriorityForLoadBalancing.cpp @@ -0,0 +1,49 @@ +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +std::function GetPriorityForLoadBalancing::getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const +{ + std::function get_priority; + switch (load_balance) + { + case LoadBalancing::NEAREST_HOSTNAME: + if (hostname_differences.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "It's a bug: hostname_differences is not initialized"); + get_priority = [&](size_t i) { return hostname_differences[i]; }; + break; + case LoadBalancing::IN_ORDER: + get_priority = [](size_t i) { return i; }; + break; + case LoadBalancing::RANDOM: + break; + case LoadBalancing::FIRST_OR_RANDOM: + get_priority = [offset](size_t i) -> size_t { return i != offset; }; + break; + case LoadBalancing::ROUND_ROBIN: + if (last_used >= pool_size) + last_used = 0; + ++last_used; + /* Consider pool_size equals to 5 + * last_used = 1 -> get_priority: 0 1 2 3 4 + * last_used = 2 -> get_priority: 4 0 1 2 3 + * last_used = 3 -> get_priority: 4 3 0 1 2 + * ... + * */ + get_priority = [&](size_t i) + { + ++i; + return i < last_used ? pool_size - i : i - last_used; + }; + break; + } + return get_priority; +} + +} diff --git a/src/Common/GetPriorityForLoadBalancing.h b/src/Common/GetPriorityForLoadBalancing.h new file mode 100644 index 00000000000..e57b02b5e90 --- /dev/null +++ b/src/Common/GetPriorityForLoadBalancing.h @@ -0,0 +1,34 @@ +#pragma once + +#include + +namespace DB +{ + +class GetPriorityForLoadBalancing +{ +public: + GetPriorityForLoadBalancing(LoadBalancing load_balancing_) : load_balancing(load_balancing_) {} + GetPriorityForLoadBalancing(){} + + bool operator == (const GetPriorityForLoadBalancing & other) const + { + return load_balancing == other.load_balancing && hostname_differences == other.hostname_differences; + } + + bool operator != (const GetPriorityForLoadBalancing & other) const + { + return !(*this == other); + } + + std::function getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const; + + std::vector hostname_differences; /// Distances from name of this host to the names of hosts of pools. + + LoadBalancing load_balancing = LoadBalancing::RANDOM; + +private: + mutable size_t last_used = 0; /// Last used for round_robin policy. 
+}; + +} diff --git a/src/Common/IntervalKind.cpp b/src/Common/IntervalKind.cpp index 69b56be48ac..1478b832282 100644 --- a/src/Common/IntervalKind.cpp +++ b/src/Common/IntervalKind.cpp @@ -13,6 +13,9 @@ Int32 IntervalKind::toAvgSeconds() const { switch (kind) { + case IntervalKind::Nanosecond: return 0; /// fractional parts of seconds have 0 seconds + case IntervalKind::Microsecond: return 0; + case IntervalKind::Millisecond: return 0; case IntervalKind::Second: return 1; case IntervalKind::Minute: return 60; case IntervalKind::Hour: return 3600; @@ -52,6 +55,9 @@ const char * IntervalKind::toKeyword() const { switch (kind) { + case IntervalKind::Nanosecond: return "NANOSECOND"; + case IntervalKind::Microsecond: return "MICROSECOND"; + case IntervalKind::Millisecond: return "MILLISECOND"; case IntervalKind::Second: return "SECOND"; case IntervalKind::Minute: return "MINUTE"; case IntervalKind::Hour: return "HOUR"; @@ -69,6 +75,9 @@ const char * IntervalKind::toLowercasedKeyword() const { switch (kind) { + case IntervalKind::Nanosecond: return "nanosecond"; + case IntervalKind::Microsecond: return "microsecond"; + case IntervalKind::Millisecond: return "millisecond"; case IntervalKind::Second: return "second"; case IntervalKind::Minute: return "minute"; case IntervalKind::Hour: return "hour"; @@ -86,6 +95,12 @@ const char * IntervalKind::toDateDiffUnit() const { switch (kind) { + case IntervalKind::Nanosecond: + return "nanosecond"; + case IntervalKind::Microsecond: + return "microsecond"; + case IntervalKind::Millisecond: + return "millisecond"; case IntervalKind::Second: return "second"; case IntervalKind::Minute: @@ -111,6 +126,12 @@ const char * IntervalKind::toNameOfFunctionToIntervalDataType() const { switch (kind) { + case IntervalKind::Nanosecond: + return "toIntervalNanosecond"; + case IntervalKind::Microsecond: + return "toIntervalMicrosecond"; + case IntervalKind::Millisecond: + return "toIntervalMillisecond"; case IntervalKind::Second: return "toIntervalSecond"; case IntervalKind::Minute: @@ -136,6 +157,12 @@ const char * IntervalKind::toNameOfFunctionExtractTimePart() const { switch (kind) { + case IntervalKind::Nanosecond: + return "toNanosecond"; + case IntervalKind::Microsecond: + return "toMicrosecond"; + case IntervalKind::Millisecond: + return "toMillisecond"; case IntervalKind::Second: return "toSecond"; case IntervalKind::Minute: @@ -162,6 +189,21 @@ const char * IntervalKind::toNameOfFunctionExtractTimePart() const bool IntervalKind::tryParseString(const std::string & kind, IntervalKind::Kind & result) { + if ("nanosecond" == kind) + { + result = IntervalKind::Nanosecond; + return true; + } + if ("microsecond" == kind) + { + result = IntervalKind::Microsecond; + return true; + } + if ("millisecond" == kind) + { + result = IntervalKind::Millisecond; + return true; + } if ("second" == kind) { result = IntervalKind::Second; diff --git a/src/Common/IntervalKind.h b/src/Common/IntervalKind.h index aab0bb79be5..d5f2b5672cd 100644 --- a/src/Common/IntervalKind.h +++ b/src/Common/IntervalKind.h @@ -10,6 +10,9 @@ struct IntervalKind { enum Kind { + Nanosecond, + Microsecond, + Millisecond, Second, Minute, Hour, @@ -61,6 +64,9 @@ struct IntervalKind /// NOLINTNEXTLINE #define FOR_EACH_INTERVAL_KIND(M) \ + M(Nanosecond) \ + M(Microsecond) \ + M(Millisecond) \ M(Second) \ M(Minute) \ M(Hour) \ diff --git a/src/Common/OvercommitTracker.cpp b/src/Common/OvercommitTracker.cpp index 0e70619f628..7b03b9f271d 100644 --- a/src/Common/OvercommitTracker.cpp +++ 
b/src/Common/OvercommitTracker.cpp @@ -23,6 +23,12 @@ void OvercommitTracker::setMaxWaitTime(UInt64 wait_time) bool OvercommitTracker::needToStopQuery(MemoryTracker * tracker) { + // NOTE: Do not change the order of locks + // + // global_mutex must be acquired before overcommit_m, because + // method OvercommitTracker::unsubscribe(MemoryTracker *) is + // always called with already acquired global_mutex in + // ProcessListEntry::~ProcessListEntry(). std::unique_lock global_lock(global_mutex); std::unique_lock lk(overcommit_m); @@ -76,7 +82,7 @@ void UserOvercommitTracker::pickQueryToExcludeImpl() MemoryTracker * query_tracker = nullptr; OvercommitRatio current_ratio{0, 0}; // At this moment query list must be read only. - // BlockQueryIfMemoryLimit is used in ProcessList to guarantee this. + // This is guaranteed by locking global_mutex in OvercommitTracker::needToStopQuery. auto & queries = user_process_list->queries; LOG_DEBUG(logger, "Trying to choose query to stop from {} queries", queries.size()); for (auto const & query : queries) @@ -111,9 +117,9 @@ void GlobalOvercommitTracker::pickQueryToExcludeImpl() MemoryTracker * query_tracker = nullptr; OvercommitRatio current_ratio{0, 0}; // At this moment query list must be read only. - // BlockQueryIfMemoryLimit is used in ProcessList to guarantee this. - LOG_DEBUG(logger, "Trying to choose query to stop"); - process_list->processEachQueryStatus([&](DB::QueryStatus const & query) + // This is guaranteed by locking global_mutex in OvercommitTracker::needToStopQuery. + LOG_DEBUG(logger, "Trying to choose query to stop from {} queries", process_list->size()); + for (auto const & query : process_list->processes) { if (query.isKilled()) return; @@ -134,7 +140,7 @@ void GlobalOvercommitTracker::pickQueryToExcludeImpl() query_tracker = memory_tracker; current_ratio = ratio; } - }); + } LOG_DEBUG(logger, "Selected to stop query with overcommit ratio {}/{}", current_ratio.committed, current_ratio.soft_limit); picked_tracker = query_tracker; diff --git a/src/Common/OvercommitTracker.h b/src/Common/OvercommitTracker.h index 7c7974f0a24..f59390a8ace 100644 --- a/src/Common/OvercommitTracker.h +++ b/src/Common/OvercommitTracker.h @@ -43,8 +43,6 @@ class MemoryTracker; // is killed to free memory. struct OvercommitTracker : boost::noncopyable { - explicit OvercommitTracker(std::mutex & global_mutex_); - void setMaxWaitTime(UInt64 wait_time); bool needToStopQuery(MemoryTracker * tracker); @@ -54,8 +52,12 @@ struct OvercommitTracker : boost::noncopyable virtual ~OvercommitTracker() = default; protected: + explicit OvercommitTracker(std::mutex & global_mutex_); + virtual void pickQueryToExcludeImpl() = 0; + // This mutex is used to disallow concurrent access + // to picked_tracker and cancelation_state variables. mutable std::mutex overcommit_m; mutable std::condition_variable cv; @@ -87,6 +89,11 @@ private: } } + // Global mutex which is used in ProcessList to synchronize + // insertion and deletion of queries. + // OvercommitTracker::pickQueryToExcludeImpl() implementations + // require this mutex to be locked, because they read list (or sublist) + // of queries. 
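The comments above describe the lock-ordering rule in `OvercommitTracker`: `global_mutex` is always acquired before `overcommit_m`, and `unsubscribe()` runs with `global_mutex` already held by the caller. A simplified model of that ordering (names are illustrative, not the real API):

```cpp
// Sketch of the lock-ordering rule: global_mutex first, then overcommit_m.
// Because unsubscribe() is entered with global_mutex already held, taking only
// overcommit_m there keeps the acquisition order consistent and deadlock-free.
#include <mutex>

struct TrackerSketch
{
    std::mutex global_mutex;    // protects the process/query list
    std::mutex overcommit_m;    // protects picked_tracker / cancellation state

    void needToStopQuerySketch()
    {
        std::unique_lock global_lock(global_mutex);   // 1) freeze the query list
        std::unique_lock lk(overcommit_m);            // 2) then the internal state
        // ... pick a query to stop while both locks are held ...
    }

    void unsubscribeSketch()
    {
        // Caller (the process list destructor) already holds global_mutex,
        // so only overcommit_m is taken here -- same order as above.
        std::unique_lock lk(overcommit_m);
        // ... clear picked_tracker ...
    }
};
```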
std::mutex & global_mutex; }; diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 178559894e3..e1bf8a37ee7 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -9,6 +9,7 @@ M(SelectQuery, "Same as Query, but only for SELECT queries.") \ M(InsertQuery, "Same as Query, but only for INSERT queries.") \ M(AsyncInsertQuery, "Same as InsertQuery, but only for asynchronous INSERT queries.") \ + M(AsyncInsertBytes, "Data size in bytes of asynchronous INSERT queries.") \ M(FailedQuery, "Number of failed queries.") \ M(FailedSelectQuery, "Same as FailedQuery, but only for SELECT queries.") \ M(FailedInsertQuery, "Same as FailedQuery, but only for INSERT queries.") \ diff --git a/src/Common/RadixSort.h b/src/Common/RadixSort.h index 944ab860355..2f02ebb9e03 100644 --- a/src/Common/RadixSort.h +++ b/src/Common/RadixSort.h @@ -515,6 +515,11 @@ public: radixSortLSDInternal(arr, size, false, nullptr); } + static void executeLSD(Element * arr, size_t size, bool reverse) + { + radixSortLSDInternal(arr, size, reverse, nullptr); + } + /** This function will start to sort inplace (modify 'arr') * but on the last step it will write result directly to the destination * instead of finishing sorting 'arr'. diff --git a/src/Common/ZooKeeper/CMakeLists.txt b/src/Common/ZooKeeper/CMakeLists.txt index 34ebad9bb50..a9a335d1461 100644 --- a/src/Common/ZooKeeper/CMakeLists.txt +++ b/src/Common/ZooKeeper/CMakeLists.txt @@ -22,7 +22,6 @@ target_link_libraries (clickhouse_common_zookeeper_no_log PRIVATE string_utils ) - if (ENABLE_EXAMPLES) add_subdirectory(examples) endif() diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index b1574341c40..118789c0ffc 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -5,15 +5,15 @@ #include #include -#include -#include #include -#include +#include #include #include +#include #include +#include #define ZOOKEEPER_CONNECTION_TIMEOUT_MS 1000 @@ -48,7 +48,7 @@ static void check(Coordination::Error code, const std::string & path) void ZooKeeper::init(const std::string & implementation_, const Strings & hosts_, const std::string & identity_, - int32_t session_timeout_ms_, int32_t operation_timeout_ms_, const std::string & chroot_) + int32_t session_timeout_ms_, int32_t operation_timeout_ms_, const std::string & chroot_, const GetPriorityForLoadBalancing & get_priority_load_balancing_) { log = &Poco::Logger::get("ZooKeeper"); hosts = hosts_; @@ -57,6 +57,7 @@ void ZooKeeper::init(const std::string & implementation_, const Strings & hosts_ operation_timeout_ms = operation_timeout_ms_; chroot = chroot_; implementation = implementation_; + get_priority_load_balancing = get_priority_load_balancing_; if (implementation == "zookeeper") { @@ -66,14 +67,13 @@ void ZooKeeper::init(const std::string & implementation_, const Strings & hosts_ Coordination::ZooKeeper::Nodes nodes; nodes.reserve(hosts.size()); - Strings shuffled_hosts = hosts; /// Shuffle the hosts to distribute the load among ZooKeeper nodes. 
- pcg64 generator(randomSeed()); - std::shuffle(shuffled_hosts.begin(), shuffled_hosts.end(), generator); + std::vector shuffled_hosts = shuffleHosts(); bool dns_error = false; - for (auto & host_string : shuffled_hosts) + for (auto & host : shuffled_hosts) { + auto & host_string = host.host; try { bool secure = bool(startsWith(host_string, "secure://")); @@ -81,6 +81,7 @@ void ZooKeeper::init(const std::string & implementation_, const Strings & hosts_ if (secure) host_string.erase(0, strlen("secure://")); + LOG_TEST(log, "Adding ZooKeeper host {} ({})", host_string, Poco::Net::SocketAddress{host_string}.toString()); nodes.emplace_back(Coordination::ZooKeeper::Node{Poco::Net::SocketAddress{host_string}, secure}); } catch (const Poco::Net::HostNotFoundException & e) @@ -154,23 +155,47 @@ void ZooKeeper::init(const std::string & implementation_, const Strings & hosts_ } } +std::vector ZooKeeper::shuffleHosts() const +{ + std::function get_priority = get_priority_load_balancing.getPriorityFunc(get_priority_load_balancing.load_balancing, 0, hosts.size()); + std::vector shuffle_hosts; + for (size_t i = 0; i < hosts.size(); ++i) + { + ShuffleHost shuffle_host; + shuffle_host.host = hosts[i]; + if (get_priority) + shuffle_host.priority = get_priority(i); + shuffle_host.randomize(); + shuffle_hosts.emplace_back(shuffle_host); + } + + std::sort( + shuffle_hosts.begin(), shuffle_hosts.end(), + [](const ShuffleHost & lhs, const ShuffleHost & rhs) + { + return ShuffleHost::compare(lhs, rhs); + }); + + return shuffle_hosts; +} + ZooKeeper::ZooKeeper(const std::string & hosts_string, const std::string & identity_, int32_t session_timeout_ms_, int32_t operation_timeout_ms_, const std::string & chroot_, const std::string & implementation_, - std::shared_ptr zk_log_) + std::shared_ptr zk_log_, const GetPriorityForLoadBalancing & get_priority_load_balancing_) { zk_log = std::move(zk_log_); Strings hosts_strings; splitInto<','>(hosts_strings, hosts_string); - init(implementation_, hosts_strings, identity_, session_timeout_ms_, operation_timeout_ms_, chroot_); + init(implementation_, hosts_strings, identity_, session_timeout_ms_, operation_timeout_ms_, chroot_, get_priority_load_balancing_); } ZooKeeper::ZooKeeper(const Strings & hosts_, const std::string & identity_, int32_t session_timeout_ms_, int32_t operation_timeout_ms_, const std::string & chroot_, const std::string & implementation_, - std::shared_ptr zk_log_) + std::shared_ptr zk_log_, const GetPriorityForLoadBalancing & get_priority_load_balancing_) { zk_log = std::move(zk_log_); - init(implementation_, hosts_, identity_, session_timeout_ms_, operation_timeout_ms_, chroot_); + init(implementation_, hosts_, identity_, session_timeout_ms_, operation_timeout_ms_, chroot_, get_priority_load_balancing_); } struct ZooKeeperArgs @@ -213,6 +238,15 @@ struct ZooKeeperArgs { implementation = config.getString(config_name + "." + key); } + else if (key == "zookeeper_load_balancing") + { + String load_balancing_str = config.getString(config_name + "." 
+ key); + /// Use magic_enum to avoid dependency from dbms (`SettingFieldLoadBalancingTraits::fromString(...)`) + auto load_balancing = magic_enum::enum_cast(Poco::toUpper(load_balancing_str)); + if (!load_balancing) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Unknown load balancing: {}", load_balancing_str); + get_priority_load_balancing.load_balancing = *load_balancing; + } else throw KeeperException(std::string("Unknown key ") + key + " in config file", Coordination::Error::ZBADARGUMENTS); } @@ -224,6 +258,15 @@ struct ZooKeeperArgs if (chroot.back() == '/') chroot.pop_back(); } + + /// init get_priority_load_balancing + get_priority_load_balancing.hostname_differences.resize(hosts.size()); + const String & local_hostname = getFQDNOrHostName(); + for (size_t i = 0; i < hosts.size(); ++i) + { + const String & node_host = hosts[i].substr(0, hosts[i].find_last_of(':')); + get_priority_load_balancing.hostname_differences[i] = DB::getHostNameDifference(local_hostname, node_host); + } } Strings hosts; @@ -232,13 +275,14 @@ struct ZooKeeperArgs int operation_timeout_ms; std::string chroot; std::string implementation; + GetPriorityForLoadBalancing get_priority_load_balancing; }; ZooKeeper::ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std::string & config_name, std::shared_ptr zk_log_) : zk_log(std::move(zk_log_)) { ZooKeeperArgs args(config, config_name); - init(args.implementation, args.hosts, args.identity, args.session_timeout_ms, args.operation_timeout_ms, args.chroot); + init(args.implementation, args.hosts, args.identity, args.session_timeout_ms, args.operation_timeout_ms, args.chroot, args.get_priority_load_balancing); } bool ZooKeeper::configChanged(const Poco::Util::AbstractConfiguration & config, const std::string & config_name) const @@ -249,8 +293,11 @@ bool ZooKeeper::configChanged(const Poco::Util::AbstractConfiguration & config, if (args.implementation == implementation && implementation == "testkeeper") return false; - return std::tie(args.implementation, args.hosts, args.identity, args.session_timeout_ms, args.operation_timeout_ms, args.chroot) - != std::tie(implementation, hosts, identity, session_timeout_ms, operation_timeout_ms, chroot); + if (args.get_priority_load_balancing != get_priority_load_balancing) + return true; + + return std::tie(args.implementation, args.hosts, args.identity, args.session_timeout_ms, args.operation_timeout_ms, args.chroot, args.get_priority_load_balancing) + != std::tie(implementation, hosts, identity, session_timeout_ms, operation_timeout_ms, chroot, args.get_priority_load_balancing); } @@ -757,7 +804,7 @@ bool ZooKeeper::waitForDisappear(const std::string & path, const WaitCondition & ZooKeeperPtr ZooKeeper::startNewSession() const { - return std::make_shared(hosts, identity, session_timeout_ms, operation_timeout_ms, chroot, implementation, zk_log); + return std::make_shared(hosts, identity, session_timeout_ms, operation_timeout_ms, chroot, implementation, zk_log, get_priority_load_balancing); } diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h index 6d0f8a438b1..f901a79591f 100644 --- a/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -13,7 +13,10 @@ #include #include #include +#include +#include #include +#include namespace ProfileEvents @@ -37,6 +40,25 @@ namespace zkutil /// Preferred size of multi() command (in number of ops) constexpr size_t MULTI_BATCH_SIZE = 100; +struct ShuffleHost +{ + String host; + Int64 priority = 0; + UInt32 random = 0; + + 
void randomize() + { + random = thread_local_rng(); + } + + static bool compare(const ShuffleHost & lhs, const ShuffleHost & rhs) + { + return std::forward_as_tuple(lhs.priority, lhs.random) + < std::forward_as_tuple(rhs.priority, rhs.random); + } +}; + +using GetPriorityForLoadBalancing = DB::GetPriorityForLoadBalancing; /// ZooKeeper session. The interface is substantially different from the usual libzookeeper API. /// @@ -58,14 +80,16 @@ public: int32_t operation_timeout_ms_ = Coordination::DEFAULT_OPERATION_TIMEOUT_MS, const std::string & chroot_ = "", const std::string & implementation_ = "zookeeper", - std::shared_ptr zk_log_ = nullptr); + std::shared_ptr zk_log_ = nullptr, + const GetPriorityForLoadBalancing & get_priority_load_balancing_ = {}); explicit ZooKeeper(const Strings & hosts_, const std::string & identity_ = "", int32_t session_timeout_ms_ = Coordination::DEFAULT_SESSION_TIMEOUT_MS, int32_t operation_timeout_ms_ = Coordination::DEFAULT_OPERATION_TIMEOUT_MS, const std::string & chroot_ = "", const std::string & implementation_ = "zookeeper", - std::shared_ptr zk_log_ = nullptr); + std::shared_ptr zk_log_ = nullptr, + const GetPriorityForLoadBalancing & get_priority_load_balancing_ = {}); /** Config of the form: @@ -91,6 +115,8 @@ public: */ ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std::string & config_name, std::shared_ptr zk_log_); + std::vector shuffleHosts() const; + /// Creates a new session with the same parameters. This method can be used for reconnecting /// after the session has expired. /// This object remains unchanged, and the new session is returned. @@ -284,7 +310,7 @@ private: friend class EphemeralNodeHolder; void init(const std::string & implementation_, const Strings & hosts_, const std::string & identity_, - int32_t session_timeout_ms_, int32_t operation_timeout_ms_, const std::string & chroot_); + int32_t session_timeout_ms_, int32_t operation_timeout_ms_, const std::string & chroot_, const GetPriorityForLoadBalancing & get_priority_load_balancing_); /// The following methods don't any throw exceptions but return error codes. Coordination::Error createImpl(const std::string & path, const std::string & data, int32_t mode, std::string & path_created); @@ -311,6 +337,8 @@ private: Poco::Logger * log = nullptr; std::shared_ptr zk_log; + GetPriorityForLoadBalancing get_priority_load_balancing; + AtomicStopwatch session_uptime; }; diff --git a/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/src/Common/ZooKeeper/ZooKeeperImpl.cpp index 0627a70193f..d3c993344b6 100644 --- a/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -451,7 +451,7 @@ void ZooKeeper::connect( } else { - LOG_TEST(log, "Connected to ZooKeeper at {} with session_id {}", socket.peerAddress().toString(), session_id); + LOG_TEST(log, "Connected to ZooKeeper at {} with session_id {}{}", socket.peerAddress().toString(), session_id, fail_reasons.str()); } } diff --git a/src/Common/formatIPv6.h b/src/Common/formatIPv6.h index 1a65adae55b..d6efeed17e6 100644 --- a/src/Common/formatIPv6.h +++ b/src/Common/formatIPv6.h @@ -11,7 +11,7 @@ constexpr size_t IPV4_BINARY_LENGTH = 4; constexpr size_t IPV6_BINARY_LENGTH = 16; constexpr size_t IPV4_MAX_TEXT_LENGTH = 15; /// Does not count tail zero byte. -constexpr size_t IPV6_MAX_TEXT_LENGTH = 39; +constexpr size_t IPV6_MAX_TEXT_LENGTH = 45; /// Does not count tail zero byte. 
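The bump of `IPV6_MAX_TEXT_LENGTH` from 39 to 45 accounts for IPv6 addresses with an embedded IPv4 tail, which are longer than the pure hexadecimal form. A quick check of both lengths (a standalone snippet, not part of the patch):

```cpp
// 8 hex groups + 7 colons = 39 characters for the all-hex form, while an address
// with a dotted IPv4 suffix reaches 45 (glibc's INET6_ADDRSTRLEN is 46 because
// it also counts the terminating NUL).
#include <cassert>
#include <cstring>

int main()
{
    const char * longest_hex_only  = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff";        // 39
    const char * longest_with_ipv4 = "0000:0000:0000:0000:0000:ffff:255.255.255.255";  // 45
    assert(std::strlen(longest_hex_only) == 39);
    assert(std::strlen(longest_with_ipv4) == 45);
}
```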
namespace DB { diff --git a/src/Common/isLocalAddress.cpp b/src/Common/isLocalAddress.cpp index d79e4cebd15..596fd4caad7 100644 --- a/src/Common/isLocalAddress.cpp +++ b/src/Common/isLocalAddress.cpp @@ -124,6 +124,7 @@ bool isLocalAddress(const Poco::Net::SocketAddress & address, UInt16 clickhouse_ size_t getHostNameDifference(const std::string & local_hostname, const std::string & host) { + /// FIXME should we replace it with Levenstein distance? (we already have it in NamePrompter) size_t hostname_difference = 0; for (size_t i = 0; i < std::min(local_hostname.length(), host.length()); ++i) if (local_hostname[i] != host[i]) diff --git a/src/Common/mysqlxx/PoolWithFailover.cpp b/src/Common/mysqlxx/PoolWithFailover.cpp index 208ad10b63b..36dd713d454 100644 --- a/src/Common/mysqlxx/PoolWithFailover.cpp +++ b/src/Common/mysqlxx/PoolWithFailover.cpp @@ -127,7 +127,14 @@ PoolWithFailover::Entry PoolWithFailover::get() /// If we cannot connect to some replica due to pool overflow, than we will wait and connect. PoolPtr * full_pool = nullptr; - std::map> error_detail; + + struct ErrorDetail + { + int code; + std::string description; + }; + + std::unordered_map replica_name_to_error_detail; for (size_t try_no = 0; try_no < max_tries; ++try_no) { @@ -161,15 +168,8 @@ PoolWithFailover::Entry PoolWithFailover::get() } app.logger().warning("Connection to " + pool->getDescription() + " failed: " + e.displayText()); - //save all errors to error_detail - if (error_detail.contains(pool->getDescription())) - { - error_detail[pool->getDescription()] = {e.displayText(), e.code()}; - } - else - { - error_detail.insert({pool->getDescription(), {e.displayText(), e.code()}}); - } + replica_name_to_error_detail.insert_or_assign(pool->getDescription(), ErrorDetail{e.code(), e.displayText()}); + continue; } @@ -189,15 +189,19 @@ PoolWithFailover::Entry PoolWithFailover::get() DB::WriteBufferFromOwnString message; message << "Connections to all replicas failed: "; for (auto it = replicas_by_priority.begin(); it != replicas_by_priority.end(); ++it) + { for (auto jt = it->second.begin(); jt != it->second.end(); ++jt) { message << (it == replicas_by_priority.begin() && jt == it->second.begin() ? 
"" : ", ") << (*jt)->getDescription(); - if (error_detail.contains((*jt)->getDescription())) + + if (auto error_detail_it = replica_name_to_error_detail.find(((*jt)->getDescription())); + error_detail_it != replica_name_to_error_detail.end()) { - std::tuple error_and_code = error_detail[(*jt)->getDescription()]; - message << ", ERROR " << std::get<1>(error_and_code) << " : " << std::get<0>(error_and_code); + const auto & [code, description] = error_detail_it->second; + message << ", ERROR " << code << " : " << description; } } + } throw Poco::Exception(message.str()); } diff --git a/src/Compression/CompressionFactory.cpp b/src/Compression/CompressionFactory.cpp index ca5e5176d13..abf5e38a8c3 100644 --- a/src/Compression/CompressionFactory.cpp +++ b/src/Compression/CompressionFactory.cpp @@ -165,25 +165,36 @@ void registerCodecNone(CompressionCodecFactory & factory); void registerCodecLZ4(CompressionCodecFactory & factory); void registerCodecLZ4HC(CompressionCodecFactory & factory); void registerCodecZSTD(CompressionCodecFactory & factory); +void registerCodecMultiple(CompressionCodecFactory & factory); + + +/// Keeper use only general-purpose codecs, so we don't need these special codecs +/// in standalone build +#ifndef KEEPER_STANDALONE_BUILD + void registerCodecDelta(CompressionCodecFactory & factory); void registerCodecT64(CompressionCodecFactory & factory); void registerCodecDoubleDelta(CompressionCodecFactory & factory); void registerCodecGorilla(CompressionCodecFactory & factory); void registerCodecEncrypted(CompressionCodecFactory & factory); -void registerCodecMultiple(CompressionCodecFactory & factory); + +#endif CompressionCodecFactory::CompressionCodecFactory() { - registerCodecLZ4(*this); registerCodecNone(*this); + registerCodecLZ4(*this); registerCodecZSTD(*this); registerCodecLZ4HC(*this); + registerCodecMultiple(*this); + +#ifndef KEEPER_STANDALONE_BUILD registerCodecDelta(*this); registerCodecT64(*this); registerCodecDoubleDelta(*this); registerCodecGorilla(*this); registerCodecEncrypted(*this); - registerCodecMultiple(*this); +#endif default_codec = get("LZ4", {}); } diff --git a/src/Core/Block.cpp b/src/Core/Block.cpp index 5c93d6719fa..a7142ef7f2e 100644 --- a/src/Core/Block.cpp +++ b/src/Core/Block.cpp @@ -13,6 +13,7 @@ #include #include +#include namespace DB @@ -269,8 +270,18 @@ const ColumnWithTypeAndName & Block::safeGetByPosition(size_t position) const } -const ColumnWithTypeAndName * Block::findByName(const std::string & name) const +const ColumnWithTypeAndName * Block::findByName(const std::string & name, bool case_insensitive) const { + if (case_insensitive) + { + auto found = std::find_if(data.begin(), data.end(), [&](const auto & column) { return boost::iequals(column.name, name); }); + if (found == data.end()) + { + return nullptr; + } + return &*found; + } + auto it = index_by_name.find(name); if (index_by_name.end() == it) { @@ -280,19 +291,23 @@ const ColumnWithTypeAndName * Block::findByName(const std::string & name) const } -const ColumnWithTypeAndName & Block::getByName(const std::string & name) const +const ColumnWithTypeAndName & Block::getByName(const std::string & name, bool case_insensitive) const { - const auto * result = findByName(name); + const auto * result = findByName(name, case_insensitive); if (!result) - throw Exception("Not found column " + name + " in block. There are only columns: " + dumpNames() - , ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK); + throw Exception( + "Not found column " + name + " in block. 
There are only columns: " + dumpNames(), ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK); return *result; } -bool Block::has(const std::string & name) const +bool Block::has(const std::string & name, bool case_insensitive) const { + if (case_insensitive) + return std::find_if(data.begin(), data.end(), [&](const auto & column) { return boost::iequals(column.name, name); }) + != data.end(); + return index_by_name.end() != index_by_name.find(name); } @@ -301,8 +316,8 @@ size_t Block::getPositionByName(const std::string & name) const { auto it = index_by_name.find(name); if (index_by_name.end() == it) - throw Exception("Not found column " + name + " in block. There are only columns: " + dumpNames() - , ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK); + throw Exception( + "Not found column " + name + " in block. There are only columns: " + dumpNames(), ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK); return it->second; } diff --git a/src/Core/Block.h b/src/Core/Block.h index 66e16b70f47..c5d3e1ae35a 100644 --- a/src/Core/Block.h +++ b/src/Core/Block.h @@ -60,21 +60,21 @@ public: ColumnWithTypeAndName & safeGetByPosition(size_t position); const ColumnWithTypeAndName & safeGetByPosition(size_t position) const; - ColumnWithTypeAndName* findByName(const std::string & name) + ColumnWithTypeAndName* findByName(const std::string & name, bool case_insensitive = false) { return const_cast( - const_cast(this)->findByName(name)); + const_cast(this)->findByName(name, case_insensitive)); } - const ColumnWithTypeAndName * findByName(const std::string & name) const; + const ColumnWithTypeAndName * findByName(const std::string & name, bool case_insensitive = false) const; - ColumnWithTypeAndName & getByName(const std::string & name) + ColumnWithTypeAndName & getByName(const std::string & name, bool case_insensitive = false) { return const_cast( - const_cast(this)->getByName(name)); + const_cast(this)->getByName(name, case_insensitive)); } - const ColumnWithTypeAndName & getByName(const std::string & name) const; + const ColumnWithTypeAndName & getByName(const std::string & name, bool case_insensitive = false) const; Container::iterator begin() { return data.begin(); } Container::iterator end() { return data.end(); } @@ -83,7 +83,7 @@ public: Container::const_iterator cbegin() const { return data.cbegin(); } Container::const_iterator cend() const { return data.cend(); } - bool has(const std::string & name) const; + bool has(const std::string & name, bool case_insensitive = false) const; size_t getPositionByName(const std::string & name) const; diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 8d28696094b..f81b61ea648 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -47,6 +47,8 @@ class IColumn; M(UInt64, max_insert_delayed_streams_for_parallel_write, 0, "The maximum number of streams (columns) to delay final part flush. Default - auto (1000 in case of underlying storage supports parallel write, for example S3 and disabled otherwise)", 0) \ M(UInt64, max_final_threads, 16, "The maximum number of threads to read from table with FINAL.", 0) \ M(MaxThreads, max_threads, 0, "The maximum number of threads to execute the request. By default, it is determined automatically.", 0) \ + M(MaxThreads, max_download_threads, 4, "The maximum number of threads to download data (e.g. for URL engine).", 0) \ + M(UInt64, max_download_buffer_size, 10*1024*1024, "The maximal size of buffer for parallel downloading (e.g. 
for URL engine) per each thread.", 0) \ M(UInt64, max_read_buffer_size, DBMS_DEFAULT_BUFFER_SIZE, "The maximum size of the buffer to read from the filesystem.", 0) \ M(UInt64, max_distributed_connections, 1024, "The maximum number of connections for distributed processing of one query (should be greater than max_threads).", 0) \ M(UInt64, max_query_size, DBMS_DEFAULT_MAX_QUERY_SIZE, "Which part of the query can be read into RAM for parsing (the remaining data for INSERT, if any, is read later)", 0) \ @@ -614,11 +616,13 @@ class IColumn; M(Bool, input_format_tsv_empty_as_default, false, "Treat empty fields in TSV input as default values.", 0) \ M(Bool, input_format_tsv_enum_as_number, false, "Treat inserted enum values in TSV formats as enum indices \\N", 0) \ M(Bool, input_format_null_as_default, true, "For text input formats initialize null fields with default values if data type of this field is not nullable", 0) \ - M(Bool, input_format_use_lowercase_column_name, false, "Use lowercase column name while reading input formats", 0) \ M(Bool, input_format_arrow_import_nested, false, "Allow to insert array of structs into Nested table in Arrow input format.", 0) \ + M(Bool, input_format_arrow_case_insensitive_column_matching, false, "Ignore case when matching Arrow columns with CH columns.", 0) \ M(Bool, input_format_orc_import_nested, false, "Allow to insert array of structs into Nested table in ORC input format.", 0) \ M(Int64, input_format_orc_row_batch_size, 100'000, "Batch size when reading ORC stripes.", 0) \ + M(Bool, input_format_orc_case_insensitive_column_matching, false, "Ignore case when matching ORC columns with CH columns.", 0) \ M(Bool, input_format_parquet_import_nested, false, "Allow to insert array of structs into Nested table in Parquet input format.", 0) \ + M(Bool, input_format_parquet_case_insensitive_column_matching, false, "Ignore case when matching Parquet columns with CH columns.", 0) \ M(Bool, input_format_allow_seeks, true, "Allow seeks while reading in ORC/Parquet/Arrow input formats", 0) \ M(Bool, input_format_orc_allow_missing_columns, false, "Allow missing columns while reading ORC input formats", 0) \ M(Bool, input_format_parquet_allow_missing_columns, false, "Allow missing columns while reading Parquet input formats", 0) \ diff --git a/src/Core/SettingsEnums.cpp b/src/Core/SettingsEnums.cpp index ddd1c29785c..3f68038560c 100644 --- a/src/Core/SettingsEnums.cpp +++ b/src/Core/SettingsEnums.cpp @@ -149,4 +149,5 @@ IMPLEMENT_SETTING_ENUM(MsgPackUUIDRepresentation , ErrorCodes::BAD_ARGUMENTS, {"str", FormatSettings::MsgPackUUIDRepresentation::STR}, {"ext", FormatSettings::MsgPackUUIDRepresentation::EXT}}) + } diff --git a/src/DataTypes/DataTypeInterval.cpp b/src/DataTypes/DataTypeInterval.cpp index 57d071a8666..9faf0cec2d8 100644 --- a/src/DataTypes/DataTypeInterval.cpp +++ b/src/DataTypes/DataTypeInterval.cpp @@ -13,6 +13,9 @@ bool DataTypeInterval::equals(const IDataType & rhs) const void registerDataTypeInterval(DataTypeFactory & factory) { + factory.registerSimpleDataType("IntervalNanosecond", [] { return DataTypePtr(std::make_shared(IntervalKind::Nanosecond)); }); + factory.registerSimpleDataType("IntervalMicrosecond", [] { return DataTypePtr(std::make_shared(IntervalKind::Microsecond)); }); + factory.registerSimpleDataType("IntervalMillisecond", [] { return DataTypePtr(std::make_shared(IntervalKind::Millisecond)); }); factory.registerSimpleDataType("IntervalSecond", [] { return DataTypePtr(std::make_shared(IntervalKind::Second)); }); 
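The new `input_format_*_case_insensitive_column_matching` settings above feed into `Block::findByName(name, case_insensitive)`, which falls back to a linear scan with a case-insensitive comparison (the real code uses `boost::iequals`). A dependency-free sketch of that lookup, ASCII-only for simplicity:

```cpp
// Sketch of case-insensitive column lookup. NamedColumn/findByName here are
// simplified stand-ins for ColumnWithTypeAndName / Block::findByName.
#include <algorithm>
#include <cctype>
#include <string>
#include <vector>

struct NamedColumn { std::string name; /* type, data ... */ };

static bool iequalsAscii(const std::string & a, const std::string & b)
{
    return a.size() == b.size()
        && std::equal(a.begin(), a.end(), b.begin(),
                      [](unsigned char x, unsigned char y) { return std::tolower(x) == std::tolower(y); });
}

const NamedColumn * findByName(const std::vector<NamedColumn> & columns, const std::string & name, bool case_insensitive)
{
    if (case_insensitive)
    {
        auto found = std::find_if(columns.begin(), columns.end(),
                                  [&](const auto & column) { return iequalsAscii(column.name, name); });
        return found == columns.end() ? nullptr : &*found;
    }

    auto found = std::find_if(columns.begin(), columns.end(),
                              [&](const auto & column) { return column.name == name; });
    return found == columns.end() ? nullptr : &*found;
}
```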
factory.registerSimpleDataType("IntervalMinute", [] { return DataTypePtr(std::make_shared(IntervalKind::Minute)); }); factory.registerSimpleDataType("IntervalHour", [] { return DataTypePtr(std::make_shared(IntervalKind::Hour)); }); diff --git a/src/DataTypes/NestedUtils.cpp b/src/DataTypes/NestedUtils.cpp index df504bc34a8..8f5e40de5b8 100644 --- a/src/DataTypes/NestedUtils.cpp +++ b/src/DataTypes/NestedUtils.cpp @@ -15,6 +15,8 @@ #include +#include + namespace DB { @@ -227,14 +229,17 @@ void validateArraySizes(const Block & block) } -std::unordered_set getAllTableNames(const Block & block) +std::unordered_set getAllTableNames(const Block & block, bool to_lower_case) { std::unordered_set nested_table_names; - for (auto & name : block.getNames()) + for (const auto & name : block.getNames()) { auto nested_table_name = Nested::extractTableName(name); + if (to_lower_case) + boost::to_lower(nested_table_name); + if (!nested_table_name.empty()) - nested_table_names.insert(nested_table_name); + nested_table_names.insert(std::move(nested_table_name)); } return nested_table_names; } diff --git a/src/DataTypes/NestedUtils.h b/src/DataTypes/NestedUtils.h index 2ca5c17dc74..f6dc42d5c58 100644 --- a/src/DataTypes/NestedUtils.h +++ b/src/DataTypes/NestedUtils.h @@ -32,7 +32,7 @@ namespace Nested void validateArraySizes(const Block & block); /// Get all nested tables names from a block. - std::unordered_set getAllTableNames(const Block & block); + std::unordered_set getAllTableNames(const Block & block, bool to_lower_case = false); } } diff --git a/src/Databases/DatabaseMemory.h b/src/Databases/DatabaseMemory.h index b854d9be1f3..87fae115b59 100644 --- a/src/Databases/DatabaseMemory.h +++ b/src/Databases/DatabaseMemory.h @@ -50,6 +50,9 @@ public: void alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata) override; + /// This database can contain tables to backup. + bool hasTablesToBackup() const override { return true; } + private: String data_path; using NameToASTCreate = std::unordered_map; diff --git a/src/Databases/DatabaseOrdinary.h b/src/Databases/DatabaseOrdinary.h index 982be2024ce..2144f874b03 100644 --- a/src/Databases/DatabaseOrdinary.h +++ b/src/Databases/DatabaseOrdinary.h @@ -36,6 +36,9 @@ public: const StorageID & table_id, const StorageInMemoryMetadata & metadata) override; + /// This database can contain tables to backup. + bool hasTablesToBackup() const override { return true; } + protected: virtual void commitAlterTable( const StorageID & table_id, diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index d9d9f5b45f6..0c3cc56c061 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -88,6 +88,9 @@ DatabaseReplicated::DatabaseReplicated( /// If zookeeper chroot prefix is used, path should start with '/', because chroot concatenates without it. 
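`Nested::getAllTableNames` above gains a `to_lower_case` flag so nested table names can be matched case-insensitively. A sketch of the collection step; the split rule used here (name up to the first dot, empty if there is no dot) is an assumption for the example rather than the exact `Nested::extractTableName` behaviour:

```cpp
// Sketch of collecting nested table names with optional ASCII lowercasing.
// getAllNestedNames is an illustrative name, not the real function.
#include <algorithm>
#include <cctype>
#include <string>
#include <unordered_set>
#include <vector>

std::unordered_set<std::string> getAllNestedNames(const std::vector<std::string> & column_names, bool to_lower_case)
{
    std::unordered_set<std::string> nested_table_names;
    for (const auto & name : column_names)
    {
        auto dot = name.find('.');
        std::string nested_table_name = dot == std::string::npos ? std::string{} : name.substr(0, dot);

        if (to_lower_case)
            std::transform(nested_table_name.begin(), nested_table_name.end(), nested_table_name.begin(),
                           [](unsigned char c) { return static_cast<char>(std::tolower(c)); });

        if (!nested_table_name.empty())
            nested_table_names.insert(std::move(nested_table_name));
    }
    return nested_table_names;
}
```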
if (zookeeper_path.front() != '/') zookeeper_path = "/" + zookeeper_path; + + if (!db_settings.collection_name.value.empty()) + fillClusterAuthInfo(db_settings.collection_name.value, context_->getConfigRef()); } String DatabaseReplicated::getFullReplicaName() const @@ -191,22 +194,36 @@ ClusterPtr DatabaseReplicated::getClusterImpl() const shards.back().emplace_back(unescapeForFileName(host_port)); } - String username = db_settings.cluster_username; - String password = db_settings.cluster_password; UInt16 default_port = getContext()->getTCPPort(); - bool secure = db_settings.cluster_secure_connection; bool treat_local_as_remote = false; bool treat_local_port_as_remote = getContext()->getApplicationType() == Context::ApplicationType::LOCAL; return std::make_shared( getContext()->getSettingsRef(), shards, - username, - password, + cluster_auth_info.cluster_username, + cluster_auth_info.cluster_password, default_port, treat_local_as_remote, treat_local_port_as_remote, - secure); + cluster_auth_info.cluster_secure_connection, + /*priority=*/1, + database_name, + cluster_auth_info.cluster_secret); +} + + +void DatabaseReplicated::fillClusterAuthInfo(String collection_name, const Poco::Util::AbstractConfiguration & config_ref) +{ + const auto & config_prefix = fmt::format("named_collections.{}", collection_name); + + if (!config_ref.has(config_prefix)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", collection_name); + + cluster_auth_info.cluster_username = config_ref.getString(config_prefix + ".cluster_username", ""); + cluster_auth_info.cluster_password = config_ref.getString(config_prefix + ".cluster_password", ""); + cluster_auth_info.cluster_secret = config_ref.getString(config_prefix + ".cluster_secret", ""); + cluster_auth_info.cluster_secure_connection = config_ref.getBool(config_prefix + ".cluster_secure_connection", false); } void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(bool force_attach) diff --git a/src/Databases/DatabaseReplicated.h b/src/Databases/DatabaseReplicated.h index fcb8a2c4d33..ac212e168b8 100644 --- a/src/Databases/DatabaseReplicated.h +++ b/src/Databases/DatabaseReplicated.h @@ -75,6 +75,16 @@ private: bool createDatabaseNodesInZooKeeper(const ZooKeeperPtr & current_zookeeper); void createReplicaNodesInZooKeeper(const ZooKeeperPtr & current_zookeeper); + struct + { + String cluster_username{"default"}; + String cluster_password; + String cluster_secret; + bool cluster_secure_connection{false}; + } cluster_auth_info; + + void fillClusterAuthInfo(String collection_name, const Poco::Util::AbstractConfiguration & config); + void checkQueryValid(const ASTPtr & query, ContextPtr query_context) const; void recoverLostReplica(const ZooKeeperPtr & current_zookeeper, UInt32 our_log_ptr, UInt32 max_log_ptr); diff --git a/src/Databases/DatabaseReplicatedSettings.h b/src/Databases/DatabaseReplicatedSettings.h index 0aff26712c0..8bed1ababf6 100644 --- a/src/Databases/DatabaseReplicatedSettings.h +++ b/src/Databases/DatabaseReplicatedSettings.h @@ -8,12 +8,11 @@ namespace DB class ASTStorage; #define LIST_OF_DATABASE_REPLICATED_SETTINGS(M) \ - M(Float, max_broken_tables_ratio, 0.5, "Do not recover replica automatically if the ratio of staled tables to all tables is greater", 0) \ + M(Float, max_broken_tables_ratio, 0.5, "Do not recover replica automatically if the ratio of staled tables to all tables is greater", 0) \ M(UInt64, max_replication_lag_to_enqueue, 10, "Replica will throw exception on attempt to execute query if 
its replication lag greater", 0) \ M(UInt64, wait_entry_commited_timeout_sec, 3600, "Replicas will try to cancel query if timeout exceed, but initiator host has not executed it yet", 0) \ - M(String, cluster_username, "default", "Username to use when connecting to hosts of cluster", 0) \ - M(String, cluster_password, "", "Password to use when connecting to hosts of cluster", 0) \ - M(Bool, cluster_secure_connection, false, "Enable TLS when connecting to hosts of cluster", 0) \ + M(String, collection_name, "", "A name of a collection defined in server's config where all info for cluster authentication is defined", 0) \ + DECLARE_SETTINGS_TRAITS(DatabaseReplicatedSettingsTraits, LIST_OF_DATABASE_REPLICATED_SETTINGS) diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h index f95653feb20..51d4b8bb6b1 100644 --- a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -289,12 +289,6 @@ public: throw Exception(getEngineName() + ": RENAME DATABASE is not supported", ErrorCodes::NOT_IMPLEMENTED); } - /// Whether the contained tables should be written to a backup. - virtual DatabaseTablesIteratorPtr getTablesIteratorForBackup(ContextPtr context) const - { - return getTablesIterator(context); /// By default we backup each table. - } - /// Returns path for persistent data storage if the database supports it, empty string otherwise virtual String getDataPath() const { return {}; } @@ -335,6 +329,10 @@ public: throw Exception(ErrorCodes::LOGICAL_ERROR, "Database engine {} does not run a replication thread!", getEngineName()); } + /// Returns true if the backup of the database is hollow, which means it doesn't contain + /// any tables which can be stored to a backup. + virtual bool hasTablesToBackup() const { return false; } + virtual ~IDatabase() = default; protected: diff --git a/src/Disks/S3/DiskS3.cpp b/src/Disks/S3/DiskS3.cpp index e46620d9d1f..e05ccef74c0 100644 --- a/src/Disks/S3/DiskS3.cpp +++ b/src/Disks/S3/DiskS3.cpp @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -264,32 +265,6 @@ std::unique_ptr DiskS3::writeFile(const String & path, LOG_TRACE(log, "{} to file by path: {}. S3 path: {}", mode == WriteMode::Rewrite ? "Write" : "Append", backQuote(metadata_disk->getPath() + path), remote_fs_root_path + blob_name); - ScheduleFunc schedule = [pool = &getThreadPoolWriter(), thread_group = CurrentThread::getGroup()](auto callback) - { - pool->scheduleOrThrow([callback = std::move(callback), thread_group]() - { - if (thread_group) - CurrentThread::attachTo(thread_group); - - SCOPE_EXIT_SAFE( - if (thread_group) - CurrentThread::detachQueryIfNotDetached(); - - /// After we detached from the thread_group, parent for memory_tracker inside ThreadStatus will be reset to it's parent. - /// Typically, it may be changes from Process to User. - /// Usually it could be ok, because thread pool task is executed before user-level memory tracker is destroyed. - /// However, thread could stay alive inside the thread pool, and it's ThreadStatus as well. - /// When, finally, we destroy the thread (and the ThreadStatus), - /// it can use memory tracker in the ~ThreadStatus in order to alloc/free untracked_memory,\ - /// and by this time user-level memory tracker may be already destroyed. - /// - /// As a work-around, reset memory tracker to total, which is always alive. 
- CurrentThread::get().memory_tracker.setParent(&total_memory_tracker); - ); - callback(); - }); - }; - auto s3_buffer = std::make_unique( settings->client, bucket, @@ -299,7 +274,7 @@ std::unique_ptr DiskS3::writeFile(const String & path, settings->s3_upload_part_size_multiply_parts_count_threshold, settings->s3_max_single_part_upload_size, std::move(object_metadata), - buf_size, std::move(schedule)); + buf_size, threadPoolCallbackRunner(getThreadPoolWriter())); auto create_metadata_callback = [this, path, blob_name, mode] (size_t count) { diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 08554cf7e07..3aa82cb79b4 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -89,10 +89,10 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.json.quote_64bit_integers = settings.output_format_json_quote_64bit_integers; format_settings.json.quote_denormals = settings.output_format_json_quote_denormals; format_settings.null_as_default = settings.input_format_null_as_default; - format_settings.use_lowercase_column_name = settings.input_format_use_lowercase_column_name; format_settings.decimal_trailing_zeros = settings.output_format_decimal_trailing_zeros; format_settings.parquet.row_group_size = settings.output_format_parquet_row_group_size; format_settings.parquet.import_nested = settings.input_format_parquet_import_nested; + format_settings.parquet.case_insensitive_column_matching = settings.input_format_parquet_case_insensitive_column_matching; format_settings.parquet.allow_missing_columns = settings.input_format_parquet_allow_missing_columns; format_settings.pretty.charset = settings.output_format_pretty_grid_charset.toString() == "ASCII" ? FormatSettings::Pretty::Charset::ASCII : FormatSettings::Pretty::Charset::UTF8; format_settings.pretty.color = settings.output_format_pretty_color; @@ -123,9 +123,11 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings) format_settings.arrow.low_cardinality_as_dictionary = settings.output_format_arrow_low_cardinality_as_dictionary; format_settings.arrow.import_nested = settings.input_format_arrow_import_nested; format_settings.arrow.allow_missing_columns = settings.input_format_arrow_allow_missing_columns; + format_settings.arrow.case_insensitive_column_matching = settings.input_format_arrow_case_insensitive_column_matching; format_settings.orc.import_nested = settings.input_format_orc_import_nested; format_settings.orc.allow_missing_columns = settings.input_format_orc_allow_missing_columns; format_settings.orc.row_batch_size = settings.input_format_orc_row_batch_size; + format_settings.orc.case_insensitive_column_matching = settings.input_format_orc_case_insensitive_column_matching; format_settings.defaults_for_omitted_fields = settings.input_format_defaults_for_omitted_fields; format_settings.capn_proto.enum_comparing_mode = settings.format_capn_proto_enum_comparising_mode; format_settings.seekable_read = settings.input_format_allow_seeks; diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index 4881c1a43c8..bd0a84d9ded 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ -32,7 +32,6 @@ struct FormatSettings bool null_as_default = true; bool decimal_trailing_zeros = false; bool defaults_for_omitted_fields = true; - bool use_lowercase_column_name = false; bool seekable_read = true; UInt64 max_rows_to_read_for_schema_inference = 100; @@ -75,6 +74,7 @@ struct FormatSettings bool 
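The DiskS3 hunk above replaces the hand-rolled ScheduleFunc (thread-group attach/detach plus the memory-tracker workaround) with the shared threadPoolCallbackRunner() helper. Stripped of the ClickHouse-specific bookkeeping, such a runner is just "capture a pool once, return a callable that schedules whatever callback the writer hands it"; the pool below is a toy stand-in, not ClickHouse's ThreadPool:

#include <functional>
#include <thread>
#include <utility>
#include <vector>

// Minimal stand-in for a thread pool: runs each task on a fresh thread.
// The real ThreadPool reuses workers and may throw when overloaded.
struct ToyThreadPool
{
    std::vector<std::thread> workers;

    void scheduleOrThrow(std::function<void()> task) { workers.emplace_back(std::move(task)); }

    ~ToyThreadPool()
    {
        for (auto & w : workers)
            w.join();
    }
};

using CallbackRunner = std::function<void(std::function<void()>)>;

// Shape of threadPoolCallbackRunner(): the real helper additionally attaches the
// worker to the query's thread group and resets the memory tracker, which is
// exactly the inline code the DiskS3 diff removes.
CallbackRunner makeCallbackRunner(ToyThreadPool & pool)
{
    return [&pool](std::function<void()> callback)
    {
        pool.scheduleOrThrow(std::move(callback));
    };
}

int main()
{
    ToyThreadPool pool;
    CallbackRunner run_in_pool = makeCallbackRunner(pool);
    run_in_pool([] { /* e.g. upload one S3 part */ });
}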
low_cardinality_as_dictionary = false; bool import_nested = false; bool allow_missing_columns = false; + bool case_insensitive_column_matching = false; } arrow; struct @@ -137,6 +137,7 @@ struct FormatSettings UInt64 row_group_size = 1000000; bool import_nested = false; bool allow_missing_columns = false; + bool case_insensitive_column_matching = false; } parquet; struct Pretty @@ -217,6 +218,7 @@ struct FormatSettings bool import_nested = false; bool allow_missing_columns = false; int64_t row_batch_size = 100'000; + bool case_insensitive_column_matching = false; } orc; /// For capnProto format we should determine how to diff --git a/src/Formats/registerFormats.cpp b/src/Formats/registerFormats.cpp index 210ef1953b1..8c5955b2108 100644 --- a/src/Formats/registerFormats.cpp +++ b/src/Formats/registerFormats.cpp @@ -13,6 +13,7 @@ void registerFileSegmentationEngineCSV(FormatFactory & factory); void registerFileSegmentationEngineJSONEachRow(FormatFactory & factory); void registerFileSegmentationEngineRegexp(FormatFactory & factory); void registerFileSegmentationEngineJSONAsString(FormatFactory & factory); +void registerFileSegmentationEngineJSONAsObject(FormatFactory & factory); void registerFileSegmentationEngineJSONCompactEachRow(FormatFactory & factory); /// Formats for both input/output. @@ -103,6 +104,7 @@ void registerProtobufSchemaReader(FormatFactory & factory); void registerProtobufListSchemaReader(FormatFactory & factory); void registerLineAsStringSchemaReader(FormatFactory & factory); void registerJSONAsStringSchemaReader(FormatFactory & factory); +void registerJSONAsObjectSchemaReader(FormatFactory & factory); void registerRawBLOBSchemaReader(FormatFactory & factory); void registerMsgPackSchemaReader(FormatFactory & factory); void registerCapnProtoSchemaReader(FormatFactory & factory); @@ -123,6 +125,7 @@ void registerFormats() registerFileSegmentationEngineJSONEachRow(factory); registerFileSegmentationEngineRegexp(factory); registerFileSegmentationEngineJSONAsString(factory); + registerFileSegmentationEngineJSONAsObject(factory); registerFileSegmentationEngineJSONCompactEachRow(factory); registerInputFormatNative(factory); @@ -207,6 +210,7 @@ void registerFormats() registerProtobufListSchemaReader(factory); registerLineAsStringSchemaReader(factory); registerJSONAsStringSchemaReader(factory); + registerJSONAsObjectSchemaReader(factory); registerRawBLOBSchemaReader(factory); registerMsgPackSchemaReader(factory); registerCapnProtoSchemaReader(factory); diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index a7f06689820..bc1ae807e7d 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -41,6 +41,11 @@ namespace ErrorCodes throw Exception("Illegal type Date of argument for function " + std::string(name), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } + static inline UInt32 dateTimeIsNotSupported(const char * name) + { + throw Exception("Illegal type DateTime of argument for function " + std::string(name), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + /// This factor transformation will say that the function is monotone everywhere. 
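The new *_case_insensitive_column_matching settings all reduce to the same lookup: index the file's columns by (optionally lower-cased) name and resolve the table's requested column through that index. A small self-contained sketch, not tied to any of the actual Arrow/Parquet/ORC readers:

#include <algorithm>
#include <cassert>
#include <cctype>
#include <optional>
#include <string>
#include <unordered_map>
#include <vector>

static std::string toLower(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { return std::tolower(c); });
    return s;
}

// Index the file's columns; keys are lower-cased when matching is case-insensitive,
// so "ID", "Id" and "id" all resolve to the same entry.
static std::optional<size_t> findColumn(
    const std::vector<std::string> & file_columns, const std::string & requested, bool case_insensitive)
{
    std::unordered_map<std::string, size_t> index;
    for (size_t i = 0; i < file_columns.size(); ++i)
        index.emplace(case_insensitive ? toLower(file_columns[i]) : file_columns[i], i);

    auto it = index.find(case_insensitive ? toLower(requested) : requested);
    if (it == index.end())
        return std::nullopt;
    return it->second;
}

int main()
{
    std::vector<std::string> file_columns = {"Id", "UserName"};
    assert(findColumn(file_columns, "username", /*case_insensitive=*/true) == std::optional<size_t>{1});
    assert(!findColumn(file_columns, "username", /*case_insensitive=*/false));
}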
struct ZeroTransform { @@ -311,6 +316,133 @@ struct ToStartOfSecondImpl using FactorTransform = ZeroTransform; }; +struct ToStartOfMillisecondImpl +{ + static constexpr auto name = "toStartOfMillisecond"; + + static inline DateTime64 execute(const DateTime64 & datetime64, Int64 scale_multiplier, const DateLUTImpl &) + { + // given that scale is 6, scale_multiplier is 1000000 + // for DateTime64 value of 123.456789: + // 123456789 - 789 = 123456000 + // for DateTime64 value of -123.456789: + // -123456789 - (1000 + (-789)) = -123457000 + + if (scale_multiplier == 1000) + { + return datetime64; + } + else if (scale_multiplier <= 1000) + { + return datetime64 * (1000 / scale_multiplier); + } + else + { + auto droppable_part_with_sign = DecimalUtils::getFractionalPartWithScaleMultiplier(datetime64, scale_multiplier / 1000); + + if (droppable_part_with_sign < 0) + droppable_part_with_sign += scale_multiplier; + + return datetime64 - droppable_part_with_sign; + } + } + + static inline UInt32 execute(UInt32, const DateLUTImpl &) + { + throw Exception("Illegal type DateTime of argument for function " + std::string(name), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + static inline UInt32 execute(Int32, const DateLUTImpl &) + { + return dateIsNotSupported(name); + } + static inline UInt32 execute(UInt16, const DateLUTImpl &) + { + return dateIsNotSupported(name); + } + + using FactorTransform = ZeroTransform; +}; + +struct ToStartOfMicrosecondImpl +{ + static constexpr auto name = "toStartOfMicrosecond"; + + static inline DateTime64 execute(const DateTime64 & datetime64, Int64 scale_multiplier, const DateLUTImpl &) + { + // @see ToStartOfMillisecondImpl + + if (scale_multiplier == 1000000) + { + return datetime64; + } + else if (scale_multiplier <= 1000000) + { + return datetime64 * (1000000 / scale_multiplier); + } + else + { + auto droppable_part_with_sign = DecimalUtils::getFractionalPartWithScaleMultiplier(datetime64, scale_multiplier / 1000000); + + if (droppable_part_with_sign < 0) + droppable_part_with_sign += scale_multiplier; + + return datetime64 - droppable_part_with_sign; + } + } + + static inline UInt32 execute(UInt32, const DateLUTImpl &) + { + throw Exception("Illegal type DateTime of argument for function " + std::string(name), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + static inline UInt32 execute(Int32, const DateLUTImpl &) + { + return dateIsNotSupported(name); + } + static inline UInt32 execute(UInt16, const DateLUTImpl &) + { + return dateIsNotSupported(name); + } + + using FactorTransform = ZeroTransform; +}; + +struct ToStartOfNanosecondImpl +{ + static constexpr auto name = "toStartOfNanosecond"; + + static inline DateTime64 execute(const DateTime64 & datetime64, Int64 scale_multiplier, const DateLUTImpl &) + { + // @see ToStartOfMillisecondImpl + if (scale_multiplier == 1000000000) + { + return datetime64; + } + else if (scale_multiplier <= 1000000000) + { + return datetime64 * (1000000000 / scale_multiplier); + } + else + { + throw Exception("Illegal type of argument for function " + std::string(name) + ", DateTime64 expected", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + } + + static inline UInt32 execute(UInt32, const DateLUTImpl &) + { + throw Exception("Illegal type DateTime of argument for function " + std::string(name), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + static inline UInt32 execute(Int32, const DateLUTImpl &) + { + return dateIsNotSupported(name); + } + static inline UInt32 execute(UInt16, const DateLUTImpl &) + { + return dateIsNotSupported(name); + } + + 
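The sub-second toStartOf* transforms above lean on the DateTime64 representation: an Int64 tick count at 10^scale ticks per second. Truncating to milliseconds therefore means dropping the ticks below 10^(scale-3), with a fix-up so negative (pre-1970) values still round towards minus infinity. A standalone rendering of that arithmetic, with a plain modulo in place of DecimalUtils::getFractionalPartWithScaleMultiplier, reproducing the worked example from the comment:

#include <cassert>
#include <cstdint>

// `ticks` is a DateTime64 raw value at 10^scale ticks per second,
// `scale_multiplier` is 10^scale. The result is expressed at max(scale, 3).
static int64_t startOfMillisecond(int64_t ticks, int64_t scale_multiplier)
{
    if (scale_multiplier <= 1000)                  // scale <= 3: nothing to drop,
        return ticks * (1000 / scale_multiplier);  // just widen to milliseconds
    int64_t droppable = ticks % (scale_multiplier / 1000);
    if (droppable < 0)                             // keep rounding towards -inf for pre-1970 values
        droppable += scale_multiplier / 1000;
    return ticks - droppable;
}

int main()
{
    // scale 6 (microseconds): 123.456789 s -> 123.456000 s
    assert(startOfMillisecond(123456789, 1000000) == 123456000);
    // negative value: -123.456789 s -> -123.457000 s
    assert(startOfMillisecond(-123456789, 1000000) == -123457000);
}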
using FactorTransform = ZeroTransform; +}; + struct ToStartOfFiveMinuteImpl { static constexpr auto name = "toStartOfFiveMinute"; diff --git a/src/Functions/FunctionDateOrDateTimeAddInterval.h b/src/Functions/FunctionDateOrDateTimeAddInterval.h index cb48b819481..fbfc9e9bc1f 100644 --- a/src/Functions/FunctionDateOrDateTimeAddInterval.h +++ b/src/Functions/FunctionDateOrDateTimeAddInterval.h @@ -40,26 +40,158 @@ namespace ErrorCodes /// - 'AddSecondsImpl::execute(UInt32, ...) -> UInt32' is available to the ClickHouse users as 'addSeconds(DateTime, ...) -> DateTime' /// - 'AddSecondsImpl::execute(UInt16, ...) -> UInt32' is available to the ClickHouse users as 'addSeconds(Date, ...) -> DateTime' +struct AddNanosecondsImpl +{ + static constexpr auto name = "addNanoseconds"; + + static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents + execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &, UInt16 scale = DataTypeDateTime64::default_scale) + { + Int64 multiplier = DecimalUtils::scaleMultiplier(9 - scale); + auto division = std::div(t.fractional * multiplier + delta, static_cast(1000000000)); + return {t.whole * multiplier + division.quot, t.fractional * multiplier + delta}; + } + + static inline NO_SANITIZE_UNDEFINED DateTime64 + execute(DateTime64 t, Int64 delta, const DateLUTImpl &, UInt16 scale = 0) + { + Int64 multiplier = DecimalUtils::scaleMultiplier(9 - scale); + return t * multiplier + delta; + } + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) + { + Int64 multiplier = DecimalUtils::scaleMultiplier(9); + return t * multiplier + delta; + } + + static inline NO_SANITIZE_UNDEFINED DateTime64 execute(UInt16, Int64, const DateLUTImpl &, UInt16 = 0) + { + throw Exception("addNanoSeconds() cannot be used with Date", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + + static inline NO_SANITIZE_UNDEFINED DateTime64 execute(Int32, Int64, const DateLUTImpl &, UInt16 = 0) + { + throw Exception("addNanoSeconds() cannot be used with Date32", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } +}; + +struct AddMicrosecondsImpl +{ + static constexpr auto name = "addMicroseconds"; + + static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents + execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &, UInt16 scale = 0) + { + Int64 multiplier = DecimalUtils::scaleMultiplier(std::abs(6 - scale)); + if (scale <= 6) + { + auto division = std::div((t.fractional + delta), static_cast(10e6)); + return {t.whole * multiplier + division.quot, division.rem}; + } + else + { + auto division = std::div((t.fractional + delta * multiplier), static_cast(10e6 * multiplier)); + return {t.whole + division.quot, division.rem}; + } + } + + static inline NO_SANITIZE_UNDEFINED DateTime64 + execute(DateTime64 t, Int64 delta, const DateLUTImpl &, UInt16 scale = 0) + { + Int64 multiplier = DecimalUtils::scaleMultiplier(std::abs(6 - scale)); + return scale <= 6 ? 
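AddNanosecondsImpl always produces a scale-9 result, so its DateTime64 overload widens the stored ticks to nanosecond precision and then adds the delta directly. The same arithmetic in isolation (pow10 stands in for DecimalUtils::scaleMultiplier):

#include <cassert>
#include <cstdint>

static int64_t pow10(unsigned n)
{
    int64_t r = 1;
    while (n--)
        r *= 10;
    return r;
}

// Add `delta` nanoseconds to a DateTime64 stored with `scale` fractional
// digits (scale <= 9); the result is expressed at scale 9.
static int64_t addNanoseconds(int64_t ticks, int64_t delta, unsigned scale)
{
    int64_t multiplier = pow10(9 - scale);
    return ticks * multiplier + delta;
}

int main()
{
    // 1.5 s at scale 3 (1500 ms) plus 250 ns -> 1'500'000'250 ticks at scale 9
    assert(addNanoseconds(1500, 250, 3) == 1500000250LL);
}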
t * multiplier + delta : t + delta * multiplier; + } + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) + { + Int64 multiplier = DecimalUtils::scaleMultiplier(6); + return t * multiplier + delta; + } + + static inline NO_SANITIZE_UNDEFINED DateTime64 execute(UInt16, Int64, const DateLUTImpl &, UInt16 = 0) + { + throw Exception("addMicroSeconds() cannot be used with Date", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + + static inline NO_SANITIZE_UNDEFINED DateTime64 execute(Int32, Int64, const DateLUTImpl &, UInt16 = 0) + { + throw Exception("addMicroSeconds() cannot be used with Date32", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } +}; + +struct AddMillisecondsImpl +{ + static constexpr auto name = "addMilliseconds"; + + static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents + execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &, UInt16 scale = DataTypeDateTime64::default_scale) + { + Int64 multiplier = DecimalUtils::scaleMultiplier(std::abs(3 - scale)); + if (scale <= 3) + { + auto division = std::div((t.fractional + delta), static_cast(1000)); + return {t.whole * multiplier + division.quot, division.rem}; + } + else + { + auto division = std::div((t.fractional + delta * multiplier), static_cast(1000 * multiplier)); + return {t.whole + division.quot,division.rem}; + } + } + + static inline NO_SANITIZE_UNDEFINED DateTime64 + execute(DateTime64 t, Int64 delta, const DateLUTImpl &, UInt16 scale = 0) + { + Int64 multiplier = DecimalUtils::scaleMultiplier(std::abs(3 - scale)); + return scale <= 3 ? t * multiplier + delta : t + delta * multiplier; + } + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) + { + Int64 multiplier = DecimalUtils::scaleMultiplier(3); + return t * multiplier + delta; + } + + static inline NO_SANITIZE_UNDEFINED DateTime64 execute(UInt16, Int64, const DateLUTImpl &, UInt16 = 0) + { + throw Exception("addMilliSeconds() cannot be used with Date", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + + static inline NO_SANITIZE_UNDEFINED DateTime64 execute(Int32, Int64, const DateLUTImpl &, UInt16 = 0) + { + throw Exception("addMilliSeconds() cannot be used with Date32", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } +}; + struct AddSecondsImpl { static constexpr auto name = "addSeconds"; static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents - execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &) + execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { return {t.whole + delta, t.fractional}; } - static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &) + static inline NO_SANITIZE_UNDEFINED DateTime64 + execute(DateTime64 t, Int64 delta, const DateLUTImpl &, UInt16 scale = 0) + { + return t + delta * DecimalUtils::scaleMultiplier(scale); + } + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { return t + delta; } - static inline NO_SANITIZE_UNDEFINED Int64 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone) + + static inline NO_SANITIZE_UNDEFINED Int64 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { // use default datetime64 scale return (time_zone.fromDayNum(ExtendedDayNum(d)) + delta) * 1000; } - static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) + + static inline 
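AddMillisecondsImpl (and its micro/nano siblings) has two regimes: when the source scale is at most the unit's own precision the value is widened and the delta added as-is, and when the source is finer the delta is widened instead, so precision is never lost. The DateTime64 overload condensed into a standalone function:

#include <cassert>
#include <cstdint>

static int64_t pow10(unsigned n)
{
    int64_t r = 1;
    while (n--)
        r *= 10;
    return r;
}

// Mirrors the DateTime64 overload of AddMillisecondsImpl: the result scale is
// max(scale, 3), so one side is widened to the other's precision before adding.
static int64_t addMilliseconds(int64_t ticks, int64_t delta, unsigned scale)
{
    int64_t multiplier = pow10(scale <= 3 ? 3 - scale : scale - 3);
    return scale <= 3 ? ticks * multiplier + delta : ticks + delta * multiplier;
}

int main()
{
    assert(addMilliseconds(15, 7, 1) == 1507);         // 1.5 s @ scale 1 + 7 ms = 1.507 s @ scale 3
    assert(addMilliseconds(1500000, 7, 6) == 1507000); // 1.5 s @ scale 6 + 7 ms = 1.507 s @ scale 6
}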
NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.fromDayNum(DayNum(d)) + delta; } @@ -70,21 +202,29 @@ struct AddMinutesImpl static constexpr auto name = "addMinutes"; static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents - execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &) + execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { return {t.whole + delta * 60, t.fractional}; } - static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &) + static inline NO_SANITIZE_UNDEFINED DateTime64 + execute(DateTime64 t, Int64 delta, const DateLUTImpl &, UInt16 scale = 0) + { + return t + 60 * delta * DecimalUtils::scaleMultiplier(scale); + } + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { return t + delta * 60; } - static inline NO_SANITIZE_UNDEFINED Int64 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone) + + static inline NO_SANITIZE_UNDEFINED Int64 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { // use default datetime64 scale return (time_zone.fromDayNum(ExtendedDayNum(d)) + delta * 60) * 1000; } - static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.fromDayNum(DayNum(d)) + delta * 60; } @@ -95,20 +235,29 @@ struct AddHoursImpl static constexpr auto name = "addHours"; static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents - execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &) + execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { return {t.whole + delta * 3600, t.fractional}; } - static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &) + + static inline NO_SANITIZE_UNDEFINED DateTime64 + execute(DateTime64 t, Int64 delta, const DateLUTImpl &, UInt16 scale = 0) + { + return t + 3600 * delta * DecimalUtils::scaleMultiplier(scale); + } + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl &, UInt16 = 0) { return t + delta * 3600; } - static inline NO_SANITIZE_UNDEFINED Int64 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone) + + static inline NO_SANITIZE_UNDEFINED Int64 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { // use default datetime64 scale return (time_zone.fromDayNum(ExtendedDayNum(d)) + delta * 3600) * 1000; } - static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.fromDayNum(DayNum(d)) + delta * 3600; } @@ -119,22 +268,30 @@ struct AddDaysImpl static constexpr auto name = "addDays"; static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents - execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone) + execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return {time_zone.addDays(t.whole, delta), t.fractional}; } - static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED 
DateTime64 + execute(DateTime64 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 scale = 0) + { + auto multiplier = DecimalUtils::scaleMultiplier(scale); + auto d = std::div(t, multiplier); + return time_zone.addDays(d.quot, delta) * multiplier + d.rem; + } + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addDays(t, delta); } - static inline NO_SANITIZE_UNDEFINED UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl &) + static inline NO_SANITIZE_UNDEFINED UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl &, UInt16 = 0) { return d + delta; } - static inline NO_SANITIZE_UNDEFINED Int32 execute(Int32 d, Int64 delta, const DateLUTImpl &) + static inline NO_SANITIZE_UNDEFINED Int32 execute(Int32 d, Int64 delta, const DateLUTImpl &, UInt16 = 0) { return d + delta; } @@ -145,22 +302,30 @@ struct AddWeeksImpl static constexpr auto name = "addWeeks"; static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents - execute(DecimalUtils::DecimalComponents t, Int32 delta, const DateLUTImpl & time_zone) + execute(DecimalUtils::DecimalComponents t, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return {time_zone.addWeeks(t.whole, delta), t.fractional}; } - static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int32 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED DateTime64 + execute(DateTime64 t, Int32 delta, const DateLUTImpl & time_zone, UInt16 scale = 0) + { + auto multiplier = DecimalUtils::scaleMultiplier(scale); + auto d = std::div(t, multiplier); + return time_zone.addDays(d.quot, delta * 7) * multiplier + d.rem; + } + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addWeeks(t, delta); } - static inline NO_SANITIZE_UNDEFINED UInt16 execute(UInt16 d, Int32 delta, const DateLUTImpl &) + static inline NO_SANITIZE_UNDEFINED UInt16 execute(UInt16 d, Int32 delta, const DateLUTImpl &, UInt16 = 0) { return d + delta * 7; } - static inline NO_SANITIZE_UNDEFINED Int32 execute(Int32 d, Int32 delta, const DateLUTImpl &) + static inline NO_SANITIZE_UNDEFINED Int32 execute(Int32 d, Int32 delta, const DateLUTImpl &, UInt16 = 0) { return d + delta * 7; } @@ -170,23 +335,31 @@ struct AddMonthsImpl { static constexpr auto name = "addMonths"; - static inline DecimalUtils::DecimalComponents - execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents + execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return {time_zone.addMonths(t.whole, delta), t.fractional}; } - static inline UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED DateTime64 + execute(DateTime64 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 scale = 0) + { + auto multiplier = DecimalUtils::scaleMultiplier(scale); + auto d = std::div(t, multiplier); + return time_zone.addMonths(d.quot, delta) * multiplier + d.rem; + } + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addMonths(t, delta); } - static inline UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return 
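For calendar-sized units (days, weeks, months, quarters, years) the new DateTime64 overloads all follow one pattern: split the tick count into whole seconds and sub-second remainder with std::div, push the whole-second part through the time-zone-aware helper, then re-attach the remainder. Sketched with a flat 86400-seconds-per-day stand-in for DateLUTImpl::addDays (the real helper is time-zone and DST aware):

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Stand-in for DateLUTImpl::addDays on whole seconds.
static int64_t addDaysWholeSeconds(int64_t seconds, int64_t delta)
{
    return seconds + delta * 86400;
}

// Pattern used by AddDaysImpl/AddWeeksImpl/AddMonthsImpl for DateTime64:
// split ticks into (whole seconds, fractional ticks), shift the whole part,
// then glue the fraction back on.
static int64_t addDaysDateTime64(int64_t ticks, int64_t delta, int64_t scale_multiplier)
{
    auto d = std::div(ticks, scale_multiplier);
    return addDaysWholeSeconds(d.quot, delta) * scale_multiplier + d.rem;
}

int main()
{
    // 1.250 s at scale 3, plus one day
    assert(addDaysDateTime64(1250, 1, 1000) == 86401250);
}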
time_zone.addMonths(DayNum(d), delta); } - static inline Int32 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED Int32 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addMonths(ExtendedDayNum(d), delta); } @@ -197,22 +370,30 @@ struct AddQuartersImpl static constexpr auto name = "addQuarters"; static inline DecimalUtils::DecimalComponents - execute(DecimalUtils::DecimalComponents t, Int32 delta, const DateLUTImpl & time_zone) + execute(DecimalUtils::DecimalComponents t, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return {time_zone.addQuarters(t.whole, delta), t.fractional}; } - static inline UInt32 execute(UInt32 t, Int32 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED DateTime64 + execute(DateTime64 t, Int32 delta, const DateLUTImpl & time_zone, UInt16 scale = 0) + { + auto multiplier = DecimalUtils::scaleMultiplier(scale); + auto d = std::div(t, multiplier); + return time_zone.addQuarters(d.quot, delta) * multiplier + d.rem; + } + + static inline UInt32 execute(UInt32 t, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addQuarters(t, delta); } - static inline UInt16 execute(UInt16 d, Int32 delta, const DateLUTImpl & time_zone) + static inline UInt16 execute(UInt16 d, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addQuarters(DayNum(d), delta); } - static inline Int32 execute(Int32 d, Int32 delta, const DateLUTImpl & time_zone) + static inline Int32 execute(Int32 d, Int32 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addQuarters(ExtendedDayNum(d), delta); } @@ -222,23 +403,31 @@ struct AddYearsImpl { static constexpr auto name = "addYears"; - static inline DecimalUtils::DecimalComponents - execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED DecimalUtils::DecimalComponents + execute(DecimalUtils::DecimalComponents t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return {time_zone.addYears(t.whole, delta), t.fractional}; } - static inline UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED DateTime64 + execute(DateTime64 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 scale = 0) + { + auto multiplier = DecimalUtils::scaleMultiplier(scale); + auto d = std::div(t, multiplier); + return time_zone.addYears(d.quot, delta) * multiplier + d.rem; + } + + static inline NO_SANITIZE_UNDEFINED UInt32 execute(UInt32 t, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addYears(t, delta); } - static inline UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED UInt16 execute(UInt16 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addYears(DayNum(d), delta); } - static inline Int32 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone) + static inline NO_SANITIZE_UNDEFINED Int32 execute(Int32 d, Int64 delta, const DateLUTImpl & time_zone, UInt16 = 0) { return time_zone.addYears(ExtendedDayNum(d), delta); } @@ -250,13 +439,16 @@ struct SubtractIntervalImpl : public Transform using Transform::Transform; template - inline NO_SANITIZE_UNDEFINED auto execute(T t, Int64 delta, const DateLUTImpl & time_zone) const + inline NO_SANITIZE_UNDEFINED auto execute(T t, Int64 delta, const DateLUTImpl & time_zone, UInt16 scale) const { /// Signed integer 
overflow is Ok. - return Transform::execute(t, -delta, time_zone); + return Transform::execute(t, -delta, time_zone, scale); } }; +struct SubtractNanosecondsImpl : SubtractIntervalImpl { static constexpr auto name = "subtractNanoseconds"; }; +struct SubtractMicrosecondsImpl : SubtractIntervalImpl { static constexpr auto name = "subtractMicroseconds"; }; +struct SubtractMillisecondsImpl : SubtractIntervalImpl { static constexpr auto name = "subtractMilliseconds"; }; struct SubtractSecondsImpl : SubtractIntervalImpl { static constexpr auto name = "subtractSeconds"; }; struct SubtractMinutesImpl : SubtractIntervalImpl { static constexpr auto name = "subtractMinutes"; }; struct SubtractHoursImpl : SubtractIntervalImpl { static constexpr auto name = "subtractHours"; }; @@ -277,17 +469,17 @@ struct Adder {} template - void NO_INLINE vectorConstant(const FromVectorType & vec_from, ToVectorType & vec_to, Int64 delta, const DateLUTImpl & time_zone) const + void NO_INLINE vectorConstant(const FromVectorType & vec_from, ToVectorType & vec_to, Int64 delta, const DateLUTImpl & time_zone, UInt16 scale) const { size_t size = vec_from.size(); vec_to.resize(size); for (size_t i = 0; i < size; ++i) - vec_to[i] = transform.execute(vec_from[i], checkOverflow(delta), time_zone); + vec_to[i] = transform.execute(vec_from[i], checkOverflow(delta), time_zone, scale); } template - void vectorVector(const FromVectorType & vec_from, ToVectorType & vec_to, const IColumn & delta, const DateLUTImpl & time_zone) const + void vectorVector(const FromVectorType & vec_from, ToVectorType & vec_to, const IColumn & delta, const DateLUTImpl & time_zone, UInt16 scale) const { size_t size = vec_from.size(); vec_to.resize(size); @@ -296,11 +488,11 @@ struct Adder ColumnUInt8, ColumnUInt16, ColumnUInt32, ColumnUInt64, ColumnInt8, ColumnInt16, ColumnInt32, ColumnInt64, ColumnFloat32, ColumnFloat64>( - &delta, [&](const auto & column){ vectorVector(vec_from, vec_to, column, time_zone, size); return true; }); + &delta, [&](const auto & column){ vectorVector(vec_from, vec_to, column, time_zone, scale, size); return true; }); } template - void constantVector(const FromType & from, ToVectorType & vec_to, const IColumn & delta, const DateLUTImpl & time_zone) const + void constantVector(const FromType & from, ToVectorType & vec_to, const IColumn & delta, const DateLUTImpl & time_zone, UInt16 scale) const { size_t size = delta.size(); vec_to.resize(size); @@ -309,7 +501,7 @@ struct Adder ColumnUInt8, ColumnUInt16, ColumnUInt32, ColumnUInt64, ColumnInt8, ColumnInt16, ColumnInt32, ColumnInt64, ColumnFloat32, ColumnFloat64>( - &delta, [&](const auto & column){ constantVector(from, vec_to, column, time_zone, size); return true; }); + &delta, [&](const auto & column){ constantVector(from, vec_to, column, time_zone, scale, size); return true; }); } private: @@ -325,18 +517,18 @@ private: template NO_INLINE NO_SANITIZE_UNDEFINED void vectorVector( - const FromVectorType & vec_from, ToVectorType & vec_to, const DeltaColumnType & delta, const DateLUTImpl & time_zone, size_t size) const + const FromVectorType & vec_from, ToVectorType & vec_to, const DeltaColumnType & delta, const DateLUTImpl & time_zone, UInt16 scale, size_t size) const { for (size_t i = 0; i < size; ++i) - vec_to[i] = transform.execute(vec_from[i], checkOverflow(delta.getData()[i]), time_zone); + vec_to[i] = transform.execute(vec_from[i], checkOverflow(delta.getData()[i]), time_zone, scale); } template NO_INLINE NO_SANITIZE_UNDEFINED void constantVector( - const FromType & from, 
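The Subtract*Impl one-liners reuse the corresponding Add transform through SubtractIntervalImpl, which only negates the delta and forwards the new scale argument. The pattern in miniature, with a toy add-seconds transform as the wrapped type:

#include <cassert>
#include <cstdint>

struct AddSecondsToy
{
    static int64_t execute(int64_t t, int64_t delta, uint16_t /*scale*/) { return t + delta; }
};

// Same idea as SubtractIntervalImpl<Transform>: reuse the "add" transform and
// flip the sign of delta, passing the scale argument through unchanged.
template <typename Transform>
struct SubtractToy : Transform
{
    static int64_t execute(int64_t t, int64_t delta, uint16_t scale)
    {
        return Transform::execute(t, -delta, scale);
    }
};

int main()
{
    assert(SubtractToy<AddSecondsToy>::execute(100, 30, 0) == 70);
}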
ToVectorType & vec_to, const DeltaColumnType & delta, const DateLUTImpl & time_zone, size_t size) const + const FromType & from, ToVectorType & vec_to, const DeltaColumnType & delta, const DateLUTImpl & time_zone, UInt16 scale, size_t size) const { for (size_t i = 0; i < size; ++i) - vec_to[i] = transform.execute(from, checkOverflow(delta.getData()[i]), time_zone); + vec_to[i] = transform.execute(from, checkOverflow(delta.getData()[i]), time_zone, scale); } }; @@ -344,7 +536,7 @@ private: template struct DateTimeAddIntervalImpl { - static ColumnPtr execute(Transform transform, const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type) + static ColumnPtr execute(Transform transform, const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, UInt16 scale = 0) { using FromValueType = typename FromDataType::FieldType; using FromColumnType = typename FromDataType::ColumnType; @@ -363,16 +555,15 @@ struct DateTimeAddIntervalImpl if (const auto * sources = checkAndGetColumn(source_col.get())) { if (const auto * delta_const_column = typeid_cast(&delta_column)) - op.vectorConstant(sources->getData(), col_to->getData(), delta_const_column->getInt(0), time_zone); + op.vectorConstant(sources->getData(), col_to->getData(), delta_const_column->getInt(0), time_zone, scale); else - op.vectorVector(sources->getData(), col_to->getData(), delta_column, time_zone); + op.vectorVector(sources->getData(), col_to->getData(), delta_column, time_zone, scale); } else if (const auto * sources_const = checkAndGetColumnConst(source_col.get())) { op.constantVector( sources_const->template getValue(), - col_to->getData(), - delta_column, time_zone); + col_to->getData(), delta_column, time_zone, scale); } else { @@ -463,18 +654,10 @@ public: } } - // TransformDateTime64 helps choosing correct overload of exec and does some transformations - // on input and output parameters to simplify support of DateTime64 in concrete Transform. - template - using TransformType = std::conditional_t< - std::is_same_v, - TransformDateTime64, - Transform>; - /// Helper templates to deduce return type based on argument type, since some overloads may promote or denote types, /// e.g. addSeconds(Date, 1) => DateTime template - using TransformExecuteReturnType = decltype(std::declval>().execute(FieldType(), 0, std::declval())); + using TransformExecuteReturnType = decltype(std::declval().execute(FieldType(), 0, std::declval(), 0)); // Deduces RETURN DataType from INPUT DataType, based on return type of Transform{}.execute(INPUT_TYPE, UInt64, DateLUTImpl). // e.g. 
for Transform-type that has execute()-overload with 'UInt16' input and 'UInt32' return, @@ -500,11 +683,33 @@ public: if (typeid_cast(arguments[0].type.get())) { const auto & datetime64_type = assert_cast(*arguments[0].type); - return std::make_shared(datetime64_type.getScale(), extractTimeZoneNameFromFunctionArguments(arguments, 2, 0)); + + auto from_scale = datetime64_type.getScale(); + auto scale = from_scale; + + if (std::is_same_v) + scale = 9; + else if (std::is_same_v) + scale = 6; + else if (std::is_same_v) + scale = 3; + + scale = std::max(scale, from_scale); + + return std::make_shared(scale, extractTimeZoneNameFromFunctionArguments(arguments, 2, 0)); } else { - return std::make_shared(DataTypeDateTime64::default_scale, extractTimeZoneNameFromFunctionArguments(arguments, 2, 0)); + auto scale = DataTypeDateTime64::default_scale; + + if (std::is_same_v) + scale = 9; + else if (std::is_same_v) + scale = 6; + else if (std::is_same_v) + scale = 3; + + return std::make_shared(scale, extractTimeZoneNameFromFunctionArguments(arguments, 2, 0)); } } else @@ -541,9 +746,9 @@ public: } else if (const auto * datetime64_type = assert_cast(from_type)) { - using WrappedTransformType = TransformType; - return DateTimeAddIntervalImpl, WrappedTransformType>::execute( - WrappedTransformType{datetime64_type->getScale()}, arguments, result_type); + auto from_scale = datetime64_type->getScale(); + return DateTimeAddIntervalImpl, Transform>::execute( + Transform{}, arguments, result_type, from_scale); } else throw Exception("Illegal type " + arguments[0].type->getName() + " of first argument of function " + getName(), diff --git a/src/Functions/FunctionDateOrDateTimeToSomething.h b/src/Functions/FunctionDateOrDateTimeToSomething.h index 00678e65364..5269eecea37 100644 --- a/src/Functions/FunctionDateOrDateTimeToSomething.h +++ b/src/Functions/FunctionDateOrDateTimeToSomething.h @@ -88,6 +88,20 @@ public: Int64 scale = DataTypeDateTime64::default_scale; if (const auto * dt64 = checkAndGetDataType(arguments[0].type.get())) scale = dt64->getScale(); + auto source_scale = scale; + + if constexpr (std::is_same_v) + { + scale = std::max(source_scale, static_cast(3)); + } + else if constexpr (std::is_same_v) + { + scale = std::max(source_scale, static_cast(6)); + } + else if constexpr (std::is_same_v) + { + scale = std::max(source_scale, static_cast(9)); + } return std::make_shared(scale, extractTimeZoneNameFromFunctionArguments(arguments, 1, 0)); } diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 4f5f6ae483f..7f8e9148032 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -112,6 +112,9 @@ void registerFunctionsConversion(FunctionFactory & factory) factory.registerFunction(); factory.registerFunction(); + factory.registerFunction>(); + factory.registerFunction>(); + factory.registerFunction>(); factory.registerFunction>(); factory.registerFunction>(); factory.registerFunction>(); diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index d1564008dfe..e098378f51a 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -1487,6 +1487,9 @@ struct NameToDecimal256 { static constexpr auto name = "toDecimal256"; }; static constexpr auto kind = IntervalKind::INTERVAL_KIND; \ }; +DEFINE_NAME_TO_INTERVAL(Nanosecond) +DEFINE_NAME_TO_INTERVAL(Microsecond) +DEFINE_NAME_TO_INTERVAL(Millisecond) DEFINE_NAME_TO_INTERVAL(Second) DEFINE_NAME_TO_INTERVAL(Minute) 
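getReturnTypeImpl() above picks the result scale for a DateTime64 argument as the larger of the source scale and the interval's natural precision (3, 6 or 9 for milli/micro/nanoseconds; unchanged for coarser units). The rule as a compile-time check:

#include <algorithm>
#include <cstdint>

enum class SubsecondKind { None, Milli, Micro, Nano };

// Result scale for adding/subtracting a sub-second interval: never narrower
// than the source, never narrower than the interval's own precision.
constexpr uint16_t resultScale(uint16_t from_scale, SubsecondKind kind)
{
    uint16_t interval_scale =
        kind == SubsecondKind::Nano  ? 9 :
        kind == SubsecondKind::Micro ? 6 :
        kind == SubsecondKind::Milli ? 3 : 0;
    return std::max(from_scale, interval_scale);
}

static_assert(resultScale(0, SubsecondKind::Milli) == 3); // addMilliseconds(DateTime64(0), ...) -> scale 3
static_assert(resultScale(6, SubsecondKind::Milli) == 6); // a finer source scale is kept
static_assert(resultScale(3, SubsecondKind::Nano) == 9);  // addNanoseconds always yields scale 9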
DEFINE_NAME_TO_INTERVAL(Hour) @@ -2703,13 +2706,10 @@ private: return createWrapper(from_type, to_type, requested_result_is_nullable); } - WrapperType createUInt8ToUInt8Wrapper(const DataTypePtr from_type, const DataTypePtr to_type) const + WrapperType createUInt8ToBoolWrapper(const DataTypePtr from_type, const DataTypePtr to_type) const { return [from_type, to_type] (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t /*input_rows_count*/) -> ColumnPtr { - if (isBool(from_type) || !isBool(to_type)) - return arguments.front().column; - /// Special case when we convert UInt8 column to Bool column. /// both columns have type UInt8, but we shouldn't use identity wrapper, /// because Bool column can contain only 0 and 1. @@ -3506,15 +3506,19 @@ private: /// 'requested_result_is_nullable' is true if CAST to Nullable type is requested. WrapperType prepareImpl(const DataTypePtr & from_type, const DataTypePtr & to_type, bool requested_result_is_nullable) const { - bool convert_to_ipv6 = to_type->getCustomName() && to_type->getCustomName()->getName() == "IPv6"; + if (isUInt8(from_type) && isBool(to_type)) + return createUInt8ToBoolWrapper(from_type, to_type); - if (from_type->equals(*to_type) && !convert_to_ipv6) - { - if (isUInt8(from_type)) - return createUInt8ToUInt8Wrapper(from_type, to_type); + /// We can cast IPv6 into IPv6, IPv4 into IPv4, but we should not allow to cast FixedString(16) into IPv6 as part of identity cast + bool safe_convert_custom_types = true; + if (const auto * to_type_custom_name = to_type->getCustomName()) + safe_convert_custom_types = from_type->getCustomName() && from_type->getCustomName()->getName() == to_type_custom_name->getName(); + else if (const auto * from_type_custom_name = from_type->getCustomName()) + safe_convert_custom_types = to_type->getCustomName() && from_type_custom_name->getName() == to_type->getCustomName()->getName(); + + if (from_type->equals(*to_type) && safe_convert_custom_types) return createIdentityWrapper(from_type); - } else if (WhichDataType(from_type).isNothing()) return createNothingWrapper(to_type.get()); diff --git a/src/Functions/FunctionsTimeWindow.cpp b/src/Functions/FunctionsTimeWindow.cpp index 79ce7356ee7..76844e2e6fb 100644 --- a/src/Functions/FunctionsTimeWindow.cpp +++ b/src/Functions/FunctionsTimeWindow.cpp @@ -20,6 +20,7 @@ namespace ErrorCodes extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ARGUMENT_OUT_OF_BOUND; + extern const int SYNTAX_ERROR; } namespace @@ -167,6 +168,13 @@ struct TimeWindowImpl switch (std::get<0>(interval)) { + //TODO: add proper support for fractional seconds +// case IntervalKind::Nanosecond: +// return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); +// case IntervalKind::Microsecond: +// return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); +// case IntervalKind::Millisecond: +// return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); case IntervalKind::Second: return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); case IntervalKind::Minute: @@ -183,6 +191,8 @@ struct TimeWindowImpl return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); case IntervalKind::Year: return executeTumble(*time_column_vec, std::get<1>(interval), time_zone); + default: + throw Exception("Fraction seconds are unsupported by windows yet", ErrorCodes::SYNTAX_ERROR); } __builtin_unreachable(); } @@ -350,6 +360,16 @@ struct TimeWindowImpl switch 
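The prepareImpl() change tightens the identity-cast shortcut: two types that compare equal may still differ in their custom names (FixedString(16) vs. IPv6), and only matching custom names may skip the real conversion. The predicate on its own, over a deliberately simplified type description:

#include <cassert>
#include <optional>
#include <string>

// Simplified view of a data type: the underlying (physical) type name plus an
// optional custom-type name such as "IPv6" layered on top of FixedString(16).
struct TypeInfo
{
    std::string underlying;
    std::optional<std::string> custom;
};

// Identity cast is only safe when either neither side has a custom name, or
// both custom names match; mirrors the safe_convert_custom_types check.
static bool identityCastAllowed(const TypeInfo & from, const TypeInfo & to)
{
    if (from.underlying != to.underlying)
        return false;
    if (to.custom)
        return from.custom && *from.custom == *to.custom;
    if (from.custom)
        return false; // `to` has no custom name but `from` does
    return true;
}

int main()
{
    assert(identityCastAllowed({"FixedString(16)", "IPv6"}, {"FixedString(16)", "IPv6"}));
    assert(!identityCastAllowed({"FixedString(16)", std::nullopt}, {"FixedString(16)", "IPv6"}));
    assert(!identityCastAllowed({"FixedString(16)", "IPv6"}, {"FixedString(16)", std::nullopt}));
}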
(std::get<0>(window_interval)) { + //TODO: add proper support for fractional seconds +// case IntervalKind::Nanosecond: +// return executeHop( +// *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); +// case IntervalKind::Microsecond: +// return executeHop( +// *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); +// case IntervalKind::Millisecond: +// return executeHop( +// *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); case IntervalKind::Second: return executeHop( *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); @@ -374,6 +394,8 @@ struct TimeWindowImpl case IntervalKind::Year: return executeHop( *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + default: + throw Exception("Fraction seconds are unsupported by windows yet", ErrorCodes::SYNTAX_ERROR); } __builtin_unreachable(); } @@ -487,6 +509,16 @@ struct TimeWindowImpl switch (std::get<0>(window_interval)) { + //TODO: add proper support for fractional seconds +// case IntervalKind::Nanosecond: +// return executeHopSlice( +// *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); +// case IntervalKind::Microsecond: +// return executeHopSlice( +// *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); +// case IntervalKind::Millisecond: +// return executeHopSlice( +// *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); case IntervalKind::Second: return executeHopSlice( *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); @@ -511,6 +543,8 @@ struct TimeWindowImpl case IntervalKind::Year: return executeHopSlice( *time_column_vec, std::get<1>(hop_interval), std::get<1>(window_interval), time_zone); + default: + throw Exception("Fraction seconds are unsupported by windows yet", ErrorCodes::SYNTAX_ERROR); } __builtin_unreachable(); } diff --git a/src/Functions/FunctionsTimeWindow.h b/src/Functions/FunctionsTimeWindow.h index 313de10702d..3ea397e4c7d 100644 --- a/src/Functions/FunctionsTimeWindow.h +++ b/src/Functions/FunctionsTimeWindow.h @@ -80,7 +80,32 @@ struct ToStartOfTransform; TRANSFORM_TIME(Hour) TRANSFORM_TIME(Minute) TRANSFORM_TIME(Second) -#undef TRANSFORM_DATE +#undef TRANSFORM_TIME + +#define TRANSFORM_SUBSECONDS(INTERVAL_KIND, DEF_SCALE) \ +template<> \ + struct ToStartOfTransform \ + { \ + static Int64 execute(Int64 t, UInt64 delta, const UInt32 scale) \ + { \ + if (scale <= DEF_SCALE) \ + { \ + auto val = t * DecimalUtils::scaleMultiplier(DEF_SCALE - scale); \ + if (delta == 1) \ + return val; \ + else \ + return val - (val % delta); \ + } \ + else \ + { \ + return t - (t % (delta * DecimalUtils::scaleMultiplier(scale - DEF_SCALE))) ; \ + } \ + } \ + }; + TRANSFORM_SUBSECONDS(Millisecond, 3) + TRANSFORM_SUBSECONDS(Microsecond, 6) + TRANSFORM_SUBSECONDS(Nanosecond, 9) +#undef TRANSFORM_SUBSECONDS template struct AddTime; @@ -117,6 +142,25 @@ struct ToStartOfTransform; ADD_TIME(Second, 1) #undef ADD_TIME +#define ADD_SUBSECONDS(INTERVAL_KIND, DEF_SCALE) \ +template <> \ + struct AddTime \ + { \ + static inline NO_SANITIZE_UNDEFINED Int64 execute(Int64 t, UInt64 delta, const UInt32 scale) \ + { \ + if (scale < DEF_SCALE) \ + { \ + return t + delta * DecimalUtils::scaleMultiplier(DEF_SCALE - scale); \ + } \ + else \ + return t + delta * DecimalUtils::scaleMultiplier(scale - DEF_SCALE); \ + } \ + }; + 
ADD_SUBSECONDS(Millisecond, 3) + ADD_SUBSECONDS(Microsecond, 6) + ADD_SUBSECONDS(Nanosecond, 9) +#undef ADD_SUBSECONDS + template struct TimeWindowImpl { diff --git a/src/Functions/SubtractSubSeconds.cpp b/src/Functions/SubtractSubSeconds.cpp new file mode 100644 index 00000000000..5eeb24c8748 --- /dev/null +++ b/src/Functions/SubtractSubSeconds.cpp @@ -0,0 +1,28 @@ +#include +#include + + +namespace DB +{ + +using FunctionSubtractNanoseconds = FunctionDateOrDateTimeAddInterval; +void registerFunctionSubtractNanoseconds(FunctionFactory & factory) +{ + factory.registerFunction(); +}; + +using FunctionSubtractMicroseconds = FunctionDateOrDateTimeAddInterval; +void registerFunctionSubtractMicroseconds(FunctionFactory & factory) +{ + factory.registerFunction(); +}; + +using FunctionSubtractMilliseconds = FunctionDateOrDateTimeAddInterval; +void registerFunctionSubtractMilliseconds(FunctionFactory & factory) +{ + factory.registerFunction(); +}; + +} + + diff --git a/src/Functions/TransformDateTime64.h b/src/Functions/TransformDateTime64.h index b05bdab65ad..9ac28118b8f 100644 --- a/src/Functions/TransformDateTime64.h +++ b/src/Functions/TransformDateTime64.h @@ -13,7 +13,7 @@ namespace DB * * DateTime64 value and scale factor (2) * * DateTime64 broken down to components, result of execute is then re-assembled back into DateTime64 value (3) * - * Suitable Transfotm-types are commonly used in Date/DateTime manipulation functions, + * Suitable Transform-types are commonly used in Date/DateTime manipulation functions, * and should implement static (or const) function with following signatures: * 1: * R execute(Int64 whole_value, ... ) diff --git a/src/Functions/addSubSeconds.cpp b/src/Functions/addSubSeconds.cpp new file mode 100644 index 00000000000..f58f8b20b99 --- /dev/null +++ b/src/Functions/addSubSeconds.cpp @@ -0,0 +1,28 @@ +#include +#include + + +namespace DB +{ + +using FunctionAddNanoseconds = FunctionDateOrDateTimeAddInterval; +void registerFunctionAddNanoseconds(FunctionFactory & factory) +{ + factory.registerFunction(); +}; + +using FunctionAddMicroseconds = FunctionDateOrDateTimeAddInterval; +void registerFunctionAddMicroseconds(FunctionFactory & factory) +{ + factory.registerFunction(); +}; + +using FunctionAddMilliseconds = FunctionDateOrDateTimeAddInterval; +void registerFunctionAddMilliseconds(FunctionFactory & factory) +{ + factory.registerFunction(); +}; + +} + + diff --git a/src/Functions/addressToLineWithInlines.cpp b/src/Functions/addressToLineWithInlines.cpp index c3e62bd802e..4877268989d 100644 --- a/src/Functions/addressToLineWithInlines.cpp +++ b/src/Functions/addressToLineWithInlines.cpp @@ -75,7 +75,7 @@ private: writeChar(':', out); writeIntText(location.line, out); - if (frame) + if (frame && frame->name != nullptr) { writeChar(':', out); int status = 0; diff --git a/src/Functions/caseWithExpression.cpp b/src/Functions/caseWithExpression.cpp index 37ee89c1f11..e06a01431da 100644 --- a/src/Functions/caseWithExpression.cpp +++ b/src/Functions/caseWithExpression.cpp @@ -43,6 +43,9 @@ public: for (size_t i = 2; i < args.size() - 1; i += 2) dst_array_types.push_back(args[i]); + // Type of the ELSE branch + dst_array_types.push_back(args.back()); + return getLeastSupertype(dst_array_types); } diff --git a/src/Functions/if.cpp b/src/Functions/if.cpp index 730612745ef..82448966b8c 100644 --- a/src/Functions/if.cpp +++ b/src/Functions/if.cpp @@ -894,13 +894,20 @@ private: /// If then is NULL, we create Nullable column with null mask OR-ed with condition. 
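The caseWithExpression fix is about the result type: the ELSE branch used to be left out of the getLeastSupertype() call, so CASE ... THEN <Int32> ELSE <Float64> END could come out as Int32. A toy illustration of why the ELSE type has to participate; leastSupertype below is a deliberately crude stand-in for the real type lattice:

#include <cassert>
#include <string>
#include <vector>

// Crude stand-in for getLeastSupertype over a few numeric type names.
static std::string leastSupertype(const std::vector<std::string> & types)
{
    for (const auto & t : types)
        if (t == "Float64")
            return "Float64";
    for (const auto & t : types)
        if (t == "Int64")
            return "Int64";
    return "Int32";
}

int main()
{
    std::vector<std::string> branch_types = {"Int32", "Int32"}; // THEN branches
    branch_types.push_back("Float64");                          // ELSE branch, as the fix adds
    assert(leastSupertype(branch_types) == "Float64");
}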
if (then_is_null) { + ColumnPtr arg_else_column; + /// In case when arg_else column type differs with result + /// column type we should cast it to result type. + if (removeNullable(arg_else.type)->getName() != removeNullable(result_type)->getName()) + arg_else_column = castColumn(arg_else, result_type); + else + arg_else_column = arg_else.column; + if (cond_col) { - auto arg_else_column = arg_else.column; auto result_column = IColumn::mutate(std::move(arg_else_column)); if (else_is_short) result_column->expand(cond_col->getData(), true); - if (isColumnNullable(*arg_else.column)) + if (isColumnNullable(*result_column)) { assert_cast(*result_column).applyNullMap(assert_cast(*arg_cond.column)); return result_column; @@ -913,7 +920,7 @@ private: if (cond_const_col->getValue()) return result_type->createColumn()->cloneResized(input_rows_count); else - return makeNullableColumnIfNot(arg_else.column); + return makeNullableColumnIfNot(arg_else_column); } else throw Exception("Illegal column " + arg_cond.column->getName() + " of first argument of function " + getName() @@ -924,14 +931,21 @@ private: /// If else is NULL, we create Nullable column with null mask OR-ed with negated condition. if (else_is_null) { + ColumnPtr arg_then_column; + /// In case when arg_then column type differs with result + /// column type we should cast it to result type. + if (removeNullable(arg_then.type)->getName() != removeNullable(result_type)->getName()) + arg_then_column = castColumn(arg_then, result_type); + else + arg_then_column = arg_then.column; + if (cond_col) { - auto arg_then_column = arg_then.column; auto result_column = IColumn::mutate(std::move(arg_then_column)); if (then_is_short) result_column->expand(cond_col->getData(), false); - if (isColumnNullable(*arg_then.column)) + if (isColumnNullable(*result_column)) { assert_cast(*result_column).applyNegatedNullMap(assert_cast(*arg_cond.column)); return result_column; @@ -954,7 +968,7 @@ private: else if (cond_const_col) { if (cond_const_col->getValue()) - return makeNullableColumnIfNot(arg_then.column); + return makeNullableColumnIfNot(arg_then_column); else return result_type->createColumn()->cloneResized(input_rows_count); } diff --git a/src/Functions/yandexConsistentHash.cpp b/src/Functions/kostikConsistentHash.cpp similarity index 59% rename from src/Functions/yandexConsistentHash.cpp rename to src/Functions/kostikConsistentHash.cpp index 58617e29af7..a38c3c965d8 100644 --- a/src/Functions/yandexConsistentHash.cpp +++ b/src/Functions/kostikConsistentHash.cpp @@ -7,9 +7,9 @@ namespace DB { /// An O(1) time and space consistent hash algorithm by Konstantin Oblakov -struct YandexConsistentHashImpl +struct KostikConsistentHashImpl { - static constexpr auto name = "yandexConsistentHash"; + static constexpr auto name = "kostikConsistentHash"; using HashType = UInt64; /// Actually it supports UInt64, but it is efficient only if n <= 32768 @@ -23,12 +23,12 @@ struct YandexConsistentHashImpl } }; -using FunctionYandexConsistentHash = FunctionConsistentHashImpl; +using FunctionKostikConsistentHash = FunctionConsistentHashImpl; -void registerFunctionYandexConsistentHash(FunctionFactory & factory) +void registerFunctionKostikConsistentHash(FunctionFactory & factory) { - factory.registerFunction(); + factory.registerFunction(); + factory.registerAlias("yandexConsistentHash", "kostikConsistentHash"); } } - diff --git a/src/Functions/registerFunctionsConsistentHashing.cpp b/src/Functions/registerFunctionsConsistentHashing.cpp index d4d740bc92f..84a78cd6765 100644 --- 
a/src/Functions/registerFunctionsConsistentHashing.cpp +++ b/src/Functions/registerFunctionsConsistentHashing.cpp @@ -2,12 +2,12 @@ namespace DB { class FunctionFactory; -void registerFunctionYandexConsistentHash(FunctionFactory & factory); +void registerFunctionKostikConsistentHash(FunctionFactory & factory); void registerFunctionJumpConsistentHash(FunctionFactory & factory); void registerFunctionsConsistentHashing(FunctionFactory & factory) { - registerFunctionYandexConsistentHash(factory); + registerFunctionKostikConsistentHash(factory); registerFunctionJumpConsistentHash(factory); } diff --git a/src/Functions/registerFunctionsDateTime.cpp b/src/Functions/registerFunctionsDateTime.cpp index 5211a62ff1e..dd7b67c47ac 100644 --- a/src/Functions/registerFunctionsDateTime.cpp +++ b/src/Functions/registerFunctionsDateTime.cpp @@ -11,6 +11,9 @@ void registerFunctionToDayOfWeek(FunctionFactory &); void registerFunctionToDayOfYear(FunctionFactory &); void registerFunctionToHour(FunctionFactory &); void registerFunctionToMinute(FunctionFactory &); +void registerFunctionToStartOfNanosecond(FunctionFactory &); +void registerFunctionToStartOfMicrosecond(FunctionFactory &); +void registerFunctionToStartOfMillisecond(FunctionFactory &); void registerFunctionToStartOfSecond(FunctionFactory &); void registerFunctionToSecond(FunctionFactory &); void registerFunctionToStartOfDay(FunctionFactory &); @@ -47,6 +50,9 @@ void registerFunctionTimeSlots(FunctionFactory &); void registerFunctionToYYYYMM(FunctionFactory &); void registerFunctionToYYYYMMDD(FunctionFactory &); void registerFunctionToYYYYMMDDhhmmss(FunctionFactory &); +void registerFunctionAddNanoseconds(FunctionFactory &); +void registerFunctionAddMicroseconds(FunctionFactory &); +void registerFunctionAddMilliseconds(FunctionFactory &); void registerFunctionAddSeconds(FunctionFactory &); void registerFunctionAddMinutes(FunctionFactory &); void registerFunctionAddHours(FunctionFactory &); @@ -55,6 +61,9 @@ void registerFunctionAddWeeks(FunctionFactory &); void registerFunctionAddMonths(FunctionFactory &); void registerFunctionAddQuarters(FunctionFactory &); void registerFunctionAddYears(FunctionFactory &); +void registerFunctionSubtractNanoseconds(FunctionFactory &); +void registerFunctionSubtractMicroseconds(FunctionFactory &); +void registerFunctionSubtractMilliseconds(FunctionFactory &); void registerFunctionSubtractSeconds(FunctionFactory &); void registerFunctionSubtractMinutes(FunctionFactory &); void registerFunctionSubtractHours(FunctionFactory &); @@ -93,6 +102,9 @@ void registerFunctionsDateTime(FunctionFactory & factory) registerFunctionToStartOfMonth(factory); registerFunctionToStartOfQuarter(factory); registerFunctionToStartOfYear(factory); + registerFunctionToStartOfNanosecond(factory); + registerFunctionToStartOfMicrosecond(factory); + registerFunctionToStartOfMillisecond(factory); registerFunctionToStartOfSecond(factory); registerFunctionToStartOfMinute(factory); registerFunctionToStartOfFiveMinute(factory); @@ -119,6 +131,9 @@ void registerFunctionsDateTime(FunctionFactory & factory) registerFunctionToYYYYMM(factory); registerFunctionToYYYYMMDD(factory); registerFunctionToYYYYMMDDhhmmss(factory); + registerFunctionAddNanoseconds(factory); + registerFunctionAddMicroseconds(factory); + registerFunctionAddMilliseconds(factory); registerFunctionAddSeconds(factory); registerFunctionAddMinutes(factory); registerFunctionAddHours(factory); @@ -127,6 +142,9 @@ void registerFunctionsDateTime(FunctionFactory & factory) 
registerFunctionAddMonths(factory); registerFunctionAddQuarters(factory); registerFunctionAddYears(factory); + registerFunctionSubtractNanoseconds(factory); + registerFunctionSubtractMicroseconds(factory); + registerFunctionSubtractMilliseconds(factory); registerFunctionSubtractSeconds(factory); registerFunctionSubtractMinutes(factory); registerFunctionSubtractHours(factory); diff --git a/src/Functions/throwIf.cpp b/src/Functions/throwIf.cpp index 7533e30c9b9..d2c1e7d2d55 100644 --- a/src/Functions/throwIf.cpp +++ b/src/Functions/throwIf.cpp @@ -131,8 +131,10 @@ public: message.value_or("Value passed to '" + getName() + "' function is non zero")); } + size_t result_size = in_untyped->size(); + /// We return non constant to avoid constant folding. - return ColumnUInt8::create(in_data.size(), 0); + return ColumnUInt8::create(result_size, 0); } return nullptr; diff --git a/src/Functions/toStartOfInterval.cpp b/src/Functions/toStartOfInterval.cpp index 09b7931de8d..bff33f9b061 100644 --- a/src/Functions/toStartOfInterval.cpp +++ b/src/Functions/toStartOfInterval.cpp @@ -33,184 +33,273 @@ namespace template <> struct Transform { - static constexpr auto name = function_name; - - static UInt16 execute(UInt16 d, UInt64 years, const DateLUTImpl & time_zone) + static UInt16 execute(UInt16 d, Int64 years, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfYearInterval(DayNum(d), years); } - static UInt16 execute(Int32 d, UInt64 years, const DateLUTImpl & time_zone) + static UInt16 execute(Int32 d, Int64 years, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfYearInterval(ExtendedDayNum(d), years); } - static UInt16 execute(UInt32 t, UInt64 years, const DateLUTImpl & time_zone) + static UInt16 execute(UInt32 t, Int64 years, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfYearInterval(time_zone.toDayNum(t), years); } - static UInt16 execute(Int64 t, UInt64 years, const DateLUTImpl & time_zone) + static UInt16 execute(Int64 t, Int64 years, const DateLUTImpl & time_zone, Int64 scale_multiplier) { - return time_zone.toStartOfYearInterval(time_zone.toDayNum(t), years); + return time_zone.toStartOfYearInterval(time_zone.toDayNum(t / scale_multiplier), years); } }; template <> struct Transform { - static constexpr auto name = function_name; - - static UInt16 execute(UInt16 d, UInt64 quarters, const DateLUTImpl & time_zone) + static UInt16 execute(UInt16 d, Int64 quarters, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfQuarterInterval(DayNum(d), quarters); } - static UInt16 execute(Int32 d, UInt64 quarters, const DateLUTImpl & time_zone) + static UInt16 execute(Int32 d, Int64 quarters, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfQuarterInterval(ExtendedDayNum(d), quarters); } - static UInt16 execute(UInt32 t, UInt64 quarters, const DateLUTImpl & time_zone) + static UInt16 execute(UInt32 t, Int64 quarters, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t), quarters); } - static UInt16 execute(Int64 t, UInt64 quarters, const DateLUTImpl & time_zone) + static UInt16 execute(Int64 t, Int64 quarters, const DateLUTImpl & time_zone, Int64 scale_multiplier) { - return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t), quarters); + return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t / scale_multiplier), quarters); } }; template <> struct Transform { - static constexpr auto name = function_name; - - static UInt16 execute(UInt16 d, UInt64 months, const 
DateLUTImpl & time_zone) + static UInt16 execute(UInt16 d, Int64 months, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfMonthInterval(DayNum(d), months); } - static UInt16 execute(Int32 d, UInt64 months, const DateLUTImpl & time_zone) + static UInt16 execute(Int32 d, Int64 months, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfMonthInterval(ExtendedDayNum(d), months); } - static UInt16 execute(UInt32 t, UInt64 months, const DateLUTImpl & time_zone) + static UInt16 execute(UInt32 t, Int64 months, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t), months); } - static UInt16 execute(Int64 t, UInt64 months, const DateLUTImpl & time_zone) + static UInt16 execute(Int64 t, Int64 months, const DateLUTImpl & time_zone, Int64 scale_multiplier) { - return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t), months); + return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t / scale_multiplier), months); } }; template <> struct Transform { - static constexpr auto name = function_name; - - static UInt16 execute(UInt16 d, UInt64 weeks, const DateLUTImpl & time_zone) + static UInt16 execute(UInt16 d, Int64 weeks, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfWeekInterval(DayNum(d), weeks); } - static UInt16 execute(Int32 d, UInt64 weeks, const DateLUTImpl & time_zone) + static UInt16 execute(Int32 d, Int64 weeks, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfWeekInterval(ExtendedDayNum(d), weeks); } - static UInt16 execute(UInt32 t, UInt64 weeks, const DateLUTImpl & time_zone) + static UInt16 execute(UInt32 t, Int64 weeks, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t), weeks); } - static UInt16 execute(Int64 t, UInt64 weeks, const DateLUTImpl & time_zone) + static UInt16 execute(Int64 t, Int64 weeks, const DateLUTImpl & time_zone, Int64 scale_multiplier) { - return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t), weeks); + return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t / scale_multiplier), weeks); } }; template <> struct Transform { - static constexpr auto name = function_name; - - static UInt32 execute(UInt16 d, UInt64 days, const DateLUTImpl & time_zone) + static UInt32 execute(UInt16 d, Int64 days, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfDayInterval(ExtendedDayNum(d), days); } - static UInt32 execute(Int32 d, UInt64 days, const DateLUTImpl & time_zone) + static UInt32 execute(Int32 d, Int64 days, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfDayInterval(ExtendedDayNum(d), days); } - static UInt32 execute(UInt32 t, UInt64 days, const DateLUTImpl & time_zone) + static UInt32 execute(UInt32 t, Int64 days, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfDayInterval(time_zone.toDayNum(t), days); } - static UInt32 execute(Int64 t, UInt64 days, const DateLUTImpl & time_zone) + static Int64 execute(Int64 t, Int64 days, const DateLUTImpl & time_zone, Int64 scale_multiplier) { - return time_zone.toStartOfDayInterval(time_zone.toDayNum(t), days); + return time_zone.toStartOfDayInterval(time_zone.toDayNum(t / scale_multiplier), days); } }; template <> struct Transform { - static constexpr auto name = function_name; + static UInt32 execute(UInt16, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } - static UInt32 execute(UInt16, UInt64, const DateLUTImpl &) { return dateIsNotSupported(function_name); } - static 
UInt32 execute(Int32, UInt64, const DateLUTImpl &) { return dateIsNotSupported(function_name); } - static UInt32 execute(UInt32 t, UInt64 hours, const DateLUTImpl & time_zone) { return time_zone.toStartOfHourInterval(t, hours); } - static UInt32 execute(Int64 t, UInt64 hours, const DateLUTImpl & time_zone) { return time_zone.toStartOfHourInterval(t, hours); } + static UInt32 execute(Int32, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } + + static UInt32 execute(UInt32 t, Int64 hours, const DateLUTImpl & time_zone, Int64) + { + return time_zone.toStartOfHourInterval(t, hours); + } + + static UInt32 execute(Int64 t, Int64 hours, const DateLUTImpl & time_zone, Int64 scale_multiplier) + { + return time_zone.toStartOfHourInterval(t / scale_multiplier, hours); + } }; template <> struct Transform { - static constexpr auto name = function_name; + static UInt32 execute(UInt16, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } - static UInt32 execute(UInt16, UInt64, const DateLUTImpl &) { return dateIsNotSupported(function_name); } + static UInt32 execute(Int32, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } - static UInt32 execute(Int32, UInt64, const DateLUTImpl &) { return dateIsNotSupported(function_name); } - - static UInt32 execute(UInt32 t, UInt64 minutes, const DateLUTImpl & time_zone) + static UInt32 execute(UInt32 t, Int64 minutes, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfMinuteInterval(t, minutes); } - static UInt32 execute(Int64 t, UInt64 minutes, const DateLUTImpl & time_zone) + static UInt32 execute(Int64 t, Int64 minutes, const DateLUTImpl & time_zone, Int64 scale_multiplier) { - return time_zone.toStartOfMinuteInterval(t, minutes); + return time_zone.toStartOfMinuteInterval(t / scale_multiplier, minutes); } }; template <> struct Transform { - static constexpr auto name = function_name; + static UInt32 execute(UInt16, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } - static UInt32 execute(UInt16, UInt64, const DateLUTImpl &) { return dateIsNotSupported(function_name); } + static UInt32 execute(Int32, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } - static UInt32 execute(Int32, UInt64, const DateLUTImpl &) { return dateIsNotSupported(function_name); } - - static UInt32 execute(UInt32 t, UInt64 seconds, const DateLUTImpl & time_zone) + static UInt32 execute(UInt32 t, Int64 seconds, const DateLUTImpl & time_zone, Int64) { return time_zone.toStartOfSecondInterval(t, seconds); } - static Int64 execute(Int64 t, UInt64 seconds, const DateLUTImpl & time_zone) + static UInt32 execute(Int64 t, Int64 seconds, const DateLUTImpl & time_zone, Int64 scale_multiplier) { - return time_zone.toStartOfSecondInterval(t, seconds); + return time_zone.toStartOfSecondInterval(t / scale_multiplier, seconds); } }; + template <> + struct Transform + { + static UInt32 execute(UInt16, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } + + static UInt32 execute(Int32, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } + + static UInt32 execute(UInt32, Int64, const DateLUTImpl &, Int64) { return dateTimeIsNotSupported(function_name); } + + static Int64 execute(Int64 t, Int64 milliseconds, const DateLUTImpl &, Int64 scale_multiplier) + { + if (scale_multiplier < 1000) + { + Int64 t_milliseconds = t * (static_cast(1000) / scale_multiplier); + if (likely(t >= 0)) + return 
t_milliseconds / milliseconds * milliseconds; + else + return ((t_milliseconds + 1) / milliseconds - 1) * milliseconds; + } + else if (scale_multiplier > 1000) + { + Int64 scale_diff = scale_multiplier / static_cast(1000); + if (likely(t >= 0)) + return t / milliseconds / scale_diff * milliseconds; + else + return ((t + 1) / milliseconds / scale_diff - 1) * milliseconds; + } + else + if (likely(t >= 0)) + return t / milliseconds * milliseconds; + else + return ((t + 1) / milliseconds - 1) * milliseconds; + } + }; + + template <> + struct Transform + { + static UInt32 execute(UInt16, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } + + static UInt32 execute(Int32, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } + + static UInt32 execute(UInt32, Int64, const DateLUTImpl &, Int64) { return dateTimeIsNotSupported(function_name); } + + static Int64 execute(Int64 t, Int64 microseconds, const DateLUTImpl &, Int64 scale_multiplier) + { + if (scale_multiplier < 1000000) + { + Int64 t_microseconds = t * (static_cast(1000000) / scale_multiplier); + if (likely(t >= 0)) + return t_microseconds / microseconds * microseconds; + else + return ((t_microseconds + 1) / microseconds - 1) * microseconds; + } + else if (scale_multiplier > 1000000) + { + Int64 scale_diff = scale_multiplier / static_cast(1000000); + if (likely(t >= 0)) + return t / microseconds / scale_diff * microseconds; + else + return ((t + 1) / microseconds / scale_diff - 1) * microseconds; + } + else + if (likely(t >= 0)) + return t / microseconds * microseconds; + else + return ((t + 1) / microseconds - 1) * microseconds; + } + }; + + template <> + struct Transform + { + static UInt32 execute(UInt16, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } + + static UInt32 execute(Int32, Int64, const DateLUTImpl &, Int64) { return dateIsNotSupported(function_name); } + + static UInt32 execute(UInt32, Int64, const DateLUTImpl &, Int64) { return dateTimeIsNotSupported(function_name); } + + static Int64 execute(Int64 t, Int64 nanoseconds, const DateLUTImpl &, Int64 scale_multiplier) + { + if (scale_multiplier < 1000000000) + { + Int64 t_nanoseconds = t * (static_cast(1000000000) / scale_multiplier); + if (likely(t >= 0)) + return t_nanoseconds / nanoseconds * nanoseconds; + else + return ((t_nanoseconds + 1) / nanoseconds - 1) * nanoseconds; + } + else + if (likely(t >= 0)) + return t / nanoseconds * nanoseconds; + else + return ((t + 1) / nanoseconds - 1) * nanoseconds; + } + }; class FunctionToStartOfInterval : public IFunction { @@ -240,6 +329,7 @@ public: const DataTypeInterval * interval_type = nullptr; bool result_type_is_date = false; + bool result_type_is_datetime = false; auto check_interval_argument = [&] { interval_type = checkAndGetDataType(arguments[1].type.get()); @@ -251,6 +341,8 @@ public: result_type_is_date = (interval_type->getKind() == IntervalKind::Year) || (interval_type->getKind() == IntervalKind::Quarter) || (interval_type->getKind() == IntervalKind::Month) || (interval_type->getKind() == IntervalKind::Week); + result_type_is_datetime = (interval_type->getKind() == IntervalKind::Day) || (interval_type->getKind() == IntervalKind::Hour) + || (interval_type->getKind() == IntervalKind::Minute) || (interval_type->getKind() == IntervalKind::Second); }; auto check_timezone_argument = [&] @@ -263,7 +355,7 @@ public: if (first_argument_is_date && result_type_is_date) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The timezone 
argument of function {} with interval type {} is allowed only when the 1st argument " - "has the type DateTime", + "has the type DateTime or DateTime64", getName(), interval_type->getKind().toString()); }; @@ -288,19 +380,33 @@ public: if (result_type_is_date) return std::make_shared(); - else + else if (result_type_is_datetime) return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0)); + else + { + auto scale = 0; + + if (interval_type->getKind() == IntervalKind::Nanosecond) + scale = 9; + else if (interval_type->getKind() == IntervalKind::Microsecond) + scale = 6; + else if (interval_type->getKind() == IntervalKind::Millisecond) + scale = 3; + + return std::make_shared(scale, extractTimeZoneNameFromFunctionArguments(arguments, 2, 0)); + } + } bool useDefaultImplementationForConstants() const override { return true; } ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /* input_rows_count */) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /* input_rows_count */) const override { const auto & time_column = arguments[0]; const auto & interval_column = arguments[1]; const auto & time_zone = extractTimeZoneFromFunctionArguments(arguments, 2, 0); - auto result_column = dispatchForColumns(time_column, interval_column, time_zone); + auto result_column = dispatchForColumns(time_column, interval_column, result_type, time_zone); return result_column; } @@ -316,33 +422,36 @@ public: private: ColumnPtr dispatchForColumns( - const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column, const DateLUTImpl & time_zone) const + const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column, const DataTypePtr & result_type, const DateLUTImpl & time_zone) const { const auto & from_datatype = *time_column.type.get(); const auto which_type = WhichDataType(from_datatype); + + if (which_type.isDateTime64()) + { + const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); + auto scale = assert_cast(from_datatype).getScale(); + + if (time_column_vec) + return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, result_type, time_zone, scale); + } if (which_type.isDateTime()) { const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, time_zone); + return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, result_type, time_zone); } if (which_type.isDate()) { const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, time_zone); + return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, result_type, time_zone); } if (which_type.isDate32()) { const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, time_zone); - } - if (which_type.isDateTime64()) - { - const auto * time_column_vec = checkAndGetColumn(time_column.column.get()); - if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(from_datatype), 
*time_column_vec, interval_column, time_zone); + return dispatchForIntervalColumn(assert_cast(from_datatype), *time_column_vec, interval_column, result_type, time_zone); } throw Exception( "Illegal column for first argument of function " + getName() + ". Must contain dates or dates with time", @@ -351,7 +460,8 @@ private: template ColumnPtr dispatchForIntervalColumn( - const FromDataType & from, const ColumnType & time_column, const ColumnWithTypeAndName & interval_column, const DateLUTImpl & time_zone) const + const FromDataType & from, const ColumnType & time_column, const ColumnWithTypeAndName & interval_column, + const DataTypePtr & result_type, const DateLUTImpl & time_zone, const UInt16 scale = 1) const { const auto * interval_type = checkAndGetDataType(interval_column.type.get()); if (!interval_type) @@ -368,49 +478,52 @@ private: switch (interval_type->getKind()) { + case IntervalKind::Nanosecond: + return execute(from, time_column, num_units, result_type, time_zone, scale); + case IntervalKind::Microsecond: + return execute(from, time_column, num_units, result_type, time_zone, scale); + case IntervalKind::Millisecond: + return execute(from, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Second: - return execute(from, time_column, num_units, time_zone); + return execute(from, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Minute: - return execute(from, time_column, num_units, time_zone); + return execute(from, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Hour: - return execute(from, time_column, num_units, time_zone); + return execute(from, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Day: - return execute(from, time_column, num_units, time_zone); + return execute(from, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Week: - return execute(from, time_column, num_units, time_zone); + return execute(from, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Month: - return execute(from, time_column, num_units, time_zone); + return execute(from, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Quarter: - return execute(from, time_column, num_units, time_zone); + return execute(from, time_column, num_units, result_type, time_zone, scale); case IntervalKind::Year: - return execute(from, time_column, num_units, time_zone); + return execute(from, time_column, num_units, result_type, time_zone, scale); } __builtin_unreachable(); } - - template - ColumnPtr execute(const FromDataType & from_datatype, const ColumnType & time_column, UInt64 num_units, const DateLUTImpl & time_zone) const + template + ColumnPtr execute(const FromDataType &, const ColumnType & time_column_type, Int64 num_units, const DataTypePtr & result_type, const DateLUTImpl & time_zone, const UInt16 scale) const { - const auto & time_data = time_column.getData(); - size_t size = time_column.size(); - auto result = ColumnVector::create(); - auto & result_data = result->getData(); + using ToColumnType = typename ToDataType::ColumnType; + + const auto & time_data = time_column_type.getData(); + size_t size = time_data.size(); + + auto result_col = result_type->createColumn(); + auto *col_to = assert_cast(result_col.get()); + auto & result_data = col_to->getData(); result_data.resize(size); - if constexpr (std::is_same_v) - { - const auto transform = TransformDateTime64>{from_datatype.getScale()}; - for (size_t i = 0; i != size; ++i) - 
result_data[i] = transform.execute(time_data[i], num_units, time_zone); - } - else - { - for (size_t i = 0; i != size; ++i) - result_data[i] = Transform::execute(time_data[i], num_units, time_zone); - } - return result; + Int64 scale_multiplier = DecimalUtils::scaleMultiplier(scale); + + for (size_t i = 0; i != size; ++i) + result_data[i] = Transform::execute(time_data[i], num_units, time_zone, scale_multiplier); + + return result_col; } }; diff --git a/src/Functions/toStartOfSubsecond.cpp b/src/Functions/toStartOfSubsecond.cpp new file mode 100644 index 00000000000..b2257c5e3cd --- /dev/null +++ b/src/Functions/toStartOfSubsecond.cpp @@ -0,0 +1,30 @@ +#include +#include +#include + + +namespace DB +{ + +using FunctionToStartOfMillisecond = FunctionDateOrDateTimeToSomething; + +void registerFunctionToStartOfMillisecond(FunctionFactory & factory) +{ + factory.registerFunction(); +} + +using FunctionToStartOfMicrosecond = FunctionDateOrDateTimeToSomething; + +void registerFunctionToStartOfMicrosecond(FunctionFactory & factory) +{ + factory.registerFunction(); +} + +using FunctionToStartOfNanosecond = FunctionDateOrDateTimeToSomething; + +void registerFunctionToStartOfNanosecond(FunctionFactory & factory) +{ + factory.registerFunction(); +} + +} diff --git a/src/IO/Archives/IArchiveReader.h b/src/IO/Archives/IArchiveReader.h index 584e80a7d09..b5c474977bf 100644 --- a/src/IO/Archives/IArchiveReader.h +++ b/src/IO/Archives/IArchiveReader.h @@ -23,7 +23,6 @@ public: { UInt64 uncompressed_size; UInt64 compressed_size; - int compression_method; bool is_encrypted; }; diff --git a/src/IO/Archives/IArchiveWriter.h b/src/IO/Archives/IArchiveWriter.h index 3856d16fb89..c6330509f54 100644 --- a/src/IO/Archives/IArchiveWriter.h +++ b/src/IO/Archives/IArchiveWriter.h @@ -29,7 +29,7 @@ public: /// Sets compression method and level. /// Changing them will affect next file in the archive. - virtual void setCompression(int /* compression_method */, int /* compression_level */ = kDefaultCompressionLevel) {} /// NOLINT + virtual void setCompression(const String & /* compression_method */, int /* compression_level */ = kDefaultCompressionLevel) {} /// Sets password. If the password is not empty it will enable encryption in the archive. virtual void setPassword(const String & /* password */) {} diff --git a/src/IO/Archives/ZipArchiveReader.cpp b/src/IO/Archives/ZipArchiveReader.cpp index 4e83234615c..68726248dc4 100644 --- a/src/IO/Archives/ZipArchiveReader.cpp +++ b/src/IO/Archives/ZipArchiveReader.cpp @@ -1,6 +1,7 @@ #include #if USE_MINIZIP +#include #include #include #include @@ -18,6 +19,20 @@ namespace ErrorCodes using RawHandle = unzFile; +namespace +{ + void checkCompressionMethodIsEnabled(int compression_method_) + { + ZipArchiveWriter::checkCompressionMethodIsEnabled(compression_method_); + } + + void checkEncryptionIsEnabled() + { + ZipArchiveWriter::checkEncryptionIsEnabled(); + } +} + + /// Holds a raw handle, calls acquireRawHandle() in the constructor and releaseRawHandle() in the destructor. 
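Returning to the toStartOfInterval changes above: the new Millisecond/Microsecond/Nanosecond transforms all reduce to one arithmetic rule. The DateTime64 tick count is first reconciled with the column scale (DecimalUtils::scaleMultiplier(scale) ticks per second, e.g. 1000 for DateTime64(3)) and then floored to a multiple of the requested interval; the separate t < 0 branch is needed because integer division in C++ truncates toward zero. A minimal standalone sketch of that rounding rule (plain Int64 arithmetic, not the ClickHouse implementation):

    #include <cassert>
    #include <cstdint>

    // Floor t to the nearest lower multiple of step, mirroring the t >= 0 / t < 0 branches above.
    int64_t floorToMultiple(int64_t t, int64_t step)
    {
        if (t >= 0)
            return t / step * step;
        return ((t + 1) / step - 1) * step; // truncation-safe floor for negative tick counts
    }

    int main()
    {
        // DateTime64(3): ticks are milliseconds; round down to 100 ms boundaries.
        assert(floorToMultiple(1234, 100) == 1200);
        assert(floorToMultiple(-1234, 100) == -1300); // values before the epoch still round towards minus infinity
        return 0;
    }

For subsecond intervals the function's return type is DateTime64 with scale 3, 6 or 9 (the scale = 3/6/9 branches above), so the floored tick count is interpreted at the matching precision.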
class ZipArchiveReader::HandleHolder { @@ -108,7 +123,7 @@ public: return *file_name; } - const FileInfo & getFileInfo() const + const FileInfoImpl & getFileInfo() const { if (!file_info) retrieveFileInfo(); @@ -161,7 +176,7 @@ private: std::shared_ptr reader; RawHandle raw_handle = nullptr; mutable std::optional file_name; - mutable std::optional file_info; + mutable std::optional file_info; }; @@ -174,7 +189,7 @@ public: , handle(std::move(handle_)) { const auto & file_info = handle.getFileInfo(); - checkCompressionMethodIsEnabled(static_cast(file_info.compression_method)); + checkCompressionMethodIsEnabled(file_info.compression_method); const char * password_cstr = nullptr; if (file_info.is_encrypted) @@ -227,7 +242,7 @@ public: if (new_pos > static_cast(file_info.uncompressed_size)) throw Exception("Seek position is out of bound", ErrorCodes::SEEK_POSITION_OUT_OF_BOUND); - if (file_info.compression_method == static_cast(CompressionMethod::kStore)) + if (file_info.compression_method == MZ_COMPRESS_METHOD_STORE) { /// unzSeek64() works only for non-compressed files. checkResult(unzSeek64(raw_handle, off, whence)); diff --git a/src/IO/Archives/ZipArchiveReader.h b/src/IO/Archives/ZipArchiveReader.h index 6932a93e23f..7236b0b660c 100644 --- a/src/IO/Archives/ZipArchiveReader.h +++ b/src/IO/Archives/ZipArchiveReader.h @@ -4,7 +4,6 @@ #if USE_MINIZIP #include -#include #include #include #include @@ -20,8 +19,6 @@ class SeekableReadBuffer; class ZipArchiveReader : public shared_ptr_helper, public IArchiveReader { public: - using CompressionMethod = ZipArchiveWriter::CompressionMethod; - ~ZipArchiveReader() override; /// Returns true if there is a specified file in the archive. @@ -45,11 +42,6 @@ public: /// Sets password used to decrypt the contents of the files in the archive. void setPassword(const String & password_) override; - /// Utility functions. - static CompressionMethod parseCompressionMethod(const String & str) { return ZipArchiveWriter::parseCompressionMethod(str); } - static void checkCompressionMethodIsEnabled(CompressionMethod method) { ZipArchiveWriter::checkCompressionMethodIsEnabled(method); } - static void checkEncryptionIsEnabled() { ZipArchiveWriter::checkEncryptionIsEnabled(); } - private: /// Constructs an archive's reader that will read from a file in the local filesystem. 
explicit ZipArchiveReader(const String & path_to_archive_); @@ -66,6 +58,11 @@ private: void init(); + struct FileInfoImpl : public FileInfo + { + int compression_method; + }; + HandleHolder acquireHandle(); RawHandle acquireRawHandle(); void releaseRawHandle(RawHandle handle_); diff --git a/src/IO/Archives/ZipArchiveWriter.cpp b/src/IO/Archives/ZipArchiveWriter.cpp index 79192223657..dbfd66a6293 100644 --- a/src/IO/Archives/ZipArchiveWriter.cpp +++ b/src/IO/Archives/ZipArchiveWriter.cpp @@ -80,7 +80,7 @@ public: { auto compress_method = handle.getWriter()->compression_method; auto compress_level = handle.getWriter()->compression_level; - checkCompressionMethodIsEnabled(static_cast(compress_method)); + checkCompressionMethodIsEnabled(compress_method); const char * password_cstr = nullptr; const String & password_str = handle.getWriter()->password; @@ -238,7 +238,7 @@ ZipArchiveWriter::ZipArchiveWriter(const String & path_to_archive_) } ZipArchiveWriter::ZipArchiveWriter(const String & path_to_archive_, std::unique_ptr archive_write_buffer_) - : path_to_archive(path_to_archive_) + : path_to_archive(path_to_archive_), compression_method(MZ_COMPRESS_METHOD_DEFLATE) { if (archive_write_buffer_) handle = StreamFromWriteBuffer::open(std::move(archive_write_buffer_)); @@ -246,6 +246,7 @@ ZipArchiveWriter::ZipArchiveWriter(const String & path_to_archive_, std::unique_ handle = zipOpen64(path_to_archive.c_str(), /* append= */ false); if (!handle) throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Couldn't create zip archive {}", quoteString(path_to_archive)); + } ZipArchiveWriter::~ZipArchiveWriter() @@ -274,10 +275,10 @@ bool ZipArchiveWriter::isWritingFile() const return !handle; } -void ZipArchiveWriter::setCompression(int compression_method_, int compression_level_) +void ZipArchiveWriter::setCompression(const String & compression_method_, int compression_level_) { std::lock_guard lock{mutex}; - compression_method = compression_method_; + compression_method = compressionMethodToInt(compression_method_); compression_level = compression_level_; } @@ -287,48 +288,62 @@ void ZipArchiveWriter::setPassword(const String & password_) password = password_; } -ZipArchiveWriter::CompressionMethod ZipArchiveWriter::parseCompressionMethod(const String & str) +int ZipArchiveWriter::compressionMethodToInt(const String & compression_method_) { - if (str.empty()) - return CompressionMethod::kDeflate; /// Default compression method is DEFLATE. - else if (boost::iequals(str, "store")) - return CompressionMethod::kStore; - else if (boost::iequals(str, "deflate")) - return CompressionMethod::kDeflate; - else if (boost::iequals(str, "bzip2")) - return CompressionMethod::kBzip2; - else if (boost::iequals(str, "lzma")) - return CompressionMethod::kLzma; - else if (boost::iequals(str, "zstd")) - return CompressionMethod::kZstd; - else if (boost::iequals(str, "xz")) - return CompressionMethod::kXz; + if (compression_method_.empty()) + return MZ_COMPRESS_METHOD_DEFLATE; /// By default the compression method is "deflate". 
+ else if (compression_method_ == kStore) + return MZ_COMPRESS_METHOD_STORE; + else if (compression_method_ == kDeflate) + return MZ_COMPRESS_METHOD_DEFLATE; + else if (compression_method_ == kBzip2) + return MZ_COMPRESS_METHOD_BZIP2; + else if (compression_method_ == kLzma) + return MZ_COMPRESS_METHOD_LZMA; + else if (compression_method_ == kZstd) + return MZ_COMPRESS_METHOD_ZSTD; + else if (compression_method_ == kXz) + return MZ_COMPRESS_METHOD_XZ; else - throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Unknown compression method specified for a zip archive: {}", str); + throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Unknown compression method specified for a zip archive: {}", compression_method_); +} + +String ZipArchiveWriter::intToCompressionMethod(int compression_method_) +{ + switch (compression_method_) + { + case MZ_COMPRESS_METHOD_STORE: return kStore; + case MZ_COMPRESS_METHOD_DEFLATE: return kDeflate; + case MZ_COMPRESS_METHOD_BZIP2: return kBzip2; + case MZ_COMPRESS_METHOD_LZMA: return kLzma; + case MZ_COMPRESS_METHOD_ZSTD: return kZstd; + case MZ_COMPRESS_METHOD_XZ: return kXz; + } + throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Unknown compression method specified for a zip archive: {}", compression_method_); } /// Checks that a passed compression method can be used. -void ZipArchiveWriter::checkCompressionMethodIsEnabled(CompressionMethod method) +void ZipArchiveWriter::checkCompressionMethodIsEnabled(int compression_method_) { - switch (method) + switch (compression_method_) { - case CompressionMethod::kStore: [[fallthrough]]; - case CompressionMethod::kDeflate: - case CompressionMethod::kLzma: - case CompressionMethod::kXz: - case CompressionMethod::kZstd: + case MZ_COMPRESS_METHOD_STORE: [[fallthrough]]; + case MZ_COMPRESS_METHOD_DEFLATE: + case MZ_COMPRESS_METHOD_LZMA: + case MZ_COMPRESS_METHOD_ZSTD: + case MZ_COMPRESS_METHOD_XZ: return; - case CompressionMethod::kBzip2: + case MZ_COMPRESS_METHOD_BZIP2: { #if USE_BZIP2 return; #else - throw Exception("BZIP2 compression method is disabled", ErrorCodes::SUPPORT_IS_DISABLED); + throw Exception("bzip2 compression method is disabled", ErrorCodes::SUPPORT_IS_DISABLED); #endif } } - throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Unknown compression method specified for a zip archive: {}", static_cast(method)); + throw Exception(ErrorCodes::CANNOT_PACK_ARCHIVE, "Unknown compression method specified for a zip archive: {}", compression_method_); } /// Checks that encryption is enabled. diff --git a/src/IO/Archives/ZipArchiveWriter.h b/src/IO/Archives/ZipArchiveWriter.h index 76f8dd8e9e5..58df4902434 100644 --- a/src/IO/Archives/ZipArchiveWriter.h +++ b/src/IO/Archives/ZipArchiveWriter.h @@ -31,16 +31,12 @@ public: bool isWritingFile() const override; /// Supported compression methods. - enum class CompressionMethod - { - /// See mz.h - kStore = 0, - kDeflate = 8, - kBzip2 = 12, - kLzma = 14, - kZstd = 93, - kXz = 95, - }; + static constexpr const char kStore[] = "store"; + static constexpr const char kDeflate[] = "deflate"; + static constexpr const char kBzip2[] = "bzip2"; + static constexpr const char kLzma[] = "lzma"; + static constexpr const char kZstd[] = "zstd"; + static constexpr const char kXz[] = "xz"; /// Some compression levels. enum class CompressionLevels @@ -53,7 +49,7 @@ public: /// Sets compression method and level. /// Changing them will affect next file in the archive. 
- void setCompression(int compression_method_, int compression_level_) override; + void setCompression(const String & compression_method_, int compression_level_) override; /// Sets password. Only contents of the files are encrypted, /// names of files are not encrypted. @@ -61,8 +57,9 @@ public: void setPassword(const String & password_) override; /// Utility functions. - static CompressionMethod parseCompressionMethod(const String & str); - static void checkCompressionMethodIsEnabled(CompressionMethod method); + static int compressionMethodToInt(const String & compression_method_); + static String intToCompressionMethod(int compression_method_); + static void checkCompressionMethodIsEnabled(int compression_method_); static void checkEncryptionIsEnabled(); private: @@ -85,7 +82,7 @@ private: [[noreturn]] void showError(const String & message) const; const String path_to_archive; - int compression_method = static_cast(CompressionMethod::kDeflate); + int compression_method; /// By default the compression method is "deflate". int compression_level = kDefaultCompressionLevel; String password; RawHandle handle = nullptr; diff --git a/src/IO/Archives/hasRegisteredArchiveFileExtension.cpp b/src/IO/Archives/hasRegisteredArchiveFileExtension.cpp new file mode 100644 index 00000000000..6b2ef29d054 --- /dev/null +++ b/src/IO/Archives/hasRegisteredArchiveFileExtension.cpp @@ -0,0 +1,12 @@ +#include + + +namespace DB +{ + +bool hasRegisteredArchiveFileExtension(const String & path) +{ + return path.ends_with(".zip") || path.ends_with(".zipx"); +} + +} diff --git a/src/IO/Archives/hasRegisteredArchiveFileExtension.h b/src/IO/Archives/hasRegisteredArchiveFileExtension.h new file mode 100644 index 00000000000..cab938aa0b4 --- /dev/null +++ b/src/IO/Archives/hasRegisteredArchiveFileExtension.h @@ -0,0 +1,12 @@ +#pragma once + +#include + + +namespace DB +{ + +/// Returns true if a specified path has one of the registered file extensions for an archive. 
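To recap the archive change above: the compression method is now passed around as a string ("store", "deflate", "bzip2", "lzma", "zstd", "xz") and mapped to the minizip integer codes only inside ZipArchiveWriter. A rough usage sketch of the new static helpers (assuming the ZipArchiveWriter.h declarations above are included; this snippet is illustrative, not part of the patch):

    #include <cassert>

    void compressionNameRoundTrip()
    {
        // "zstd" -> MZ_COMPRESS_METHOD_ZSTD -> "zstd"
        int code = DB::ZipArchiveWriter::compressionMethodToInt(DB::ZipArchiveWriter::kZstd);
        assert(DB::ZipArchiveWriter::intToCompressionMethod(code) == DB::ZipArchiveWriter::kZstd);

        // Returns for zstd; would throw SUPPORT_IS_DISABLED for bzip2 in a build without USE_BZIP2.
        DB::ZipArchiveWriter::checkCompressionMethodIsEnabled(code);

        // An empty string still selects the default, deflate.
        assert(DB::ZipArchiveWriter::compressionMethodToInt("")
               == DB::ZipArchiveWriter::compressionMethodToInt(DB::ZipArchiveWriter::kDeflate));
    }

setCompression(const String &, int) on IArchiveWriter/ZipArchiveWriter goes through the same compressionMethodToInt() mapping, so callers no longer deal with raw MZ_COMPRESS_METHOD_* values.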
+bool hasRegisteredArchiveFileExtension(const String & path); + +} diff --git a/src/IO/ConcatReadBuffer.h b/src/IO/ConcatReadBuffer.h index 4ef8d04d4c9..3f44181a6e9 100644 --- a/src/IO/ConcatReadBuffer.h +++ b/src/IO/ConcatReadBuffer.h @@ -23,6 +23,12 @@ public: assert(!buffers.empty()); } + ConcatReadBuffer(std::unique_ptr buf1, std::unique_ptr buf2) : ConcatReadBuffer() + { + appendBuffer(std::move(buf1)); + appendBuffer(std::move(buf2)); + } + ConcatReadBuffer(ReadBuffer & buf1, ReadBuffer & buf2) : ConcatReadBuffer() { appendBuffer(wrapReadBufferReference(buf1)); diff --git a/src/IO/IOThreadPool.cpp b/src/IO/IOThreadPool.cpp new file mode 100644 index 00000000000..4014d00d8b8 --- /dev/null +++ b/src/IO/IOThreadPool.cpp @@ -0,0 +1,34 @@ +#include +#include "Core/Field.h" + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +std::unique_ptr IOThreadPool::instance; + +void IOThreadPool::initialize(size_t max_threads, size_t max_free_threads, size_t queue_size) +{ + if (instance) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "The IO thread pool is initialized twice"); + } + + instance = std::make_unique(max_threads, max_free_threads, queue_size, false /*shutdown_on_exception*/); +} + +ThreadPool & IOThreadPool::get() +{ + if (!instance) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "The IO thread pool is not initialized"); + } + + return *instance; +} + +} diff --git a/src/IO/IOThreadPool.h b/src/IO/IOThreadPool.h new file mode 100644 index 00000000000..4fcf99b6048 --- /dev/null +++ b/src/IO/IOThreadPool.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace DB +{ + +/* + * ThreadPool used for the IO. + */ +class IOThreadPool +{ + static std::unique_ptr instance; + +public: + static void initialize(size_t max_threads, size_t max_free_threads, size_t queue_size); + static ThreadPool & get(); +}; + +} diff --git a/src/IO/ParallelReadBuffer.cpp b/src/IO/ParallelReadBuffer.cpp new file mode 100644 index 00000000000..7fa10c160ad --- /dev/null +++ b/src/IO/ParallelReadBuffer.cpp @@ -0,0 +1,290 @@ +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int CANNOT_SEEK_THROUGH_FILE; + extern const int SEEK_POSITION_OUT_OF_BOUND; + +} + +ParallelReadBuffer::ParallelReadBuffer( + std::unique_ptr reader_factory_, + ThreadPool * pool_, + size_t max_working_readers_, + WorkerSetup worker_setup_, + WorkerCleanup worker_cleanup_) + : SeekableReadBufferWithSize(nullptr, 0) + , pool(pool_) + , max_working_readers(max_working_readers_) + , reader_factory(std::move(reader_factory_)) + , worker_setup(std::move(worker_setup_)) + , worker_cleanup(std::move(worker_cleanup_)) +{ + std::unique_lock lock{mutex}; + addReaders(lock); +} + +bool ParallelReadBuffer::addReaderToPool(std::unique_lock & /*buffer_lock*/) +{ + auto reader = reader_factory->getReader(); + if (!reader) + { + return false; + } + + auto worker = read_workers.emplace_back(std::make_shared(std::move(reader))); + + pool->scheduleOrThrow( + [&, this, worker = std::move(worker)]() mutable + { + ThreadStatus thread_status; + + { + std::lock_guard lock{mutex}; + ++active_working_reader; + } + + SCOPE_EXIT({ + worker_cleanup(thread_status); + + std::lock_guard lock{mutex}; + --active_working_reader; + if (active_working_reader == 0) + { + readers_done.notify_all(); + } + }); + worker_setup(thread_status); + + readerThreadFunction(std::move(worker)); + }); + return true; +} + +void ParallelReadBuffer::addReaders(std::unique_lock & buffer_lock) 
+{ + while (read_workers.size() < max_working_readers && addReaderToPool(buffer_lock)) + ; +} + +off_t ParallelReadBuffer::seek(off_t offset, int whence) +{ + if (whence != SEEK_SET) + throw Exception("Only SEEK_SET mode is allowed.", ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + + if (offset < 0) + throw Exception("Seek position is out of bounds. Offset: " + std::to_string(offset), ErrorCodes::SEEK_POSITION_OUT_OF_BOUND); + + if (!working_buffer.empty() && static_cast(offset) >= current_position - working_buffer.size() && offset < current_position) + { + pos = working_buffer.end() - (current_position - offset); + assert(pos >= working_buffer.begin()); + assert(pos <= working_buffer.end()); + + return offset; + } + + std::unique_lock lock{mutex}; + const auto offset_is_in_range + = [&](const auto & range) { return static_cast(offset) >= range.left && static_cast(offset) <= *range.right; }; + + while (!read_workers.empty() && (offset < current_position || !offset_is_in_range(read_workers.front()->range))) + { + read_workers.front()->cancel = true; + read_workers.pop_front(); + } + + if (!read_workers.empty()) + { + auto & front_worker = read_workers.front(); + auto & segments = front_worker->segments; + current_position = front_worker->range.left; + while (true) + { + next_condvar.wait(lock, [&] { return emergency_stop || !segments.empty(); }); + + if (emergency_stop) + handleEmergencyStop(); + + auto next_segment = front_worker->nextSegment(); + if (static_cast(offset) < current_position + next_segment.size()) + { + current_segment = std::move(next_segment); + working_buffer = internal_buffer = Buffer(current_segment.data(), current_segment.data() + current_segment.size()); + current_position += current_segment.size(); + pos = working_buffer.end() - (current_position - offset); + addReaders(lock); + return offset; + } + + current_position += next_segment.size(); + } + } + + lock.unlock(); + finishAndWait(); + + reader_factory->seek(offset, whence); + all_completed = false; + read_workers.clear(); + + current_position = offset; + resetWorkingBuffer(); + + emergency_stop = false; + + lock.lock(); + addReaders(lock); + return offset; +} + +std::optional ParallelReadBuffer::getTotalSize() +{ + std::lock_guard lock{mutex}; + return reader_factory->getTotalSize(); +} + +off_t ParallelReadBuffer::getPosition() +{ + return current_position - available(); +} + +bool ParallelReadBuffer::currentWorkerReady() const +{ + assert(!read_workers.empty()); + return read_workers.front()->finished || !read_workers.front()->segments.empty(); +} + +bool ParallelReadBuffer::currentWorkerCompleted() const +{ + assert(!read_workers.empty()); + return read_workers.front()->finished && read_workers.front()->segments.empty(); +} + +void ParallelReadBuffer::handleEmergencyStop() +{ + // this can only be called from the main thread when there is an exception + assert(background_exception); + if (background_exception) + std::rethrow_exception(background_exception); +} + +bool ParallelReadBuffer::nextImpl() +{ + if (all_completed) + return false; + + while (true) + { + std::unique_lock lock(mutex); + next_condvar.wait( + lock, + [this]() + { + /// Check if no more readers left or current reader can be processed + return emergency_stop || currentWorkerReady(); + }); + + bool worker_removed = false; + /// Remove completed units + while (!read_workers.empty() && currentWorkerCompleted() && !emergency_stop) + { + read_workers.pop_front(); + worker_removed = true; + } + + if (emergency_stop) + handleEmergencyStop(); + + if 
(worker_removed) + addReaders(lock); + + /// All readers processed, stop + if (read_workers.empty()) + { + all_completed = true; + return false; + } + + auto & front_worker = read_workers.front(); + /// Read data from first segment of the first reader + if (!front_worker->segments.empty()) + { + current_segment = front_worker->nextSegment(); + if (currentWorkerCompleted()) + { + read_workers.pop_front(); + all_completed = !addReaderToPool(lock) && read_workers.empty(); + } + break; + } + } + working_buffer = internal_buffer = Buffer(current_segment.data(), current_segment.data() + current_segment.size()); + current_position += working_buffer.size(); + return true; +} + +void ParallelReadBuffer::readerThreadFunction(ReadWorkerPtr read_worker) +{ + try + { + while (!emergency_stop && !read_worker->cancel) + { + if (!read_worker->reader->next()) + throw Exception("Failed to read all the data from the reader", ErrorCodes::LOGICAL_ERROR); + + if (emergency_stop || read_worker->cancel) + break; + + Buffer buffer = read_worker->reader->buffer(); + size_t bytes_to_copy = std::min(buffer.size(), read_worker->bytes_left); + Segment new_segment(bytes_to_copy, &arena); + memcpy(new_segment.data(), buffer.begin(), bytes_to_copy); + read_worker->reader->ignore(bytes_to_copy); + read_worker->bytes_left -= bytes_to_copy; + { + /// New data ready to be read + std::lock_guard lock(mutex); + read_worker->segments.emplace_back(std::move(new_segment)); + read_worker->finished = read_worker->bytes_left == 0; + next_condvar.notify_all(); + } + + if (read_worker->finished) + { + break; + } + } + } + catch (...) + { + onBackgroundException(); + } +} + +void ParallelReadBuffer::onBackgroundException() +{ + std::lock_guard lock(mutex); + if (!background_exception) + { + background_exception = std::current_exception(); + } + emergency_stop = true; + next_condvar.notify_all(); +} + +void ParallelReadBuffer::finishAndWait() +{ + emergency_stop = true; + + std::unique_lock lock{mutex}; + readers_done.wait(lock, [&] { return active_working_reader == 0; }); +} + +} diff --git a/src/IO/ParallelReadBuffer.h b/src/IO/ParallelReadBuffer.h new file mode 100644 index 00000000000..7b364205e8e --- /dev/null +++ b/src/IO/ParallelReadBuffer.h @@ -0,0 +1,174 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace DB +{ + +/** + * Reads from multiple ReadBuffers in parallel. + * Preserves order of readers obtained from ReadBufferFactory. + * + * It consumes multiple readers and yields data from them in order as it passed. + * Each working reader save segments of data to internal queue. + * + * ParallelReadBuffer in nextImpl method take first available segment from first reader in deque and fed it to user. + * When first reader finish reading, they will be removed from worker deque and data from next reader consumed. + * + * Number of working readers limited by max_working_readers. 
+ */ +class ParallelReadBuffer : public SeekableReadBufferWithSize +{ +private: + /// Blocks until data occurred in the first reader or this reader indicate finishing + /// Finished readers removed from queue and data from next readers processed + bool nextImpl() override; + + class Segment : private boost::noncopyable + { + public: + Segment(size_t size_, SynchronizedArenaWithFreeLists * arena_) : arena(arena_), m_data(arena->alloc(size_)), m_size(size_) { } + + Segment() = default; + + Segment(Segment && other) noexcept : arena(other.arena) + { + std::swap(m_data, other.m_data); + std::swap(m_size, other.m_size); + } + + Segment & operator=(Segment && other) noexcept + { + arena = other.arena; + std::swap(m_data, other.m_data); + std::swap(m_size, other.m_size); + return *this; + } + + ~Segment() + { + if (m_data) + { + arena->free(m_data, m_size); + } + } + + auto data() const noexcept { return m_data; } + auto size() const noexcept { return m_size; } + + private: + SynchronizedArenaWithFreeLists * arena{nullptr}; + char * m_data{nullptr}; + size_t m_size{0}; + }; + +public: + class ReadBufferFactory + { + public: + virtual SeekableReadBufferPtr getReader() = 0; + virtual ~ReadBufferFactory() = default; + virtual off_t seek(off_t off, int whence) = 0; + virtual std::optional getTotalSize() = 0; + }; + + using WorkerSetup = std::function; + using WorkerCleanup = std::function; + explicit ParallelReadBuffer( + std::unique_ptr reader_factory_, + ThreadPool * pool, + size_t max_working_readers, + WorkerSetup worker_setup = {}, + WorkerCleanup worker_cleanup = {}); + + ~ParallelReadBuffer() override { finishAndWait(); } + + off_t seek(off_t off, int whence) override; + std::optional getTotalSize() override; + off_t getPosition() override; + +private: + /// Reader in progress with a list of read segments + struct ReadWorker + { + explicit ReadWorker(SeekableReadBufferPtr reader_) : reader(std::move(reader_)), range(reader->getRemainingReadRange()) + { + assert(range.right); + bytes_left = *range.right - range.left + 1; + } + + Segment nextSegment() + { + assert(!segments.empty()); + auto next_segment = std::move(segments.front()); + segments.pop_front(); + range.left += next_segment.size(); + return next_segment; + } + + SeekableReadBufferPtr reader; + std::deque segments; + bool finished{false}; + SeekableReadBuffer::Range range; + size_t bytes_left{0}; + std::atomic_bool cancel{false}; + }; + + using ReadWorkerPtr = std::shared_ptr; + + /// First worker in deque have new data or processed all available amount + bool currentWorkerReady() const; + /// First worker in deque processed and flushed all data + bool currentWorkerCompleted() const; + + void handleEmergencyStop(); + + void addReaders(std::unique_lock & buffer_lock); + bool addReaderToPool(std::unique_lock & buffer_lock); + + /// Process read_worker, read data and save into internal segments queue + void readerThreadFunction(ReadWorkerPtr read_worker); + + void onBackgroundException(); + void finishAndWait(); + + SynchronizedArenaWithFreeLists arena; + + Segment current_segment; + + ThreadPool * pool; + size_t max_working_readers; + size_t active_working_reader{0}; + // Triggered when all reader workers are done + std::condition_variable readers_done; + + std::unique_ptr reader_factory; + + WorkerSetup worker_setup; + WorkerCleanup worker_cleanup; + + /** + * FIFO queue of readers. + * Each worker contains reader itself and downloaded segments. 
+ * When reader read all available data it will be removed from + * deque and data from next reader will be consumed to user. + */ + std::deque read_workers; + + std::mutex mutex; + /// Triggered when new data available + std::condition_variable next_condvar; + + std::exception_ptr background_exception = nullptr; + std::atomic_bool emergency_stop{false}; + + off_t current_position{0}; + + bool all_completed{false}; +}; + +} diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index 73ad73bf895..061dd772212 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -1,32 +1,33 @@ #pragma once #include -#include -#include #include #include +#include #include #include #include #include +#include +#include +#include #include #include #include #include #include #include +#include #include #include #include #include #include -#include -#include namespace ProfileEvents { - extern const Event ReadBufferSeekCancelConnection; +extern const Event ReadBufferSeekCancelConnection; } namespace DB @@ -48,7 +49,7 @@ class UpdatableSessionBase { protected: SessionPtr session; - UInt64 redirects { 0 }; + UInt64 redirects{0}; Poco::URI initial_uri; ConnectionTimeouts timeouts; UInt64 max_redirects; @@ -56,19 +57,12 @@ protected: public: virtual void buildNewSession(const Poco::URI & uri) = 0; - explicit UpdatableSessionBase(const Poco::URI uri, - const ConnectionTimeouts & timeouts_, - UInt64 max_redirects_) - : initial_uri { uri } - , timeouts { timeouts_ } - , max_redirects { max_redirects_ } + explicit UpdatableSessionBase(const Poco::URI uri, const ConnectionTimeouts & timeouts_, UInt64 max_redirects_) + : initial_uri{uri}, timeouts{timeouts_}, max_redirects{max_redirects_} { } - SessionPtr getSession() - { - return session; - } + SessionPtr getSession() { return session; } void updateSession(const Poco::URI & uri) { @@ -99,7 +93,7 @@ namespace detail /// HTTP range, including right bound [begin, end]. 
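Stepping back to the two IO pieces introduced above (IOThreadPool and ParallelReadBuffer), here is a hedged sketch of how they are meant to be wired together. MyRangedReaderFactory is a hypothetical placeholder for whatever concrete ReadBufferFactory a caller provides, and the constructor arguments follow the declaration above (assuming the IO/ParallelReadBuffer.h and IO/IOThreadPool.h headers from this patch are included):

    // Hypothetical factory: a real one would hand out SeekableReadBuffers whose
    // getRemainingReadRange() has a finite right bound, as ReadWorker above requires.
    struct MyRangedReaderFactory : DB::ParallelReadBuffer::ReadBufferFactory
    {
        DB::SeekableReadBufferPtr getReader() override { return nullptr; } // nullptr means "no more readers"
        off_t seek(off_t off, int /*whence*/) override { return off; }
        std::optional<size_t> getTotalSize() override { return std::nullopt; }
    };

    void example()
    {
        // IOThreadPool::initialize(...) must have been called once somewhere at startup,
        // otherwise IOThreadPool::get() throws LOGICAL_ERROR.
        DB::ParallelReadBuffer parallel_buf(
            std::make_unique<MyRangedReaderFactory>(),
            &DB::IOThreadPool::get(),
            /* max_working_readers = */ 4,
            /* worker_setup = */ [](DB::ThreadStatus &) {},   // per-worker-thread hooks, e.g. to attach thread status
            /* worker_cleanup = */ [](DB::ThreadStatus &) {});

        // parallel_buf now behaves like any other SeekableReadBuffer: segments are downloaded
        // concurrently by up to max_working_readers workers and served to the caller in order.
    }

(The explicit no-op setup/cleanup callbacks are passed here because the worker lambda invokes them unconditionally; presumably real callers use them to propagate ThreadStatus into the pool threads.)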
struct Range { - size_t begin = 0; + std::optional begin; std::optional end; }; @@ -144,10 +138,9 @@ namespace detail return read_range.begin || read_range.end || retry_with_range_header; } - size_t getOffset() const - { - return read_range.begin + offset_from_begin_pos; - } + size_t getRangeBegin() const { return read_range.begin.value_or(0); } + + size_t getOffset() const { return getRangeBegin() + offset_from_begin_pos; } std::istream * callImpl(Poco::URI uri_, Poco::Net::HTTPResponse & response, const std::string & method_) { @@ -161,7 +154,7 @@ namespace detail if (out_stream_callback) request.setChunkedTransferEncoding(true); - for (auto & http_header_entry: http_header_entries) + for (auto & http_header_entry : http_header_entries) request.set(std::get<0>(http_header_entry), std::get<1>(http_header_entry)); if (withPartialContent()) @@ -207,26 +200,14 @@ namespace detail std::optional getTotalSize() override { if (read_range.end) - return *read_range.end - read_range.begin; + return *read_range.end - getRangeBegin(); Poco::Net::HTTPResponse response; for (size_t i = 0; i < 10; ++i) { try { - call(response, Poco::Net::HTTPRequest::HTTP_HEAD); - - while (isRedirect(response.getStatus())) - { - Poco::URI uri_redirect(response.get("Location")); - if (remote_host_filter) - remote_host_filter->checkURL(uri_redirect); - - session->updateSession(uri_redirect); - - istr = callImpl(uri_redirect, response, method); - } - + callWithRedirects(response, Poco::Net::HTTPRequest::HTTP_HEAD); break; } catch (const Poco::Exception & e) @@ -236,7 +217,7 @@ namespace detail } if (response.hasContentLength()) - read_range.end = read_range.begin + response.getContentLength(); + read_range.end = getRangeBegin() + response.getContentLength(); return read_range.end; } @@ -252,6 +233,21 @@ namespace detail InitializeError initialization_error = InitializeError::NONE; + private: + void setupExternalBuffer() + { + /** + * use_external_buffer -- means we read into the buffer which + * was passed to us from somewhere else. We do not check whether + * previously returned buffer was read or not (no hasPendingData() check is needed), + * because this branch means we are prefetching data, + * each nextImpl() call we can fill a different buffer. 
+ */ + impl->set(internal_buffer.begin(), internal_buffer.size()); + assert(working_buffer.begin() != nullptr); + assert(!internal_buffer.empty()); + } + public: using NextCallback = std::function; using OutStreamCallback = std::function; @@ -276,7 +272,7 @@ namespace detail , session {session_} , out_stream_callback {out_stream_callback_} , credentials {credentials_} - , http_header_entries {http_header_entries_} + , http_header_entries {std::move(http_header_entries_)} , remote_host_filter {remote_host_filter_} , buffer_size {buffer_size_} , use_external_buffer {use_external_buffer_} @@ -287,18 +283,21 @@ namespace detail { if (settings.http_max_tries <= 0 || settings.http_retry_initial_backoff_ms <= 0 || settings.http_retry_initial_backoff_ms >= settings.http_retry_max_backoff_ms) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Invalid setting for http backoff, " - "must be http_max_tries >= 1 (current is {}) and " - "0 < http_retry_initial_backoff_ms < settings.http_retry_max_backoff_ms (now 0 < {} < {})", - settings.http_max_tries, settings.http_retry_initial_backoff_ms, settings.http_retry_max_backoff_ms); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Invalid setting for http backoff, " + "must be http_max_tries >= 1 (current is {}) and " + "0 < http_retry_initial_backoff_ms < settings.http_retry_max_backoff_ms (now 0 < {} < {})", + settings.http_max_tries, + settings.http_retry_initial_backoff_ms, + settings.http_retry_max_backoff_ms); // Configure User-Agent if it not already set. const std::string user_agent = "User-Agent"; - auto iter = std::find_if(http_header_entries.begin(), http_header_entries.end(), [&user_agent](const HTTPHeaderEntry & entry) - { - return std::get<0>(entry) == user_agent; - }); + auto iter = std::find_if( + http_header_entries.begin(), + http_header_entries.end(), + [&user_agent](const HTTPHeaderEntry & entry) { return std::get<0>(entry) == user_agent; }); if (iter == http_header_entries.end()) { @@ -313,7 +312,36 @@ namespace detail } } - void call(Poco::Net::HTTPResponse & response, const String & method_) + static bool isRetriableError(const Poco::Net::HTTPResponse::HTTPStatus http_status) noexcept + { + constexpr std::array non_retriable_errors{ + Poco::Net::HTTPResponse::HTTPStatus::HTTP_BAD_REQUEST, + Poco::Net::HTTPResponse::HTTPStatus::HTTP_UNAUTHORIZED, + Poco::Net::HTTPResponse::HTTPStatus::HTTP_NOT_FOUND, + Poco::Net::HTTPResponse::HTTPStatus::HTTP_FORBIDDEN, + Poco::Net::HTTPResponse::HTTPStatus::HTTP_METHOD_NOT_ALLOWED}; + + return std::all_of( + non_retriable_errors.begin(), non_retriable_errors.end(), [&](const auto status) { return http_status != status; }); + } + + void callWithRedirects(Poco::Net::HTTPResponse & response, const String & method_, bool throw_on_all_errors = false) + { + call(response, method_, throw_on_all_errors); + + while (isRedirect(response.getStatus())) + { + Poco::URI uri_redirect(response.get("Location")); + if (remote_host_filter) + remote_host_filter->checkURL(uri_redirect); + + session->updateSession(uri_redirect); + + istr = callImpl(uri_redirect, response, method); + } + } + + void call(Poco::Net::HTTPResponse & response, const String & method_, bool throw_on_all_errors = false) { try { @@ -321,18 +349,18 @@ namespace detail } catch (...) 
{ + if (throw_on_all_errors) + { + throw; + } + auto http_status = response.getStatus(); - if (http_status == Poco::Net::HTTPResponse::HTTPStatus::HTTP_NOT_FOUND - && http_skip_not_found_url) + if (http_status == Poco::Net::HTTPResponse::HTTPStatus::HTTP_NOT_FOUND && http_skip_not_found_url) { initialization_error = InitializeError::SKIP_NOT_FOUND_URL; } - else if (http_status == Poco::Net::HTTPResponse::HTTPStatus::HTTP_BAD_REQUEST - || http_status == Poco::Net::HTTPResponse::HTTPStatus::HTTP_UNAUTHORIZED - || http_status == Poco::Net::HTTPResponse::HTTPStatus::HTTP_NOT_FOUND - || http_status == Poco::Net::HTTPResponse::HTTPStatus::HTTP_FORBIDDEN - || http_status == Poco::Net::HTTPResponse::HTTPStatus::HTTP_METHOD_NOT_ALLOWED) + else if (!isRetriableError(http_status)) { initialization_error = InitializeError::NON_RETRIABLE_ERROR; exception = std::current_exception(); @@ -372,12 +400,14 @@ namespace detail if (withPartialContent() && response.getStatus() != Poco::Net::HTTPResponse::HTTPStatus::HTTP_PARTIAL_CONTENT) { /// Having `200 OK` instead of `206 Partial Content` is acceptable in case we retried with range.begin == 0. - if (read_range.begin) + if (read_range.begin && *read_range.begin != 0) { if (!exception) - exception = std::make_exception_ptr( - Exception(ErrorCodes::HTTP_RANGE_NOT_SATISFIABLE, - "Cannot read with range: [{}, {}]", read_range.begin, read_range.end ? *read_range.end : '-')); + exception = std::make_exception_ptr(Exception( + ErrorCodes::HTTP_RANGE_NOT_SATISFIABLE, + "Cannot read with range: [{}, {}]", + *read_range.begin, + read_range.end ? *read_range.end : '-')); initialization_error = InitializeError::NON_RETRIABLE_ERROR; return; @@ -386,12 +416,12 @@ namespace detail { /// We could have range.begin == 0 and range.end != 0 in case of DiskWeb and failing to read with partial content /// will affect only performance, so a warning is enough. - LOG_WARNING(log, "Unable to read with range header: [{}, {}]", read_range.begin, *read_range.end); + LOG_WARNING(log, "Unable to read with range header: [{}, {}]", getRangeBegin(), *read_range.end); } } if (!offset_from_begin_pos && !read_range.end && response.hasContentLength()) - read_range.end = read_range.begin + response.getContentLength(); + read_range.end = getRangeBegin() + response.getContentLength(); try { @@ -399,12 +429,7 @@ namespace detail if (use_external_buffer) { - /** - * See comment 30 lines below. - */ - impl->set(internal_buffer.begin(), internal_buffer.size()); - assert(working_buffer.begin() != nullptr); - assert(!internal_buffer.empty()); + setupExternalBuffer(); } } catch (const Poco::Exception & e) @@ -426,23 +451,17 @@ namespace detail if (next_callback) next_callback(count()); - if (read_range.end && getOffset() == read_range.end.value()) + if (read_range.end && getOffset() > read_range.end.value()) + { + assert(getOffset() == read_range.end.value() + 1); return false; + } if (impl) { if (use_external_buffer) { - /** - * use_external_buffer -- means we read into the buffer which - * was passed to us from somewhere else. We do not check whether - * previously returned buffer was read or not (no hasPendingData() check is needed), - * because this branch means we are prefetching data, - * each nextImpl() call we can fill a different buffer. 
- */ - impl->set(internal_buffer.begin(), internal_buffer.size()); - assert(working_buffer.begin() != nullptr); - assert(!internal_buffer.empty()); + setupExternalBuffer(); } else { @@ -477,10 +496,7 @@ namespace detail if (use_external_buffer) { - /// See comment 40 lines above. - impl->set(internal_buffer.begin(), internal_buffer.size()); - assert(working_buffer.begin() != nullptr); - assert(!internal_buffer.empty()); + setupExternalBuffer(); } } @@ -498,13 +514,18 @@ namespace detail if (!can_retry_request) throw; - LOG_ERROR(log, - "HTTP request to `{}` failed at try {}/{} with bytes read: {}/{}. " - "Error: {}. (Current backoff wait is {}/{} ms)", - uri.toString(), i + 1, settings.http_max_tries, - getOffset(), read_range.end ? toString(*read_range.end) : "unknown", - e.displayText(), - milliseconds_to_wait, settings.http_retry_max_backoff_ms); + LOG_ERROR( + log, + "HTTP request to `{}` failed at try {}/{} with bytes read: {}/{}. " + "Error: {}. (Current backoff wait is {}/{} ms)", + uri.toString(), + i + 1, + settings.http_max_tries, + getOffset(), + read_range.end ? toString(*read_range.end) : "unknown", + e.displayText(), + milliseconds_to_wait, + settings.http_retry_max_backoff_ms); retry_with_range_header = true; exception = std::current_exception(); @@ -529,10 +550,7 @@ namespace detail return true; } - off_t getPosition() override - { - return getOffset() - available(); - } + off_t getPosition() override { return getOffset() - available(); } off_t seek(off_t offset_, int whence) override { @@ -540,12 +558,11 @@ namespace detail throw Exception("Only SEEK_SET mode is allowed.", ErrorCodes::CANNOT_SEEK_THROUGH_FILE); if (offset_ < 0) - throw Exception("Seek position is out of bounds. Offset: " + std::to_string(offset_), ErrorCodes::SEEK_POSITION_OUT_OF_BOUND); + throw Exception( + "Seek position is out of bounds. 
Offset: " + std::to_string(offset_), ErrorCodes::SEEK_POSITION_OUT_OF_BOUND); off_t current_offset = getOffset(); - if (!working_buffer.empty() - && size_t(offset_) >= current_offset - working_buffer.size() - && offset_ < current_offset) + if (!working_buffer.empty() && size_t(offset_) >= current_offset - working_buffer.size() && offset_ < current_offset) { pos = working_buffer.end() - (current_offset - offset_); assert(pos >= working_buffer.begin()); @@ -567,7 +584,6 @@ namespace detail if (impl) { - ProfileEvents::increment(ProfileEvents::ReadBufferSeekCancelConnection); impl.reset(); } @@ -580,6 +596,8 @@ namespace detail return offset_; } + SeekableReadBuffer::Range getRemainingReadRange() const override { return {getOffset(), read_range.end}; } + std::string getResponseCookie(const std::string & name, const std::string & def) const { for (const auto & cookie : cookies) @@ -599,10 +617,7 @@ namespace detail next_callback(count()); } - const std::string & getCompressionMethod() const - { - return content_encoding; - } + const std::string & getCompressionMethod() const { return content_encoding; } }; } @@ -611,19 +626,50 @@ class UpdatableSession : public UpdatableSessionBase using Parent = UpdatableSessionBase; public: - UpdatableSession( - const Poco::URI uri, - const ConnectionTimeouts & timeouts_, - const UInt64 max_redirects_) + UpdatableSession(const Poco::URI uri, const ConnectionTimeouts & timeouts_, const UInt64 max_redirects_) : Parent(uri, timeouts_, max_redirects_) { session = makeHTTPSession(initial_uri, timeouts); } - void buildNewSession(const Poco::URI & uri) override + void buildNewSession(const Poco::URI & uri) override { session = makeHTTPSession(uri, timeouts); } +}; + +class RangeGenerator +{ +public: + explicit RangeGenerator(size_t total_size_, size_t range_step_, size_t range_start = 0) + : from(range_start), range_step(range_step_), total_size(total_size_) { - session = makeHTTPSession(uri, timeouts); } + + size_t totalRanges() const { return static_cast(round(static_cast(total_size - from) / range_step)); } + + using Range = std::pair; + + // return upper exclusive range of values, i.e. 
[from_range, to_range> + std::optional nextRange() + { + if (from >= total_size) + { + return std::nullopt; + } + + auto to = from + range_step; + if (to >= total_size) + { + to = total_size; + } + + Range range{from, to}; + from = to; + return std::move(range); + } + +private: + size_t from; + size_t range_step; + size_t total_size; }; class ReadWriteBufferFromHTTP : public detail::ReadWriteBufferFromHTTPBase> @@ -631,7 +677,7 @@ class ReadWriteBufferFromHTTP : public detail::ReadWriteBufferFromHTTPBase>; public: - ReadWriteBufferFromHTTP( + ReadWriteBufferFromHTTP( Poco::URI uri_, const std::string & method_, OutStreamCallback out_stream_callback_, @@ -646,14 +692,117 @@ public: bool delay_initialization_ = true, bool use_external_buffer_ = false, bool skip_not_found_url_ = false) - : Parent(std::make_shared(uri_, timeouts, max_redirects), - uri_, credentials_, method_, out_stream_callback_, buffer_size_, - settings_, http_header_entries_, read_range_, remote_host_filter_, - delay_initialization_, use_external_buffer_, skip_not_found_url_) + : Parent( + std::make_shared(uri_, timeouts, max_redirects), + uri_, + credentials_, + method_, + out_stream_callback_, + buffer_size_, + settings_, + http_header_entries_, + read_range_, + remote_host_filter_, + delay_initialization_, + use_external_buffer_, + skip_not_found_url_) { } }; +class RangedReadWriteBufferFromHTTPFactory : public ParallelReadBuffer::ReadBufferFactory +{ + using OutStreamCallback = ReadWriteBufferFromHTTP::OutStreamCallback; + +public: + RangedReadWriteBufferFromHTTPFactory( + size_t total_object_size_, + size_t range_step_, + Poco::URI uri_, + std::string method_, + OutStreamCallback out_stream_callback_, + ConnectionTimeouts timeouts_, + const Poco::Net::HTTPBasicCredentials & credentials_, + UInt64 max_redirects_ = 0, + size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE, + ReadSettings settings_ = {}, + ReadWriteBufferFromHTTP::HTTPHeaderEntries http_header_entries_ = {}, + const RemoteHostFilter * remote_host_filter_ = nullptr, + bool delay_initialization_ = true, + bool use_external_buffer_ = false, + bool skip_not_found_url_ = false) + : range_generator(total_object_size_, range_step_) + , total_object_size(total_object_size_) + , range_step(range_step_) + , uri(uri_) + , method(std::move(method_)) + , out_stream_callback(out_stream_callback_) + , timeouts(std::move(timeouts_)) + , credentials(credentials_) + , max_redirects(max_redirects_) + , buffer_size(buffer_size_) + , settings(std::move(settings_)) + , http_header_entries(std::move(http_header_entries_)) + , remote_host_filter(remote_host_filter_) + , delay_initialization(delay_initialization_) + , use_external_buffer(use_external_buffer_) + , skip_not_found_url(skip_not_found_url_) + { + } + + SeekableReadBufferPtr getReader() override + { + const auto next_range = range_generator.nextRange(); + if (!next_range) + { + return nullptr; + } + + return std::make_shared( + uri, + method, + out_stream_callback, + timeouts, + credentials, + max_redirects, + buffer_size, + settings, + http_header_entries, + // HTTP Range has inclusive bounds, i.e. 
[from, to] + ReadWriteBufferFromHTTP::Range{next_range->first, next_range->second - 1}, + remote_host_filter, + delay_initialization, + use_external_buffer, + skip_not_found_url); + } + + off_t seek(off_t off, [[maybe_unused]] int whence) override + { + range_generator = RangeGenerator{total_object_size, range_step, static_cast(off)}; + return off; + } + + std::optional getTotalSize() override { return total_object_size; } + +private: + RangeGenerator range_generator; + size_t total_object_size; + size_t range_step; + Poco::URI uri; + std::string method; + OutStreamCallback out_stream_callback; + ConnectionTimeouts timeouts; + const Poco::Net::HTTPBasicCredentials & credentials; + UInt64 max_redirects; + size_t buffer_size; + ReadSettings settings; + ReadWriteBufferFromHTTP::HTTPHeaderEntries http_header_entries; + const RemoteHostFilter * remote_host_filter; + bool delay_initialization; + bool use_external_buffer; + bool skip_not_found_url; +}; + class UpdatablePooledSession : public UpdatableSessionBase { using Parent = UpdatableSessionBase; @@ -662,20 +811,14 @@ private: size_t per_endpoint_pool_size; public: - explicit UpdatablePooledSession(const Poco::URI uri, - const ConnectionTimeouts & timeouts_, - const UInt64 max_redirects_, - size_t per_endpoint_pool_size_) - : Parent(uri, timeouts_, max_redirects_) - , per_endpoint_pool_size { per_endpoint_pool_size_ } + explicit UpdatablePooledSession( + const Poco::URI uri, const ConnectionTimeouts & timeouts_, const UInt64 max_redirects_, size_t per_endpoint_pool_size_) + : Parent(uri, timeouts_, max_redirects_), per_endpoint_pool_size{per_endpoint_pool_size_} { session = makePooledHTTPSession(initial_uri, timeouts, per_endpoint_pool_size); } - void buildNewSession(const Poco::URI & uri) override - { - session = makePooledHTTPSession(uri, timeouts, per_endpoint_pool_size); - } + void buildNewSession(const Poco::URI & uri) override { session = makePooledHTTPSession(uri, timeouts, per_endpoint_pool_size); } }; class PooledReadWriteBufferFromHTTP : public detail::ReadWriteBufferFromHTTPBase> @@ -683,7 +826,8 @@ class PooledReadWriteBufferFromHTTP : public detail::ReadWriteBufferFromHTTPBase using Parent = detail::ReadWriteBufferFromHTTPBase>; public: - explicit PooledReadWriteBufferFromHTTP(Poco::URI uri_, + explicit PooledReadWriteBufferFromHTTP( + Poco::URI uri_, const std::string & method_ = {}, OutStreamCallback out_stream_callback_ = {}, const ConnectionTimeouts & timeouts_ = {}, @@ -691,12 +835,13 @@ public: size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE, const UInt64 max_redirects = 0, size_t max_connections_per_endpoint = DEFAULT_COUNT_OF_HTTP_CONNECTIONS_PER_ENDPOINT) - : Parent(std::make_shared(uri_, timeouts_, max_redirects, max_connections_per_endpoint), - uri_, - credentials_, - method_, - out_stream_callback_, - buffer_size_) + : Parent( + std::make_shared(uri_, timeouts_, max_redirects, max_connections_per_endpoint), + uri_, + credentials_, + method_, + out_stream_callback_, + buffer_size_) { } }; diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index fb9752ae391..181ac9aed7e 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -372,8 +372,8 @@ SetPtr makeExplicitSet( element_type = low_cardinality_type->getDictionaryType(); auto set_key = PreparedSetKey::forLiteral(*right_arg, set_element_types); - if (prepared_sets.count(set_key)) - return prepared_sets.at(set_key); /// Already prepared. 
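
The RangeGenerator introduced above hands out half-open ranges [from, to) of at most range_step bytes, and RangedReadWriteBufferFromHTTPFactory::getReader() converts each of them to the inclusive bounds an HTTP Range header expects by subtracting one from the upper end. A small standalone sketch of that arithmetic (the class is a simplified copy of the one in the diff; the printing in main() is illustrative only):

    #include <algorithm>
    #include <cstdio>
    #include <optional>
    #include <utility>

    // Simplified generator: yields [from, to) slices of at most range_step bytes.
    class RangeGenerator
    {
    public:
        using Range = std::pair<size_t, size_t>;

        RangeGenerator(size_t total_size_, size_t range_step_, size_t range_start = 0)
            : from(range_start), range_step(range_step_), total_size(total_size_) {}

        std::optional<Range> nextRange()
        {
            if (from >= total_size)
                return std::nullopt;
            size_t to = std::min(from + range_step, total_size);
            Range range{from, to};
            from = to;
            return range;
        }

    private:
        size_t from;
        size_t range_step;
        size_t total_size;
    };

    int main()
    {
        RangeGenerator generator(/*total_size*/ 25, /*range_step*/ 10);
        while (auto range = generator.nextRange())
        {
            // HTTP Range bounds are inclusive, hence the `- 1` on the upper end,
            // exactly as getReader() does when building ReadWriteBufferFromHTTP::Range.
            std::printf("half-open [%zu, %zu) -> header 'Range: bytes=%zu-%zu'\n",
                        range->first, range->second, range->first, range->second - 1);
        }
        // Prints [0,10), [10,20), [20,25) -> bytes=0-9, 10-19, 20-24.
    }
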
+ if (auto it = prepared_sets.find(set_key); it != prepared_sets.end()) + return it->second; /// Already prepared. Block block; const auto & right_arg_func = std::dynamic_pointer_cast(right_arg); @@ -388,7 +388,7 @@ SetPtr makeExplicitSet( set->insertFromBlock(block.getColumnsWithTypeAndName()); set->finishInsert(); - prepared_sets[set_key] = set; + prepared_sets.emplace(set_key, set); return set; } @@ -707,7 +707,7 @@ ASTs ActionsMatcher::doUntuple(const ASTFunction * function, ActionsMatcher::Dat if (tid != 0) tuple_ast = tuple_ast->clone(); - auto literal = std::make_shared(UInt64(++tid)); + auto literal = std::make_shared(UInt64{++tid}); visit(*literal, literal, data); auto func = makeASTFunction("tupleElement", tuple_ast, literal); @@ -814,14 +814,13 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & if (!data.only_consts) { /// We are in the part of the tree that we are not going to compute. You just need to define types. - /// Do not subquery and create sets. We replace "in*" function to "in*IgnoreSet". + /// Do not evaluate subquery and create sets. We replace "in*" function to "in*IgnoreSet". auto argument_name = node.arguments->children.at(0)->getColumnName(); - data.addFunction( - FunctionFactory::instance().get(node.name + "IgnoreSet", data.getContext()), - { argument_name, argument_name }, - column_name); + FunctionFactory::instance().get(node.name + "IgnoreSet", data.getContext()), + {argument_name, argument_name}, + column_name); } return; } @@ -1145,8 +1144,8 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su if (no_subqueries) return {}; auto set_key = PreparedSetKey::forSubquery(*right_in_operand); - if (data.prepared_sets.count(set_key)) - return data.prepared_sets.at(set_key); + if (auto it = data.prepared_sets.find(set_key); it != data.prepared_sets.end()) + return it->second; /// A special case is if the name of the table is specified on the right side of the IN statement, /// and the table has the type Set (a previously prepared set). @@ -1160,7 +1159,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su StorageSet * storage_set = dynamic_cast(table.get()); if (storage_set) { - data.prepared_sets[set_key] = storage_set->getSet(); + data.prepared_sets.emplace(set_key, storage_set->getSet()); return storage_set->getSet(); } } @@ -1174,7 +1173,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su /// If you already created a Set with the same subquery / table. 
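
The prepared_sets changes in this hunk replace the count()/at() pairs and operator[] with a single find() (plus emplace() for insertion), so each key is hashed and located once instead of twice. The same pattern in isolation, with a plain std::unordered_map standing in for PreparedSets (a sketch, not the ClickHouse types):

    #include <memory>
    #include <string>
    #include <unordered_map>

    struct Set {};                        // stand-in for DB::Set
    using SetPtr = std::shared_ptr<Set>;
    using PreparedSets = std::unordered_map<std::string, SetPtr>;

    SetPtr getOrBuildSet(PreparedSets & prepared_sets, const std::string & key)
    {
        // One lookup instead of count() followed by at().
        if (auto it = prepared_sets.find(key); it != prepared_sets.end())
            return it->second;            /// Already prepared.

        auto set = std::make_shared<Set>();
        // emplace() does not overwrite an existing entry and avoids the
        // default-construct-then-assign step that operator[] would perform.
        prepared_sets.emplace(key, set);
        return set;
    }

    int main()
    {
        PreparedSets sets;
        auto first = getOrBuildSet(sets, "x IN (1, 2, 3)");
        auto second = getOrBuildSet(sets, "x IN (1, 2, 3)");
        return first == second ? 0 : 1;   // the same set is reused on the second call
    }
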
if (subquery_for_set.set) { - data.prepared_sets[set_key] = subquery_for_set.set; + data.prepared_sets.emplace(set_key, subquery_for_set.set); return subquery_for_set.set; } @@ -1196,7 +1195,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su } subquery_for_set.set = set; - data.prepared_sets[set_key] = set; + data.prepared_sets.emplace(set_key, set); return set; } else diff --git a/src/Interpreters/ActionsVisitor.h b/src/Interpreters/ActionsVisitor.h index b6b67bac81c..342cc9eef9d 100644 --- a/src/Interpreters/ActionsVisitor.h +++ b/src/Interpreters/ActionsVisitor.h @@ -10,6 +10,7 @@ namespace DB { +class ASTExpressionList; class ASTFunction; class ExpressionActions; @@ -89,10 +90,7 @@ struct ScopeStack : WithContext void addColumn(ColumnWithTypeAndName column); void addAlias(const std::string & name, std::string alias); void addArrayJoin(const std::string & source_name, std::string result_name); - void addFunction( - const FunctionOverloadResolverPtr & function, - const Names & argument_names, - std::string result_name); + void addFunction(const FunctionOverloadResolverPtr & function, const Names & argument_names, std::string result_name); ActionsDAGPtr popLevel(); diff --git a/src/Interpreters/AsynchronousInsertQueue.cpp b/src/Interpreters/AsynchronousInsertQueue.cpp index c60ab0f6510..6102066f85b 100644 --- a/src/Interpreters/AsynchronousInsertQueue.cpp +++ b/src/Interpreters/AsynchronousInsertQueue.cpp @@ -32,6 +32,7 @@ namespace CurrentMetrics namespace ProfileEvents { extern const Event AsyncInsertQuery; + extern const Event AsyncInsertBytes; } namespace DB @@ -222,7 +223,9 @@ void AsynchronousInsertQueue::pushImpl(InsertData::EntryPtr entry, QueueIterator if (!data) data = std::make_unique(); - data->size += entry->bytes.size(); + size_t entry_data_size = entry->bytes.size(); + + data->size += entry_data_size; data->last_update = std::chrono::steady_clock::now(); data->entries.emplace_back(entry); @@ -239,6 +242,7 @@ void AsynchronousInsertQueue::pushImpl(InsertData::EntryPtr entry, QueueIterator CurrentMetrics::add(CurrentMetrics::PendingAsyncInsert); ProfileEvents::increment(ProfileEvents::AsyncInsertQuery); + ProfileEvents::increment(ProfileEvents::AsyncInsertBytes, entry_data_size); } void AsynchronousInsertQueue::waitForProcessingQuery(const String & query_id, const Milliseconds & timeout) diff --git a/src/Interpreters/CatBoostModel.cpp b/src/Interpreters/CatBoostModel.cpp index 1b6e30a0959..cffaa81c4f0 100644 --- a/src/Interpreters/CatBoostModel.cpp +++ b/src/Interpreters/CatBoostModel.cpp @@ -26,10 +26,10 @@ extern const int CANNOT_LOAD_CATBOOST_MODEL; extern const int CANNOT_APPLY_CATBOOST_MODEL; } - /// CatBoost wrapper interface functions. -struct CatBoostWrapperAPI +class CatBoostWrapperAPI { +public: using ModelCalcerHandle = void; ModelCalcerHandle * (* ModelCalcerCreate)(); // NOLINT @@ -68,9 +68,6 @@ struct CatBoostWrapperAPI }; -namespace -{ - class CatBoostModelHolder { private: @@ -84,7 +81,61 @@ public: }; -class CatBoostModelImpl : public ICatBoostModel +/// Holds CatBoost wrapper library and provides wrapper interface. 
+class CatBoostLibHolder +{ +public: + explicit CatBoostLibHolder(std::string lib_path_) : lib_path(std::move(lib_path_)), lib(lib_path) { initAPI(); } + + const CatBoostWrapperAPI & getAPI() const { return api; } + const std::string & getCurrentPath() const { return lib_path; } + +private: + CatBoostWrapperAPI api; + std::string lib_path; + SharedLibrary lib; + + void initAPI() + { + load(api.ModelCalcerCreate, "ModelCalcerCreate"); + load(api.ModelCalcerDelete, "ModelCalcerDelete"); + load(api.GetErrorString, "GetErrorString"); + load(api.LoadFullModelFromFile, "LoadFullModelFromFile"); + load(api.CalcModelPredictionFlat, "CalcModelPredictionFlat"); + load(api.CalcModelPrediction, "CalcModelPrediction"); + load(api.CalcModelPredictionWithHashedCatFeatures, "CalcModelPredictionWithHashedCatFeatures"); + load(api.GetStringCatFeatureHash, "GetStringCatFeatureHash"); + load(api.GetIntegerCatFeatureHash, "GetIntegerCatFeatureHash"); + load(api.GetFloatFeaturesCount, "GetFloatFeaturesCount"); + load(api.GetCatFeaturesCount, "GetCatFeaturesCount"); + tryLoad(api.CheckModelMetadataHasKey, "CheckModelMetadataHasKey"); + tryLoad(api.GetModelInfoValueSize, "GetModelInfoValueSize"); + tryLoad(api.GetModelInfoValue, "GetModelInfoValue"); + tryLoad(api.GetTreeCount, "GetTreeCount"); + tryLoad(api.GetDimensionsCount, "GetDimensionsCount"); + } + + template + void load(T& func, const std::string & name) { func = lib.get(name); } + + template + void tryLoad(T& func, const std::string & name) { func = lib.tryGet(name); } +}; + +std::shared_ptr getCatBoostWrapperHolder(const std::string & lib_path) +{ + static std::shared_ptr ptr; + static std::mutex mutex; + + std::lock_guard lock(mutex); + + if (!ptr || ptr->getCurrentPath() != lib_path) + ptr = std::make_shared(lib_path); + + return ptr; +} + +class CatBoostModelImpl { public: CatBoostModelImpl(const CatBoostWrapperAPI * api_, const std::string & model_path) : api(api_) @@ -92,13 +143,15 @@ public: handle = std::make_unique(api); if (!handle) { - std::string msg = "Cannot create CatBoost model: "; - throw Exception(msg + api->GetErrorString(), ErrorCodes::CANNOT_LOAD_CATBOOST_MODEL); + throw Exception(ErrorCodes::CANNOT_LOAD_CATBOOST_MODEL, + "Cannot create CatBoost model: {}", + api->GetErrorString()); } if (!api->LoadFullModelFromFile(handle->get(), model_path.c_str())) { - std::string msg = "Cannot load CatBoost model: "; - throw Exception(msg + api->GetErrorString(), ErrorCodes::CANNOT_LOAD_CATBOOST_MODEL); + throw Exception(ErrorCodes::CANNOT_LOAD_CATBOOST_MODEL, + "Cannot load CatBoost model: {}", + api->GetErrorString()); } float_features_count = api->GetFloatFeaturesCount(handle->get()); @@ -108,32 +161,22 @@ public: tree_count = api->GetDimensionsCount(handle->get()); } - ColumnPtr evaluate(const ColumnRawPtrs & columns) const override + ColumnPtr evaluate(const ColumnRawPtrs & columns) const { if (columns.empty()) - throw Exception("Got empty columns list for CatBoost model.", ErrorCodes::BAD_ARGUMENTS); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Got empty columns list for CatBoost model."); if (columns.size() != float_features_count + cat_features_count) - { - std::string msg; - { - WriteBufferFromString buffer(msg); - buffer << "Number of columns is different with number of features: "; - buffer << columns.size() << " vs " << float_features_count << " + " << cat_features_count; - } - throw Exception(msg, ErrorCodes::BAD_ARGUMENTS); - } + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Number of columns is different with number of features: 
columns size {} float features size {} + cat features size {}", + float_features_count, + cat_features_count); for (size_t i = 0; i < float_features_count; ++i) { if (!columns[i]->isNumeric()) { - std::string msg; - { - WriteBufferFromString buffer(msg); - buffer << "Column " << i << " should be numeric to make float feature."; - } - throw Exception(msg, ErrorCodes::BAD_ARGUMENTS); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Column {} should be numeric to make float feature.", i); } } @@ -142,16 +185,13 @@ public: { const auto * column = columns[i]; if (column->isNumeric()) + { cat_features_are_strings = false; + } else if (!(typeid_cast(column) || typeid_cast(column))) { - std::string msg; - { - WriteBufferFromString buffer(msg); - buffer << "Column " << i << " should be numeric or string."; - } - throw Exception(msg, ErrorCodes::BAD_ARGUMENTS); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Column {} should be numeric or string.", i); } } @@ -187,9 +227,9 @@ public: return ColumnTuple::create(std::move(mutable_columns)); } - size_t getFloatFeaturesCount() const override { return float_features_count; } - size_t getCatFeaturesCount() const override { return cat_features_count; } - size_t getTreeCount() const override { return tree_count; } + size_t getFloatFeaturesCount() const { return float_features_count; } + size_t getCatFeaturesCount() const { return cat_features_count; } + size_t getTreeCount() const { return tree_count; } private: std::unique_ptr handle; @@ -435,66 +475,6 @@ private: } }; - -/// Holds CatBoost wrapper library and provides wrapper interface. -class CatBoostLibHolder: public CatBoostWrapperAPIProvider -{ -public: - explicit CatBoostLibHolder(std::string lib_path_) : lib_path(std::move(lib_path_)), lib(lib_path) { initAPI(); } - - const CatBoostWrapperAPI & getAPI() const override { return api; } - const std::string & getCurrentPath() const { return lib_path; } - -private: - CatBoostWrapperAPI api; - std::string lib_path; - SharedLibrary lib; - - void initAPI(); - - template - void load(T& func, const std::string & name) { func = lib.get(name); } - - template - void tryLoad(T& func, const std::string & name) { func = lib.tryGet(name); } -}; - -void CatBoostLibHolder::initAPI() -{ - load(api.ModelCalcerCreate, "ModelCalcerCreate"); - load(api.ModelCalcerDelete, "ModelCalcerDelete"); - load(api.GetErrorString, "GetErrorString"); - load(api.LoadFullModelFromFile, "LoadFullModelFromFile"); - load(api.CalcModelPredictionFlat, "CalcModelPredictionFlat"); - load(api.CalcModelPrediction, "CalcModelPrediction"); - load(api.CalcModelPredictionWithHashedCatFeatures, "CalcModelPredictionWithHashedCatFeatures"); - load(api.GetStringCatFeatureHash, "GetStringCatFeatureHash"); - load(api.GetIntegerCatFeatureHash, "GetIntegerCatFeatureHash"); - load(api.GetFloatFeaturesCount, "GetFloatFeaturesCount"); - load(api.GetCatFeaturesCount, "GetCatFeaturesCount"); - tryLoad(api.CheckModelMetadataHasKey, "CheckModelMetadataHasKey"); - tryLoad(api.GetModelInfoValueSize, "GetModelInfoValueSize"); - tryLoad(api.GetModelInfoValue, "GetModelInfoValue"); - tryLoad(api.GetTreeCount, "GetTreeCount"); - tryLoad(api.GetDimensionsCount, "GetDimensionsCount"); -} - -std::shared_ptr getCatBoostWrapperHolder(const std::string & lib_path) -{ - static std::shared_ptr ptr; - static std::mutex mutex; - - std::lock_guard lock(mutex); - - if (!ptr || ptr->getCurrentPath() != lib_path) - ptr = std::make_shared(lib_path); - - return ptr; -} - -} - - CatBoostModel::CatBoostModel(std::string name_, std::string 
model_path_, std::string lib_path_, const ExternalLoadableLifetime & lifetime_) : name(std::move(name_)), model_path(std::move(model_path_)), lib_path(std::move(lib_path_)), lifetime(lifetime_) @@ -502,43 +482,28 @@ CatBoostModel::CatBoostModel(std::string name_, std::string model_path_, std::st api_provider = getCatBoostWrapperHolder(lib_path); api = &api_provider->getAPI(); model = std::make_unique(api, model_path); - float_features_count = model->getFloatFeaturesCount(); - cat_features_count = model->getCatFeaturesCount(); - tree_count = model->getTreeCount(); } -const ExternalLoadableLifetime & CatBoostModel::getLifetime() const -{ - return lifetime; -} - -bool CatBoostModel::isModified() const -{ - return true; -} - -std::shared_ptr CatBoostModel::clone() const -{ - return std::make_shared(name, model_path, lib_path, lifetime); -} +CatBoostModel::~CatBoostModel() = default; size_t CatBoostModel::getFloatFeaturesCount() const { - return float_features_count; + return model->getFloatFeaturesCount(); } size_t CatBoostModel::getCatFeaturesCount() const { - return cat_features_count; + return model->getCatFeaturesCount(); } size_t CatBoostModel::getTreeCount() const { - return tree_count; + return model->getTreeCount(); } DataTypePtr CatBoostModel::getReturnType() const { + size_t tree_count = getTreeCount(); auto type = std::make_shared(); if (tree_count == 1) return type; @@ -552,6 +517,7 @@ ColumnPtr CatBoostModel::evaluate(const ColumnRawPtrs & columns) const { if (!model) throw Exception("CatBoost model was not loaded.", ErrorCodes::LOGICAL_ERROR); + return model->evaluate(columns); } diff --git a/src/Interpreters/CatBoostModel.h b/src/Interpreters/CatBoostModel.h index 51bf0ba94f5..7bb1df92b67 100644 --- a/src/Interpreters/CatBoostModel.h +++ b/src/Interpreters/CatBoostModel.h @@ -8,47 +8,32 @@ namespace DB { -/// CatBoost wrapper interface functions. -struct CatBoostWrapperAPI; -class CatBoostWrapperAPIProvider -{ -public: - virtual ~CatBoostWrapperAPIProvider() = default; - virtual const CatBoostWrapperAPI & getAPI() const = 0; -}; - -/// CatBoost model interface. -class ICatBoostModel -{ -public: - virtual ~ICatBoostModel() = default; - /// Evaluate model. Use first `float_features_count` columns as float features, - /// the others `cat_features_count` as categorical features. - virtual ColumnPtr evaluate(const ColumnRawPtrs & columns) const = 0; - - virtual size_t getFloatFeaturesCount() const = 0; - virtual size_t getCatFeaturesCount() const = 0; - virtual size_t getTreeCount() const = 0; -}; +class CatBoostLibHolder; +class CatBoostWrapperAPI; +class CatBoostModelImpl; class IDataType; using DataTypePtr = std::shared_ptr; /// General ML model evaluator interface. -class IModel : public IExternalLoadable +class IMLModel : public IExternalLoadable { public: + IMLModel() = default; virtual ColumnPtr evaluate(const ColumnRawPtrs & columns) const = 0; virtual std::string getTypeName() const = 0; virtual DataTypePtr getReturnType() const = 0; + virtual ~IMLModel() override = default; }; -class CatBoostModel : public IModel +class CatBoostModel : public IMLModel { public: CatBoostModel(std::string name, std::string model_path, std::string lib_path, const ExternalLoadableLifetime & lifetime); + ~CatBoostModel() override; + ColumnPtr evaluate(const ColumnRawPtrs & columns) const override; std::string getTypeName() const override { return "catboost"; } @@ -59,29 +44,28 @@ public: /// IExternalLoadable interface. 
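
getCatBoostWrapperHolder() in the hunk above keeps a single process-wide CatBoostLibHolder behind a mutex and rebuilds it only when a different library path is requested. The same caching pattern in isolation (Resource stands in for CatBoostLibHolder, and the library paths in main() are made up; only the locking and path comparison are the point):

    #include <memory>
    #include <mutex>
    #include <string>

    // Stand-in for CatBoostLibHolder: an expensive-to-create resource tied to a path.
    class Resource
    {
    public:
        explicit Resource(std::string path_) : path(std::move(path_)) {}
        const std::string & getCurrentPath() const { return path; }

    private:
        std::string path;
    };

    std::shared_ptr<Resource> getResourceHolder(const std::string & lib_path)
    {
        // Function-local statics: one shared instance and one mutex for the whole process.
        static std::shared_ptr<Resource> ptr;
        static std::mutex mutex;

        std::lock_guard<std::mutex> lock(mutex);

        // Reuse the cached holder while the path matches; otherwise build a new one.
        // Callers that still hold the old shared_ptr keep it alive until they drop it.
        if (!ptr || ptr->getCurrentPath() != lib_path)
            ptr = std::make_shared<Resource>(lib_path);

        return ptr;
    }

    int main()
    {
        auto a = getResourceHolder("/usr/lib/libcatboost.so");
        auto b = getResourceHolder("/usr/lib/libcatboost.so");   // cached, same object
        auto c = getResourceHolder("/opt/other/libcatboost.so"); // path changed, rebuilt
        return (a == b && a != c) ? 0 : 1;
    }
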
- const ExternalLoadableLifetime & getLifetime() const override; + const ExternalLoadableLifetime & getLifetime() const override { return lifetime; } std::string getLoadableName() const override { return name; } bool supportUpdates() const override { return true; } - bool isModified() const override; + bool isModified() const override { return true; } - std::shared_ptr clone() const override; + std::shared_ptr clone() const override + { + return std::make_shared(name, model_path, lib_path, lifetime); + } private: const std::string name; std::string model_path; std::string lib_path; ExternalLoadableLifetime lifetime; - std::shared_ptr api_provider; + std::shared_ptr api_provider; const CatBoostWrapperAPI * api; - std::unique_ptr model; - - size_t float_features_count; - size_t cat_features_count; - size_t tree_count; + std::unique_ptr model; void init(); }; diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index c01b19d81de..1039fac6883 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -25,6 +25,7 @@ namespace ErrorCodes extern const int EXCESSIVE_ELEMENT_IN_CONFIG; extern const int LOGICAL_ERROR; extern const int SHARD_HAS_NO_CONNECTIONS; + extern const int NO_ELEMENTS_IN_CONFIG; extern const int SYNTAX_ERROR; } @@ -97,7 +98,6 @@ Cluster::Address::Address( , replica_index(replica_index_) { host_name = config.getString(config_prefix + ".host"); - port = static_cast(config.getInt(config_prefix + ".port")); if (config.has(config_prefix + ".user")) user_specified = true; @@ -106,7 +106,14 @@ Cluster::Address::Address( default_database = config.getString(config_prefix + ".default_database", ""); secure = ConfigHelper::getBool(config, config_prefix + ".secure", false, /* empty_as */true) ? Protocol::Secure::Enable : Protocol::Secure::Disable; priority = config.getInt(config_prefix + ".priority", 1); + const char * port_type = secure == Protocol::Secure::Enable ? "tcp_port_secure" : "tcp_port"; + auto default_port = config.getInt(port_type, 0); + + port = static_cast(config.getInt(config_prefix + ".port", default_port)); + if (!port) + throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "Port is not specified in cluster configuration: {}", config_prefix + ".port"); + is_local = isLocal(config.getInt(port_type, 0)); /// By default compression is disabled if address looks like localhost. 
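
The Cluster::Address change above stops reading the shard's `.port` unconditionally: when the shard does not define a port it now falls back to the server-wide `tcp_port` or `tcp_port_secure` (depending on the `secure` flag), and throws if neither is configured. A condensed sketch of that resolution order, with a plain map standing in for the Poco configuration object (the stand-in and getInt() helper are assumptions; the key names and error text follow the diff):

    #include <cstdint>
    #include <map>
    #include <stdexcept>
    #include <string>

    using Config = std::map<std::string, int>;   // stand-in for the XML configuration

    static int getInt(const Config & config, const std::string & key, int default_value)
    {
        auto it = config.find(key);
        return it == config.end() ? default_value : it->second;
    }

    static std::uint16_t resolvePort(const Config & config, const std::string & config_prefix, bool secure)
    {
        // The default comes from the server-wide port of the matching protocol.
        const char * port_type = secure ? "tcp_port_secure" : "tcp_port";
        int default_port = getInt(config, port_type, 0);

        // An explicit <port> inside the shard definition still wins.
        int port = getInt(config, config_prefix + ".port", default_port);
        if (!port)
            throw std::runtime_error("Port is not specified in cluster configuration: " + config_prefix + ".port");
        return static_cast<std::uint16_t>(port);
    }

    int main()
    {
        Config config{{"tcp_port", 9000}, {"tcp_port_secure", 9440}};
        // No <port> under the replica: falls back to tcp_port_secure because secure == true.
        return resolvePort(config, "remote_servers.my_cluster.shard.replica", /*secure*/ true) == 9440 ? 0 : 1;
    }
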
@@ -125,7 +132,9 @@ Cluster::Address::Address( bool secure_, Int64 priority_, UInt32 shard_index_, - UInt32 replica_index_) + UInt32 replica_index_, + String cluster_name_, + String cluster_secret_) : user(user_), password(password_) { bool can_be_local = true; @@ -157,6 +166,8 @@ Cluster::Address::Address( is_local = can_be_local && isLocal(clickhouse_port); shard_index = shard_index_; replica_index = replica_index_; + cluster = cluster_name_; + cluster_secret = cluster_secret_; } @@ -530,10 +541,14 @@ Cluster::Cluster( bool treat_local_as_remote, bool treat_local_port_as_remote, bool secure, - Int64 priority) + Int64 priority, + String cluster_name, + String cluster_secret) { UInt32 current_shard_num = 1; + secret = cluster_secret; + for (const auto & shard : names) { Addresses current; @@ -547,7 +562,9 @@ Cluster::Cluster( secure, priority, current_shard_num, - current.size() + 1); + current.size() + 1, + cluster_name, + cluster_secret); addresses_with_failover.emplace_back(current); @@ -683,6 +700,9 @@ Cluster::Cluster(Cluster::ReplicasAsShardsTag, const Cluster & from, const Setti } } + secret = from.secret; + name = from.name; + initMisc(); } @@ -697,6 +717,9 @@ Cluster::Cluster(Cluster::SubclusterTag, const Cluster & from, const std::vector addresses_with_failover.emplace_back(from.addresses_with_failover.at(index)); } + secret = from.secret; + name = from.name; + initMisc(); } diff --git a/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h index e9f26c21089..13f19f7c0ed 100644 --- a/src/Interpreters/Cluster.h +++ b/src/Interpreters/Cluster.h @@ -55,7 +55,9 @@ public: bool treat_local_as_remote, bool treat_local_port_as_remote, bool secure = false, - Int64 priority = 1); + Int64 priority = 1, + String cluster_name = "", + String cluster_secret = ""); Cluster(const Cluster &)= delete; Cluster & operator=(const Cluster &) = delete; @@ -127,7 +129,9 @@ public: bool secure_ = false, Int64 priority_ = 1, UInt32 shard_index_ = 0, - UInt32 replica_index_ = 0); + UInt32 replica_index_ = 0, + String cluster_name = "", + String cluster_secret_ = ""); /// Returns 'escaped_host_name:port' String toString() const; diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 3e414d5b6de..4d2cdf7dd2c 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -350,6 +350,12 @@ void DDLWorker::scheduleTasks(bool reinitialized) bool maybe_concurrently_deleting = task && !zookeeper->exists(fs::path(task->entry_path) / "active"); return task && !maybe_concurrently_deleting && !maybe_currently_processing; } + else if (last_skipped_entry_name.has_value() && !queue_fully_loaded_after_initialization_debug_helper) + { + /// If connection was lost during queue loading + /// we may start processing from finished task (because we don't know yet that it's finished) and it's ok. + return false; + } else { /// Return true if entry should not be scheduled. 
@@ -365,7 +371,11 @@ void DDLWorker::scheduleTasks(bool reinitialized) String reason; auto task = initAndCheckTask(entry_name, reason, zookeeper); - if (!task) + if (task) + { + queue_fully_loaded_after_initialization_debug_helper = true; + } + else { LOG_DEBUG(log, "Will not execute task {}: {}", entry_name, reason); updateMaxDDLEntryID(entry_name); diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index dbdf0e94f06..7cdbf880a2b 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -131,6 +131,9 @@ protected: std::optional first_failed_task_name; std::list current_tasks; + /// This flag is needed for debug assertions only + bool queue_fully_loaded_after_initialization_debug_helper = false; + Coordination::Stat queue_node_stat; std::shared_ptr queue_updated_event = std::make_shared(); std::shared_ptr cleanup_event = std::make_shared(); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 7e150f59694..5877ca35392 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -100,20 +100,9 @@ bool checkPositionalArguments(ASTPtr & argument, const ASTSelectQuery * select_q { auto columns = select_query->select()->children; - const auto * group_by_expr_with_alias = dynamic_cast(argument.get()); - if (group_by_expr_with_alias && !group_by_expr_with_alias->alias.empty()) - { - for (const auto & column : columns) - { - const auto * col_with_alias = dynamic_cast(column.get()); - if (col_with_alias) - { - const auto & alias = col_with_alias->alias; - if (!alias.empty() && alias == group_by_expr_with_alias->alias) - return false; - } - } - } + const auto * expr_with_alias = dynamic_cast(argument.get()); + if (expr_with_alias && !expr_with_alias->alias.empty()) + return false; const auto * ast_literal = typeid_cast(argument.get()); if (!ast_literal) @@ -130,7 +119,7 @@ bool checkPositionalArguments(ASTPtr & argument, const ASTSelectQuery * select_q pos, columns.size()); const auto & column = columns[--pos]; - if (typeid_cast(column.get())) + if (typeid_cast(column.get()) || typeid_cast(column.get())) { argument = column->clone(); } @@ -259,7 +248,7 @@ NamesAndTypesList ExpressionAnalyzer::getColumnsAfterArrayJoin(ActionsDAGPtr & a if (!array_join_expression_list) return src_columns; - getRootActionsNoMakeSet(array_join_expression_list, true, actions, false); + getRootActionsNoMakeSet(array_join_expression_list, actions, false); auto array_join = addMultipleArrayJoinAction(actions, is_array_join_left); auto sample_columns = actions->getResultColumns(); @@ -294,7 +283,7 @@ NamesAndTypesList ExpressionAnalyzer::analyzeJoin(ActionsDAGPtr & actions, const const ASTTablesInSelectQueryElement * join = select_query->join(); if (join) { - getRootActionsNoMakeSet(analyzedJoin().leftKeysList(), true, actions, false); + getRootActionsNoMakeSet(analyzedJoin().leftKeysList(), actions, false); auto sample_columns = actions->getNamesAndTypesList(); syntax->analyzed_join->addJoinedColumnsAndCorrectTypes(sample_columns, true); actions = std::make_shared(sample_columns); @@ -332,14 +321,14 @@ void ExpressionAnalyzer::analyzeAggregation(ActionsDAGPtr & temp_actions) { NameSet unique_keys; ASTs & group_asts = group_by_ast->children; - for (ssize_t i = 0; i < ssize_t(group_asts.size()); ++i) + for (ssize_t i = 0; i < static_cast(group_asts.size()); ++i) { ssize_t size = group_asts.size(); if (getContext()->getSettingsRef().enable_positional_arguments) 
replaceForPositionalArguments(group_asts[i], select_query, ASTSelectQuery::Expression::GROUP_BY); - getRootActionsNoMakeSet(group_asts[i], true, temp_actions, false); + getRootActionsNoMakeSet(group_asts[i], temp_actions, false); const auto & column_name = group_asts[i]->getColumnName(); @@ -405,8 +394,8 @@ void ExpressionAnalyzer::initGlobalSubqueriesAndExternalTables(bool do_global) { if (do_global) { - GlobalSubqueriesVisitor::Data subqueries_data(getContext(), subquery_depth, isRemoteStorage(), - external_tables, subqueries_for_sets, has_global_subqueries); + GlobalSubqueriesVisitor::Data subqueries_data( + getContext(), subquery_depth, isRemoteStorage(), external_tables, subqueries_for_sets, has_global_subqueries); GlobalSubqueriesVisitor(subqueries_data).visit(query); } } @@ -416,7 +405,7 @@ void ExpressionAnalyzer::tryMakeSetForIndexFromSubquery(const ASTPtr & subquery_ { auto set_key = PreparedSetKey::forSubquery(*subquery_or_table_name); - if (prepared_sets.count(set_key)) + if (prepared_sets.contains(set_key)) return; /// Already prepared. if (auto set_ptr_from_storage_set = isPlainStorageSetInSubquery(subquery_or_table_name)) @@ -509,33 +498,62 @@ void SelectQueryExpressionAnalyzer::makeSetsForIndex(const ASTPtr & node) } -void ExpressionAnalyzer::getRootActions(const ASTPtr & ast, bool no_subqueries, ActionsDAGPtr & actions, bool only_consts) +void ExpressionAnalyzer::getRootActions(const ASTPtr & ast, bool no_makeset_for_subqueries, ActionsDAGPtr & actions, bool only_consts) { LogAST log; - ActionsVisitor::Data visitor_data(getContext(), settings.size_limits_for_set, subquery_depth, - sourceColumns(), std::move(actions), prepared_sets, subqueries_for_sets, - no_subqueries, false, only_consts, !isRemoteStorage()); + ActionsVisitor::Data visitor_data( + getContext(), + settings.size_limits_for_set, + subquery_depth, + sourceColumns(), + std::move(actions), + prepared_sets, + subqueries_for_sets, + no_makeset_for_subqueries, + false /* no_makeset */, + only_consts, + !isRemoteStorage() /* create_source_for_in */); ActionsVisitor(visitor_data, log.stream()).visit(ast); actions = visitor_data.getActions(); } -void ExpressionAnalyzer::getRootActionsNoMakeSet(const ASTPtr & ast, bool no_subqueries, ActionsDAGPtr & actions, bool only_consts) +void ExpressionAnalyzer::getRootActionsNoMakeSet(const ASTPtr & ast, ActionsDAGPtr & actions, bool only_consts) { LogAST log; - ActionsVisitor::Data visitor_data(getContext(), settings.size_limits_for_set, subquery_depth, - sourceColumns(), std::move(actions), prepared_sets, subqueries_for_sets, - no_subqueries, true, only_consts, !isRemoteStorage()); + ActionsVisitor::Data visitor_data( + getContext(), + settings.size_limits_for_set, + subquery_depth, + sourceColumns(), + std::move(actions), + prepared_sets, + subqueries_for_sets, + true /* no_makeset_for_subqueries, no_makeset implies no_makeset_for_subqueries */, + true /* no_makeset */, + only_consts, + !isRemoteStorage() /* create_source_for_in */); ActionsVisitor(visitor_data, log.stream()).visit(ast); actions = visitor_data.getActions(); } -void ExpressionAnalyzer::getRootActionsForHaving(const ASTPtr & ast, bool no_subqueries, ActionsDAGPtr & actions, bool only_consts) + +void ExpressionAnalyzer::getRootActionsForHaving( + const ASTPtr & ast, bool no_makeset_for_subqueries, ActionsDAGPtr & actions, bool only_consts) { LogAST log; - ActionsVisitor::Data visitor_data(getContext(), settings.size_limits_for_set, subquery_depth, - sourceColumns(), std::move(actions), prepared_sets, 
subqueries_for_sets, - no_subqueries, false, only_consts, true); + ActionsVisitor::Data visitor_data( + getContext(), + settings.size_limits_for_set, + subquery_depth, + sourceColumns(), + std::move(actions), + prepared_sets, + subqueries_for_sets, + no_makeset_for_subqueries, + false /* no_makeset */, + only_consts, + true /* create_source_for_in */); ActionsVisitor(visitor_data, log.stream()).visit(ast); actions = visitor_data.getActions(); } @@ -547,7 +565,7 @@ void ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAGPtr & actions, Aggr { AggregateDescription aggregate; if (node->arguments) - getRootActionsNoMakeSet(node->arguments, true, actions); + getRootActionsNoMakeSet(node->arguments, actions); aggregate.column_name = node->getColumnName(); @@ -746,8 +764,7 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions) // Requiring a constant reference to a shared pointer to non-const AST // doesn't really look sane, but the visitor does indeed require it. // Hence we clone the node (not very sane either, I know). - getRootActionsNoMakeSet(window_function.function_node->clone(), - true, actions); + getRootActionsNoMakeSet(window_function.function_node->clone(), actions); const ASTs & arguments = window_function.function_node->arguments->children; @@ -867,8 +884,7 @@ ArrayJoinActionPtr SelectQueryExpressionAnalyzer::appendArrayJoin(ExpressionActi auto array_join = addMultipleArrayJoinAction(step.actions(), is_array_join_left); before_array_join = chain.getLastActions(); - chain.steps.push_back(std::make_unique( - array_join, step.getResultColumns())); + chain.steps.push_back(std::make_unique(array_join, step.getResultColumns())); chain.addStep(); @@ -1099,8 +1115,8 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere( } } - chain.steps.emplace_back(std::make_unique( - std::make_shared(std::move(columns)))); + chain.steps.emplace_back( + std::make_unique(std::make_shared(std::move(columns)))); chain.steps.back()->additional_input = std::move(unused_source_columns); chain.getLastActions(); chain.addStep(); @@ -1210,8 +1226,7 @@ void SelectQueryExpressionAnalyzer::appendWindowFunctionsArguments( // recursively together with (1b) as ASTFunction::window_definition. if (getSelectQuery()->window()) { - getRootActionsNoMakeSet(getSelectQuery()->window(), - true /* no_subqueries */, step.actions()); + getRootActionsNoMakeSet(getSelectQuery()->window(), step.actions()); } for (const auto & [_, w] : window_descriptions) @@ -1222,8 +1237,7 @@ void SelectQueryExpressionAnalyzer::appendWindowFunctionsArguments( // definitions (1a). // Requiring a constant reference to a shared pointer to non-const AST // doesn't really look sane, but the visitor does indeed require it. - getRootActionsNoMakeSet(f.function_node->clone(), - true /* no_subqueries */, step.actions()); + getRootActionsNoMakeSet(f.function_node->clone(), step.actions()); // (2b) Required function argument columns. 
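
The getRootActions* call sites above are reformatted so that every positional bool passed to ActionsVisitor::Data carries an inline /* name */ comment (no_makeset, only_consts, create_source_for_in, ...), which is what keeps a call with several adjacent booleans readable. The same convention in miniature (buildActions is an invented function; the flag names are taken from the calls in the diff):

    #include <cstdio>

    // A signature with several positional flags, similar in shape to ActionsVisitor::Data.
    static void buildActions(bool no_makeset_for_subqueries, bool no_makeset, bool only_consts, bool create_source_for_in)
    {
        std::printf("%d %d %d %d\n", no_makeset_for_subqueries, no_makeset, only_consts, create_source_for_in);
    }

    int main()
    {
        // Without labels the call is opaque: buildActions(true, true, false, true);
        buildActions(
            true /* no_makeset_for_subqueries, implied by no_makeset */,
            true /* no_makeset */,
            false /* only_consts */,
            true /* create_source_for_in */);
    }
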
for (const auto & a : f.function_node->arguments->children) @@ -1299,7 +1313,9 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendOrderBy(ExpressionActionsChai throw Exception("Bad ORDER BY expression AST", ErrorCodes::UNKNOWN_TYPE_OF_AST_NODE); if (getContext()->getSettingsRef().enable_positional_arguments) + { replaceForPositionalArguments(ast->children.at(0), select_query, ASTSelectQuery::Expression::ORDER_BY); + } } getRootActions(select_query->orderBy(), only_types, step.actions()); @@ -1456,7 +1472,7 @@ ActionsDAGPtr ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool project_r alias = name; result_columns.emplace_back(name, alias); result_names.push_back(alias); - getRootActions(ast, false, actions_dag); + getRootActions(ast, false /* no_makeset_for_subqueries */, actions_dag); } if (add_aliases) @@ -1496,7 +1512,7 @@ ExpressionActionsPtr ExpressionAnalyzer::getConstActions(const ColumnsWithTypeAn { auto actions = std::make_shared(constant_inputs); - getRootActions(query, true, actions, true); + getRootActions(query, true /* no_makeset_for_subqueries */, actions, true /* only_consts */); return std::make_shared(actions, ExpressionActionsSettings::fromContext(getContext())); } @@ -1513,13 +1529,13 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::simpleSelectActions() } ExpressionAnalysisResult::ExpressionAnalysisResult( - SelectQueryExpressionAnalyzer & query_analyzer, - const StorageMetadataPtr & metadata_snapshot, - bool first_stage_, - bool second_stage_, - bool only_types, - const FilterDAGInfoPtr & filter_info_, - const Block & source_header) + SelectQueryExpressionAnalyzer & query_analyzer, + const StorageMetadataPtr & metadata_snapshot, + bool first_stage_, + bool second_stage_, + bool only_types, + const FilterDAGInfoPtr & filter_info_, + const Block & source_header) : first_stage(first_stage_) , second_stage(second_stage_) , need_aggregate(query_analyzer.hasAggregation()) diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index b6bb3c5fad5..5dcbdc2486b 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -172,15 +172,15 @@ protected: ArrayJoinActionPtr addMultipleArrayJoinAction(ActionsDAGPtr & actions, bool is_left) const; - void getRootActions(const ASTPtr & ast, bool no_subqueries, ActionsDAGPtr & actions, bool only_consts = false); + void getRootActions(const ASTPtr & ast, bool no_makeset_for_subqueries, ActionsDAGPtr & actions, bool only_consts = false); /** Similar to getRootActions but do not make sets when analyzing IN functions. It's used in * analyzeAggregation which happens earlier than analyzing PREWHERE and WHERE. If we did, the * prepared sets would not be applicable for MergeTree index optimization. 
*/ - void getRootActionsNoMakeSet(const ASTPtr & ast, bool no_subqueries, ActionsDAGPtr & actions, bool only_consts = false); + void getRootActionsNoMakeSet(const ASTPtr & ast, ActionsDAGPtr & actions, bool only_consts = false); - void getRootActionsForHaving(const ASTPtr & ast, bool no_subqueries, ActionsDAGPtr & actions, bool only_consts = false); + void getRootActionsForHaving(const ASTPtr & ast, bool no_makeset_for_subqueries, ActionsDAGPtr & actions, bool only_consts = false); /** Add aggregation keys to aggregation_keys, aggregate functions to aggregate_descriptions, * Create a set of columns aggregated_columns resulting after the aggregation, if any, diff --git a/src/Interpreters/ExternalModelsLoader.h b/src/Interpreters/ExternalModelsLoader.h index 18e1f1123f6..042906bee9e 100644 --- a/src/Interpreters/ExternalModelsLoader.h +++ b/src/Interpreters/ExternalModelsLoader.h @@ -15,14 +15,14 @@ namespace DB class ExternalModelsLoader : public ExternalLoader, WithContext { public: - using ModelPtr = std::shared_ptr; + using ModelPtr = std::shared_ptr; /// Models will be loaded immediately and then will be updated in separate thread, each 'reload_period' seconds. explicit ExternalModelsLoader(ContextPtr context_); ModelPtr getModel(const std::string & model_name) const { - return std::static_pointer_cast(load(model_name)); + return std::static_pointer_cast(load(model_name)); } void reloadModel(const std::string & model_name) const diff --git a/src/Interpreters/GlobalSubqueriesVisitor.h b/src/Interpreters/GlobalSubqueriesVisitor.h index 5d2df583b9e..50ce7977534 100644 --- a/src/Interpreters/GlobalSubqueriesVisitor.h +++ b/src/Interpreters/GlobalSubqueriesVisitor.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -17,7 +18,11 @@ #include #include #include +#include #include +#include +#include +#include namespace DB { @@ -34,7 +39,6 @@ public: { size_t subquery_depth; bool is_remote; - size_t external_table_id; TemporaryTablesMapping & external_tables; SubqueriesForSets & subqueries_for_sets; bool & has_global_subqueries; @@ -49,7 +53,6 @@ public: : WithContext(context_) , subquery_depth(subquery_depth_) , is_remote(is_remote_) - , external_table_id(1) , external_tables(tables) , subqueries_for_sets(subqueries_for_sets_) , has_global_subqueries(has_global_subqueries_) @@ -92,48 +95,33 @@ public: { /// If this is already an external table, you do not need to add anything. Just remember its presence. auto temporary_table_name = getIdentifierName(subquery_or_table_name); - bool exists_in_local_map = external_tables.end() != external_tables.find(temporary_table_name); + bool exists_in_local_map = external_tables.contains(temporary_table_name); bool exists_in_context = static_cast(getContext()->tryResolveStorageID( StorageID("", temporary_table_name), Context::ResolveExternal)); if (exists_in_local_map || exists_in_context) return; } - String external_table_name = subquery_or_table_name->tryGetAlias(); - if (external_table_name.empty()) + String alias = subquery_or_table_name->tryGetAlias(); + String external_table_name; + if (alias.empty()) { - /// Generate the name for the external table. 
- external_table_name = "_data" + toString(external_table_id); - while (external_tables.count(external_table_name)) - { - ++external_table_id; - external_table_name = "_data" + toString(external_table_id); - } + auto hash = subquery_or_table_name->getTreeHash(); + external_table_name = fmt::format("_data_{}_{}", hash.first, hash.second); } - - auto interpreter = interpretSubquery(subquery_or_table_name, getContext(), subquery_depth, {}); - - Block sample = interpreter->getSampleBlock(); - NamesAndTypesList columns = sample.getNamesAndTypesList(); - - auto external_storage_holder = std::make_shared( - getContext(), - ColumnsDescription{columns}, - ConstraintsDescription{}, - nullptr, - /*create_for_global_subquery*/ true); - StoragePtr external_storage = external_storage_holder->getTable(); + else + external_table_name = alias; /** We replace the subquery with the name of the temporary table. * It is in this form, the request will go to the remote server. * This temporary table will go to the remote server, and on its side, * instead of doing a subquery, you just need to read it. + * TODO We can do better than using alias to name external tables */ auto database_and_table_name = std::make_shared(external_table_name); if (set_alias) { - String alias = subquery_or_table_name->tryGetAlias(); if (auto * table_name = subquery_or_table_name->as()) if (alias.empty()) alias = table_name->shortName(); @@ -151,8 +139,27 @@ public: else ast = database_and_table_name; - external_tables[external_table_name] = external_storage_holder; + if (external_tables.contains(external_table_name)) + return; + auto interpreter = interpretSubquery(subquery_or_table_name, getContext(), subquery_depth, {}); + + Block sample = interpreter->getSampleBlock(); + NamesAndTypesList columns = sample.getNamesAndTypesList(); + + auto external_storage_holder = std::make_shared( + getContext(), + ColumnsDescription{columns}, + ConstraintsDescription{}, + nullptr, + /*create_for_global_subquery*/ true); + StoragePtr external_storage = external_storage_holder->getTable(); + + external_tables.emplace(external_table_name, external_storage_holder); + + /// We need to materialize external tables immediately because reading from distributed + /// tables might generate local plans which can refer to external tables during index + /// analysis. It's too late to populate the external table via CreatingSetsTransform. if (getContext()->getSettingsRef().use_index_for_in_with_subqueries) { auto external_table = external_storage_holder->getTable(); diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index c56529b3214..00568cfdf08 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -962,18 +962,29 @@ public: /// If it's joinGetOrNull, we need to wrap not-nullable columns in StorageJoin. 
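
The GlobalSubqueriesVisitor change above stops numbering external tables sequentially (_data1, _data2, ...) and instead derives the name from the subquery's AST tree hash, so the same subquery always maps to the same temporary table name and duplicates are caught by external_tables.contains(). A standalone sketch of that naming scheme, with std::hash over the query text standing in for IAST::getTreeHash() (an assumption for illustration; the real call returns a pair of 64-bit hashes computed over the AST):

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <string>
    #include <unordered_map>
    #include <utility>

    using TreeHash = std::pair<std::uint64_t, std::uint64_t>;

    // Stand-in for IAST::getTreeHash(): the real one hashes the AST, not the text.
    static TreeHash getTreeHash(const std::string & normalized_query)
    {
        std::uint64_t h1 = std::hash<std::string>{}(normalized_query);
        std::uint64_t h2 = std::hash<std::string>{}(normalized_query + "#salt");
        return {h1, h2};
    }

    static std::string externalTableName(const std::string & alias, const std::string & normalized_query)
    {
        if (!alias.empty())
            return alias;                 // an explicit alias still wins, as in the diff
        auto hash = getTreeHash(normalized_query);
        char buf[64];
        std::snprintf(buf, sizeof(buf), "_data_%llu_%llu",
                      static_cast<unsigned long long>(hash.first),
                      static_cast<unsigned long long>(hash.second));
        return buf;
    }

    int main()
    {
        std::unordered_map<std::string, int> external_tables;   // stand-in for TemporaryTablesMapping
        // The same subquery produces the same name, so the second emplace is a no-op.
        external_tables.emplace(externalTableName("", "SELECT id FROM source"), 1);
        external_tables.emplace(externalTableName("", "SELECT id FROM source"), 2);
        std::printf("%zu\n", external_tables.size());            // prints 1
    }
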
for (size_t j = 0, size = right_indexes.size(); j < size; ++j) { - const auto & column = *block.getByPosition(right_indexes[j]).column; - if (auto * nullable_col = typeid_cast(columns[j].get()); nullable_col && !column.isNullable()) - nullable_col->insertFromNotNullable(column, row_num); + auto column_from_block = block.getByPosition(right_indexes[j]); + if (type_name[j].type->lowCardinality() != column_from_block.type->lowCardinality()) + { + JoinCommon::changeLowCardinalityInplace(column_from_block); + } + + if (auto * nullable_col = typeid_cast(columns[j].get()); + nullable_col && !column_from_block.column->isNullable()) + nullable_col->insertFromNotNullable(*column_from_block.column, row_num); else - columns[j]->insertFrom(column, row_num); + columns[j]->insertFrom(*column_from_block.column, row_num); } } else { for (size_t j = 0, size = right_indexes.size(); j < size; ++j) { - columns[j]->insertFrom(*block.getByPosition(right_indexes[j]).column, row_num); + auto column_from_block = block.getByPosition(right_indexes[j]); + if (type_name[j].type->lowCardinality() != column_from_block.type->lowCardinality()) + { + JoinCommon::changeLowCardinalityInplace(column_from_block); + } + columns[j]->insertFrom(*column_from_block.column, row_num); } } } @@ -1013,6 +1024,7 @@ private: void addColumn(const ColumnWithTypeAndName & src_column, const std::string & qualified_name) { + columns.push_back(src_column.column->cloneEmpty()); columns.back()->reserve(src_column.column->size()); type_name.emplace_back(src_column.type, src_column.name, qualified_name); @@ -1237,16 +1249,16 @@ NO_INLINE IColumn::Filter joinRightColumns( { const IColumn & left_asof_key = added_columns.leftAsofKey(); - auto [block, row_num] = mapped->findAsof(left_asof_key, i); - if (block) + auto row_ref = mapped->findAsof(left_asof_key, i); + if (row_ref.block) { setUsed(filter, i); if constexpr (multiple_disjuncts) - used_flags.template setUsed(block, row_num, 0); + used_flags.template setUsed(row_ref.block, row_ref.row_num, 0); else used_flags.template setUsed(find_result); - added_columns.appendFromBlock(*block, row_num); + added_columns.appendFromBlock(*row_ref.block, row_ref.row_num); } else addNotFoundRow(added_columns, current_offset); diff --git a/src/Interpreters/InterpreterBackupQuery.cpp b/src/Interpreters/InterpreterBackupQuery.cpp index 45eb8e48599..01970bc5cc2 100644 --- a/src/Interpreters/InterpreterBackupQuery.cpp +++ b/src/Interpreters/InterpreterBackupQuery.cpp @@ -1,10 +1,12 @@ #include +#include +#include +#include #include #include #include -#include -#include -#include +#include +#include #include @@ -12,40 +14,43 @@ namespace DB { namespace { - BackupMutablePtr createBackup(const ASTBackupQuery & query, const ContextPtr & context) + BackupMutablePtr createBackup(const BackupInfo & backup_info, const BackupSettings & backup_settings, const ContextPtr & context) { BackupFactory::CreateParams params; - params.open_mode = (query.kind == ASTBackupQuery::BACKUP) ? 
IBackup::OpenMode::WRITE : IBackup::OpenMode::READ; + params.open_mode = IBackup::OpenMode::WRITE; params.context = context; - - params.backup_info = BackupInfo::fromAST(*query.backup_name); - if (query.base_backup_name) - params.base_backup_info = BackupInfo::fromAST(*query.base_backup_name); - + params.backup_info = backup_info; + params.base_backup_info = backup_settings.base_backup_info; + params.compression_method = backup_settings.compression_method; + params.compression_level = backup_settings.compression_level; + params.password = backup_settings.password; return BackupFactory::instance().createBackup(params); } -#if 0 - void getBackupSettings(const ASTBackupQuery & query, BackupSettings & settings, std::optional & base_backup) + BackupMutablePtr openBackup(const BackupInfo & backup_info, const RestoreSettings & restore_settings, const ContextPtr & context) { - settings = {}; - if (query.settings) - settings.applyChanges(query.settings->as().changes); - return settings; + BackupFactory::CreateParams params; + params.open_mode = IBackup::OpenMode::READ; + params.context = context; + params.backup_info = backup_info; + params.base_backup_info = restore_settings.base_backup_info; + params.password = restore_settings.password; + return BackupFactory::instance().createBackup(params); } -#endif - void executeBackup(const ASTBackupQuery & query, const ContextPtr & context) + void executeBackup(const ContextPtr & context, const ASTBackupQuery & query) { - BackupMutablePtr backup = createBackup(query, context); - auto backup_entries = makeBackupEntries(query.elements, context); + auto backup_settings = BackupSettings::fromBackupQuery(query); + BackupMutablePtr backup = createBackup(BackupInfo::fromAST(*query.backup_name), backup_settings, context); + auto backup_entries = makeBackupEntries(context, query.elements, backup_settings); writeBackupEntries(backup, std::move(backup_entries), context->getSettingsRef().max_backup_threads); } - void executeRestore(const ASTBackupQuery & query, ContextMutablePtr context) + void executeRestore(ContextMutablePtr context, const ASTBackupQuery & query) { - BackupPtr backup = createBackup(query, context); - auto restore_tasks = makeRestoreTasks(query.elements, context, backup); + auto restore_settings = RestoreSettings::fromRestoreQuery(query); + BackupPtr backup = openBackup(BackupInfo::fromAST(*query.backup_name), restore_settings, context); + auto restore_tasks = makeRestoreTasks(context, backup, query.elements, restore_settings); executeRestoreTasks(std::move(restore_tasks), context->getSettingsRef().max_backup_threads); } } @@ -54,9 +59,9 @@ BlockIO InterpreterBackupQuery::execute() { const auto & query = query_ptr->as(); if (query.kind == ASTBackupQuery::BACKUP) - executeBackup(query, context); + executeBackup(context, query); else if (query.kind == ASTBackupQuery::RESTORE) - executeRestore(query, context); + executeRestore(context, query); return {}; } diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index f7dbd1c8b65..d8923b3cc42 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -1180,11 +1180,10 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create, /// old instance of the storage. For example, AsynchronousMetrics may cause ATTACH to fail, /// so we allow waiting here. If database_atomic_wait_for_drop_and_detach_synchronously is disabled /// and old storage instance still exists it will throw exception. 
- bool throw_if_table_in_use = getContext()->getSettingsRef().database_atomic_wait_for_drop_and_detach_synchronously; - if (throw_if_table_in_use) - database->checkDetachedTableNotInUse(create.uuid); - else + if (getContext()->getSettingsRef().database_atomic_wait_for_drop_and_detach_synchronously) database->waitDetachedTableNotInUse(create.uuid); + else + database->checkDetachedTableNotInUse(create.uuid); } StoragePtr res; diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index ce0929f9c6e..27ed8438fc8 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -359,6 +359,7 @@ InterpreterSelectQuery::InterpreterSelectQuery( table_lock.reset(); table_id = StorageID::createEmpty(); metadata_snapshot = nullptr; + storage_snapshot = nullptr; } } @@ -1241,10 +1242,6 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

hasGlobalSubqueries() && !subqueries_for_sets.empty()) - executeSubqueriesInSetsAndJoins(query_plan, subqueries_for_sets); } if (expressions.second_stage || from_aggregation_stage) @@ -1427,7 +1424,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

hasGlobalSubqueries())) + if (!subqueries_for_sets.empty()) executeSubqueriesInSetsAndJoins(query_plan, subqueries_for_sets); } @@ -1891,7 +1888,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc && limit_length <= std::numeric_limits::max() - limit_offset && limit_length + limit_offset < max_block_size) { - max_block_size = std::max(UInt64(1), limit_length + limit_offset); + max_block_size = std::max(UInt64{1}, limit_length + limit_offset); max_threads_execute_query = max_streams = 1; } @@ -2577,11 +2574,11 @@ void InterpreterSelectQuery::executeExtremes(QueryPlan & query_plan) void InterpreterSelectQuery::executeSubqueriesInSetsAndJoins(QueryPlan & query_plan, SubqueriesForSets & subqueries_for_sets) { - const auto & input_order_info = query_info.input_order_info - ? query_info.input_order_info - : (query_info.projection ? query_info.projection->input_order_info : nullptr); - if (input_order_info) - executeMergeSorted(query_plan, input_order_info->order_key_prefix_descr, 0, "before creating sets for subqueries and joins"); + // const auto & input_order_info = query_info.input_order_info + // ? query_info.input_order_info + // : (query_info.projection ? query_info.projection->input_order_info : nullptr); + // if (input_order_info) + // executeMergeSorted(query_plan, input_order_info->order_key_prefix_descr, 0, "before creating sets for subqueries and joins"); const Settings & settings = context->getSettingsRef(); diff --git a/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h index c90c271679c..0994f34d003 100644 --- a/src/Interpreters/ProcessList.h +++ b/src/Interpreters/ProcessList.h @@ -351,15 +351,6 @@ public: max_size = max_size_; } - // Before calling this method you should be sure - // that lock is acquired. - template - void processEachQueryStatus(F && func) const - { - for (auto && query : processes) - func(query); - } - void setMaxInsertQueriesAmount(size_t max_insert_queries_amount_) { std::lock_guard lock(mutex); diff --git a/src/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp index 39fc7965eb2..2b791f5a189 100644 --- a/src/Interpreters/RowRefs.cpp +++ b/src/Interpreters/RowRefs.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include @@ -44,38 +45,52 @@ class SortedLookupVector : public SortedLookupVectorBase { struct Entry { - /// We don't store a RowRef and instead keep it's members separately (and return a tuple) to reduce the memory usage. - /// For example, for sizeof(T) == 4 => sizeof(Entry) == 16 (while before it would be 20). 
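The RowRefs.cpp hunk here replaces the old Entry, which carried the block pointer and row number alongside the ASOF key, with a compact {key, 32-bit index} pair plus a separate array of row references, so the array scanned by the binary search (and sorted, see the radix-sort branch further down) stays as dense as possible. A self-contained sketch of the layout idea; the type names and sizes below are illustrative, not the exact ClickHouse definitions:

```cpp
#include <cstdint>
#include <vector>

// Old shape (roughly): every sorted element drags the whole row reference along.
struct OldEntry
{
    const void * block;  // stand-in for decltype(RowRef::block)
    uint32_t row_num;    // stand-in for decltype(RowRef::row_num)
    uint32_t key;        // TKey
};

// New shape: the sorted/searched array holds only the key plus a 32-bit index
// into a side array of row references that is never reordered.
struct RowRefLite
{
    const void * block = nullptr;
    uint32_t row_num = 0;
};

struct Entry
{
    uint32_t key;
    uint32_t row_ref_index;
};

static_assert(sizeof(Entry) < sizeof(OldEntry),
              "more entries fit per cache line in boundSearch(), and the element is a POD that radix sort can handle");

int main()
{
    std::vector<Entry> entries;       // sorted by key on first lookup
    std::vector<RowRefLite> row_refs; // kept in insertion order

    // insert(key, block, row):  entries.push_back({key, (uint32_t)row_refs.size()}); row_refs.push_back({block, row});
    // findAsof(key):            pos = boundSearch(key); return row_refs[entries[pos].row_ref_index];
    (void)entries;
    (void)row_refs;
}
```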
Then when you put it into a vector, the effect is even greater - decltype(RowRef::block) block; - decltype(RowRef::row_num) row_num; - TKey asof_value; + TKey value; + uint32_t row_ref_index; Entry() = delete; - Entry(TKey v, const Block * b, size_t r) : block(b), row_num(r), asof_value(v) { } + Entry(TKey value_, uint32_t row_ref_index_) + : value(value_) + , row_ref_index(row_ref_index_) + { } - bool operator<(const Entry & other) const { return asof_value < other.asof_value; } + }; + + struct LessEntryOperator + { + ALWAYS_INLINE bool operator()(const Entry & lhs, const Entry & rhs) const + { + return lhs.value < rhs.value; + } }; struct GreaterEntryOperator { - bool operator()(Entry const & a, Entry const & b) const { return a.asof_value > b.asof_value; } + ALWAYS_INLINE bool operator()(const Entry & lhs, const Entry & rhs) const + { + return lhs.value > rhs.value; + } }; public: - using Base = std::vector; using Keys = std::vector; - static constexpr bool isDescending = (inequality == ASOF::Inequality::Greater || inequality == ASOF::Inequality::GreaterOrEquals); - static constexpr bool isStrict = (inequality == ASOF::Inequality::Less) || (inequality == ASOF::Inequality::Greater); + using Entries = PaddedPODArray; + using RowRefs = PaddedPODArray; + + static constexpr bool is_descending = (inequality == ASOF::Inequality::Greater || inequality == ASOF::Inequality::GreaterOrEquals); + static constexpr bool is_strict = (inequality == ASOF::Inequality::Less) || (inequality == ASOF::Inequality::Greater); void insert(const IColumn & asof_column, const Block * block, size_t row_num) override { using ColumnType = ColumnVectorOrDecimal; const auto & column = assert_cast(asof_column); - TKey k = column.getElement(row_num); + TKey key = column.getElement(row_num); assert(!sorted.load(std::memory_order_acquire)); - array.emplace_back(k, block, row_num); + + entries.emplace_back(key, row_refs.size()); + row_refs.emplace_back(RowRef(block, row_num)); } /// Unrolled version of upper_bound and lower_bound @@ -84,30 +99,30 @@ public: /// at https://en.algorithmica.org/hpc/data-structures/s-tree/ size_t boundSearch(TKey value) { - size_t size = array.size(); + size_t size = entries.size(); size_t low = 0; /// This is a single binary search iteration as a macro to unroll. Takes into account the inequality: - /// isStrict -> Equal values are not requested - /// isDescending -> The vector is sorted in reverse (for greater or greaterOrEquals) + /// is_strict -> Equal values are not requested + /// is_descending -> The vector is sorted in reverse (for greater or greaterOrEquals) #define BOUND_ITERATION \ { \ size_t half = size / 2; \ size_t other_half = size - half; \ size_t probe = low + half; \ size_t other_low = low + other_half; \ - TKey v = array[probe].asof_value; \ + TKey & v = entries[probe].value; \ size = half; \ - if constexpr (isDescending) \ + if constexpr (is_descending) \ { \ - if constexpr (isStrict) \ + if constexpr (is_strict) \ low = value <= v ? other_low : low; \ else \ low = value < v ? other_low : low; \ } \ else \ { \ - if constexpr (isStrict) \ + if constexpr (is_strict) \ low = value >= v ? other_low : low; \ else \ low = value > v ? 
other_low : low; \ @@ -130,7 +145,7 @@ public: return low; } - std::tuple findAsof(const IColumn & asof_column, size_t row_num) override + RowRef findAsof(const IColumn & asof_column, size_t row_num) override { sort(); @@ -139,8 +154,11 @@ public: TKey k = column.getElement(row_num); size_t pos = boundSearch(k); - if (pos != array.size()) - return std::make_tuple(array[pos].block, array[pos].row_num); + if (pos != entries.size()) + { + size_t row_ref_index = entries[pos].row_ref_index; + return row_refs[row_ref_index]; + } return {nullptr, 0}; } @@ -148,7 +166,8 @@ public: private: std::atomic sorted = false; mutable std::mutex lock; - Base array; + Entries entries; + RowRefs row_refs; // Double checked locking with SC atomics works in C++ // https://preshing.com/20130930/double-checked-locking-is-fixed-in-cpp11/ @@ -160,12 +179,37 @@ private: if (!sorted.load(std::memory_order_acquire)) { std::lock_guard l(lock); + if (!sorted.load(std::memory_order_relaxed)) { - if constexpr (isDescending) - ::sort(array.begin(), array.end(), GreaterEntryOperator()); + if constexpr (std::is_arithmetic_v && !std::is_floating_point_v) + { + if (likely(entries.size() > 256)) + { + struct RadixSortTraits : RadixSortNumTraits + { + using Element = Entry; + using Result = Element; + + static TKey & extractKey(Element & elem) { return elem.value; } + static Element extractResult(Element & elem) { return elem; } + }; + + if constexpr (is_descending) + RadixSort::executeLSD(entries.data(), entries.size(), true); + else + RadixSort::executeLSD(entries.data(), entries.size(), false); + + sorted.store(true, std::memory_order_release); + return; + } + } + + if constexpr (is_descending) + ::sort(entries.begin(), entries.end(), GreaterEntryOperator()); else - ::sort(array.begin(), array.end()); + ::sort(entries.begin(), entries.end(), LessEntryOperator()); + sorted.store(true, std::memory_order_release); } } diff --git a/src/Interpreters/RowRefs.h b/src/Interpreters/RowRefs.h index 02462833050..fa5ce867613 100644 --- a/src/Interpreters/RowRefs.h +++ b/src/Interpreters/RowRefs.h @@ -146,7 +146,7 @@ private: struct SortedLookupVectorBase { SortedLookupVectorBase() = default; - virtual ~SortedLookupVectorBase() { } + virtual ~SortedLookupVectorBase() = default; static std::optional getTypeSize(const IColumn & asof_column, size_t & type_size); @@ -154,7 +154,7 @@ struct SortedLookupVectorBase virtual void insert(const IColumn &, const Block *, size_t) = 0; // This needs to be synchronized internally - virtual std::tuple findAsof(const IColumn &, size_t) = 0; + virtual RowRef findAsof(const IColumn &, size_t) = 0; }; diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 59545d4314d..3b4d665e41b 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -41,6 +41,57 @@ namespace ErrorCodes { extern const int BAD_ARGUMENTS; extern const int LOGICAL_ERROR; + extern const int NOT_IMPLEMENTED; +} + +namespace +{ + class StorageWithComment : public IAST + { + public: + ASTPtr storage; + ASTPtr comment; + + String getID(char) const override { return "Storage with comment definition"; } + + ASTPtr clone() const override + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method clone is not supported"); + } + + void formatImpl(const FormatSettings &, FormatState &, FormatStateStacked) const override + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method formatImpl is not supported"); + } + }; + + class ParserStorageWithComment : public IParserBase + { + protected: + const 
char * getName() const override { return "storage definition with comment"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override + { + ParserStorage storage_p; + ASTPtr storage; + + if (!storage_p.parse(pos, storage, expected)) + return false; + + ParserKeyword s_comment("COMMENT"); + ParserStringLiteral string_literal_parser; + ASTPtr comment; + + if (s_comment.ignore(pos, expected)) + string_literal_parser.parse(pos, comment, expected); + + auto storage_with_comment = std::make_shared(); + storage_with_comment->storage = std::move(storage); + storage_with_comment->comment = std::move(comment); + + node = storage_with_comment; + return true; + } + }; } namespace @@ -102,8 +153,9 @@ std::shared_ptr createSystemLog( engine += " TTL " + ttl; engine += " ORDER BY (event_date, event_time)"; } + // Validate engine definition grammatically to prevent some configuration errors - ParserStorage storage_parser; + ParserStorageWithComment storage_parser; parseQuery(storage_parser, engine.data(), engine.data() + engine.size(), "Storage to create table for " + config_prefix, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); @@ -450,7 +502,6 @@ void SystemLog::prepareTable() is_prepared = true; } - template ASTPtr SystemLog::getCreateTableQuery() { @@ -465,11 +516,16 @@ ASTPtr SystemLog::getCreateTableQuery() new_columns_list->set(new_columns_list->columns, InterpreterCreateQuery::formatColumns(ordinary_columns, alias_columns)); create->set(create->columns_list, new_columns_list); - ParserStorage storage_parser; - ASTPtr storage_ast = parseQuery( + ParserStorageWithComment storage_parser; + + ASTPtr storage_with_comment_ast = parseQuery( storage_parser, storage_def.data(), storage_def.data() + storage_def.size(), "Storage to create table for " + LogElement::name(), 0, DBMS_DEFAULT_MAX_PARSER_DEPTH); - create->set(create->storage, storage_ast); + + StorageWithComment & storage_with_comment = storage_with_comment_ast->as(); + + create->set(create->storage, storage_with_comment.storage); + create->set(create->comment, storage_with_comment.comment); /// Write additional (default) settings for MergeTree engine to make it make it possible to compare ASTs /// and recreate tables on settings changes. 
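On the SystemLog change that ends above: engine definitions are now parsed with ParserStorageWithComment, so a trailing COMMENT string literal is accepted, validated, and carried over to the generated CREATE query instead of being rejected by plain ParserStorage. A rough sketch of the new parsing path using only the calls shown in this hunk; the example engine string and its comment text are made up for illustration:

```cpp
// Example input, e.g. from the <engine> element of the system log config (illustrative):
//   ENGINE = MergeTree PARTITION BY (event_date) ORDER BY (event_date, event_time)
//   SETTINGS index_granularity = 1024 COMMENT 'system log table'

ParserStorageWithComment storage_parser;

ASTPtr storage_with_comment_ast = parseQuery(
    storage_parser, storage_def.data(), storage_def.data() + storage_def.size(),
    "Storage to create table for " + LogElement::name(), 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);

auto & storage_with_comment = storage_with_comment_ast->as<StorageWithComment &>();

// Storage definition and optional comment are attached to the CREATE AST separately,
// so the comment is preserved on the created system table.
create->set(create->storage, storage_with_comment.storage);
create->set(create->comment, storage_with_comment.comment);
```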
diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index 78e7ed33f8f..929e516f687 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -345,7 +345,10 @@ void replaceWithSumCount(String column_name, ASTFunction & func) { /// Rewrite "avg" to sumCount().1 / sumCount().2 auto new_arg1 = makeASTFunction("tupleElement", func_base, std::make_shared(UInt8(1))); - auto new_arg2 = makeASTFunction("tupleElement", func_base, std::make_shared(UInt8(2))); + auto new_arg2 = makeASTFunction("CAST", + makeASTFunction("tupleElement", func_base, std::make_shared(UInt8(2))), + std::make_shared("Float64")); + func.name = "divide"; exp_list->children.push_back(new_arg1); exp_list->children.push_back(new_arg2); diff --git a/src/Interpreters/join_common.cpp b/src/Interpreters/join_common.cpp index 478df653f3b..47b792f81e9 100644 --- a/src/Interpreters/join_common.cpp +++ b/src/Interpreters/join_common.cpp @@ -326,9 +326,10 @@ ColumnRawPtrMap materializeColumnsInplaceMap(Block & block, const Names & names) for (const auto & column_name : names) { - auto & column = block.getByName(column_name).column; - column = recursiveRemoveLowCardinality(column->convertToFullColumnIfConst()); - ptrs[column_name] = column.get(); + auto & column = block.getByName(column_name); + column.column = recursiveRemoveLowCardinality(column.column->convertToFullColumnIfConst()); + column.type = recursiveRemoveLowCardinality(column.type); + ptrs[column_name] = column.column.get(); } return ptrs; diff --git a/src/Interpreters/threadPoolCallbackRunner.cpp b/src/Interpreters/threadPoolCallbackRunner.cpp new file mode 100644 index 00000000000..fadad235039 --- /dev/null +++ b/src/Interpreters/threadPoolCallbackRunner.cpp @@ -0,0 +1,39 @@ +#include "threadPoolCallbackRunner.h" + +#include + +#include + +namespace DB +{ + +CallbackRunner threadPoolCallbackRunner(ThreadPool & pool) +{ + return [pool = &pool, thread_group = CurrentThread::getGroup()](auto callback) + { + pool->scheduleOrThrow([callback = std::move(callback), thread_group]() + { + if (thread_group) + CurrentThread::attachTo(thread_group); + + SCOPE_EXIT_SAFE({ + if (thread_group) + CurrentThread::detachQueryIfNotDetached(); + + /// After we detached from the thread_group, parent for memory_tracker inside ThreadStatus will be reset to it's parent. + /// Typically, it may be changes from Process to User. + /// Usually it could be ok, because thread pool task is executed before user-level memory tracker is destroyed. + /// However, thread could stay alive inside the thread pool, and it's ThreadStatus as well. + /// When, finally, we destroy the thread (and the ThreadStatus), + /// it can use memory tracker in the ~ThreadStatus in order to alloc/free untracked_memory,\ + /// and by this time user-level memory tracker may be already destroyed. + /// + /// As a work-around, reset memory tracker to total, which is always alive. 
+ CurrentThread::get().memory_tracker.setParent(&total_memory_tracker); + }); + callback(); + }); + }; +} + +} diff --git a/src/Interpreters/threadPoolCallbackRunner.h b/src/Interpreters/threadPoolCallbackRunner.h new file mode 100644 index 00000000000..59d06f2f1bc --- /dev/null +++ b/src/Interpreters/threadPoolCallbackRunner.h @@ -0,0 +1,15 @@ +#pragma once + +#include + + +namespace DB +{ + +/// High-order function to run callbacks (functions with 'void()' signature) somewhere asynchronously +using CallbackRunner = std::function)>; + +/// Creates CallbackRunner that runs every callback with 'pool->scheduleOrThrow()' +CallbackRunner threadPoolCallbackRunner(ThreadPool & pool); + +} diff --git a/src/Parsers/ASTBackupQuery.cpp b/src/Parsers/ASTBackupQuery.cpp index adc6bb97985..f8fcbd98872 100644 --- a/src/Parsers/ASTBackupQuery.cpp +++ b/src/Parsers/ASTBackupQuery.cpp @@ -11,31 +11,50 @@ namespace using Element = ASTBackupQuery::Element; using ElementType = ASTBackupQuery::ElementType; - void formatName(const DatabaseAndTableName & name, ElementType type, const IAST::FormatSettings & format) + void formatTypeWithName(const DatabaseAndTableName & name, bool name_is_in_temp_db, ElementType type, bool show_type, const IAST::FormatSettings & format) { switch (type) { - case ElementType::TABLE: [[fallthrough]]; - case ElementType::DICTIONARY: + case ElementType::TABLE: { + if (show_type) + { + format.ostr << (format.hilite ? IAST::hilite_keyword : ""); + if (name_is_in_temp_db) + format.ostr << " TEMPORARY TABLE"; + else + format.ostr << " TABLE"; + format.ostr << (format.hilite ? IAST::hilite_none : ""); + } + format.ostr << " "; - if (!name.first.empty()) + if (!name_is_in_temp_db && !name.first.empty()) format.ostr << backQuoteIfNeed(name.first) << "."; format.ostr << backQuoteIfNeed(name.second); break; } case ElementType::DATABASE: { - format.ostr << " " << backQuoteIfNeed(name.first); + if (show_type) + { + format.ostr << (format.hilite ? IAST::hilite_keyword : ""); + if (name_is_in_temp_db) + format.ostr << " ALL TEMPORARY TABLES"; + else + format.ostr << " DATABASE"; + format.ostr << (format.hilite ? IAST::hilite_none : ""); + } + + if (!name_is_in_temp_db) + format.ostr << " " << backQuoteIfNeed(name.first); break; } - case ElementType::TEMPORARY_TABLE: + case ElementType::ALL_DATABASES: { - format.ostr << " " << backQuoteIfNeed(name.second); + if (show_type) + format.ostr << (format.hilite ? IAST::hilite_keyword : "") << " ALL DATABASES" << (format.hilite ? IAST::hilite_none : ""); break; } - default: - break; } } @@ -55,32 +74,36 @@ namespace } } + void formatExceptList(const std::set & except_list, const IAST::FormatSettings & format) + { + if (except_list.empty()) + return; + format.ostr << (format.hilite ? IAST::hilite_keyword : "") << " EXCEPT " + << (format.hilite ? IAST::hilite_none : ""); + bool need_comma = false; + for (const auto & item : except_list) + { + if (std::exchange(need_comma, true)) + format.ostr << ","; + format.ostr << " " << backQuoteIfNeed(item); + } + } + void formatElement(const Element & element, Kind kind, const IAST::FormatSettings & format) { - format.ostr << (format.hilite ? 
IAST::hilite_keyword : "") << " "; - switch (element.type) - { - case ElementType::TABLE: format.ostr << "TABLE"; break; - case ElementType::DICTIONARY: format.ostr << "DICTIONARY"; break; - case ElementType::DATABASE: format.ostr << "DATABASE"; break; - case ElementType::ALL_DATABASES: format.ostr << "ALL DATABASES"; break; - case ElementType::TEMPORARY_TABLE: format.ostr << "TEMPORARY TABLE"; break; - case ElementType::ALL_TEMPORARY_TABLES: format.ostr << "ALL TEMPORARY TABLES"; break; - case ElementType::EVERYTHING: format.ostr << "EVERYTHING"; break; - } - format.ostr << (format.hilite ? IAST::hilite_none : ""); + formatTypeWithName(element.name, element.name_is_in_temp_db, element.type, true, format); - formatName(element.name, element.type, format); + formatPartitions(element.partitions, format); + formatExceptList(element.except_list, format); - bool under_another_name = !element.new_name.first.empty() || !element.new_name.second.empty(); - if (under_another_name) + bool new_name_is_different = (element.new_name != element.name) || (element.new_name_is_in_temp_db != element.name_is_in_temp_db); + if (new_name_is_different) { format.ostr << (format.hilite ? IAST::hilite_keyword : "") << " " << ((kind == Kind::BACKUP) ? "AS" : "INTO") << (format.hilite ? IAST::hilite_none : ""); - formatName(element.new_name, element.type, format); + bool show_type = (element.new_name_is_in_temp_db != element.name_is_in_temp_db); + formatTypeWithName(element.new_name, element.new_name_is_in_temp_db, element.type, show_type, format); } - - formatPartitions(element.partitions, format); } void formatElements(const std::vector & elements, Kind kind, const IAST::FormatSettings & format) diff --git a/src/Parsers/ASTBackupQuery.h b/src/Parsers/ASTBackupQuery.h index 0042fca558f..648bcf27bce 100644 --- a/src/Parsers/ASTBackupQuery.h +++ b/src/Parsers/ASTBackupQuery.h @@ -11,22 +11,20 @@ using DatabaseAndTableName = std::pair; /** BACKUP { TABLE [db.]table_name [AS [db.]table_name_in_backup] [PARTITION[S] partition_expr [,...]] | * DICTIONARY [db.]dictionary_name [AS [db.]dictionary_name_in_backup] | - * DATABASE database_name [AS database_name_in_backup] | - * ALL DATABASES | - * TEMPORARY TABLE table_name [AS table_name_in_backup] - * ALL TEMPORARY TABLES | - * EVERYTHING } [,...] + * TEMPORARY TABLE table_name [AS table_name_in_backup] | + * ALL TEMPORARY TABLES [EXCEPT ...] | + * DATABASE database_name [EXCEPT ...] [AS database_name_in_backup] | + * ALL DATABASES [EXCEPT ...] } [,...] * TO { File('path/') | * Disk('disk_name', 'path/') * [SETTINGS base_backup = {File(...) | Disk(...)}] * * RESTORE { TABLE [db.]table_name_in_backup [INTO [db.]table_name] [PARTITION[S] partition_expr [,...]] | * DICTIONARY [db.]dictionary_name_in_backup [INTO [db.]dictionary_name] | - * DATABASE database_name_in_backup [INTO database_name] | - * ALL DATABASES | * TEMPORARY TABLE table_name_in_backup [INTO table_name] | - * ALL TEMPORARY TABLES | - * EVERYTHING } [,...] + * ALL TEMPORARY TABLES [EXCEPT ...] | + * DATABASE database_name_in_backup [EXCEPT ...] [INTO database_name] | + * ALL DATABASES [EXCEPT ...] } [,...] * FROM {File(...) 
| Disk(...)} * * Notes: @@ -57,12 +55,8 @@ public: enum ElementType { TABLE, - DICTIONARY, DATABASE, ALL_DATABASES, - TEMPORARY_TABLE, - ALL_TEMPORARY_TABLES, - EVERYTHING, }; struct Element @@ -70,6 +64,8 @@ public: ElementType type; DatabaseAndTableName name; DatabaseAndTableName new_name; + bool name_is_in_temp_db = false; + bool new_name_is_in_temp_db = false; ASTs partitions; std::set except_list; }; diff --git a/src/Parsers/ParserBackupQuery.cpp b/src/Parsers/ParserBackupQuery.cpp index 666600b58de..844a91fa515 100644 --- a/src/Parsers/ParserBackupQuery.cpp +++ b/src/Parsers/ParserBackupQuery.cpp @@ -18,38 +18,109 @@ namespace using Element = ASTBackupQuery::Element; using ElementType = ASTBackupQuery::ElementType; - bool parseName(IParser::Pos & pos, Expected & expected, ElementType type, DatabaseAndTableName & name) + bool parseType(IParser::Pos & pos, Expected & expected, ElementType & type, bool & name_is_in_temp_db) { + name_is_in_temp_db = false; + if (ParserKeyword{"TABLE"}.ignore(pos, expected) || ParserKeyword{"DICTIONARY"}.ignore(pos, expected)) + { + type = ElementType::TABLE; + return true; + } + if (ParserKeyword{"TEMPORARY TABLE"}.ignore(pos, expected)) + { + type = ElementType::TABLE; + name_is_in_temp_db = true; + return true; + } + if (ParserKeyword{"DATABASE"}.ignore(pos, expected)) + { + type = ElementType::DATABASE; + return true; + } + if (ParserKeyword{"ALL TEMPORARY TABLES"}.ignore(pos, expected)) + { + type = ElementType::DATABASE; + name_is_in_temp_db = true; + return true; + } + if (ParserKeyword{"ALL DATABASES"}.ignore(pos, expected)) + { + type = ElementType::ALL_DATABASES; + return true; + } + return false; + } + + bool parseTempDBFlag(IParser::Pos & pos, Expected & expected, ElementType type, bool & temp_db_flag) + { + temp_db_flag = false; switch (type) { - case ElementType::TABLE: [[fallthrough]]; - case ElementType::DICTIONARY: + case ElementType::TABLE: { + if (ParserKeyword{"TABLE"}.ignore(pos, expected) || ParserKeyword{"DICTIONARY"}.ignore(pos, expected)) + { + return true; + } + if (ParserKeyword{"TEMPORARY TABLE"}.ignore(pos, expected)) + { + temp_db_flag = true; + return true; + } + return false; + } + + case ElementType::DATABASE: + { + if (ParserKeyword{"DATABASE"}.ignore(pos, expected)) + { + return true; + } + if (ParserKeyword{"ALL TEMPORARY TABLES"}.ignore(pos, expected)) + { + temp_db_flag = true; + return true; + } + return false; + } + + default: + return false; + } + } + + bool parseName(IParser::Pos & pos, Expected & expected, ElementType type, bool name_is_in_temp_db, DatabaseAndTableName & name) + { + name.first.clear(); + name.second.clear(); + switch (type) + { + case ElementType::TABLE: + { + if (name_is_in_temp_db) + { + ASTPtr ast; + if (!ParserIdentifier{}.parse(pos, ast, expected)) + return false; + name.second = getIdentifierName(ast); + return true; + } return parseDatabaseAndTableName(pos, expected, name.first, name.second); } case ElementType::DATABASE: { + if (name_is_in_temp_db) + return false; ASTPtr ast; if (!ParserIdentifier{}.parse(pos, ast, expected)) return false; name.first = getIdentifierName(ast); - name.second.clear(); - return true; - } - - case ElementType::TEMPORARY_TABLE: - { - ASTPtr ast; - if (!ParserIdentifier{}.parse(pos, ast, expected)) - return false; - name.second = getIdentifierName(ast); - name.first.clear(); return true; } default: - return true; + return false; } } @@ -64,7 +135,7 @@ namespace ASTPtr ast; if (!ParserPartition{}.parse(pos, ast, expected)) return false; - 
result.emplace_back(ast); + result.push_back(ast); return true; }; if (!ParserList::parseUtil(pos, expected, parse_list_element, false)) @@ -74,50 +145,72 @@ namespace return true; } + bool parseExceptList(IParser::Pos & pos, Expected & expected, std::set & except_list) + { + if (!ParserKeyword{"EXCEPT"}.ignore(pos, expected)) + return false; + + std::set result; + auto parse_list_element = [&] + { + ASTPtr ast; + if (!ParserIdentifier{}.parse(pos, ast, expected)) + return false; + result.insert(getIdentifierName(ast)); + return true; + }; + if (!ParserList::parseUtil(pos, expected, parse_list_element, false)) + return false; + + except_list = std::move(result); + return true; + } + bool parseElement(IParser::Pos & pos, Expected & expected, Element & entry) { return IParserBase::wrapParseImpl(pos, [&] { ElementType type; - if (ParserKeyword{"TABLE"}.ignore(pos, expected)) - type = ElementType::TABLE; - else if (ParserKeyword{"DICTIONARY"}.ignore(pos, expected)) - type = ElementType::DICTIONARY; - else if (ParserKeyword{"DATABASE"}.ignore(pos, expected)) - type = ElementType::DATABASE; - else if (ParserKeyword{"ALL DATABASES"}.ignore(pos, expected)) - type = ElementType::ALL_DATABASES; - else if (ParserKeyword{"TEMPORARY TABLE"}.ignore(pos, expected)) - type = ElementType::TEMPORARY_TABLE; - else if (ParserKeyword{"ALL TEMPORARY TABLES"}.ignore(pos, expected)) - type = ElementType::ALL_TEMPORARY_TABLES; - else if (ParserKeyword{"EVERYTHING"}.ignore(pos, expected)) - type = ElementType::EVERYTHING; - else + bool name_is_in_temp_db = false; + if (!parseType(pos, expected, type, name_is_in_temp_db)) return false; DatabaseAndTableName name; - if (!parseName(pos, expected, type, name)) - return false; + if ((type == ElementType::TABLE) || (type == ElementType::DATABASE && !name_is_in_temp_db)) + { + if (!parseName(pos, expected, type, name_is_in_temp_db, name)) + return false; + } + + bool new_name_is_in_temp_db = name_is_in_temp_db; + DatabaseAndTableName new_name = name; + if (ParserKeyword{"AS"}.ignore(pos, expected) || ParserKeyword{"INTO"}.ignore(pos, expected)) + { + if (!parseTempDBFlag(pos, expected, type, new_name_is_in_temp_db)) + new_name_is_in_temp_db = name_is_in_temp_db; + + if ((type == ElementType::TABLE) || (type == ElementType::DATABASE && !new_name_is_in_temp_db)) + { + if (!parseName(pos, expected, type, new_name_is_in_temp_db, new_name)) + new_name = name; + } + } ASTs partitions; if (type == ElementType::TABLE) parsePartitions(pos, expected, partitions); - DatabaseAndTableName new_name; - if (ParserKeyword{"AS"}.ignore(pos, expected) || ParserKeyword{"INTO"}.ignore(pos, expected)) - { - if (!parseName(pos, expected, type, new_name)) - return false; - } - - if ((type == ElementType::TABLE) && partitions.empty()) - parsePartitions(pos, expected, partitions); + std::set except_list; + if (type != ElementType::TABLE) + parseExceptList(pos, expected, except_list); entry.type = type; entry.name = std::move(name); entry.new_name = std::move(new_name); + entry.name_is_in_temp_db = name_is_in_temp_db; + entry.new_name_is_in_temp_db = new_name_is_in_temp_db; entry.partitions = std::move(partitions); + entry.except_list = std::move(except_list); return true; }); } diff --git a/src/Parsers/ParserBackupQuery.h b/src/Parsers/ParserBackupQuery.h index e42326c2590..b01c149601c 100644 --- a/src/Parsers/ParserBackupQuery.h +++ b/src/Parsers/ParserBackupQuery.h @@ -8,22 +8,20 @@ namespace DB /** Parses queries like * BACKUP { TABLE [db.]table_name [AS [db.]table_name_in_backup] [PARTITION[S] 
partition_expr [,...]] | * DICTIONARY [db.]dictionary_name [AS [db.]dictionary_name_in_backup] | - * DATABASE database_name [AS database_name_in_backup] | - * ALL DATABASES | * TEMPORARY TABLE table_name [AS table_name_in_backup] - * ALL TEMPORARY TABLES | - * EVERYTHING } [,...] + * ALL TEMPORARY TABLES [EXCEPT ...] | + * DATABASE database_name [AS database_name_in_backup] [EXCEPT ...] | + * ALL DATABASES [EXCEPT ...] } [,...] * TO { File('path/') | * Disk('disk_name', 'path/') * [SETTINGS base_backup = {FILE(...) | DISK(...)}] * * RESTORE { TABLE [db.]table_name_in_backup [INTO [db.]table_name] [PARTITION[S] partition_expr [,...]] | * DICTIONARY [db.]dictionary_name_in_backup [INTO [db.]dictionary_name] | - * DATABASE database_name_in_backup [INTO database_name] | - * ALL DATABASES | * TEMPORARY TABLE table_name_in_backup [INTO table_name] | - * ALL TEMPORARY TABLES | - * EVERYTHING } [,...] + * ALL TEMPORARY TABLES [EXCEPT ...] | + * DATABASE database_name_in_backup [EXCEPT ...] [INTO database_name] | + * ALL DATABASES [EXCEPT ...] } [,...] * FROM {File(...) | Disk(...)} */ class ParserBackupQuery : public IParserBase diff --git a/src/Parsers/fuzzers/codegen_fuzzer/gen.py b/src/Parsers/fuzzers/codegen_fuzzer/gen.py index 95936247489..84ee09916c4 100644 --- a/src/Parsers/fuzzers/codegen_fuzzer/gen.py +++ b/src/Parsers/fuzzers/codegen_fuzzer/gen.py @@ -7,16 +7,14 @@ import string TOKEN_TEXT = 1 TOKEN_VAR = 2 -TOKEN_COLON = ':' -TOKEN_SEMI = ';' -TOKEN_OR = '|' -TOKEN_QUESTIONMARK = '?' -TOKEN_ROUND_BRACKET_OPEN = '(' -TOKEN_ROUND_BRACKET_CLOSE = ')' -TOKEN_ASTERISK = '*' -TOKEN_SLASH = '/' - - +TOKEN_COLON = ":" +TOKEN_SEMI = ";" +TOKEN_OR = "|" +TOKEN_QUESTIONMARK = "?" +TOKEN_ROUND_BRACKET_OPEN = "(" +TOKEN_ROUND_BRACKET_CLOSE = ")" +TOKEN_ASTERISK = "*" +TOKEN_SLASH = "/" class TextValue: @@ -27,9 +25,9 @@ class TextValue: def get_slug(self): if self.slug is not None: return self.slug - slug = '' + slug = "" for c in self.t: - slug += c if c in string.ascii_letters else '_' + slug += c if c in string.ascii_letters else "_" self.slug = slug return slug @@ -37,12 +35,12 @@ class TextValue: return f"TextValue_{self.get_slug()}" def __repr__(self): - return f"TextValue(\"{self.t}\")" + return f'TextValue("{self.t}")' class Var: def __init__(self, id_): - self.id_ = id_ + self.id_ = id_ def __repr__(self): return f"Var({self.id_})" @@ -59,8 +57,8 @@ class Parser: self.cur_tok = None self.includes = [] - self.proto = '' - self.cpp = '' + self.proto = "" + self.cpp = "" def parse_file(self, filename): with open(filename) as f: @@ -81,7 +79,7 @@ class Parser: if self.text[0] == '"': return self.parse_txt_value() - if self.text[0] == '$': + if self.text[0] == "$": return self.parse_var_value() c, self.text = self.text[0], self.text[1:] @@ -89,9 +87,9 @@ class Parser: return c def parse_var_value(self): - i = self.text.find(' ') + i = self.text.find(" ") - id_, self.text = self.text[1:i], self.text[i+1:] + id_, self.text = self.text[1:i], self.text[i + 1 :] self.var_id = int(id_) self.cur_tok = TOKEN_VAR return TOKEN_VAR @@ -100,12 +98,12 @@ class Parser: if self.text[0] != '"': raise Exception("parse_txt_value: expected quote at the start") - self.t = '' + self.t = "" self.text = self.text[1:] while self.text[0] != '"': - if self.text[0] == '\\': - if self.text[1] == 'x': + if self.text[0] == "\\": + if self.text[1] == "x": self.t += self.text[:4] self.text = self.text[4:] elif self.text[1] in 'nt\\"': @@ -123,7 +121,7 @@ class Parser: def skip_ws(self): while self.text and self.text[0] in 
string.whitespace: - if self.text[0] == '\n': + if self.text[0] == "\n": self.line += 1 self.col = 0 self.text = self.text[1:] @@ -134,10 +132,9 @@ class Parser: def skip_line(self): self.line += 1 - index = self.text.find('\n') + index = self.text.find("\n") self.text = self.text[index:] - def parse_statement(self): if self.skip_ws() is None: return None @@ -164,52 +161,54 @@ class Parser: def generate(self): self.proto = 'syntax = "proto3";\n\n' - self.cpp = '#include \n#include \n#include \n\n#include \n\n' + self.cpp = "#include \n#include \n#include \n\n#include \n\n" for incl_file in self.includes: self.cpp += f'#include "{incl_file}"\n' - self.cpp += '\n' + self.cpp += "\n" - self.proto += 'message Word {\n' - self.proto += '\tenum Value {\n' + self.proto += "message Word {\n" + self.proto += "\tenum Value {\n" - self.cpp += 'void GenerateWord(const Word&, std::string&, int);\n\n' + self.cpp += "void GenerateWord(const Word&, std::string&, int);\n\n" - self.cpp += 'void GenerateSentence(const Sentence& stc, std::string &s, int depth) {\n' - self.cpp += '\tfor (int i = 0; i < stc.words_size(); i++ ) {\n' - self.cpp += '\t\tGenerateWord(stc.words(i), s, ++depth);\n' - self.cpp += '\t}\n' - self.cpp += '}\n' + self.cpp += ( + "void GenerateSentence(const Sentence& stc, std::string &s, int depth) {\n" + ) + self.cpp += "\tfor (int i = 0; i < stc.words_size(); i++ ) {\n" + self.cpp += "\t\tGenerateWord(stc.words(i), s, ++depth);\n" + self.cpp += "\t}\n" + self.cpp += "}\n" - self.cpp += 'void GenerateWord(const Word& word, std::string &s, int depth) {\n' + self.cpp += "void GenerateWord(const Word& word, std::string &s, int depth) {\n" - self.cpp += '\tif (depth > 5) return;\n\n' - self.cpp += '\tswitch (word.value()) {\n' + self.cpp += "\tif (depth > 5) return;\n\n" + self.cpp += "\tswitch (word.value()) {\n" for idx, chain in enumerate(self.chains): - self.proto += f'\t\tvalue_{idx} = {idx};\n' + self.proto += f"\t\tvalue_{idx} = {idx};\n" - self.cpp += f'\t\tcase {idx}: {{\n' + self.cpp += f"\t\tcase {idx}: {{\n" num_var = 0 for item in chain: if isinstance(item, TextValue): self.cpp += f'\t\t\ts += "{item.t}";\n' elif isinstance(item, Var): - self.cpp += f'\t\t\tif (word.inner().words_size() > {num_var})\t\t\t\tGenerateWord(word.inner().words({num_var}), s, ++depth);\n' + self.cpp += f"\t\t\tif (word.inner().words_size() > {num_var})\t\t\t\tGenerateWord(word.inner().words({num_var}), s, ++depth);\n" num_var += 1 else: raise Exception("unknown token met during generation") - self.cpp += '\t\t\tbreak;\n\t\t}\n' - self.cpp += '\t\tdefault: break;\n' + self.cpp += "\t\t\tbreak;\n\t\t}\n" + self.cpp += "\t\tdefault: break;\n" - self.cpp += '\t}\n' + self.cpp += "\t}\n" - self.proto += '\t}\n' - self.proto += '\tValue value = 1;\n' - self.proto += '\tSentence inner = 2;\n' - self.proto += '}\nmessage Sentence {\n\trepeated Word words = 1;\n}' + self.proto += "\t}\n" + self.proto += "\tValue value = 1;\n" + self.proto += "\tSentence inner = 2;\n" + self.proto += "}\nmessage Sentence {\n\trepeated Word words = 1;\n}" - self.cpp += '}\n' + self.cpp += "}\n" return self.cpp, self.proto def fatal_parsing_error(self, msg): @@ -220,7 +219,7 @@ class Parser: def main(args): input_file, outfile_cpp, outfile_proto = args - if not outfile_proto.endswith('.proto'): + if not outfile_proto.endswith(".proto"): raise Exception("outfile_proto (argv[3]) should end with `.proto`") include_filename = outfile_proto[:-6] + ".pb.h" @@ -231,17 +230,17 @@ def main(args): cpp, proto = p.generate() - proto = 
proto.replace('\t', ' ' * 4) - cpp = cpp.replace('\t', ' ' * 4) + proto = proto.replace("\t", " " * 4) + cpp = cpp.replace("\t", " " * 4) - with open(outfile_cpp, 'w') as f: + with open(outfile_cpp, "w") as f: f.write(cpp) - with open(outfile_proto, 'w') as f: + with open(outfile_proto, "w") as f: f.write(proto) -if __name__ == '__main__': +if __name__ == "__main__": if len(sys.argv) < 3: print(f"Usage {sys.argv[0]} ") sys.exit(1) diff --git a/src/Parsers/parseIntervalKind.cpp b/src/Parsers/parseIntervalKind.cpp index 7d36133e81c..0704aa107ca 100644 --- a/src/Parsers/parseIntervalKind.cpp +++ b/src/Parsers/parseIntervalKind.cpp @@ -7,6 +7,27 @@ namespace DB { bool parseIntervalKind(IParser::Pos & pos, Expected & expected, IntervalKind & result) { + if (ParserKeyword("NANOSECOND").ignore(pos, expected) || ParserKeyword("SQL_TSI_NANOSECOND").ignore(pos, expected) + || ParserKeyword("NS").ignore(pos, expected)) + { + result = IntervalKind::Nanosecond; + return true; + } + + if (ParserKeyword("MICROSECOND").ignore(pos, expected) || ParserKeyword("SQL_TSI_MICROSECOND").ignore(pos, expected) + || ParserKeyword("MCS").ignore(pos, expected)) + { + result = IntervalKind::Microsecond; + return true; + } + + if (ParserKeyword("MILLISECOND").ignore(pos, expected) || ParserKeyword("SQL_TSI_MILLISECOND").ignore(pos, expected) + || ParserKeyword("MS").ignore(pos, expected)) + { + result = IntervalKind::Millisecond; + return true; + } + if (ParserKeyword("SECOND").ignore(pos, expected) || ParserKeyword("SQL_TSI_SECOND").ignore(pos, expected) || ParserKeyword("SS").ignore(pos, expected) || ParserKeyword("S").ignore(pos, expected)) { diff --git a/src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp b/src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp index cf5cfa681a1..37a107ae367 100644 --- a/src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp @@ -139,7 +139,11 @@ void ArrowBlockInputFormat::prepareReader() } arrow_column_to_ch_column = std::make_unique( - getPort().getHeader(), "Arrow", format_settings.arrow.import_nested, format_settings.arrow.allow_missing_columns); + getPort().getHeader(), + "Arrow", + format_settings.arrow.import_nested, + format_settings.arrow.allow_missing_columns, + format_settings.arrow.case_insensitive_column_matching); missing_columns = arrow_column_to_ch_column->getMissingColumns(*schema); if (stream) diff --git a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp index 14c81a0d90d..0a72e561e4e 100644 --- a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp +++ b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp @@ -31,6 +31,7 @@ #include #include #include +#include /// UINT16 and UINT32 are processed separately, see comments in readColumnFromArrowColumn. 
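The Arrow/ORC/Parquet changes in this area thread a new case_insensitive_column_matching flag down into ArrowColumnToCHColumn. As the following hunks show, the mechanics amount to lowercasing the lookup key on both sides whenever the flag is set. A minimal, self-contained sketch of that matching rule; the names below are invented for the sketch and are not the ClickHouse API:

```cpp
#include <algorithm>
#include <cctype>
#include <string>
#include <unordered_map>

// Sketch only: mimics the "lowercase the key when ignore_case is set" rule used for
// matching header columns against Arrow/ORC/Parquet schema fields in the hunks below.
static std::string normalizeName(std::string name, bool ignore_case)
{
    if (ignore_case)
        std::transform(name.begin(), name.end(), name.begin(),
                       [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
    return name;
}

struct ColumnIndex
{
    bool ignore_case = false;
    std::unordered_map<std::string, int> by_name; // normalized name -> position in the file

    void add(const std::string & file_column, int position)
    {
        by_name[normalizeName(file_column, ignore_case)] = position;
    }

    // Returns the position of the file column matching a header column, or -1 if absent.
    int find(const std::string & header_column) const
    {
        auto it = by_name.find(normalizeName(header_column, ignore_case));
        return it == by_name.end() ? -1 : it->second;
    }
};

int main()
{
    ColumnIndex index;
    index.ignore_case = true;
    index.add("UserID", 0);                   // column name as written in the file
    return index.find("userid") == 0 ? 0 : 1; // header uses different case, still matches
}
```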
#define FOR_ARROW_NUMERIC_TYPES(M) \ @@ -484,19 +485,22 @@ static void checkStatus(const arrow::Status & status, const String & column_name throw Exception{ErrorCodes::UNKNOWN_EXCEPTION, "Error with a {} column '{}': {}.", format_name, column_name, status.ToString()}; } -Block ArrowColumnToCHColumn::arrowSchemaToCHHeader(const arrow::Schema & schema, const std::string & format_name, const Block * hint_header) +Block ArrowColumnToCHColumn::arrowSchemaToCHHeader( + const arrow::Schema & schema, const std::string & format_name, const Block * hint_header, bool ignore_case) { ColumnsWithTypeAndName sample_columns; std::unordered_set nested_table_names; if (hint_header) - nested_table_names = Nested::getAllTableNames(*hint_header); + nested_table_names = Nested::getAllTableNames(*hint_header, ignore_case); + for (const auto & field : schema.fields()) { - if (hint_header && !hint_header->has(field->name()) && !nested_table_names.contains(field->name())) + if (hint_header && !hint_header->has(field->name(), ignore_case) + && !nested_table_names.contains(ignore_case ? boost::to_lower_copy(field->name()) : field->name())) continue; /// Create empty arrow column by it's type and convert it to ClickHouse column. - arrow::MemoryPool* pool = arrow::default_memory_pool(); + arrow::MemoryPool * pool = arrow::default_memory_pool(); std::unique_ptr array_builder; arrow::Status status = MakeBuilder(pool, field->type(), &array_builder); checkStatus(status, field->name(), format_name); @@ -516,20 +520,31 @@ Block ArrowColumnToCHColumn::arrowSchemaToCHHeader(const arrow::Schema & schema, } ArrowColumnToCHColumn::ArrowColumnToCHColumn( - const Block & header_, const std::string & format_name_, bool import_nested_, bool allow_missing_columns_) - : header(header_), format_name(format_name_), import_nested(import_nested_), allow_missing_columns(allow_missing_columns_) + const Block & header_, + const std::string & format_name_, + bool import_nested_, + bool allow_missing_columns_, + bool case_insensitive_matching_) + : header(header_) + , format_name(format_name_) + , import_nested(import_nested_) + , allow_missing_columns(allow_missing_columns_) + , case_insensitive_matching(case_insensitive_matching_) { } void ArrowColumnToCHColumn::arrowTableToCHChunk(Chunk & res, std::shared_ptr & table) { NameToColumnPtr name_to_column_ptr; - for (const auto & column_name : table->ColumnNames()) + for (auto column_name : table->ColumnNames()) { std::shared_ptr arrow_column = table->GetColumnByName(column_name); if (!arrow_column) throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Column '{}' is duplicated", column_name); - name_to_column_ptr[column_name] = arrow_column; + + if (case_insensitive_matching) + boost::to_lower(column_name); + name_to_column_ptr[std::move(column_name)] = arrow_column; } arrowColumnsToCHChunk(res, name_to_column_ptr); @@ -548,22 +563,31 @@ void ArrowColumnToCHColumn::arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & { const ColumnWithTypeAndName & header_column = header.getByPosition(column_i); + auto search_column_name = header_column.name; + if (case_insensitive_matching) + boost::to_lower(search_column_name); + bool read_from_nested = false; String nested_table_name = Nested::extractTableName(header_column.name); - if (!name_to_column_ptr.contains(header_column.name)) + String search_nested_table_name = nested_table_name; + if (case_insensitive_matching) + boost::to_lower(search_nested_table_name); + + if (!name_to_column_ptr.contains(search_column_name)) { /// Check if it's a column from nested 
table. - if (import_nested && name_to_column_ptr.contains(nested_table_name)) + if (import_nested && name_to_column_ptr.contains(search_nested_table_name)) { - if (!nested_tables.contains(nested_table_name)) + if (!nested_tables.contains(search_nested_table_name)) { - std::shared_ptr arrow_column = name_to_column_ptr[nested_table_name]; - ColumnsWithTypeAndName cols = {readColumnFromArrowColumn(arrow_column, nested_table_name, format_name, false, dictionary_values, true)}; + std::shared_ptr arrow_column = name_to_column_ptr[search_nested_table_name]; + ColumnsWithTypeAndName cols + = {readColumnFromArrowColumn(arrow_column, nested_table_name, format_name, false, dictionary_values, true)}; Block block(cols); - nested_tables[nested_table_name] = std::make_shared(Nested::flatten(block)); + nested_tables[search_nested_table_name] = std::make_shared(Nested::flatten(block)); } - read_from_nested = nested_tables[nested_table_name]->has(header_column.name); + read_from_nested = nested_tables[search_nested_table_name]->has(header_column.name, case_insensitive_matching); } if (!read_from_nested) @@ -580,13 +604,19 @@ void ArrowColumnToCHColumn::arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & } } - std::shared_ptr arrow_column = name_to_column_ptr[header_column.name]; ColumnWithTypeAndName column; if (read_from_nested) - column = nested_tables[nested_table_name]->getByName(header_column.name); + { + column = nested_tables[search_nested_table_name]->getByName(header_column.name, case_insensitive_matching); + if (case_insensitive_matching) + column.name = header_column.name; + } else + { + auto arrow_column = name_to_column_ptr[search_column_name]; column = readColumnFromArrowColumn(arrow_column, header_column.name, format_name, false, dictionary_values, true); + } try { @@ -594,8 +624,11 @@ void ArrowColumnToCHColumn::arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & } catch (Exception & e) { - e.addMessage(fmt::format("while converting column {} from type {} to type {}", - backQuote(header_column.name), column.type->getName(), header_column.type->getName())); + e.addMessage(fmt::format( + "while converting column {} from type {} to type {}", + backQuote(header_column.name), + column.type->getName(), + header_column.type->getName())); throw; } @@ -609,22 +642,23 @@ void ArrowColumnToCHColumn::arrowColumnsToCHChunk(Chunk & res, NameToColumnPtr & std::vector ArrowColumnToCHColumn::getMissingColumns(const arrow::Schema & schema) const { std::vector missing_columns; - auto block_from_arrow = arrowSchemaToCHHeader(schema, format_name, &header); + auto block_from_arrow = arrowSchemaToCHHeader(schema, format_name, &header, case_insensitive_matching); auto flatten_block_from_arrow = Nested::flatten(block_from_arrow); + for (size_t i = 0, columns = header.columns(); i < columns; ++i) { - const auto & column = header.getByPosition(i); + const auto & header_column = header.getByPosition(i); bool read_from_nested = false; - String nested_table_name = Nested::extractTableName(column.name); - if (!block_from_arrow.has(column.name)) + String nested_table_name = Nested::extractTableName(header_column.name); + if (!block_from_arrow.has(header_column.name, case_insensitive_matching)) { - if (import_nested && block_from_arrow.has(nested_table_name)) - read_from_nested = flatten_block_from_arrow.has(column.name); + if (import_nested && block_from_arrow.has(nested_table_name, case_insensitive_matching)) + read_from_nested = flatten_block_from_arrow.has(header_column.name, case_insensitive_matching); if 
(!read_from_nested) { if (!allow_missing_columns) - throw Exception{ErrorCodes::THERE_IS_NO_COLUMN, "Column '{}' is not presented in input data.", column.name}; + throw Exception{ErrorCodes::THERE_IS_NO_COLUMN, "Column '{}' is not presented in input data.", header_column.name}; missing_columns.push_back(i); } diff --git a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.h b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.h index cf4f6bb3ff3..0a712326941 100644 --- a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.h +++ b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.h @@ -25,7 +25,8 @@ public: const Block & header_, const std::string & format_name_, bool import_nested_, - bool allow_missing_columns_); + bool allow_missing_columns_, + bool case_insensitive_matching_ = false); void arrowTableToCHChunk(Chunk & res, std::shared_ptr & table); @@ -36,7 +37,8 @@ public: /// Transform arrow schema to ClickHouse header. If hint_header is provided, /// we will skip columns in schema that are not in hint_header. - static Block arrowSchemaToCHHeader(const arrow::Schema & schema, const std::string & format_name, const Block * hint_header = nullptr); + static Block arrowSchemaToCHHeader( + const arrow::Schema & schema, const std::string & format_name, const Block * hint_header = nullptr, bool ignore_case = false); private: const Block & header; @@ -44,6 +46,7 @@ private: bool import_nested; /// If false, throw exception if some columns in header not exists in arrow table. bool allow_missing_columns; + bool case_insensitive_matching; /// Map {column name : dictionary column}. /// To avoid converting dictionary from Arrow Dictionary diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp index 914ec27fc46..9bf1682b77e 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp @@ -228,6 +228,14 @@ void registerNonTrivialPrefixAndSuffixCheckerJSONAsString(FormatFactory & factor factory.registerNonTrivialPrefixAndSuffixChecker("JSONAsString", nonTrivialPrefixAndSuffixCheckerJSONEachRowImpl); } +void registerJSONAsStringSchemaReader(FormatFactory & factory) +{ + factory.registerExternalSchemaReader("JSONAsString", [](const FormatSettings &) + { + return std::make_shared(); + }); +} + void registerInputFormatJSONAsObject(FormatFactory & factory) { factory.registerInputFormat("JSONAsObject", []( @@ -245,11 +253,16 @@ void registerNonTrivialPrefixAndSuffixCheckerJSONAsObject(FormatFactory & factor factory.registerNonTrivialPrefixAndSuffixChecker("JSONAsObject", nonTrivialPrefixAndSuffixCheckerJSONEachRowImpl); } -void registerJSONAsStringSchemaReader(FormatFactory & factory) +void registerFileSegmentationEngineJSONAsObject(FormatFactory & factory) { - factory.registerExternalSchemaReader("JSONAsString", [](const FormatSettings &) + factory.registerFileSegmentationEngine("JSONAsObject", &fileSegmentationEngineJSONEachRow); +} + +void registerJSONAsObjectSchemaReader(FormatFactory & factory) +{ + factory.registerExternalSchemaReader("JSONAsObject", [](const FormatSettings &) { - return std::make_shared(); + return std::make_shared(); }); } diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h index f7880eac867..438107e73e6 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h @@ -5,6 +5,7 @@ #include 
#include #include +#include namespace DB { @@ -73,4 +74,13 @@ public: } }; +class JSONAsObjectExternalSchemaReader : public IExternalSchemaReader +{ +public: + NamesAndTypesList readSchema() override + { + return {{"json", std::make_shared("json", false)}}; + } +}; + } diff --git a/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp index aa9f7874ae8..c68b59833db 100644 --- a/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ORCBlockInputFormat.cpp @@ -53,9 +53,6 @@ Chunk ORCBlockInputFormat::generate() if (!table || !table->num_rows()) return res; - if (format_settings.use_lowercase_column_name) - table = *table->RenameColumns(include_column_names); - arrow_column_to_ch_column->arrowTableToCHChunk(res, table); /// If defaults_for_omitted_fields is true, calculate the default values from default expression for omitted fields. /// Otherwise fill the missing columns with zero values of its type. @@ -73,7 +70,6 @@ void ORCBlockInputFormat::resetParser() file_reader.reset(); include_indices.clear(); - include_column_names.clear(); block_missing_values.clear(); } @@ -125,20 +121,6 @@ static void getFileReaderAndSchema( if (!read_schema_result.ok()) throw Exception(read_schema_result.status().ToString(), ErrorCodes::BAD_ARGUMENTS); schema = std::move(read_schema_result).ValueOrDie(); - - if (format_settings.use_lowercase_column_name) - { - std::vector> fields; - fields.reserve(schema->num_fields()); - for (int i = 0; i < schema->num_fields(); ++i) - { - const auto& field = schema->field(i); - auto name = field->name(); - boost::to_lower(name); - fields.push_back(field->WithName(name)); - } - schema = arrow::schema(fields, schema->metadata()); - } } void ORCBlockInputFormat::prepareReader() @@ -149,12 +131,17 @@ void ORCBlockInputFormat::prepareReader() return; arrow_column_to_ch_column = std::make_unique( - getPort().getHeader(), "ORC", format_settings.orc.import_nested, format_settings.orc.allow_missing_columns); + getPort().getHeader(), + "ORC", + format_settings.orc.import_nested, + format_settings.orc.allow_missing_columns, + format_settings.orc.case_insensitive_column_matching); missing_columns = arrow_column_to_ch_column->getMissingColumns(*schema); + const bool ignore_case = format_settings.orc.case_insensitive_column_matching; std::unordered_set nested_table_names; if (format_settings.orc.import_nested) - nested_table_names = Nested::getAllTableNames(getPort().getHeader()); + nested_table_names = Nested::getAllTableNames(getPort().getHeader(), ignore_case); /// In ReadStripe column indices should be started from 1, /// because 0 indicates to select all columns. @@ -165,19 +152,18 @@ void ORCBlockInputFormat::prepareReader() /// so we should recursively count the number of indices we need for this type. int indexes_count = countIndicesForType(schema->field(i)->type()); const auto & name = schema->field(i)->name(); - if (getPort().getHeader().has(name) || nested_table_names.contains(name)) + if (getPort().getHeader().has(name, ignore_case) || nested_table_names.contains(ignore_case ? 
boost::to_lower_copy(name) : name)) { for (int j = 0; j != indexes_count; ++j) - { include_indices.push_back(index + j); - include_column_names.push_back(name); - } } + index += indexes_count; } } -ORCSchemaReader::ORCSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_) : ISchemaReader(in_), format_settings(format_settings_) +ORCSchemaReader::ORCSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_) + : ISchemaReader(in_), format_settings(format_settings_) { } diff --git a/src/Processors/Formats/Impl/ORCBlockInputFormat.h b/src/Processors/Formats/Impl/ORCBlockInputFormat.h index bd2151d78ff..b7a771730ea 100644 --- a/src/Processors/Formats/Impl/ORCBlockInputFormat.h +++ b/src/Processors/Formats/Impl/ORCBlockInputFormat.h @@ -47,7 +47,6 @@ private: // indices of columns to read from ORC file std::vector include_indices; - std::vector include_column_names; std::vector missing_columns; BlockMissingValues block_missing_values; diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index 548bf0138f5..13582ce5019 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -53,11 +53,7 @@ Chunk ParquetBlockInputFormat::generate() std::shared_ptr table; arrow::Status read_status = file_reader->ReadRowGroup(row_group_current, column_indices, &table); if (!read_status.ok()) - throw ParsingException{"Error while reading Parquet data: " + read_status.ToString(), - ErrorCodes::CANNOT_READ_ALL_DATA}; - - if (format_settings.use_lowercase_column_name) - table = *table->RenameColumns(column_names); + throw ParsingException{"Error while reading Parquet data: " + read_status.ToString(), ErrorCodes::CANNOT_READ_ALL_DATA}; ++row_group_current; @@ -78,7 +74,6 @@ void ParquetBlockInputFormat::resetParser() file_reader.reset(); column_indices.clear(); - column_names.clear(); row_group_current = 0; block_missing_values.clear(); } @@ -123,20 +118,6 @@ static void getFileReaderAndSchema( return; THROW_ARROW_NOT_OK(parquet::arrow::OpenFile(std::move(arrow_file), arrow::default_memory_pool(), &file_reader)); THROW_ARROW_NOT_OK(file_reader->GetSchema(&schema)); - - if (format_settings.use_lowercase_column_name) - { - std::vector> fields; - fields.reserve(schema->num_fields()); - for (int i = 0; i < schema->num_fields(); ++i) - { - const auto& field = schema->field(i); - auto name = field->name(); - boost::to_lower(name); - fields.push_back(field->WithName(name)); - } - schema = arrow::schema(fields, schema->metadata()); - } } void ParquetBlockInputFormat::prepareReader() @@ -149,12 +130,18 @@ void ParquetBlockInputFormat::prepareReader() row_group_total = file_reader->num_row_groups(); row_group_current = 0; - arrow_column_to_ch_column = std::make_unique(getPort().getHeader(), "Parquet", format_settings.parquet.import_nested, format_settings.parquet.allow_missing_columns); + arrow_column_to_ch_column = std::make_unique( + getPort().getHeader(), + "Parquet", + format_settings.parquet.import_nested, + format_settings.parquet.allow_missing_columns, + format_settings.parquet.case_insensitive_column_matching); missing_columns = arrow_column_to_ch_column->getMissingColumns(*schema); + const bool ignore_case = format_settings.parquet.case_insensitive_column_matching; std::unordered_set nested_table_names; if (format_settings.parquet.import_nested) - nested_table_names = Nested::getAllTableNames(getPort().getHeader()); + nested_table_names = 
Nested::getAllTableNames(getPort().getHeader(), ignore_case); int index = 0; for (int i = 0; i < schema->num_fields(); ++i) @@ -164,19 +151,19 @@ void ParquetBlockInputFormat::prepareReader() /// count the number of indices we need for this type. int indexes_count = countIndicesForType(schema->field(i)->type()); const auto & name = schema->field(i)->name(); - if (getPort().getHeader().has(name) || nested_table_names.contains(name)) + + if (getPort().getHeader().has(name, ignore_case) || nested_table_names.contains(ignore_case ? boost::to_lower_copy(name) : name)) { for (int j = 0; j != indexes_count; ++j) - { column_indices.push_back(index + j); - column_names.push_back(name); - } } + index += indexes_count; } } -ParquetSchemaReader::ParquetSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_) : ISchemaReader(in_), format_settings(format_settings_) +ParquetSchemaReader::ParquetSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_) + : ISchemaReader(in_), format_settings(format_settings_) { } diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h index eba9aac29f2..1faadaa3d21 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.h +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.h @@ -40,7 +40,6 @@ private: int row_group_total = 0; // indices of columns to read from Parquet file std::vector column_indices; - std::vector column_names; std::unique_ptr arrow_column_to_ch_column; int row_group_current = 0; std::vector missing_columns; diff --git a/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp b/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp index f63d6fa9c46..87ba1b18fa7 100644 --- a/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp @@ -4,7 +4,6 @@ #include #include #include -#include namespace DB @@ -242,15 +241,16 @@ std::unordered_map TSKVSchemaReader::readRowAndGetNamesAndD std::unordered_map names_and_types; StringRef name_ref; - String name_tmp; + String name_buf; String value; do { - bool has_value = readName(in, name_ref, name_tmp); + bool has_value = readName(in, name_ref, name_buf); + String name = String(name_ref); if (has_value) { readEscapedString(value, in); - names_and_types[String(name_ref)] = determineDataTypeByEscapingRule(value, format_settings, FormatSettings::EscapingRule::Escaped); + names_and_types[std::move(name)] = determineDataTypeByEscapingRule(value, format_settings, FormatSettings::EscapingRule::Escaped); } else { diff --git a/src/Processors/QueryPlan/QueryPlan.cpp b/src/Processors/QueryPlan/QueryPlan.cpp index d948c16a78d..b2305d9aab2 100644 --- a/src/Processors/QueryPlan/QueryPlan.cpp +++ b/src/Processors/QueryPlan/QueryPlan.cpp @@ -1,16 +1,22 @@ -#include -#include -#include -#include -#include +#include + +#include + #include #include -#include + +#include +#include + +#include +#include #include #include -#include +#include #include -#include + +#include + namespace DB { @@ -388,6 +394,7 @@ void QueryPlan::explainPlan(WriteBuffer & buffer, const ExplainPlanOptions & opt static void explainPipelineStep(IQueryPlanStep & step, IQueryPlanStep::FormatSettings & settings) { settings.out << String(settings.offset, settings.indent_char) << "(" << step.getName() << ")\n"; + size_t current_offset = settings.offset; step.describePipeline(settings); if (current_offset == settings.offset) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp 
b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 1bfc1ec7306..e1b099e44c3 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -112,6 +112,9 @@ ReadFromMergeTree::ReadFromMergeTree( if (enable_parallel_reading) read_task_callback = context->getMergeTreeReadTaskCallback(); + + /// Add explicit description. + setStepDescription(data.getStorageID().getFullNameNotQuoted()); } Pipe ReadFromMergeTree::readFromPool( diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 685b99a7bdc..6846506f260 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -100,7 +100,8 @@ public: bool enable_parallel_reading ); - String getName() const override { return "ReadFromMergeTree"; } + static constexpr auto name = "ReadFromMergeTree"; + String getName() const override { return name; } void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 1276157cc91..ae97a769b23 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -34,16 +34,16 @@ Block FillingTransform::transformHeader(Block header, const SortDescription & so template static FillColumnDescription::StepFunction getStepFunction( - IntervalKind kind, Int64 step, const DateLUTImpl & date_lut) + IntervalKind kind, Int64 step, const DateLUTImpl & date_lut, UInt16 scale = DataTypeDateTime64::default_scale) { switch (kind) { - #define DECLARE_CASE(NAME) \ +#define DECLARE_CASE(NAME) \ case IntervalKind::NAME: \ - return [step, &date_lut](Field & field) { field = Add##NAME##sImpl::execute(get(field), step, date_lut); }; + return [step, scale, &date_lut](Field & field) { field = Add##NAME##sImpl::execute(get(field), step, date_lut, scale); }; FOR_EACH_INTERVAL_KIND(DECLARE_CASE) - #undef DECLARE_CASE +#undef DECLARE_CASE } __builtin_unreachable(); } @@ -92,7 +92,7 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & Int64 avg_seconds = get(descr.fill_step) * descr.step_kind->toAvgSeconds(); if (avg_seconds < 86400) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, - "Value of step is to low ({} seconds). Must be >= 1 day", avg_seconds); + "Value of step is too low ({} seconds). 
Must be >= 1 day", avg_seconds); } if (which.isDate()) @@ -108,25 +108,23 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & switch (*descr.step_kind) { - #define DECLARE_CASE(NAME) \ +#define DECLARE_CASE(NAME) \ case IntervalKind::NAME: \ descr.step_func = [step, &time_zone = date_time64->getTimeZone()](Field & field) \ { \ auto field_decimal = get>(field); \ - auto components = DecimalUtils::splitWithScaleMultiplier(field_decimal.getValue(), field_decimal.getScaleMultiplier()); \ - auto res = Add##NAME##sImpl::execute(components, step, time_zone); \ - auto res_decimal = decimalFromComponentsWithMultiplier(res, field_decimal.getScaleMultiplier()); \ - field = DecimalField(res_decimal, field_decimal.getScale()); \ + auto res = Add##NAME##sImpl::execute(field_decimal.getValue(), step, time_zone, field_decimal.getScale()); \ + field = DecimalField(res, field_decimal.getScale()); \ }; \ break; FOR_EACH_INTERVAL_KIND(DECLARE_CASE) - #undef DECLARE_CASE +#undef DECLARE_CASE } } else throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, - "STEP of Interval type can be used only with Date/DateTime types, but got {}", type->getName()); + "STEP of Interval type can be used only with Date/DateTime types, but got {}", type->getName()); } else { @@ -140,12 +138,12 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & } FillingTransform::FillingTransform( - const Block & header_, const SortDescription & sort_description_, bool on_totals_) - : ISimpleTransform(header_, transformHeader(header_, sort_description_), true) - , sort_description(sort_description_) - , on_totals(on_totals_) - , filling_row(sort_description_) - , next_row(sort_description_) + const Block & header_, const SortDescription & sort_description_, bool on_totals_) + : ISimpleTransform(header_, transformHeader(header_, sort_description_), true) + , sort_description(sort_description_) + , on_totals(on_totals_) + , filling_row(sort_description_) + , next_row(sort_description_) { if (on_totals) return; @@ -162,14 +160,14 @@ FillingTransform::FillingTransform( if (!tryConvertFields(descr, type)) throw Exception("Incompatible types of WITH FILL expression values with column type " - + type->getName(), ErrorCodes::INVALID_WITH_FILL_EXPRESSION); + + type->getName(), ErrorCodes::INVALID_WITH_FILL_EXPRESSION); if (type->isValueRepresentedByUnsignedInteger() && ((!descr.fill_from.isNull() && less(descr.fill_from, Field{0}, 1)) || - (!descr.fill_to.isNull() && less(descr.fill_to, Field{0}, 1)))) + (!descr.fill_to.isNull() && less(descr.fill_to, Field{0}, 1)))) { throw Exception("WITH FILL bound values cannot be negative for unsigned type " - + type->getName(), ErrorCodes::INVALID_WITH_FILL_EXPRESSION); + + type->getName(), ErrorCodes::INVALID_WITH_FILL_EXPRESSION); } } @@ -214,7 +212,7 @@ void FillingTransform::transform(Chunk & chunk) MutableColumns res_other_columns; auto init_columns_by_positions = [](const Columns & old_columns, Columns & new_columns, - MutableColumns & new_mutable_columns, const Positions & positions) + MutableColumns & new_mutable_columns, const Positions & positions) { for (size_t pos : positions) { diff --git a/src/Server/KeeperTCPHandlerFactory.h b/src/Server/KeeperTCPHandlerFactory.h index 76309ffc119..eb9f92bdd25 100644 --- a/src/Server/KeeperTCPHandlerFactory.h +++ b/src/Server/KeeperTCPHandlerFactory.h @@ -32,14 +32,14 @@ public: KeeperTCPHandlerFactory( ConfigGetter config_getter_, std::shared_ptr keeper_dispatcher_, - Poco::Timespan receive_timeout_, 
- Poco::Timespan send_timeout_, + uint64_t receive_timeout_seconds, + uint64_t send_timeout_seconds, bool secure) : config_getter(config_getter_) , keeper_dispatcher(keeper_dispatcher_) , log(&Poco::Logger::get(std::string{"KeeperTCP"} + (secure ? "S" : "") + "HandlerFactory")) - , receive_timeout(receive_timeout_) - , send_timeout(send_timeout_) + , receive_timeout(/* seconds = */ receive_timeout_seconds, /* microseconds = */ 0) + , send_timeout(/* seconds = */ send_timeout_seconds, /* microseconds = */ 0) { } diff --git a/src/Storages/ExternalDataSourceConfiguration.cpp b/src/Storages/ExternalDataSourceConfiguration.cpp index 5549a816a06..abd20e6e5fd 100644 --- a/src/Storages/ExternalDataSourceConfiguration.cpp +++ b/src/Storages/ExternalDataSourceConfiguration.cpp @@ -325,6 +325,7 @@ void URLBasedDataSourceConfiguration::set(const URLBasedDataSourceConfiguration compression_method = conf.compression_method; structure = conf.structure; http_method = conf.http_method; + headers = conf.headers; } @@ -364,6 +365,10 @@ std::optional getURLBasedDataSourceConfiguration(const { configuration.structure = config.getString(config_prefix + ".structure", ""); } + else if (key == "compression_method") + { + configuration.compression_method = config.getString(config_prefix + ".compression_method", ""); + } else if (key == "headers") { Poco::Util::AbstractConfiguration::Keys header_keys; diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp index 7b6a8db568f..b95f38d4886 100644 --- a/src/Storages/Hive/StorageHive.cpp +++ b/src/Storages/Hive/StorageHive.cpp @@ -124,7 +124,8 @@ public: /// Initialize to_read_block, which is used to read data from HDFS. for (const auto & name_type : source_info->partition_name_types) { - to_read_block.erase(name_type.name); + if (to_read_block.has(name_type.name)) + to_read_block.erase(name_type.name); } /// Initialize format settings diff --git a/src/Storages/IStorage.cpp b/src/Storages/IStorage.cpp index 89403a773b3..88ddde32d83 100644 --- a/src/Storages/IStorage.cpp +++ b/src/Storages/IStorage.cpp @@ -216,14 +216,14 @@ bool IStorage::isStaticStorage() const return false; } -BackupEntries IStorage::backup(const ASTs &, ContextPtr) +BackupEntries IStorage::backupData(ContextPtr, const ASTs &) { throw Exception("Table engine " + getName() + " doesn't support backups", ErrorCodes::NOT_IMPLEMENTED); } -RestoreDataTasks IStorage::restoreFromBackup(const BackupPtr &, const String &, const ASTs &, ContextMutablePtr) +RestoreTaskPtr IStorage::restoreData(ContextMutablePtr, const ASTs &, const BackupPtr &, const String &, const StorageRestoreSettings &) { - throw Exception("Table engine " + getName() + " doesn't support restoring", ErrorCodes::NOT_IMPLEMENTED); + throw Exception("Table engine " + getName() + " doesn't support backups", ErrorCodes::NOT_IMPLEMENTED); } std::string PrewhereInfo::dump() const diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index e9969859d5f..17e9e55455c 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -72,7 +72,9 @@ class IBackup; using BackupPtr = std::shared_ptr; class IBackupEntry; using BackupEntries = std::vector>>; -using RestoreDataTasks = std::vector>; +class IRestoreTask; +using RestoreTaskPtr = std::unique_ptr; +struct StorageRestoreSettings; struct ColumnSize { @@ -216,11 +218,14 @@ public: NameDependencies getDependentViewsByColumn(ContextPtr context) const; + /// Returns true if the storage has data to backup; if this returns false, the backup of this table is hollow, i.e. it doesn't contain any data. 
+ virtual bool hasDataToBackup() const { return false; } + /// Prepares entries to backup data of the storage. - virtual BackupEntries backup(const ASTs & partitions, ContextPtr context); + virtual BackupEntries backupData(ContextPtr context, const ASTs & partitions); /// Extract data from the backup and put it to the storage. - virtual RestoreDataTasks restoreFromBackup(const BackupPtr & backup, const String & data_path_in_backup, const ASTs & partitions, ContextMutablePtr context); + virtual RestoreTaskPtr restoreData(ContextMutablePtr context, const ASTs & partitions, const BackupPtr & backup, const String & data_path_in_backup, const StorageRestoreSettings & restore_settings); /// Returns whether the column is virtual - by default all columns are real. /// Initially reserved virtual column name may be shadowed by real column. diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index c17eb5a981e..eeff7e4c875 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -715,18 +715,12 @@ bool KeyCondition::transformConstantWithValidFunctions( if (is_valid_chain) { - /// Here we cast constant to the input type. - /// It is not clear, why this works in general. - /// I can imagine the case when expression like `column < const` is legal, - /// but `type(column)` and `type(const)` are of different types, - /// and const cannot be casted to column type. - /// (There could be `superType(type(column), type(const))` which is used for comparison). - /// - /// However, looks like this case newer happenes (I could not find such). - /// Let's assume that any two comparable types are castable to each other. auto const_type = cur_node->result_type; auto const_column = out_type->createColumnConst(1, out_value); - auto const_value = (*castColumn({const_column, out_type, ""}, const_type))[0]; + auto const_value = (*castColumnAccurateOrNull({const_column, out_type, ""}, const_type))[0]; + + if (const_value.isNull()) + return false; while (!chain.empty()) { diff --git a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp index 66356fd005b..68ffb42a90a 100644 --- a/src/Storages/MergeTree/MergeFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MergeFromLogEntryTask.cpp @@ -279,14 +279,17 @@ bool MergeFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrite ProfileEvents::increment(ProfileEvents::DataAfterMergeDiffersFromReplica); LOG_ERROR(log, - "{}. Data after merge is not byte-identical to data on another replicas. There could be several" - " reasons: 1. Using newer version of compression library after server update. 2. Using another" - " compression method. 3. Non-deterministic compression algorithm (highly unlikely). 4." - " Non-deterministic merge algorithm due to logical error in code. 5. Data corruption in memory due" - " to bug in code. 6. Data corruption in memory due to hardware issue. 7. Manual modification of" - " source data after server startup. 8. Manual modification of checksums stored in ZooKeeper. 9." - " Part format related settings like 'enable_mixed_granularity_parts' are different on different" - " replicas. We will download merged part from replica to force byte-identical result.", + "{}. Data after merge is not byte-identical to data on other replicas. There could be several reasons:" + " 1. Using newer version of compression library after server update." + " 2. Using another compression method." + " 3. 
Non-deterministic compression algorithm (highly unlikely)." + " 4. Non-deterministic merge algorithm due to logical error in code." + " 5. Data corruption in memory due to bug in code." + " 6. Data corruption in memory due to hardware issue." + " 7. Manual modification of source data after server startup." + " 8. Manual modification of checksums stored in ZooKeeper." + " 9. Part format related settings like 'enable_mixed_granularity_parts' are different on different replicas." + " We will download merged part from replica to force byte-identical result.", getCurrentExceptionMessage(false)); write_part_log(ExecutionStatus::fromCurrentException()); diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 4fbc3376b7e..f66586b121a 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -3629,7 +3630,7 @@ Pipe MergeTreeData::alterPartition( } -BackupEntries MergeTreeData::backup(const ASTs & partitions, ContextPtr local_context) +BackupEntries MergeTreeData::backupData(ContextPtr local_context, const ASTs & partitions) { DataPartsVector data_parts; if (partitions.empty()) @@ -3681,40 +3682,78 @@ BackupEntries MergeTreeData::backupDataParts(const DataPartsVector & data_parts) } -RestoreDataTasks MergeTreeData::restoreDataPartsFromBackup(const BackupPtr & backup, const String & data_path_in_backup, - const std::unordered_set & partition_ids, - SimpleIncrement * increment) +class MergeTreeDataRestoreTask : public IRestoreTask { - RestoreDataTasks restore_tasks; - - Strings part_names = backup->listFiles(data_path_in_backup); - for (const String & part_name : part_names) +public: + MergeTreeDataRestoreTask( + const std::shared_ptr & storage_, + const BackupPtr & backup_, + const String & data_path_in_backup_, + const std::unordered_set & partition_ids_, + SimpleIncrement * increment_) + : storage(storage_) + , backup(backup_) + , data_path_in_backup(data_path_in_backup_) + , partition_ids(partition_ids_) + , increment(increment_) { - auto part_info = MergeTreePartInfo::tryParsePartName(part_name, format_version); + } - if (!part_info) - continue; - - if (!partition_ids.empty() && !partition_ids.contains(part_info->partition_id)) - continue; - - UInt64 total_size_of_part = 0; - Strings filenames = backup->listFiles(data_path_in_backup + part_name + "/", ""); - for (const String & filename : filenames) - total_size_of_part += backup->getFileSize(data_path_in_backup + part_name + "/" + filename); - - std::shared_ptr reservation = getStoragePolicy()->reserveAndCheck(total_size_of_part); - - auto restore_task = [this, - backup, - data_path_in_backup, - part_name, - part_info = std::move(part_info), - filenames = std::move(filenames), - reservation, - increment]() + RestoreTasks run() override + { + RestoreTasks restore_part_tasks; + Strings part_names = backup->listFiles(data_path_in_backup); + for (const String & part_name : part_names) { + const auto part_info = MergeTreePartInfo::tryParsePartName(part_name, storage->format_version); + if (!part_info) + continue; + + if (!partition_ids.empty() && !partition_ids.contains(part_info->partition_id)) + continue; + + restore_part_tasks.push_back( + std::make_unique(storage, backup, data_path_in_backup, part_name, *part_info, increment)); + } + return restore_part_tasks; + } + +private: + std::shared_ptr storage; + BackupPtr backup; + String data_path_in_backup; + std::unordered_set 
partition_ids; + SimpleIncrement * increment; + + class RestorePartTask : public IRestoreTask + { + public: + RestorePartTask( + const std::shared_ptr & storage_, + const BackupPtr & backup_, + const String & data_path_in_backup_, + const String & part_name_, + const MergeTreePartInfo & part_info_, + SimpleIncrement * increment_) + : storage(storage_) + , backup(backup_) + , data_path_in_backup(data_path_in_backup_) + , part_name(part_name_) + , part_info(part_info_) + , increment(increment_) + { + } + + RestoreTasks run() override + { + UInt64 total_size_of_part = 0; + Strings filenames = backup->listFiles(data_path_in_backup + part_name + "/", ""); + for (const String & filename : filenames) + total_size_of_part += backup->getFileSize(data_path_in_backup + part_name + "/" + filename); + + std::shared_ptr reservation = storage->getStoragePolicy()->reserveAndCheck(total_size_of_part); auto disk = reservation->getDisk(); + String relative_data_path = storage->getRelativeDataPath(); auto temp_part_dir_owner = std::make_shared(disk, relative_data_path + "restoring_" + part_name + "_"); String temp_part_dir = temp_part_dir_owner->getPath(); @@ -3729,18 +3768,33 @@ RestoreDataTasks MergeTreeData::restoreDataPartsFromBackup(const BackupPtr & bac auto read_buffer = backup_entry->getReadBuffer(); auto write_buffer = disk->writeFile(temp_part_dir + "/" + filename); copyData(*read_buffer, *write_buffer); + reservation->update(reservation->getSize() - backup_entry->getSize()); } auto single_disk_volume = std::make_shared(disk->getName(), disk, 0); - auto part = createPart(part_name, *part_info, single_disk_volume, relative_temp_part_dir); + auto part = storage->createPart(part_name, part_info, single_disk_volume, relative_temp_part_dir); part->loadColumnsChecksumsIndexes(false, true); - renameTempPartAndAdd(part, increment); - }; + storage->renameTempPartAndAdd(part, increment); + return {}; + } - restore_tasks.emplace_back(std::move(restore_task)); - } + private: + std::shared_ptr storage; + BackupPtr backup; + String data_path_in_backup; + String part_name; + MergeTreePartInfo part_info; + SimpleIncrement * increment; + }; +}; - return restore_tasks; + +RestoreTaskPtr MergeTreeData::restoreDataParts(const std::unordered_set & partition_ids, + const BackupPtr & backup, const String & data_path_in_backup, + SimpleIncrement * increment) +{ + return std::make_unique( + std::static_pointer_cast(shared_from_this()), backup, data_path_in_backup, partition_ids, increment); } diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index fca052d5538..fb839e5a0dd 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -674,15 +674,18 @@ public: ContextPtr context, TableLockHolder & table_lock_holder); + /// Storage has data to backup. + bool hasDataToBackup() const override { return true; } + /// Prepares entries to backup data of the storage. - BackupEntries backup(const ASTs & partitions, ContextPtr context) override; + BackupEntries backupData(ContextPtr context, const ASTs & partitions) override; static BackupEntries backupDataParts(const DataPartsVector & data_parts); /// Extract data from the backup and put it to the storage. 
- RestoreDataTasks restoreDataPartsFromBackup( + RestoreTaskPtr restoreDataParts( + const std::unordered_set & partition_ids, const BackupPtr & backup, const String & data_path_in_backup, - const std::unordered_set & partition_ids, SimpleIncrement * increment); /// Moves partition to specified Disk diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index 7a85791d172..737e0c9d4b7 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -399,6 +399,7 @@ bool MergeTreeWhereOptimizer::cannotBeMoved(const ASTPtr & ptr, bool is_final) c return true; /// disallow GLOBAL IN, GLOBAL NOT IN + /// TODO why? if ("globalIn" == function_ptr->name || "globalNotIn" == function_ptr->name) return true; diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index 3f220566260..309432e4675 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -185,7 +185,8 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit ProfileEvents::increment(ProfileEvents::DataAfterMutationDiffersFromReplica); - LOG_ERROR(log, "{}. Data after mutation is not byte-identical to data on another replicas. We will download merged part from replica to force byte-identical result.", getCurrentExceptionMessage(false)); + LOG_ERROR(log, "{}. Data after mutation is not byte-identical to data on other replicas. " + "We will download merged part from replica to force byte-identical result.", getCurrentExceptionMessage(false)); write_part_log(ExecutionStatus::fromCurrentException()); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index de34929b43e..dc52660f1f6 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -240,7 +240,7 @@ void ReplicatedMergeTreeRestartingThread::removeFailedQuorumParts() return; /// Firstly, remove parts from ZooKeeper - storage.tryRemovePartsFromZooKeeperWithRetries(failed_parts); + storage.removePartsFromZooKeeperWithRetries(failed_parts); for (const auto & part_name : failed_parts) { diff --git a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp index 5b963a544c8..e3aa4ff82a5 100644 --- a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.cpp @@ -98,8 +98,24 @@ MaterializedPostgreSQLConsumer::StorageData::Buffer::Buffer( } +void MaterializedPostgreSQLConsumer::assertCorrectInsertion(StorageData::Buffer & buffer, size_t column_idx) +{ + if (column_idx >= buffer.description.sample_block.columns() + || column_idx >= buffer.description.types.size() + || column_idx >= buffer.columns.size()) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Attempt to insert into buffer at position: {}, but block columns size is {}, types size: {}, columns size: {}, buffer structure: {}", + column_idx, + buffer.description.sample_block.columns(), buffer.description.types.size(), buffer.columns.size(), + buffer.description.sample_block.dumpStructure()); +} + + void MaterializedPostgreSQLConsumer::insertValue(StorageData::Buffer & buffer, const std::string & value, size_t column_idx) { + assertCorrectInsertion(buffer, column_idx); + 
const auto & sample = buffer.description.sample_block.getByPosition(column_idx); bool is_nullable = buffer.description.types[column_idx].second; @@ -134,6 +150,8 @@ void MaterializedPostgreSQLConsumer::insertValue(StorageData::Buffer & buffer, c void MaterializedPostgreSQLConsumer::insertDefaultValue(StorageData::Buffer & buffer, size_t column_idx) { + assertCorrectInsertion(buffer, column_idx); + const auto & sample = buffer.description.sample_block.getByPosition(column_idx); insertDefaultPostgreSQLValue(*buffer.columns[column_idx], *sample.column); } @@ -515,13 +533,14 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl void MaterializedPostgreSQLConsumer::syncTables() { - try + for (const auto & table_name : tables_to_sync) { - for (const auto & table_name : tables_to_sync) - { - auto & storage_data = storages.find(table_name)->second; - Block result_rows = storage_data.buffer.description.sample_block.cloneWithColumns(std::move(storage_data.buffer.columns)); + auto & storage_data = storages.find(table_name)->second; + Block result_rows = storage_data.buffer.description.sample_block.cloneWithColumns(std::move(storage_data.buffer.columns)); + storage_data.buffer.columns = storage_data.buffer.description.sample_block.cloneEmptyColumns(); + try + { if (result_rows.rows()) { auto storage = storage_data.storage; @@ -543,13 +562,18 @@ void MaterializedPostgreSQLConsumer::syncTables() CompletedPipelineExecutor executor(io.pipeline); executor.execute(); - - storage_data.buffer.columns = storage_data.buffer.description.sample_block.cloneEmptyColumns(); } } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } - LOG_DEBUG(log, "Table sync end for {} tables, last lsn: {} = {}, (attempted lsn {})", tables_to_sync.size(), current_lsn, getLSNValue(current_lsn), getLSNValue(final_lsn)); + LOG_DEBUG(log, "Table sync end for {} tables, last lsn: {} = {}, (attempted lsn {})", tables_to_sync.size(), current_lsn, getLSNValue(current_lsn), getLSNValue(final_lsn)); + try + { auto tx = std::make_shared(connection->getRef()); current_lsn = advanceLSN(tx); tables_to_sync.clear(); diff --git a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h index a01f9394190..5193feee708 100644 --- a/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h +++ b/src/Storages/PostgreSQL/MaterializedPostgreSQLConsumer.h @@ -122,6 +122,8 @@ private: void markTableAsSkipped(Int32 relation_id, const String & relation_name); + static void assertCorrectInsertion(StorageData::Buffer & buffer, size_t column_idx); + /// lsn - log sequnce nuumber, like wal offset (64 bit). 
static Int64 getLSNValue(const std::string & lsn) { diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index 876ba9b1698..fa90295bcd6 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -64,8 +64,8 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( bool is_attach_, const MaterializedPostgreSQLSettings & replication_settings, bool is_materialized_postgresql_database_) - : log(&Poco::Logger::get("PostgreSQLReplicationHandler")) - , context(context_) + : WithContext(context_->getGlobalContext()) + , log(&Poco::Logger::get("PostgreSQLReplicationHandler")) , is_attach(is_attach_) , postgres_database(postgres_database_) , postgres_schema(replication_settings.materialized_postgresql_schema) @@ -94,9 +94,9 @@ PostgreSQLReplicationHandler::PostgreSQLReplicationHandler( } publication_name = fmt::format("{}_ch_publication", replication_identifier); - startup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ checkConnectionAndStart(); }); - consumer_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ consumerFunc(); }); - cleanup_task = context->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ cleanupFunc(); }); + startup_task = getContext()->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ checkConnectionAndStart(); }); + consumer_task = getContext()->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ consumerFunc(); }); + cleanup_task = getContext()->getSchedulePool().createTask("PostgreSQLReplicaStartup", [this]{ cleanupFunc(); }); } @@ -296,7 +296,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) /// (Apart from the case, when shutdownFinal is called). /// Handler uses it only for loadFromSnapshot and shutdown methods. consumer = std::make_shared( - context, + getContext(), std::move(tmp_connection), replication_slot, publication_name, @@ -921,9 +921,9 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectoras (); - auto materialized_table_lock = materialized_storage->lockForShare(String(), context->getSettingsRef().lock_acquire_timeout); + auto materialized_table_lock = materialized_storage->lockForShare(String(), getContext()->getSettingsRef().lock_acquire_timeout); /// If for some reason this temporary table already exists - also drop it. auto temp_materialized_storage = materialized_storage->createTemporary(); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 263095ec9c2..16e531f5247 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -13,7 +13,7 @@ namespace DB class StorageMaterializedPostgreSQL; struct SettingChange; -class PostgreSQLReplicationHandler +class PostgreSQLReplicationHandler : WithContext { friend class TemporaryReplicationSlot; @@ -98,7 +98,6 @@ private: std::pair getSchemaAndTableName(const String & table_name) const; Poco::Logger * log; - ContextPtr context; /// If it is not attach, i.e. a create query, then if publication already exists - always drop it. 
bool is_attach; diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 93cceadaf93..761b4ecdeb1 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -179,8 +179,9 @@ std::unique_ptr createReadBuffer( method = chooseCompressionMethod(current_path, compression_method); } - /// For clickhouse-local add progress callback to display progress bar. - if (context->getApplicationType() == Context::ApplicationType::LOCAL) + /// For clickhouse-local and clickhouse-client add progress callback to display progress bar. + if (context->getApplicationType() == Context::ApplicationType::LOCAL + || context->getApplicationType() == Context::ApplicationType::CLIENT) { auto & in = static_cast(*nested_buffer); in.setProgressCallback(context); @@ -643,7 +644,9 @@ Pipe StorageFile::read( /// Set total number of bytes to process. For progress bar. auto progress_callback = context->getFileProgressCallback(); - if (context->getApplicationType() == Context::ApplicationType::LOCAL && progress_callback) + if ((context->getApplicationType() == Context::ApplicationType::LOCAL + || context->getApplicationType() == Context::ApplicationType::CLIENT) + && progress_callback) progress_callback(FileProgress(0, total_bytes_to_read)); for (size_t i = 0; i < num_streams; ++i) diff --git a/src/Storages/StorageLog.cpp b/src/Storages/StorageLog.cpp index d3923a190a1..11116780734 100644 --- a/src/Storages/StorageLog.cpp +++ b/src/Storages/StorageLog.cpp @@ -25,9 +25,10 @@ #include #include -#include +#include #include #include +#include #include #include @@ -887,7 +888,7 @@ IStorage::ColumnSizeByName StorageLog::getColumnSizes() const } -BackupEntries StorageLog::backup(const ASTs & partitions, ContextPtr context) +BackupEntries StorageLog::backupData(ContextPtr context, const ASTs & partitions) { if (!partitions.empty()) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table engine {} doesn't support partitions", getName()); @@ -913,12 +914,12 @@ BackupEntries StorageLog::backup(const ASTs & partitions, ContextPtr context) { /// We make a copy of the data file because it can be changed later in write() or in truncate(). String data_file_name = fileName(data_file.path); - String temp_file_path = temp_dir + "/" + data_file_name; - disk->copy(data_file.path, disk, temp_file_path); + String hardlink_file_path = temp_dir + "/" + data_file_name; + disk->createHardLink(data_file.path, hardlink_file_path); backup_entries.emplace_back( data_file_name, - std::make_unique( - disk, temp_file_path, file_checker.getFileSize(data_file.path), std::nullopt, temp_dir_owner)); + std::make_unique( + disk, hardlink_file_path, file_checker.getFileSize(data_file.path), std::nullopt, temp_dir_owner)); } /// __marks.mrk @@ -926,12 +927,12 @@ BackupEntries StorageLog::backup(const ASTs & partitions, ContextPtr context) { /// We make a copy of the data file because it can be changed later in write() or in truncate(). 
String marks_file_name = fileName(marks_file_path); - String temp_file_path = temp_dir + "/" + marks_file_name; - disk->copy(marks_file_path, disk, temp_file_path); + String hardlink_file_path = temp_dir + "/" + marks_file_name; + disk->createHardLink(marks_file_path, hardlink_file_path); backup_entries.emplace_back( marks_file_name, - std::make_unique( - disk, temp_file_path, file_checker.getFileSize(marks_file_path), std::nullopt, temp_dir_owner)); + std::make_unique( + disk, hardlink_file_path, file_checker.getFileSize(marks_file_path), std::nullopt, temp_dir_owner)); } /// sizes.json @@ -952,43 +953,57 @@ BackupEntries StorageLog::backup(const ASTs & partitions, ContextPtr context) return backup_entries; } -RestoreDataTasks StorageLog::restoreFromBackup(const BackupPtr & backup, const String & data_path_in_backup, const ASTs & partitions, ContextMutablePtr context) +class LogRestoreTask : public IRestoreTask { - if (!partitions.empty()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table engine {} doesn't support partitions", getName()); + using WriteLock = StorageLog::WriteLock; + using Mark = StorageLog::Mark; - auto restore_task = [this, backup, data_path_in_backup, context]() +public: + LogRestoreTask( + std::shared_ptr storage_, const BackupPtr & backup_, const String & data_path_in_backup_, ContextMutablePtr context_) + : storage(storage_), backup(backup_), data_path_in_backup(data_path_in_backup_), context(context_) + { + } + + RestoreTasks run() override { auto lock_timeout = getLockTimeout(context); - WriteLock lock{rwlock, lock_timeout}; + WriteLock lock{storage->rwlock, lock_timeout}; if (!lock) throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED); + const auto num_data_files = storage->num_data_files; if (!num_data_files) - return; + return {}; + + auto & file_checker = storage->file_checker; /// Load the marks if not loaded yet. We have to do that now because we're going to update these marks. - loadMarks(lock); + storage->loadMarks(lock); /// If there were no files, save zero file sizes to be able to rollback in case of error. - saveFileSizes(lock); + storage->saveFileSizes(lock); try { /// Append data files. + auto & data_files = storage->data_files; for (const auto & data_file : data_files) { String file_path_in_backup = data_path_in_backup + fileName(data_file.path); auto backup_entry = backup->readFile(file_path_in_backup); + const auto & disk = storage->disk; auto in = backup_entry->getReadBuffer(); - auto out = disk->writeFile(data_file.path, max_compress_block_size, WriteMode::Append); + auto out = disk->writeFile(data_file.path, storage->max_compress_block_size, WriteMode::Append); copyData(*in, *out); } + const bool use_marks_file = storage->use_marks_file; if (use_marks_file) { /// Append marks. size_t num_extra_marks = 0; + const auto & marks_file_path = storage->marks_file_path; String file_path_in_backup = data_path_in_backup + fileName(marks_file_path); size_t file_size = backup->getFileSize(file_path_in_backup); if (file_size % (num_data_files * sizeof(Mark)) != 0) @@ -1027,19 +1042,34 @@ RestoreDataTasks StorageLog::restoreFromBackup(const BackupPtr & backup, const S } /// Finish writing. - saveMarks(lock); - saveFileSizes(lock); + storage->saveMarks(lock); + storage->saveFileSizes(lock); } catch (...) { /// Rollback partial writes. 
file_checker.repair(); - removeUnsavedMarks(lock); + storage->removeUnsavedMarks(lock); throw; } - }; - return {restore_task}; + return {}; + } + +private: + std::shared_ptr storage; + BackupPtr backup; + String data_path_in_backup; + ContextMutablePtr context; +}; + +RestoreTaskPtr StorageLog::restoreData(ContextMutablePtr context, const ASTs & partitions, const BackupPtr & backup, const String & data_path_in_backup, const StorageRestoreSettings &) +{ + if (!partitions.empty()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table engine {} doesn't support partitions", getName()); + + return std::make_unique( + typeid_cast>(shared_from_this()), backup, data_path_in_backup, context); } diff --git a/src/Storages/StorageLog.h b/src/Storages/StorageLog.h index d6f3208f693..b9255c16f2b 100644 --- a/src/Storages/StorageLog.h +++ b/src/Storages/StorageLog.h @@ -23,6 +23,7 @@ class StorageLog final : public shared_ptr_helper, public IStorage { friend class LogSource; friend class LogSink; + friend class LogRestoreTask; friend struct shared_ptr_helper; public: @@ -51,8 +52,9 @@ public: bool supportsSubcolumns() const override { return true; } ColumnSizeByName getColumnSizes() const override; - BackupEntries backup(const ASTs & partitions, ContextPtr context) override; - RestoreDataTasks restoreFromBackup(const BackupPtr & backup, const String & data_path_in_backup, const ASTs & partitions, ContextMutablePtr context) override; + bool hasDataToBackup() const override { return true; } + BackupEntries backupData(ContextPtr context, const ASTs & partitions) override; + RestoreTaskPtr restoreData(ContextMutablePtr context, const ASTs & partitions, const BackupPtr & backup, const String & data_path_in_backup, const StorageRestoreSettings & restore_settings) override; protected: /** Attach the table with the appropriate name, along the appropriate path (with / at the end), diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 008b42e3299..0c79c31eb7a 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -25,6 +25,9 @@ #include #include +#include +#include + namespace DB { @@ -430,6 +433,20 @@ Strings StorageMaterializedView::getDataPaths() const return {}; } +BackupEntries StorageMaterializedView::backupData(ContextPtr context_, const ASTs & partitions_) +{ + if (!hasInnerTable()) + return {}; + return getTargetTable()->backupData(context_, partitions_); +} + +RestoreTaskPtr StorageMaterializedView::restoreData(ContextMutablePtr context_, const ASTs & partitions_, const BackupPtr & backup_, const String & data_path_in_backup_, const StorageRestoreSettings & restore_settings_) +{ + if (!hasInnerTable()) + return {}; + return getTargetTable()->restoreData(context_, partitions_, backup_, data_path_in_backup_, restore_settings_); +} + ActionLock StorageMaterializedView::getActionLock(StorageActionBlockType type) { if (has_inner_table) diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index 838f5278aa9..41c97fbc4d8 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -97,6 +97,10 @@ public: Strings getDataPaths() const override; + bool hasDataToBackup() const override { return hasInnerTable(); } + BackupEntries backupData(ContextPtr context_, const ASTs & partitions_) override; + RestoreTaskPtr restoreData(ContextMutablePtr context_, const ASTs & partitions_, const BackupPtr & backup, const String & data_path_in_backup_, 
const StorageRestoreSettings & restore_settings_) override; + private: /// Will be initialized in constructor StorageID target_table_id = StorageID::createEmpty(); diff --git a/src/Storages/StorageMemory.cpp b/src/Storages/StorageMemory.cpp index c3601b33a04..30be297194a 100644 --- a/src/Storages/StorageMemory.cpp +++ b/src/Storages/StorageMemory.cpp @@ -17,6 +17,17 @@ #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + namespace DB { @@ -24,6 +35,7 @@ namespace DB namespace ErrorCodes { extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int NOT_IMPLEMENTED; } @@ -364,6 +376,193 @@ void StorageMemory::truncate( total_size_rows.store(0, std::memory_order_relaxed); } + +class MemoryBackupEntriesBatch : public shared_ptr_helper, public IBackupEntriesBatch +{ +private: + friend struct shared_ptr_helper; + + MemoryBackupEntriesBatch( + const StorageMetadataPtr & metadata_snapshot_, const std::shared_ptr blocks_, UInt64 max_compress_block_size_) + : IBackupEntriesBatch({"data.bin", "index.mrk", "sizes.json"}) + , metadata_snapshot(metadata_snapshot_) + , blocks(blocks_) + , max_compress_block_size(max_compress_block_size_) + { + } + + static constexpr const size_t kDataBinPos = 0; + static constexpr const size_t kIndexMrkPos = 1; + static constexpr const size_t kSizesJsonPos = 2; + static constexpr const size_t kSize = 3; + + void initialize() + { + std::call_once(initialized_flag, [this]() + { + temp_dir_owner.emplace(); + auto temp_dir = temp_dir_owner->path(); + fs::create_directories(temp_dir); + + /// Writing data.bin + constexpr char data_file_name[] = "data.bin"; + String data_file_path = temp_dir + "/" + data_file_name; + IndexForNativeFormat index; + { + auto data_out_compressed = std::make_unique(data_file_path); + CompressedWriteBuffer data_out{*data_out_compressed, CompressionCodecFactory::instance().getDefaultCodec(), max_compress_block_size}; + NativeWriter block_out{data_out, 0, metadata_snapshot->getSampleBlock(), false, &index}; + for (const auto & block : *blocks) + block_out.write(block); + } + + /// Writing index.mrk + constexpr char index_file_name[] = "index.mrk"; + String index_file_path = temp_dir + "/" + index_file_name; + { + auto index_out_compressed = std::make_unique(index_file_path); + CompressedWriteBuffer index_out{*index_out_compressed}; + index.write(index_out); + } + + /// Writing sizes.json + constexpr char sizes_file_name[] = "sizes.json"; + String sizes_file_path = temp_dir + "/" + sizes_file_name; + FileChecker file_checker{sizes_file_path}; + file_checker.update(data_file_path); + file_checker.update(index_file_path); + file_checker.save(); + + file_paths[kDataBinPos] = data_file_path; + file_sizes[kDataBinPos] = file_checker.getFileSize(data_file_path); + + file_paths[kIndexMrkPos] = index_file_path; + file_sizes[kIndexMrkPos] = file_checker.getFileSize(index_file_path); + + file_paths[kSizesJsonPos] = sizes_file_path; + file_sizes[kSizesJsonPos] = fs::file_size(sizes_file_path); + + /// We don't need to keep `blocks` any longer. 
+ blocks.reset(); + metadata_snapshot.reset(); + }); + } + + std::unique_ptr getReadBuffer(size_t index) override + { + initialize(); + return createReadBufferFromFileBase(file_paths[index], {}); + } + + UInt64 getSize(size_t index) override + { + initialize(); + return file_sizes[index]; + } + + StorageMetadataPtr metadata_snapshot; + std::shared_ptr blocks; + UInt64 max_compress_block_size; + std::once_flag initialized_flag; + std::optional temp_dir_owner; + std::array file_paths; + std::array file_sizes; +}; + + +BackupEntries StorageMemory::backupData(ContextPtr context, const ASTs & partitions) +{ + if (!partitions.empty()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table engine {} doesn't support partitions", getName()); + + return MemoryBackupEntriesBatch::create(getInMemoryMetadataPtr(), data.get(), context->getSettingsRef().max_compress_block_size) + ->getBackupEntries(); +} + + +class MemoryRestoreTask : public IRestoreTask +{ +public: + MemoryRestoreTask( + std::shared_ptr storage_, const BackupPtr & backup_, const String & data_path_in_backup_, ContextMutablePtr context_) + : storage(storage_), backup(backup_), data_path_in_backup(data_path_in_backup_), context(context_) + { + } + + RestoreTasks run() override + { + /// Our data are in the StripeLog format. + + /// Reading index.mrk + IndexForNativeFormat index; + { + String index_file_path = data_path_in_backup + "index.mrk"; + auto backup_entry = backup->readFile(index_file_path); + auto in = backup_entry->getReadBuffer(); + CompressedReadBuffer compressed_in{*in}; + index.read(compressed_in); + } + + /// Reading data.bin + Blocks new_blocks; + size_t new_bytes = 0; + size_t new_rows = 0; + { + String data_file_path = data_path_in_backup + "data.bin"; + auto backup_entry = backup->readFile(data_file_path); + std::unique_ptr in = backup_entry->getReadBuffer(); + std::optional temp_data_copy; + if (!dynamic_cast(in.get())) + { + temp_data_copy.emplace(); + auto temp_data_copy_out = std::make_unique(temp_data_copy->path()); + copyData(*in, *temp_data_copy_out); + temp_data_copy_out.reset(); + in = createReadBufferFromFileBase(temp_data_copy->path(), {}); + } + std::unique_ptr in_from_file{static_cast(in.release())}; + CompressedReadBufferFromFile compressed_in{std::move(in_from_file)}; + NativeReader block_in{compressed_in, 0, index.blocks.begin(), index.blocks.end()}; + + while (auto block = block_in.read()) + { + new_bytes += block.bytes(); + new_rows += block.rows(); + new_blocks.push_back(std::move(block)); + } + } + + /// Append old blocks with the new ones. + auto old_blocks = storage->data.get(); + Blocks old_and_new_blocks = *old_blocks; + old_and_new_blocks.insert(old_and_new_blocks.end(), std::make_move_iterator(new_blocks.begin()), std::make_move_iterator(new_blocks.end())); + + /// Finish restoring. 
+ storage->data.set(std::make_unique(std::move(old_and_new_blocks))); + storage->total_size_bytes += new_bytes; + storage->total_size_rows += new_rows; + + return {}; + } + +private: + std::shared_ptr storage; + BackupPtr backup; + String data_path_in_backup; + ContextMutablePtr context; +}; + + +RestoreTaskPtr StorageMemory::restoreData(ContextMutablePtr context, const ASTs & partitions, const BackupPtr & backup, const String & data_path_in_backup, const StorageRestoreSettings &) +{ + if (!partitions.empty()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table engine {} doesn't support partitions", getName()); + + return std::make_unique( + typeid_cast>(shared_from_this()), backup, data_path_in_backup, context); +} + + std::optional StorageMemory::totalRows(const Settings &) const { /// All modifications of these counters are done under mutex which automatically guarantees synchronization/consistency diff --git a/src/Storages/StorageMemory.h b/src/Storages/StorageMemory.h index 1c4421e51a6..20f47828846 100644 --- a/src/Storages/StorageMemory.h +++ b/src/Storages/StorageMemory.h @@ -22,6 +22,7 @@ namespace DB class StorageMemory final : public shared_ptr_helper, public IStorage { friend class MemorySink; +friend class MemoryRestoreTask; friend struct shared_ptr_helper; public: @@ -65,6 +66,10 @@ public: void truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) override; + bool hasDataToBackup() const override { return true; } + BackupEntries backupData(ContextPtr context, const ASTs & partitions) override; + RestoreTaskPtr restoreData(ContextMutablePtr context, const ASTs & partitions, const BackupPtr & backup, const String & data_path_in_backup, const StorageRestoreSettings & restore_settings) override; + std::optional totalRows(const Settings &) const override; std::optional totalBytes(const Settings &) const override; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 812e2264adb..7f4c3deca37 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -3,7 +3,7 @@ #include #include - +#include #include #include #include @@ -1644,9 +1644,9 @@ CheckResults StorageMergeTree::checkData(const ASTPtr & query, ContextPtr local_ } -RestoreDataTasks StorageMergeTree::restoreFromBackup(const BackupPtr & backup, const String & data_path_in_backup, const ASTs & partitions, ContextMutablePtr local_context) +RestoreTaskPtr StorageMergeTree::restoreData(ContextMutablePtr local_context, const ASTs & partitions, const BackupPtr & backup, const String & data_path_in_backup, const StorageRestoreSettings &) { - return restoreDataPartsFromBackup(backup, data_path_in_backup, getPartitionIDsFromQuery(partitions, local_context), &increment); + return restoreDataParts(getPartitionIDsFromQuery(partitions, local_context), backup, data_path_in_backup, &increment); } diff --git a/src/Storages/StorageMergeTree.h b/src/Storages/StorageMergeTree.h index abdaf6934d6..a1fc310d912 100644 --- a/src/Storages/StorageMergeTree.h +++ b/src/Storages/StorageMergeTree.h @@ -97,7 +97,7 @@ public: CheckResults checkData(const ASTPtr & query, ContextPtr context) override; - RestoreDataTasks restoreFromBackup(const BackupPtr & backup, const String & data_path_in_backup, const ASTs & partitions, ContextMutablePtr context) override; + RestoreTaskPtr restoreData(ContextMutablePtr context, const ASTs & partitions, const BackupPtr & backup, const String & data_path_in_backup, const StorageRestoreSettings & restore_settings) 
override; bool scheduleDataProcessingJob(BackgroundJobsAssignee & assignee) override; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 495fc141d07..d9f72cf7feb 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1312,10 +1312,14 @@ void StorageReplicatedMergeTree::checkPartChecksumsAndAddCommitOps(const zkutil: if (replica_part_header.getColumnsHash() != local_part_header.getColumnsHash()) { - /// Either it's a bug or ZooKeeper contains broken data. - /// TODO Fix KILL MUTATION and replace CHECKSUM_DOESNT_MATCH with LOGICAL_ERROR - /// (some replicas may skip killed mutation even if it was executed on other replicas) - throw Exception(ErrorCodes::CHECKSUM_DOESNT_MATCH, "Part {} from {} has different columns hash", part_name, replica); + /// Currently there are two (known) cases when it may happen: + /// - KILL MUTATION query had removed mutation before all replicas have executed assigned MUTATE_PART entries. + /// Some replicas may skip this mutation and update part version without actually applying any changes. + /// It leads to mismatching checksum if changes were applied on other replicas. + /// - ALTER_METADATA and MERGE_PARTS were reordered on some replicas. + /// It may lead to different number of columns in merged parts on these replicas. + throw Exception(ErrorCodes::CHECKSUM_DOESNT_MATCH, "Part {} from {} has different columns hash " + "(it may rarely happen on race condition with KILL MUTATION or ALTER COLUMN).", part_name, replica); } replica_part_header.getChecksums().checkEqual(local_part_header.getChecksums(), true); @@ -1824,7 +1828,7 @@ void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) } /// Forcibly remove parts from ZooKeeper - tryRemovePartsFromZooKeeperWithRetries(parts_to_remove); + removePartsFromZooKeeperWithRetries(parts_to_remove); if (entry.detach) LOG_DEBUG(log, "Detached {} parts inside {}.", parts_to_remove.size(), entry.new_part_name); @@ -1946,7 +1950,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) if (parts_to_add.empty()) { LOG_INFO(log, "All parts from REPLACE PARTITION command have been already attached"); - tryRemovePartsFromZooKeeperWithRetries(parts_to_remove); + removePartsFromZooKeeperWithRetries(parts_to_remove); return true; } @@ -2190,7 +2194,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) throw; } - tryRemovePartsFromZooKeeperWithRetries(parts_to_remove); + removePartsFromZooKeeperWithRetries(parts_to_remove); res_parts.clear(); parts_to_remove.clear(); cleanup_thread.wakeup(); @@ -2422,7 +2426,7 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo "or removed broken part from ZooKeeper", source_replica); } - tryRemovePartsFromZooKeeperWithRetries(parts_to_remove_from_zk); + removePartsFromZooKeeperWithRetries(parts_to_remove_from_zk); auto local_active_parts = getDataParts(); @@ -6008,16 +6012,16 @@ void StorageReplicatedMergeTree::clearOldPartsAndRemoveFromZK() } -bool StorageReplicatedMergeTree::tryRemovePartsFromZooKeeperWithRetries(DataPartsVector & parts, size_t max_retries) +void StorageReplicatedMergeTree::removePartsFromZooKeeperWithRetries(DataPartsVector & parts, size_t max_retries) { Strings part_names_to_remove; for (const auto & part : parts) part_names_to_remove.emplace_back(part->name); - return tryRemovePartsFromZooKeeperWithRetries(part_names_to_remove, max_retries); + return 
removePartsFromZooKeeperWithRetries(part_names_to_remove, max_retries); } -bool StorageReplicatedMergeTree::tryRemovePartsFromZooKeeperWithRetries(const Strings & part_names, size_t max_retries) +void StorageReplicatedMergeTree::removePartsFromZooKeeperWithRetries(const Strings & part_names, size_t max_retries) { size_t num_tries = 0; bool success = false; @@ -6082,7 +6086,8 @@ bool StorageReplicatedMergeTree::tryRemovePartsFromZooKeeperWithRetries(const St std::this_thread::sleep_for(std::chrono::milliseconds(1000)); } - return success; + if (!success) + throw Exception(ErrorCodes::UNFINISHED, "Failed to remove parts from ZooKeeper after {} retries", num_tries); } void StorageReplicatedMergeTree::removePartsFromZooKeeper( @@ -6395,7 +6400,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom( lock.assumeUnlocked(); /// Forcibly remove replaced parts from ZooKeeper - tryRemovePartsFromZooKeeperWithRetries(parts_to_remove); + removePartsFromZooKeeperWithRetries(parts_to_remove); /// Speedup removing of replaced parts from filesystem parts_to_remove.clear(); @@ -6602,7 +6607,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta for (auto & lock : ephemeral_locks) lock.assumeUnlocked(); - tryRemovePartsFromZooKeeperWithRetries(parts_to_remove); + removePartsFromZooKeeperWithRetries(parts_to_remove); parts_to_remove.clear(); cleanup_thread.wakeup(); diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 31d6e069fe1..c567447e9f2 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -491,8 +491,9 @@ private: void removePartsFromZooKeeper(zkutil::ZooKeeperPtr & zookeeper, const Strings & part_names, NameSet * parts_should_be_retried = nullptr); - bool tryRemovePartsFromZooKeeperWithRetries(const Strings & part_names, size_t max_retries = 5); - bool tryRemovePartsFromZooKeeperWithRetries(DataPartsVector & parts, size_t max_retries = 5); + /// Remove parts from ZooKeeper, throw exception if unable to do so after max_retries. + void removePartsFromZooKeeperWithRetries(const Strings & part_names, size_t max_retries = 5); + void removePartsFromZooKeeperWithRetries(DataPartsVector & parts, size_t max_retries = 5); /// Removes a part from ZooKeeper and adds a task to the queue to download it. It is supposed to do this with broken parts. 
void removePartAndEnqueueFetch(const String & part_name); diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index f319bd1097b..ce31308fdd7 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #include #include @@ -374,6 +376,16 @@ static bool checkIfObjectExists(const std::shared_ptr & clien return false; } +// TODO: common thread pool for IO must be used instead after PR #35150 +static ThreadPool & getThreadPoolStorageS3() +{ + constexpr size_t pool_size = 100; + constexpr size_t queue_size = 1000000; + static ThreadPool pool(pool_size, pool_size, queue_size); + return pool; +} + + class StorageS3Sink : public SinkToStorage { public: @@ -398,7 +410,7 @@ public: std::make_unique( client, bucket, key, min_upload_part_size, upload_part_size_multiply_factor, upload_part_size_multiply_parts_count_threshold, - max_single_part_upload_size), compression_method, 3); + max_single_part_upload_size, std::nullopt, DBMS_DEFAULT_BUFFER_SIZE, threadPoolCallbackRunner(getThreadPoolStorageS3())), compression_method, 3); writer = FormatFactory::instance().getOutputFormatParallelIfPossible(format, *write_buf, sample_block, context, {}, format_settings); } diff --git a/src/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp index f1f84a88c36..274789f012b 100644 --- a/src/Storages/StorageStripeLog.cpp +++ b/src/Storages/StorageStripeLog.cpp @@ -35,9 +35,10 @@ #include #include -#include +#include #include #include +#include #include #include @@ -490,7 +491,7 @@ void StorageStripeLog::saveFileSizes(const WriteLock & /* already locked for wri } -BackupEntries StorageStripeLog::backup(const ASTs & partitions, ContextPtr context) +BackupEntries StorageStripeLog::backupData(ContextPtr context, const ASTs & partitions) { if (!partitions.empty()) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table engine {} doesn't support partitions", getName()); @@ -515,24 +516,24 @@ BackupEntries StorageStripeLog::backup(const ASTs & partitions, ContextPtr conte { /// We make a copy of the data file because it can be changed later in write() or in truncate(). String data_file_name = fileName(data_file_path); - String temp_file_path = temp_dir + "/" + data_file_name; - disk->copy(data_file_path, disk, temp_file_path); + String hardlink_file_path = temp_dir + "/" + data_file_name; + disk->createHardLink(data_file_path, hardlink_file_path); backup_entries.emplace_back( data_file_name, - std::make_unique( - disk, temp_file_path, file_checker.getFileSize(data_file_path), std::nullopt, temp_dir_owner)); + std::make_unique( + disk, hardlink_file_path, file_checker.getFileSize(data_file_path), std::nullopt, temp_dir_owner)); } /// index.mrk { /// We make a copy of the data file because it can be changed later in write() or in truncate(). 
String index_file_name = fileName(index_file_path); - String temp_file_path = temp_dir + "/" + index_file_name; - disk->copy(index_file_path, disk, temp_file_path); + String hardlink_file_path = temp_dir + "/" + index_file_name; + disk->createHardLink(index_file_path, hardlink_file_path); backup_entries.emplace_back( index_file_name, - std::make_unique( - disk, temp_file_path, file_checker.getFileSize(index_file_path), std::nullopt, temp_dir_owner)); + std::make_unique( + disk, hardlink_file_path, file_checker.getFileSize(index_file_path), std::nullopt, temp_dir_owner)); } /// sizes.json @@ -552,37 +553,51 @@ BackupEntries StorageStripeLog::backup(const ASTs & partitions, ContextPtr conte return backup_entries; } -RestoreDataTasks StorageStripeLog::restoreFromBackup(const BackupPtr & backup, const String & data_path_in_backup, const ASTs & partitions, ContextMutablePtr context) +class StripeLogRestoreTask : public IRestoreTask { - if (!partitions.empty()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table engine {} doesn't support partitions", getName()); + using WriteLock = StorageStripeLog::WriteLock; - auto restore_task = [this, backup, data_path_in_backup, context]() +public: + StripeLogRestoreTask( + const std::shared_ptr storage_, + const BackupPtr & backup_, + const String & data_path_in_backup_, + ContextMutablePtr context_) + : storage(storage_), backup(backup_), data_path_in_backup(data_path_in_backup_), context(context_) { - WriteLock lock{rwlock, getLockTimeout(context)}; + } + + RestoreTasks run() override + { + WriteLock lock{storage->rwlock, getLockTimeout(context)}; if (!lock) throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED); + auto & file_checker = storage->file_checker; + /// Load the indices if not loaded yet. We have to do that now because we're going to update these indices. - loadIndices(lock); + storage->loadIndices(lock); /// If there were no files, save zero file sizes to be able to rollback in case of error. - saveFileSizes(lock); + storage->saveFileSizes(lock); try { /// Append the data file. - auto old_data_size = file_checker.getFileSize(data_file_path); + auto old_data_size = file_checker.getFileSize(storage->data_file_path); { + const auto & data_file_path = storage->data_file_path; String file_path_in_backup = data_path_in_backup + fileName(data_file_path); auto backup_entry = backup->readFile(file_path_in_backup); + const auto & disk = storage->disk; auto in = backup_entry->getReadBuffer(); - auto out = disk->writeFile(data_file_path, max_compress_block_size, WriteMode::Append); + auto out = disk->writeFile(data_file_path, storage->max_compress_block_size, WriteMode::Append); copyData(*in, *out); } /// Append the index. { + const auto & index_file_path = storage->index_file_path; String index_path_in_backup = data_path_in_backup + fileName(index_file_path); IndexForNativeFormat extra_indices; auto backup_entry = backup->readFile(index_path_in_backup); @@ -597,23 +612,38 @@ RestoreDataTasks StorageStripeLog::restoreFromBackup(const BackupPtr & backup, c column.location.offset_in_compressed_file += old_data_size; } - insertAtEnd(indices.blocks, std::move(extra_indices.blocks)); + insertAtEnd(storage->indices.blocks, std::move(extra_indices.blocks)); } /// Finish writing. - saveIndices(lock); - saveFileSizes(lock); + storage->saveIndices(lock); + storage->saveFileSizes(lock); + return {}; } catch (...) { /// Rollback partial writes. 
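The restore task introduced here appends the backed-up data file onto whatever the table already contains, and then shifts every restored index entry's `offset_in_compressed_file` by the old data size before merging the indices. A simplified sketch of that append-and-shift step (illustrative structures, not the real `IndexForNativeFormat`):

```python
import os
import shutil


def restore_by_append(data_path, backup_data_path, existing_index, backup_index):
    """Append the backed-up data file and shift restored index offsets accordingly."""
    old_data_size = os.path.getsize(data_path) if os.path.exists(data_path) else 0

    with open(data_path, "ab") as out, open(backup_data_path, "rb") as src:
        shutil.copyfileobj(src, out)  # append backed-up bytes after the existing data

    for block in backup_index:  # e.g. {"offset_in_compressed_file": 0, ...}
        shifted = dict(block)
        shifted["offset_in_compressed_file"] += old_data_size
        existing_index.append(shifted)
    return existing_index
```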
file_checker.repair(); - removeUnsavedIndices(lock); + storage->removeUnsavedIndices(lock); throw; } + } - }; - return {restore_task}; +private: + std::shared_ptr storage; + BackupPtr backup; + String data_path_in_backup; + ContextMutablePtr context; +}; + + +RestoreTaskPtr StorageStripeLog::restoreData(ContextMutablePtr context, const ASTs & partitions, const BackupPtr & backup, const String & data_path_in_backup, const StorageRestoreSettings &) +{ + if (!partitions.empty()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table engine {} doesn't support partitions", getName()); + + return std::make_unique( + typeid_cast>(shared_from_this()), backup, data_path_in_backup, context); } diff --git a/src/Storages/StorageStripeLog.h b/src/Storages/StorageStripeLog.h index bab5116cfc1..223b662d13c 100644 --- a/src/Storages/StorageStripeLog.h +++ b/src/Storages/StorageStripeLog.h @@ -10,6 +10,7 @@ #include #include #include +#include namespace DB @@ -23,6 +24,7 @@ class StorageStripeLog final : public shared_ptr_helper, publi { friend class StripeLogSource; friend class StripeLogSink; + friend class StripeLogRestoreTask; friend struct shared_ptr_helper; public: @@ -50,8 +52,9 @@ public: void truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder&) override; - BackupEntries backup(const ASTs & partitions, ContextPtr context) override; - RestoreDataTasks restoreFromBackup(const BackupPtr & backup, const String & data_path_in_backup, const ASTs & partitions, ContextMutablePtr context) override; + bool hasDataToBackup() const override { return true; } + BackupEntries backupData(ContextPtr context, const ASTs & partitions) override; + RestoreTaskPtr restoreData(ContextMutablePtr context, const ASTs & partitions, const BackupPtr & backup, const String & data_path_in_backup, const StorageRestoreSettings & restore_settings) override; protected: StorageStripeLog( diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index f727b8f6952..5c8a7ea2be5 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -3,30 +3,35 @@ #include #include #include -#include #include +#include +#include +#include +#include +#include #include #include #include -#include -#include #include #include #include #include -#include #include #include +#include "Common/ThreadStatus.h" +#include +#include "IO/HTTPCommon.h" +#include "IO/ReadWriteBufferFromHTTP.h" -#include +#include +#include #include #include -#include #include -#include +#include namespace DB @@ -43,8 +48,7 @@ namespace ErrorCodes static bool urlWithGlobs(const String & uri) { - return (uri.find('{') != std::string::npos && uri.find('}') != std::string::npos) - || uri.find('|') != std::string::npos; + return (uri.find('{') != std::string::npos && uri.find('}') != std::string::npos) || uri.find('|') != std::string::npos; } @@ -88,8 +92,7 @@ IStorageURLBase::IStorageURLBase( namespace { - ReadWriteBufferFromHTTP::HTTPHeaderEntries getHeaders( - const ReadWriteBufferFromHTTP::HTTPHeaderEntries & headers_) + ReadWriteBufferFromHTTP::HTTPHeaderEntries getHeaders(const ReadWriteBufferFromHTTP::HTTPHeaderEntries & headers_) { ReadWriteBufferFromHTTP::HTTPHeaderEntries headers(headers_.begin(), headers_.end()); // Propagate OpenTelemetry trace context, if any, downstream. 
@@ -98,13 +101,11 @@ namespace const auto & thread_trace_context = CurrentThread::get().thread_trace_context; if (thread_trace_context.trace_id != UUID()) { - headers.emplace_back("traceparent", - thread_trace_context.composeTraceparentHeader()); + headers.emplace_back("traceparent", thread_trace_context.composeTraceparentHeader()); if (!thread_trace_context.tracestate.empty()) { - headers.emplace_back("tracestate", - thread_trace_context.tracestate); + headers.emplace_back("tracestate", thread_trace_context.tracestate); } } } @@ -114,8 +115,7 @@ namespace class StorageURLSource : public SourceWithProgress { - - using URIParams = std::vector>; + using URIParams = std::vector>; public: struct URIInfo @@ -160,11 +160,11 @@ namespace UInt64 max_block_size, const ConnectionTimeouts & timeouts, const String & compression_method, + size_t download_threads, const ReadWriteBufferFromHTTP::HTTPHeaderEntries & headers_ = {}, const URIParams & params = {}, bool glob_url = false) - : SourceWithProgress(sample_block), name(std::move(name_)) - , uri_info(uri_info_) + : SourceWithProgress(sample_block), name(std::move(name_)), uri_info(uri_info_) { auto headers = getHeaders(headers_); @@ -176,33 +176,40 @@ namespace auto first_option = uri_options.begin(); read_buf = getFirstAvailableURLReadBuffer( - first_option, uri_options.end(), context, params, http_method, - callback, timeouts, compression_method, credentials, headers, glob_url, uri_options.size() == 1); + first_option, + uri_options.end(), + context, + params, + http_method, + callback, + timeouts, + compression_method, + credentials, + headers, + glob_url, + uri_options.size() == 1, + download_threads); - auto input_format = FormatFactory::instance().getInput(format, *read_buf, sample_block, context, max_block_size, format_settings); + auto input_format + = FormatFactory::instance().getInput(format, *read_buf, sample_block, context, max_block_size, format_settings); QueryPipelineBuilder builder; builder.init(Pipe(input_format)); - builder.addSimpleTransform([&](const Block & cur_header) - { - return std::make_shared(cur_header, columns, *input_format, context); - }); + builder.addSimpleTransform( + [&](const Block & cur_header) + { return std::make_shared(cur_header, columns, *input_format, context); }); pipeline = std::make_unique(QueryPipelineBuilder::getPipeline(std::move(builder))); reader = std::make_unique(*pipeline); }; } - String getName() const override - { - return name; - } + String getName() const override { return name; } Chunk generate() override { while (true) { - if (!reader) { auto current_uri_pos = uri_info->next_uri_to_read.fetch_add(1); @@ -239,7 +246,8 @@ namespace Poco::Net::HTTPBasicCredentials & credentials, const ReadWriteBufferFromHTTP::HTTPHeaderEntries & headers, bool glob_url, - bool delay_initialization) + bool delay_initialization, + size_t download_threads) { String first_exception_message; ReadSettings read_settings = context->getReadSettings(); @@ -255,8 +263,137 @@ namespace setCredentials(credentials, request_uri); + const auto settings = context->getSettings(); try { + if (download_threads > 1) + { + try + { + ReadWriteBufferFromHTTP buffer( + request_uri, + Poco::Net::HTTPRequest::HTTP_HEAD, + callback, + timeouts, + credentials, + settings.max_http_get_redirects, + DBMS_DEFAULT_BUFFER_SIZE, + read_settings, + headers, + ReadWriteBufferFromHTTP::Range{0, std::nullopt}, + &context->getRemoteHostFilter(), + true, + /* use_external_buffer */ false, + /* skip_url_not_found_error */ skip_url_not_found_error); + + 
Poco::Net::HTTPResponse res; + + for (size_t i = 0; i < settings.http_max_tries; ++i) + { + try + { + buffer.callWithRedirects(res, Poco::Net::HTTPRequest::HTTP_HEAD, true); + break; + } + catch (const Poco::Exception & e) + { + LOG_TRACE( + &Poco::Logger::get("StorageURLSource"), + "HTTP HEAD request to `{}` failed at try {}/{}. " + "Error: {}.", + request_uri.toString(), + i + 1, + settings.http_max_tries, + e.displayText()); + if (!ReadWriteBufferFromHTTP::isRetriableError(res.getStatus())) + { + throw; + } + } + } + + // to check if Range header is supported, we need to send a request with it set + const bool supports_ranges = (res.has("Accept-Ranges") && res.get("Accept-Ranges") == "bytes") + || (res.has("Content-Range") && res.get("Content-Range").starts_with("bytes")); + LOG_TRACE( + &Poco::Logger::get("StorageURLSource"), + fmt::runtime(supports_ranges ? "HTTP Range is supported" : "HTTP Range is not supported")); + + + if (supports_ranges && res.getStatus() == Poco::Net::HTTPResponse::HTTP_PARTIAL_CONTENT + && res.hasContentLength()) + { + LOG_TRACE( + &Poco::Logger::get("StorageURLSource"), + "Using ParallelReadBuffer with {} workers with chunks of {} bytes", + download_threads, + settings.max_download_buffer_size); + + auto read_buffer_factory = std::make_unique( + res.getContentLength(), + settings.max_download_buffer_size, + request_uri, + http_method, + callback, + timeouts, + credentials, + settings.max_http_get_redirects, + DBMS_DEFAULT_BUFFER_SIZE, + read_settings, + headers, + &context->getRemoteHostFilter(), + delay_initialization, + /* use_external_buffer */ false, + /* skip_url_not_found_error */ skip_url_not_found_error); + + ThreadGroupStatusPtr running_group = CurrentThread::isInitialized() && CurrentThread::get().getThreadGroup() + ? CurrentThread::get().getThreadGroup() + : MainThreadStatus::getInstance().getThreadGroup(); + + ContextPtr query_context + = CurrentThread::isInitialized() ? CurrentThread::get().getQueryContext() : nullptr; + + auto worker_cleanup = [has_running_group = running_group == nullptr](ThreadStatus & thread_status) + { + if (has_running_group) + thread_status.detachQuery(false); + }; + + auto worker_setup = [query_context = std::move(query_context), + running_group = std::move(running_group)](ThreadStatus & thread_status) + { + /// Save query context if any, because cache implementation needs it. + if (query_context) + thread_status.attachQueryContext(query_context); + + /// To be able to pass ProfileEvents. 
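The block above first probes the URL with an HTTP HEAD request; only if the server advertises byte ranges (`Accept-Ranges: bytes` or a `bytes` `Content-Range`) and a known content length does it switch to a `ParallelReadBuffer` that pulls `max_download_buffer_size`-sized chunks with several workers, otherwise it falls back to the single-threaded buffer. The same decision logic, sketched with the Python `requests` library and a thread pool (an illustration of the approach, not ClickHouse's HTTP stack):

```python
from concurrent.futures import ThreadPoolExecutor

import requests


def download(url, chunk_size=10 * 1024 * 1024, workers=4):
    """Probe Range support with HEAD, then download chunks in parallel or fall back."""
    head = requests.head(url, allow_redirects=True, timeout=30)
    length = head.headers.get("Content-Length")
    supports_ranges = head.headers.get("Accept-Ranges") == "bytes"

    if not (supports_ranges and length):
        return requests.get(url, timeout=30).content  # single-threaded fallback

    total = int(length)
    ranges = [(start, min(start + chunk_size, total) - 1)
              for start in range(0, total, chunk_size)]

    def fetch(byte_range):
        start, end = byte_range
        r = requests.get(url, headers={"Range": f"bytes={start}-{end}"}, timeout=30)
        r.raise_for_status()
        return r.content

    with ThreadPoolExecutor(max_workers=workers) as pool:
        return b"".join(pool.map(fetch, ranges))  # map() preserves chunk order
```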
+ if (running_group) + thread_status.attachQuery(running_group); + }; + + + return wrapReadBufferWithCompressionMethod( + std::make_unique( + std::move(read_buffer_factory), + &IOThreadPool::get(), + download_threads, + std::move(worker_setup), + std::move(worker_cleanup)), + chooseCompressionMethod(request_uri.getPath(), compression_method)); + } + } + catch (const Poco::Exception & e) + { + LOG_TRACE( + &Poco::Logger::get("StorageURLSource"), + "Failed to setup ParallelReadBuffer because of an exception:\n{}.\nFalling back to the single-threaded " + "buffer", + e.displayText()); + } + } + + LOG_TRACE(&Poco::Logger::get("StorageURLSource"), "Using single-threaded read buffer"); + return wrapReadBufferWithCompressionMethod( std::make_unique( request_uri, @@ -264,15 +401,15 @@ namespace callback, timeouts, credentials, - context->getSettingsRef().max_http_get_redirects, + settings.max_http_get_redirects, DBMS_DEFAULT_BUFFER_SIZE, read_settings, headers, ReadWriteBufferFromHTTP::Range{}, &context->getRemoteHostFilter(), delay_initialization, - /* use_external_buffer */false, - /* skip_url_not_found_error */skip_url_not_found_error), + /* use_external_buffer */ false, + /* skip_url_not_found_error */ skip_url_not_found_error), chooseCompressionMethod(request_uri.getPath(), compression_method)); } catch (...) @@ -323,10 +460,10 @@ StorageURLSink::StorageURLSink( std::string content_encoding = toContentEncodingName(compression_method); write_buf = wrapWriteBufferWithCompressionMethod( - std::make_unique(Poco::URI(uri), http_method, content_type, content_encoding, timeouts), - compression_method, 3); - writer = FormatFactory::instance().getOutputFormat(format, *write_buf, sample_block, - context, {} /* write callback */, format_settings); + std::make_unique(Poco::URI(uri), http_method, content_type, content_encoding, timeouts), + compression_method, + 3); + writer = FormatFactory::instance().getOutputFormat(format, *write_buf, sample_block, context, {} /* write callback */, format_settings); } @@ -355,15 +492,15 @@ public: const ConnectionTimeouts & timeouts_, const CompressionMethod compression_method_, const String & http_method_) - : PartitionedSink(partition_by, context_, sample_block_) - , uri(uri_) - , format(format_) - , format_settings(format_settings_) - , sample_block(sample_block_) - , context(context_) - , timeouts(timeouts_) - , compression_method(compression_method_) - , http_method(http_method_) + : PartitionedSink(partition_by, context_, sample_block_) + , uri(uri_) + , format(format_) + , format_settings(format_settings_) + , sample_block(sample_block_) + , context(context_) + , timeouts(timeouts_) + , compression_method(compression_method_) + , http_method(http_method_) { } @@ -371,8 +508,8 @@ public: { auto partition_path = PartitionedSink::replaceWildcards(uri, partition_id); context->getRemoteHostFilter().checkURL(Poco::URI(partition_path)); - return std::make_shared(partition_path, format, - format_settings, sample_block, context, timeouts, compression_method, http_method); + return std::make_shared( + partition_path, format, format_settings, sample_block, context, timeouts, compression_method, http_method); } private: @@ -462,7 +599,8 @@ ColumnsDescription IStorageURLBase::getTableStructureFromData( credentials, headers, false, - false); + false, + context->getSettingsRef().max_download_threads); }; try @@ -479,7 +617,10 @@ ColumnsDescription IStorageURLBase::getTableStructureFromData( } while (++option < urls_to_check.end()); - throw 
Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "All attempts to extract table structure from urls failed. Errors:\n{}", exception_messages); + throw Exception( + ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, + "All attempts to extract table structure from urls failed. Errors:\n{}", + exception_messages); } bool IStorageURLBase::isColumnOriented() const @@ -512,6 +653,8 @@ Pipe IStorageURLBase::read( block_for_format = storage_snapshot->metadata->getSampleBlock(); } + size_t max_download_threads = local_context->getSettingsRef().max_download_threads; + if (urlWithGlobs(uri)) { size_t max_addresses = local_context->getSettingsRef().glob_expansion_max_elements; @@ -528,14 +671,13 @@ Pipe IStorageURLBase::read( Pipes pipes; pipes.reserve(num_streams); + size_t download_threads = num_streams >= max_download_threads ? 1 : (max_download_threads / num_streams); for (size_t i = 0; i < num_streams; ++i) { pipes.emplace_back(std::make_shared( uri_info, getReadMethod(), - getReadPOSTDataCallback( - column_names, columns_description, query_info, - local_context, processed_stage, max_block_size), + getReadPOSTDataCallback(column_names, columns_description, query_info, local_context, processed_stage, max_block_size), format_name, format_settings, getName(), @@ -544,7 +686,11 @@ Pipe IStorageURLBase::read( columns_description, max_block_size, ConnectionTimeouts::getHTTPTimeouts(local_context), - compression_method, headers, params, /* glob_url */true)); + compression_method, + download_threads, + headers, + params, + /* glob_url */ true)); } return Pipe::unitePipes(std::move(pipes)); } @@ -555,9 +701,7 @@ Pipe IStorageURLBase::read( return Pipe(std::make_shared( uri_info, getReadMethod(), - getReadPOSTDataCallback( - column_names, columns_description, query_info, - local_context, processed_stage, max_block_size), + getReadPOSTDataCallback(column_names, columns_description, query_info, local_context, processed_stage, max_block_size), format_name, format_settings, getName(), @@ -566,7 +710,10 @@ Pipe IStorageURLBase::read( columns_description, max_block_size, ConnectionTimeouts::getHTTPTimeouts(local_context), - compression_method, headers, params)); + compression_method, + max_download_threads, + headers, + params)); } } @@ -598,12 +745,10 @@ Pipe StorageURLWithFailover::read( auto uri_info = std::make_shared(); uri_info->uri_list_to_read.emplace_back(uri_options); - auto pipe = Pipe(std::make_shared( + auto pipe = Pipe(std::make_shared( uri_info, getReadMethod(), - getReadPOSTDataCallback( - column_names, columns_description, query_info, - local_context, processed_stage, max_block_size), + getReadPOSTDataCallback(column_names, columns_description, query_info, local_context, processed_stage, max_block_size), format_name, format_settings, getName(), @@ -612,7 +757,10 @@ Pipe StorageURLWithFailover::read( columns_description, max_block_size, ConnectionTimeouts::getHTTPTimeouts(local_context), - compression_method, headers, params)); + compression_method, + local_context->getSettingsRef().max_download_threads, + headers, + params)); std::shuffle(uri_options.begin(), uri_options.end(), thread_local_rng); return pipe; } @@ -632,17 +780,26 @@ SinkToStoragePtr IStorageURLBase::write(const ASTPtr & query, const StorageMetad { return std::make_shared( partition_by_ast, - uri, format_name, - format_settings, metadata_snapshot->getSampleBlock(), context, + uri, + format_name, + format_settings, + metadata_snapshot->getSampleBlock(), + context, ConnectionTimeouts::getHTTPTimeouts(context), - 
chooseCompressionMethod(uri, compression_method), http_method); + chooseCompressionMethod(uri, compression_method), + http_method); } else { - return std::make_shared(uri, format_name, - format_settings, metadata_snapshot->getSampleBlock(), context, + return std::make_shared( + uri, + format_name, + format_settings, + metadata_snapshot->getSampleBlock(), + context, ConnectionTimeouts::getHTTPTimeouts(context), - chooseCompressionMethod(uri, compression_method), http_method); + chooseCompressionMethod(uri, compression_method), + http_method); } } @@ -659,8 +816,19 @@ StorageURL::StorageURL( const ReadWriteBufferFromHTTP::HTTPHeaderEntries & headers_, const String & http_method_, ASTPtr partition_by_) - : IStorageURLBase(uri_, context_, table_id_, format_name_, format_settings_, - columns_, constraints_, comment, compression_method_, headers_, http_method_, partition_by_) + : IStorageURLBase( + uri_, + context_, + table_id_, + format_name_, + format_settings_, + columns_, + constraints_, + comment, + compression_method_, + headers_, + http_method_, + partition_by_) { context_->getRemoteHostFilter().checkURL(Poco::URI(uri)); } @@ -711,8 +879,7 @@ FormatSettings StorageURL::getFormatSettingsFromArgs(const StorageFactory::Argum // Apply changes from SETTINGS clause, with validation. user_format_settings.applyChanges(args.storage_def->settings->changes); - format_settings = getFormatSettings(args.getContext(), - user_format_settings); + format_settings = getFormatSettings(args.getContext(), user_format_settings); } else { @@ -731,12 +898,12 @@ URLBasedDataSourceConfiguration StorageURL::getConfiguration(ASTs & args, Contex auto [common_configuration, storage_specific_args] = named_collection.value(); configuration.set(common_configuration); - if (!configuration.http_method.empty() - && configuration.http_method != Poco::Net::HTTPRequest::HTTP_POST + if (!configuration.http_method.empty() && configuration.http_method != Poco::Net::HTTPRequest::HTTP_POST && configuration.http_method != Poco::Net::HTTPRequest::HTTP_PUT) - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Http method can be POST or PUT (current: {}). For insert default is POST, for select GET", - configuration.http_method); + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Http method can be POST or PUT (current: {}). 
For insert default is POST, for select GET", + configuration.http_method); if (!storage_specific_args.empty()) { @@ -754,7 +921,8 @@ URLBasedDataSourceConfiguration StorageURL::getConfiguration(ASTs & args, Contex { if (args.empty() || args.size() > 3) throw Exception( - "Storage URL requires 1, 2 or 3 arguments: url, name of used format (taken from file extension by default) and optional compression method.", + "Storage URL requires 1, 2 or 3 arguments: url, name of used format (taken from file extension by default) and optional " + "compression method.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); for (auto & arg : args) @@ -776,43 +944,45 @@ URLBasedDataSourceConfiguration StorageURL::getConfiguration(ASTs & args, Contex void registerStorageURL(StorageFactory & factory) { - factory.registerStorage("URL", [](const StorageFactory::Arguments & args) - { - ASTs & engine_args = args.engine_args; - auto configuration = StorageURL::getConfiguration(engine_args, args.getLocalContext()); - auto format_settings = StorageURL::getFormatSettingsFromArgs(args); - - ReadWriteBufferFromHTTP::HTTPHeaderEntries headers; - for (const auto & [header, value] : configuration.headers) + factory.registerStorage( + "URL", + [](const StorageFactory::Arguments & args) { - auto value_literal = value.safeGet(); - if (header == "Range") - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Range headers are not allowed"); - headers.emplace_back(std::make_pair(header, value_literal)); - } + ASTs & engine_args = args.engine_args; + auto configuration = StorageURL::getConfiguration(engine_args, args.getLocalContext()); + auto format_settings = StorageURL::getFormatSettingsFromArgs(args); - ASTPtr partition_by; - if (args.storage_def->partition_by) - partition_by = args.storage_def->partition_by->clone(); + ReadWriteBufferFromHTTP::HTTPHeaderEntries headers; + for (const auto & [header, value] : configuration.headers) + { + auto value_literal = value.safeGet(); + if (header == "Range") + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Range headers are not allowed"); + headers.emplace_back(std::make_pair(header, value_literal)); + } - return StorageURL::create( - configuration.url, - args.table_id, - configuration.format, - format_settings, - args.columns, - args.constraints, - args.comment, - args.getContext(), - configuration.compression_method, - headers, - configuration.http_method, - partition_by); - }, - { - .supports_settings = true, - .supports_schema_inference = true, - .source_access_type = AccessType::URL, - }); + ASTPtr partition_by; + if (args.storage_def->partition_by) + partition_by = args.storage_def->partition_by->clone(); + + return StorageURL::create( + configuration.url, + args.table_id, + configuration.format, + format_settings, + args.columns, + args.constraints, + args.comment, + args.getContext(), + configuration.compression_method, + headers, + configuration.http_method, + partition_by); + }, + { + .supports_settings = true, + .supports_schema_inference = true, + .source_access_type = AccessType::URL, + }); } } diff --git a/src/Storages/System/StorageSystemModels.cpp b/src/Storages/System/StorageSystemModels.cpp index 3df48e830bb..4a4dbbc69df 100644 --- a/src/Storages/System/StorageSystemModels.cpp +++ b/src/Storages/System/StorageSystemModels.cpp @@ -38,7 +38,7 @@ void StorageSystemModels::fillData(MutableColumns & res_columns, ContextPtr cont if (load_result.object) { - const auto model_ptr = std::static_pointer_cast(load_result.object); + const auto model_ptr = 
std::static_pointer_cast(load_result.object); res_columns[3]->insert(model_ptr->getTypeName()); } else diff --git a/src/Storages/System/StorageSystemPartsColumns.cpp b/src/Storages/System/StorageSystemPartsColumns.cpp index f5e9b82c136..a9341abb9cd 100644 --- a/src/Storages/System/StorageSystemPartsColumns.cpp +++ b/src/Storages/System/StorageSystemPartsColumns.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -64,7 +65,11 @@ StorageSystemPartsColumns::StorageSystemPartsColumns(const StorageID & table_id_ {"serialization_kind", std::make_shared()}, {"subcolumns.names", std::make_shared(std::make_shared())}, {"subcolumns.types", std::make_shared(std::make_shared())}, - {"subcolumns.serializations", std::make_shared(std::make_shared())} + {"subcolumns.serializations", std::make_shared(std::make_shared())}, + {"subcolumns.bytes_on_disk", std::make_shared(std::make_shared())}, + {"subcolumns.data_compressed_bytes", std::make_shared(std::make_shared())}, + {"subcolumns.data_uncompressed_bytes", std::make_shared(std::make_shared())}, + {"subcolumns.marks_bytes", std::make_shared(std::make_shared())}, } ) { @@ -228,13 +233,43 @@ void StorageSystemPartsColumns::processNextStorage( Array subcolumn_names; Array subcolumn_types; - Array subcolumn_sers; + Array subcolumn_serializations; + Array subcolumn_bytes_on_disk; + Array subcolumn_data_compressed_bytes; + Array subcolumn_data_uncompressed_bytes; + Array subcolumn_marks_bytes; - IDataType::forEachSubcolumn([&](const auto &, const auto & name, const auto & data) + IDataType::forEachSubcolumn([&](const auto & subpath, const auto & name, const auto & data) { + /// We count only final subcolumns, which are represented by files on disk + /// and skip intermediate subcolumns of types Tuple and Nested.
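`system.parts_columns` gains per-subcolumn size columns here. For every final subcolumn stream, the loop that follows looks up the `.bin` entry in the part's checksums (compressed and uncompressed sizes) plus the marks-file entry, and reports `bytes_on_disk` as compressed data plus marks. A small sketch of that accounting over a checksum map; the dict layout is illustrative, whereas the real code reads `part->checksums.files`:

```python
def subcolumn_sizes(stream_name, checksums, marks_extension=".mrk2"):
    """Sum the on-disk footprint of one subcolumn stream from a checksum map."""
    compressed = uncompressed = marks = 0

    bin_entry = checksums.get(stream_name + ".bin")
    if bin_entry:
        compressed += bin_entry["file_size"]
        uncompressed += bin_entry["uncompressed_size"]

    mrk_entry = checksums.get(stream_name + marks_extension)
    if mrk_entry:
        marks += mrk_entry["file_size"]

    return {
        "bytes_on_disk": compressed + marks,
        "data_compressed_bytes": compressed,
        "data_uncompressed_bytes": uncompressed,
        "marks_bytes": marks,
    }
```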
+ if (isTuple(data.type) || isNested(data.type)) + return; + subcolumn_names.push_back(name); subcolumn_types.push_back(data.type->getName()); - subcolumn_sers.push_back(ISerialization::kindToString(data.serialization->getKind())); + subcolumn_serializations.push_back(ISerialization::kindToString(data.serialization->getKind())); + + ColumnSize size; + NameAndTypePair subcolumn(column.name, name, column.type, data.type); + String file_name = ISerialization::getFileNameForStream(subcolumn, subpath); + + auto bin_checksum = part->checksums.files.find(file_name + ".bin"); + if (bin_checksum != part->checksums.files.end()) + { + size.data_compressed += bin_checksum->second.file_size; + size.data_uncompressed += bin_checksum->second.uncompressed_size; + } + + auto mrk_checksum = part->checksums.files.find(file_name + part->index_granularity_info.marks_file_extension); + if (mrk_checksum != part->checksums.files.end()) + size.marks += mrk_checksum->second.file_size; + + subcolumn_bytes_on_disk.push_back(size.data_compressed + size.marks); + subcolumn_data_compressed_bytes.push_back(size.data_compressed); + subcolumn_data_uncompressed_bytes.push_back(size.data_uncompressed); + subcolumn_marks_bytes.push_back(size.marks); + }, { serialization, column.type, nullptr, nullptr }); if (columns_mask[src_index++]) @@ -242,7 +277,15 @@ void StorageSystemPartsColumns::processNextStorage( if (columns_mask[src_index++]) columns[res_index++]->insert(subcolumn_types); if (columns_mask[src_index++]) - columns[res_index++]->insert(subcolumn_sers); + columns[res_index++]->insert(subcolumn_serializations); + if (columns_mask[src_index++]) + columns[res_index++]->insert(subcolumn_bytes_on_disk); + if (columns_mask[src_index++]) + columns[res_index++]->insert(subcolumn_data_compressed_bytes); + if (columns_mask[src_index++]) + columns[res_index++]->insert(subcolumn_data_uncompressed_bytes); + if (columns_mask[src_index++]) + columns[res_index++]->insert(subcolumn_marks_bytes); if (has_state_column) columns[res_index++]->insert(part->stateString()); diff --git a/src/Storages/System/attachInformationSchemaTables.cpp b/src/Storages/System/attachInformationSchemaTables.cpp index 68a1eac305e..61a91685324 100644 --- a/src/Storages/System/attachInformationSchemaTables.cpp +++ b/src/Storages/System/attachInformationSchemaTables.cpp @@ -32,6 +32,8 @@ static void createInformationSchemaView(ContextMutablePtr context, IDatabase & d auto & ast_create = ast->as(); assert(view_name == ast_create.getTable()); + ast_create.attach = false; + ast_create.setDatabase(database.getDatabaseName()); if (is_uppercase) ast_create.setTable(Poco::toUpper(view_name)); diff --git a/src/Storages/WindowView/StorageWindowView.cpp b/src/Storages/WindowView/StorageWindowView.cpp index a329b01e9f2..644ab5d57c2 100644 --- a/src/Storages/WindowView/StorageWindowView.cpp +++ b/src/Storages/WindowView/StorageWindowView.cpp @@ -57,6 +57,7 @@ namespace ErrorCodes { extern const int ARGUMENT_OUT_OF_BOUND; extern const int BAD_ARGUMENTS; + extern const int SYNTAX_ERROR; extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int INCORRECT_QUERY; @@ -262,7 +263,13 @@ namespace IntervalKind strToIntervalKind(const String& interval_str) { - if (interval_str == "Second") + if (interval_str == "Nanosecond") + return IntervalKind::Nanosecond; + else if (interval_str == "Microsecond") + return IntervalKind::Microsecond; + else if (interval_str == "Millisecond") + return IntervalKind::Millisecond; + else if (interval_str == "Second") 
return IntervalKind::Second; else if (interval_str == "Minute") return IntervalKind::Minute; @@ -307,6 +314,12 @@ namespace { switch (kind) { + case IntervalKind::Nanosecond: + throw Exception("Fractional seconds are not supported by windows yet", ErrorCodes::SYNTAX_ERROR); + case IntervalKind::Microsecond: + throw Exception("Fractional seconds are not supported by windows yet", ErrorCodes::SYNTAX_ERROR); + case IntervalKind::Millisecond: + throw Exception("Fractional seconds are not supported by windows yet", ErrorCodes::SYNTAX_ERROR); #define CASE_WINDOW_KIND(KIND) \ case IntervalKind::KIND: { \ return AddTime::execute(time_sec, num_units, time_zone); \ @@ -738,6 +751,12 @@ UInt32 StorageWindowView::getWindowLowerBound(UInt32 time_sec) switch (window_interval_kind) { + case IntervalKind::Nanosecond: + throw Exception("Fractional seconds are not supported by windows yet", ErrorCodes::SYNTAX_ERROR); + case IntervalKind::Microsecond: + throw Exception("Fractional seconds are not supported by windows yet", ErrorCodes::SYNTAX_ERROR); + case IntervalKind::Millisecond: + throw Exception("Fractional seconds are not supported by windows yet", ErrorCodes::SYNTAX_ERROR); #define CASE_WINDOW_KIND(KIND) \ case IntervalKind::KIND: \ { \ @@ -773,6 +792,13 @@ UInt32 StorageWindowView::getWindowUpperBound(UInt32 time_sec) switch (window_interval_kind) { + case IntervalKind::Nanosecond: + throw Exception("Fractional seconds are not supported by window view yet", ErrorCodes::SYNTAX_ERROR); + case IntervalKind::Microsecond: + throw Exception("Fractional seconds are not supported by window view yet", ErrorCodes::SYNTAX_ERROR); + case IntervalKind::Millisecond: + throw Exception("Fractional seconds are not supported by window view yet", ErrorCodes::SYNTAX_ERROR); + #define CASE_WINDOW_KIND(KIND) \ case IntervalKind::KIND: \ { \ diff --git a/src/Storages/examples/active_parts.py b/src/Storages/examples/active_parts.py index a818a76017d..d82c5ca96bf 100644 --- a/src/Storages/examples/active_parts.py +++ b/src/Storages/examples/active_parts.py @@ -9,7 +9,9 @@ import re parts = {} for s in sys.stdin.read().split(): - m = re.match('^([0-9]{6})[0-9]{2}_([0-9]{6})[0-9]{2}_([0-9]+)_([0-9]+)_([0-9]+)$', s) + m = re.match( + "^([0-9]{6})[0-9]{2}_([0-9]{6})[0-9]{2}_([0-9]+)_([0-9]+)_([0-9]+)$", s + ) if m == None: continue m1 = m.group(1) @@ -18,7 +20,7 @@ for s in sys.stdin.read().split(): i2 = int(m.group(4)) l = int(m.group(5)) if m1 != m2: - raise Exception('not in single month: ' + s) + raise Exception("not in single month: " + s) if m1 not in parts: parts[m1] = [] parts[m1].append((i1, i2, l, s)) @@ -27,13 +29,13 @@ for m, ps in sorted(parts.items()): ps.sort(key=lambda i1_i2_l_s: (i1_i2_l_s[0], -i1_i2_l_s[1], -i1_i2_l_s[2])) (x2, y2, l2, s2) = (-1, -1, -1, -1) for x1, y1, l1, s1 in ps: - if x1 >= x2 and y1 <= y2 and l1 < l2 and (x1, y1) != (x2, y2): # 2 contains 1 + if x1 >= x2 and y1 <= y2 and l1 < l2 and (x1, y1) != (x2, y2): # 2 contains 1 pass - elif x1 > y2: # 1 is to the right of 2 + elif x1 > y2: # 1 is to the right of 2 if x1 != y2 + 1 and y2 != -1: - print() # to see the missing numbers + print() # to see the missing numbers (x2, y2, l2, s2) = (x1, y1, l1, s1) print(s1) else: - raise Exception('invalid parts intersection: ' + s1 + ' and ' + s2) + raise Exception("invalid parts intersection: " + s1 + " and " + s2) print() diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index c9858910837..22c89aaafa7 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -7,7 +7,7 @@ else () include 
(${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake) endif () -option (ENABLE_CLICKHOUSE_TEST "Install clickhouse-test script and relevant tests scenarios" ON) +option (ENABLE_CLICKHOUSE_TEST "Install clickhouse-test script and relevant tests scenarios" OFF) if (ENABLE_CLICKHOUSE_TEST) install (PROGRAMS clickhouse-test DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) diff --git a/tests/ci/ast_fuzzer_check.py b/tests/ci/ast_fuzzer_check.py index 319a6fc3fa5..94f5eff51d7 100644 --- a/tests/ci/ast_fuzzer_check.py +++ b/tests/ci/ast_fuzzer_check.py @@ -7,8 +7,13 @@ import sys from github import Github -from env_helper import GITHUB_REPOSITORY, TEMP_PATH, REPO_COPY, REPORTS_PATH, GITHUB_SERVER_URL, \ - GITHUB_RUN_ID +from env_helper import ( + GITHUB_REPOSITORY, + GITHUB_RUN_URL, + REPORTS_PATH, + REPO_COPY, + TEMP_PATH, +) from s3_helper import S3Helper from get_robot_token import get_best_robot_token from pr_info import PRInfo @@ -19,19 +24,24 @@ from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickh from stopwatch import Stopwatch from rerun_helper import RerunHelper -IMAGE_NAME = 'clickhouse/fuzzer' +IMAGE_NAME = "clickhouse/fuzzer" + def get_run_command(pr_number, sha, download_url, workspace_path, image): - return f'docker run --network=host --volume={workspace_path}:/workspace ' \ - '--cap-add syslog --cap-add sys_admin --cap-add=SYS_PTRACE ' \ - f'-e PR_TO_TEST={pr_number} -e SHA_TO_TEST={sha} -e BINARY_URL_TO_DOWNLOAD="{download_url}" '\ - f'{image}' + return ( + f"docker run --network=host --volume={workspace_path}:/workspace " + "--cap-add syslog --cap-add sys_admin --cap-add=SYS_PTRACE " + f'-e PR_TO_TEST={pr_number} -e SHA_TO_TEST={sha} -e BINARY_URL_TO_DOWNLOAD="{download_url}" ' + f"{image}" + ) + def get_commit(gh, commit_sha): repo = gh.get_repo(GITHUB_REPOSITORY) commit = repo.get_commit(commit_sha) return commit + if __name__ == "__main__": logging.basicConfig(level=logging.INFO) @@ -64,7 +74,7 @@ if __name__ == "__main__": raise Exception("No build URLs found") for url in urls: - if url.endswith('/clickhouse'): + if url.endswith("/clickhouse"): build_url = url break else: @@ -72,16 +82,20 @@ if __name__ == "__main__": logging.info("Got build url %s", build_url) - workspace_path = os.path.join(temp_path, 'workspace') + workspace_path = os.path.join(temp_path, "workspace") if not os.path.exists(workspace_path): os.makedirs(workspace_path) - run_command = get_run_command(pr_info.number, pr_info.sha, build_url, workspace_path, docker_image) + run_command = get_run_command( + pr_info.number, pr_info.sha, build_url, workspace_path, docker_image + ) logging.info("Going to run %s", run_command) run_log_path = os.path.join(temp_path, "runlog.log") - with open(run_log_path, 'w', encoding='utf-8') as log: - with subprocess.Popen(run_command, shell=True, stderr=log, stdout=log) as process: + with open(run_log_path, "w", encoding="utf-8") as log: + with subprocess.Popen( + run_command, shell=True, stderr=log, stdout=log + ) as process: retcode = process.wait() if retcode == 0: logging.info("Run successfully") @@ -90,56 +104,70 @@ if __name__ == "__main__": subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) - check_name_lower = check_name.lower().replace('(', '').replace(')', '').replace(' ', '') - s3_prefix = f'{pr_info.number}/{pr_info.sha}/fuzzer_{check_name_lower}/' + check_name_lower = ( + check_name.lower().replace("(", "").replace(")", "").replace(" ", "") + ) + s3_prefix = 
f"{pr_info.number}/{pr_info.sha}/fuzzer_{check_name_lower}/" paths = { - 'runlog.log': run_log_path, - 'main.log': os.path.join(workspace_path, 'main.log'), - 'server.log': os.path.join(workspace_path, 'server.log'), - 'fuzzer.log': os.path.join(workspace_path, 'fuzzer.log'), - 'report.html': os.path.join(workspace_path, 'report.html'), - 'core.gz': os.path.join(workspace_path, 'core.gz'), + "runlog.log": run_log_path, + "main.log": os.path.join(workspace_path, "main.log"), + "server.log": os.path.join(workspace_path, "server.log"), + "fuzzer.log": os.path.join(workspace_path, "fuzzer.log"), + "report.html": os.path.join(workspace_path, "report.html"), + "core.gz": os.path.join(workspace_path, "core.gz"), } - s3_helper = S3Helper('https://s3.amazonaws.com') + s3_helper = S3Helper("https://s3.amazonaws.com") for f in paths: try: - paths[f] = s3_helper.upload_test_report_to_s3(paths[f], s3_prefix + '/' + f) + paths[f] = s3_helper.upload_test_report_to_s3(paths[f], s3_prefix + "/" + f) except Exception as ex: logging.info("Exception uploading file %s text %s", f, ex) - paths[f] = '' + paths[f] = "" - report_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/actions/runs/{GITHUB_RUN_ID}" - if paths['runlog.log']: - report_url = paths['runlog.log'] - if paths['main.log']: - report_url = paths['main.log'] - if paths['server.log']: - report_url = paths['server.log'] - if paths['fuzzer.log']: - report_url = paths['fuzzer.log'] - if paths['report.html']: - report_url = paths['report.html'] + report_url = GITHUB_RUN_URL + if paths["runlog.log"]: + report_url = paths["runlog.log"] + if paths["main.log"]: + report_url = paths["main.log"] + if paths["server.log"]: + report_url = paths["server.log"] + if paths["fuzzer.log"]: + report_url = paths["fuzzer.log"] + if paths["report.html"]: + report_url = paths["report.html"] # Try to get status message saved by the fuzzer try: - with open(os.path.join(workspace_path, 'status.txt'), 'r', encoding='utf-8') as status_f: - status = status_f.readline().rstrip('\n') + with open( + os.path.join(workspace_path, "status.txt"), "r", encoding="utf-8" + ) as status_f: + status = status_f.readline().rstrip("\n") - with open(os.path.join(workspace_path, 'description.txt'), 'r', encoding='utf-8') as desc_f: - description = desc_f.readline().rstrip('\n')[:140] + with open( + os.path.join(workspace_path, "description.txt"), "r", encoding="utf-8" + ) as desc_f: + description = desc_f.readline().rstrip("\n")[:140] except: - status = 'failure' - description = 'Task failed: $?=' + str(retcode) + status = "failure" + description = "Task failed: $?=" + str(retcode) - if 'fail' in status: - test_result = [(description, 'FAIL')] + if "fail" in status: + test_result = [(description, "FAIL")] else: - test_result = [(description, 'OK')] + test_result = [(description, "OK")] ch_helper = ClickHouseHelper() - prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_result, status, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, check_name) + prepared_events = prepare_tests_results_for_clickhouse( + pr_info, + test_result, + status, + stopwatch.duration_seconds, + stopwatch.start_time_str, + report_url, + check_name, + ) logging.info("Result: '%s', '%s', '%s'", status, description, report_url) print(f"::notice ::Report url: {report_url}") diff --git a/tests/ci/bugfix_validate_check.py b/tests/ci/bugfix_validate_check.py new file mode 100644 index 00000000000..7c130a766a1 --- /dev/null +++ b/tests/ci/bugfix_validate_check.py @@ -0,0 +1,48 @@ +#!/usr/bin/env 
python3 + +import argparse +import csv +import itertools +import os +import sys + +NO_CHANGES_MSG = "Nothing to run" + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("report1") + parser.add_argument("report2") + return parser.parse_args() + + +def post_commit_status_from_file(file_path): + res = [] + with open(file_path, "r", encoding="utf-8") as f: + fin = csv.reader(f, delimiter="\t") + res = list(itertools.islice(fin, 1)) + if len(res) < 1: + raise Exception(f'Can\'t read from "{file_path}"') + if len(res[0]) != 3: + raise Exception(f'Can\'t read from "{file_path}"') + return res[0] + + +def process_results(file_path): + state, report_url, description = post_commit_status_from_file(file_path) + prefix = os.path.basename(os.path.dirname(file_path)) + print( + f"::notice:: bugfix check: {prefix} - {state}: {description} Report url: {report_url}" + ) + return state == "success" + + +def main(args): + is_ok = False + is_ok = process_results(args.report1) or is_ok + is_ok = process_results(args.report2) or is_ok + sys.exit(0 if is_ok else 1) + + +if __name__ == "__main__": + main(parse_args()) diff --git a/tests/ci/build_check.py b/tests/ci/build_check.py index c318e163689..24af9e5194c 100644 --- a/tests/ci/build_check.py +++ b/tests/ci/build_check.py @@ -21,6 +21,8 @@ from ci_config import CI_CONFIG, BuildConfig from docker_pull_helper import get_image_with_version from tee_popen import TeePopen +IMAGE_NAME = "clickhouse/binary-builder" + def get_build_config(build_check_name: str, build_name: str) -> BuildConfig: if build_check_name == "ClickHouse build check (actions)": @@ -52,7 +54,6 @@ def get_packager_cmd( build_version: str, image_version: str, ccache_path: str, - pr_info: PRInfo, ) -> str: package_type = build_config["package_type"] comp = build_config["compiler"] @@ -73,9 +74,8 @@ def get_packager_cmd( cmd += " --cache=ccache" cmd += " --ccache_dir={}".format(ccache_path) - if "alien_pkgs" in build_config and build_config["alien_pkgs"]: - if pr_info.number == 0 or "release" in pr_info.labels: - cmd += " --alien-pkgs rpm tgz" + if "additional_pkgs" in build_config and build_config["additional_pkgs"]: + cmd += " --additional-pkgs" cmd += " --docker-image-version={}".format(image_version) cmd += " --version={}".format(build_version) @@ -86,13 +86,6 @@ def get_packager_cmd( return cmd -def get_image_name(build_config: BuildConfig) -> str: - if build_config["package_type"] != "deb": - return "clickhouse/binary-builder" - else: - return "clickhouse/deb-builder" - - def build_clickhouse( packager_cmd: str, logs_path: str, build_output_path: str ) -> Tuple[str, bool]: @@ -256,8 +249,7 @@ def main(): else: sys.exit(0) - image_name = get_image_name(build_config) - docker_image = get_image_with_version(IMAGES_PATH, image_name) + docker_image = get_image_with_version(IMAGES_PATH, IMAGE_NAME) image_version = docker_image.version logging.info("Got version from repo %s", version.string) @@ -298,7 +290,6 @@ def main(): version.string, image_version, ccache_path, - pr_info, ) logging.info("Going to run packager with %s", packager_cmd) diff --git a/tests/ci/build_report_check.py b/tests/ci/build_report_check.py index a85558ebe33..592e905bcb5 100644 --- a/tests/ci/build_report_check.py +++ b/tests/ci/build_report_check.py @@ -6,7 +6,13 @@ import os import sys from github import Github -from env_helper import REPORTS_PATH, TEMP_PATH, GITHUB_REPOSITORY, GITHUB_SERVER_URL, GITHUB_RUN_ID +from env_helper import ( + REPORTS_PATH, + TEMP_PATH, + GITHUB_REPOSITORY, + 
GITHUB_SERVER_URL, + GITHUB_RUN_URL, +) from report import create_build_html_report from s3_helper import S3Helper from get_robot_token import get_best_robot_token @@ -15,8 +21,19 @@ from commit_status_helper import get_commit from ci_config import CI_CONFIG from rerun_helper import RerunHelper -class BuildResult(): - def __init__(self, compiler, build_type, sanitizer, bundled, splitted, status, elapsed_seconds, with_coverage): + +class BuildResult: + def __init__( + self, + compiler, + build_type, + sanitizer, + bundled, + splitted, + status, + elapsed_seconds, + with_coverage, + ): self.compiler = compiler self.build_type = build_type self.sanitizer = sanitizer @@ -26,54 +43,72 @@ class BuildResult(): self.elapsed_seconds = elapsed_seconds self.with_coverage = with_coverage + def group_by_artifacts(build_urls): - groups = {'deb': [], 'binary': [], 'tgz': [], 'rpm': [], 'performance': []} + groups = { + "apk": [], + "deb": [], + "binary": [], + "tgz": [], + "rpm": [], + "performance": [], + } for url in build_urls: - if url.endswith('performance.tgz'): - groups['performance'].append(url) - elif url.endswith('.deb') or url.endswith('.buildinfo') or url.endswith('.changes') or url.endswith('.tar.gz'): - groups['deb'].append(url) - elif url.endswith('.rpm'): - groups['rpm'].append(url) - elif url.endswith('.tgz'): - groups['tgz'].append(url) + if url.endswith("performance.tgz"): + groups["performance"].append(url) + elif ( + url.endswith(".deb") + or url.endswith(".buildinfo") + or url.endswith(".changes") + or url.endswith(".tar.gz") + ): + groups["deb"].append(url) + elif url.endswith(".apk"): + groups["apk"].append(url) + elif url.endswith(".rpm"): + groups["rpm"].append(url) + elif url.endswith(".tgz"): + groups["tgz"].append(url) else: - groups['binary'].append(url) + groups["binary"].append(url) return groups + def process_report(build_report): - build_config = build_report['build_config'] + build_config = build_report["build_config"] build_result = BuildResult( - compiler=build_config['compiler'], - build_type=build_config['build_type'], - sanitizer=build_config['sanitizer'], - bundled=build_config['bundled'], - splitted=build_config['splitted'], - status="success" if build_report['status'] else "failure", - elapsed_seconds=build_report['elapsed_seconds'], - with_coverage=False + compiler=build_config["compiler"], + build_type=build_config["build_type"], + sanitizer=build_config["sanitizer"], + bundled=build_config["bundled"], + splitted=build_config["splitted"], + status="success" if build_report["status"] else "failure", + elapsed_seconds=build_report["elapsed_seconds"], + with_coverage=False, ) build_results = [] build_urls = [] build_logs_urls = [] - urls_groups = group_by_artifacts(build_report['build_urls']) + urls_groups = group_by_artifacts(build_report["build_urls"]) found_group = False for _, group_urls in urls_groups.items(): if group_urls: build_results.append(build_result) build_urls.append(group_urls) - build_logs_urls.append(build_report['log_url']) + build_logs_urls.append(build_report["log_url"]) found_group = True if not found_group: build_results.append(build_result) build_urls.append([""]) - build_logs_urls.append(build_report['log_url']) + build_logs_urls.append(build_report["log_url"]) return build_results, build_urls, build_logs_urls + def get_build_name_from_file_name(file_name): - return file_name.replace('build_urls_', '').replace('.json', '') + return file_name.replace("build_urls_", "").replace(".json", "") + if __name__ == "__main__": 
logging.basicConfig(level=logging.INFO) @@ -99,17 +134,36 @@ if __name__ == "__main__": build_reports_map = {} for root, dirs, files in os.walk(reports_path): for f in files: - if f.startswith("build_urls_") and f.endswith('.json'): + if f.startswith("build_urls_") and f.endswith(".json"): logging.info("Found build report json %s", f) build_name = get_build_name_from_file_name(f) if build_name in reports_order: - with open(os.path.join(root, f), 'r') as file_handler: + with open(os.path.join(root, f), "r") as file_handler: build_report = json.load(file_handler) build_reports_map[build_name] = build_report else: - logging.info("Skipping report %s for build %s, it's not in our reports list", f, build_name) + logging.info( + "Skipping report %s for build %s, it's not in our reports list", + f, + build_name, + ) - build_reports = [build_reports_map[build_name] for build_name in reports_order if build_name in build_reports_map] + some_builds_are_missing = len(build_reports_map) < len(reports_order) + + if some_builds_are_missing: + logging.info( + "Expected to get %s build results, got %s", + len(reports_order), + len(build_reports_map), + ) + else: + logging.info("Got exactly %s builds", len(build_reports_map)) + + build_reports = [ + build_reports_map[build_name] + for build_name in reports_order + if build_name in build_reports_map + ] build_results = [] build_artifacts = [] @@ -127,7 +181,7 @@ if __name__ == "__main__": logging.info("No builds, failing check") sys.exit(1) - s3_helper = S3Helper('https://s3.amazonaws.com') + s3_helper = S3Helper("https://s3.amazonaws.com") pr_info = PRInfo() @@ -137,7 +191,7 @@ if __name__ == "__main__": branch_name = "PR #{}".format(pr_info.number) branch_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/pull/{pr_info.number}" commit_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/commit/{pr_info.sha}" - task_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/actions/runs/{GITHUB_RUN_ID or '0'}" + task_url = GITHUB_RUN_URL report = create_build_html_report( build_check_name, build_results, @@ -146,18 +200,22 @@ if __name__ == "__main__": task_url, branch_url, branch_name, - commit_url + commit_url, ) - report_path = os.path.join(temp_path, 'report.html') - with open(report_path, 'w') as f: + report_path = os.path.join(temp_path, "report.html") + with open(report_path, "w") as f: f.write(report) logging.info("Going to upload prepared report") - context_name_for_path = build_check_name.lower().replace(' ', '_') - s3_path_prefix = str(pr_info.number) + "/" + pr_info.sha + "/" + context_name_for_path + context_name_for_path = build_check_name.lower().replace(" ", "_") + s3_path_prefix = ( + str(pr_info.number) + "/" + pr_info.sha + "/" + context_name_for_path + ) - url = s3_helper.upload_build_file_to_s3(report_path, s3_path_prefix + "/report.html") + url = s3_helper.upload_build_file_to_s3( + report_path, s3_path_prefix + "/report.html" + ) logging.info("Report url %s", url) total_builds = len(build_results) @@ -172,12 +230,17 @@ if __name__ == "__main__": if build_result.status == "success": ok_builds += 1 - if ok_builds == 0: + if ok_builds == 0 or some_builds_are_missing: summary_status = "error" - description = "{}/{} builds are OK".format(ok_builds, total_builds) + description = f"{ok_builds}/{total_builds} builds are OK" print("::notice ::Report url: {}".format(url)) commit = get_commit(gh, pr_info.sha) - commit.create_status(context=build_check_name, description=description, state=summary_status, target_url=url) + commit.create_status( + 
context=build_check_name, + description=description, + state=summary_status, + target_url=url, + ) diff --git a/tests/ci/ccache_utils.py b/tests/ci/ccache_utils.py index f21f1a8c965..7b0b0f01aa3 100644 --- a/tests/ci/ccache_utils.py +++ b/tests/ci/ccache_utils.py @@ -13,16 +13,19 @@ from compress_files import decompress_fast, compress_fast DOWNLOAD_RETRIES_COUNT = 5 + def dowload_file_with_progress(url, path): logging.info("Downloading from %s to temp path %s", url, path) for i in range(DOWNLOAD_RETRIES_COUNT): try: - with open(path, 'wb') as f: + with open(path, "wb") as f: response = requests.get(url, stream=True) response.raise_for_status() - total_length = response.headers.get('content-length') + total_length = response.headers.get("content-length") if total_length is None or int(total_length) == 0: - logging.info("No content-length, will download file without progress") + logging.info( + "No content-length, will download file without progress" + ) f.write(response.content) else: dl = 0 @@ -34,8 +37,8 @@ def dowload_file_with_progress(url, path): if sys.stdout.isatty(): done = int(50 * dl / total_length) percent = int(100 * float(dl) / total_length) - eq_str = '=' * done - space_str = ' ' * (50 - done) + eq_str = "=" * done + space_str = " " * (50 - done) sys.stdout.write(f"\r[{eq_str}{space_str}] {percent}%") sys.stdout.flush() break @@ -52,7 +55,9 @@ def dowload_file_with_progress(url, path): logging.info("Downloading finished") -def get_ccache_if_not_exists(path_to_ccache_dir, s3_helper, current_pr_number, temp_path): +def get_ccache_if_not_exists( + path_to_ccache_dir, s3_helper, current_pr_number, temp_path +): ccache_name = os.path.basename(path_to_ccache_dir) cache_found = False prs_to_check = [current_pr_number] @@ -93,13 +98,16 @@ def get_ccache_if_not_exists(path_to_ccache_dir, s3_helper, current_pr_number, t else: logging.info("ccache downloaded") + def upload_ccache(path_to_ccache_dir, s3_helper, current_pr_number, temp_path): logging.info("Uploading cache %s for pr %s", path_to_ccache_dir, current_pr_number) ccache_name = os.path.basename(path_to_ccache_dir) compressed_cache_path = os.path.join(temp_path, ccache_name + ".tar.gz") compress_fast(path_to_ccache_dir, compressed_cache_path) - s3_path = str(current_pr_number) + "/ccaches/" + os.path.basename(compressed_cache_path) + s3_path = ( + str(current_pr_number) + "/ccaches/" + os.path.basename(compressed_cache_path) + ) logging.info("Will upload %s to path %s", compressed_cache_path, s3_path) s3_helper.upload_build_file_to_s3(compressed_cache_path, s3_path) logging.info("Upload finished") diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index 91a018f158f..4bbd30cd186 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -20,21 +20,29 @@ if __name__ == "__main__": if not os.path.exists(temp_path): os.makedirs(temp_path) - sys.path.append(os.path.join(repo_path, "utils/github")) - with SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"): token = get_parameter_from_ssm("github_robot_token_1") - bp = Backport(token, os.environ.get("REPO_OWNER"), os.environ.get("REPO_NAME"), os.environ.get("REPO_TEAM")) + bp = Backport( + token, + os.environ.get("REPO_OWNER"), + os.environ.get("REPO_NAME"), + os.environ.get("REPO_TEAM"), + ) + def cherrypick_run(token, pr, branch): - return CherryPick(token, - os.environ.get("REPO_OWNER"), os.environ.get("REPO_NAME"), - os.environ.get("REPO_TEAM"), pr, branch - ).execute(repo_path, False) + return CherryPick( + token, + os.environ.get("REPO_OWNER"), + os.environ.get("REPO_NAME"), 
+ os.environ.get("REPO_TEAM"), + pr, + branch, + ).execute(repo_path, False) try: - bp.execute(repo_path, 'origin', None, cherrypick_run) + bp.execute(repo_path, "origin", None, cherrypick_run) except subprocess.CalledProcessError as e: logging.error(e.output) diff --git a/tests/ci/cherry_pick_utils/backport.py b/tests/ci/cherry_pick_utils/backport.py index 9227dbf4108..615c0d19ffa 100644 --- a/tests/ci/cherry_pick_utils/backport.py +++ b/tests/ci/cherry_pick_utils/backport.py @@ -17,7 +17,9 @@ import sys class Backport: def __init__(self, token, owner, name, team): - self._gh = RemoteRepo(token, owner=owner, name=name, team=team, max_page_size=30, min_page_size=7) + self._gh = RemoteRepo( + token, owner=owner, name=name, team=team, max_page_size=30, min_page_size=7 + ) self._token = token self.default_branch_name = self._gh.default_branch self.ssh_url = self._gh.ssh_url @@ -28,7 +30,7 @@ class Backport: def getBranchesWithRelease(self): branches = set() for pull_request in self._gh.find_pull_requests("release"): - branches.add(pull_request['headRefName']) + branches.add(pull_request["headRefName"]) return branches def execute(self, repo, upstream, until_commit, run_cherrypick): @@ -44,11 +46,11 @@ class Backport: branches.append(branch) if not branches: - logging.info('No release branches found!') + logging.info("No release branches found!") return for branch in branches: - logging.info('Found release branch: %s', branch[0]) + logging.info("Found release branch: %s", branch[0]) if not until_commit: until_commit = branches[0][1] @@ -56,73 +58,128 @@ class Backport: backport_map = {} - RE_MUST_BACKPORT = re.compile(r'^v(\d+\.\d+)-must-backport$') - RE_NO_BACKPORT = re.compile(r'^v(\d+\.\d+)-no-backport$') - RE_BACKPORTED = re.compile(r'^v(\d+\.\d+)-backported$') + RE_MUST_BACKPORT = re.compile(r"^v(\d+\.\d+)-must-backport$") + RE_NO_BACKPORT = re.compile(r"^v(\d+\.\d+)-no-backport$") + RE_BACKPORTED = re.compile(r"^v(\d+\.\d+)-backported$") # pull-requests are sorted by ancestry from the most recent. for pr in pull_requests: - while repo.comparator(branches[-1][1]) >= repo.comparator(pr['mergeCommit']['oid']): - logging.info("PR #{} is already inside {}. Dropping this branch for further PRs".format(pr['number'], branches[-1][0])) + while repo.comparator(branches[-1][1]) >= repo.comparator( + pr["mergeCommit"]["oid"] + ): + logging.info( + "PR #{} is already inside {}. Dropping this branch for further PRs".format( + pr["number"], branches[-1][0] + ) + ) branches.pop() - logging.info("Processing PR #{}".format(pr['number'])) + logging.info("Processing PR #{}".format(pr["number"])) assert len(branches) branch_set = set([branch[0] for branch in branches]) # First pass. Find all must-backports - for label in pr['labels']['nodes']: - if label['name'] == 'pr-must-backport': - backport_map[pr['number']] = branch_set.copy() + for label in pr["labels"]["nodes"]: + if label["name"] == "pr-must-backport": + backport_map[pr["number"]] = branch_set.copy() continue - matched = RE_MUST_BACKPORT.match(label['name']) + matched = RE_MUST_BACKPORT.match(label["name"]) if matched: - if pr['number'] not in backport_map: - backport_map[pr['number']] = set() - backport_map[pr['number']].add(matched.group(1)) + if pr["number"] not in backport_map: + backport_map[pr["number"]] = set() + backport_map[pr["number"]].add(matched.group(1)) # Second pass. 
Find all no-backports - for label in pr['labels']['nodes']: - if label['name'] == 'pr-no-backport' and pr['number'] in backport_map: - del backport_map[pr['number']] + for label in pr["labels"]["nodes"]: + if label["name"] == "pr-no-backport" and pr["number"] in backport_map: + del backport_map[pr["number"]] break - matched_no_backport = RE_NO_BACKPORT.match(label['name']) - matched_backported = RE_BACKPORTED.match(label['name']) - if matched_no_backport and pr['number'] in backport_map and matched_no_backport.group(1) in backport_map[pr['number']]: - backport_map[pr['number']].remove(matched_no_backport.group(1)) - logging.info('\tskipping %s because of forced no-backport', matched_no_backport.group(1)) - elif matched_backported and pr['number'] in backport_map and matched_backported.group(1) in backport_map[pr['number']]: - backport_map[pr['number']].remove(matched_backported.group(1)) - logging.info('\tskipping %s because it\'s already backported manually', matched_backported.group(1)) + matched_no_backport = RE_NO_BACKPORT.match(label["name"]) + matched_backported = RE_BACKPORTED.match(label["name"]) + if ( + matched_no_backport + and pr["number"] in backport_map + and matched_no_backport.group(1) in backport_map[pr["number"]] + ): + backport_map[pr["number"]].remove(matched_no_backport.group(1)) + logging.info( + "\tskipping %s because of forced no-backport", + matched_no_backport.group(1), + ) + elif ( + matched_backported + and pr["number"] in backport_map + and matched_backported.group(1) in backport_map[pr["number"]] + ): + backport_map[pr["number"]].remove(matched_backported.group(1)) + logging.info( + "\tskipping %s because it's already backported manually", + matched_backported.group(1), + ) for pr, branches in list(backport_map.items()): - logging.info('PR #%s needs to be backported to:', pr) + logging.info("PR #%s needs to be backported to:", pr) for branch in branches: - logging.info('\t%s, and the status is: %s', branch, run_cherrypick(self._token, pr, branch)) + logging.info( + "\t%s, and the status is: %s", + branch, + run_cherrypick(self._token, pr, branch), + ) # print API costs - logging.info('\nGitHub API total costs per query:') + logging.info("\nGitHub API total costs per query:") for name, value in list(self._gh.api_costs.items()): - logging.info('%s : %s', name, value) + logging.info("%s : %s", name, value) if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--token', type=str, required=True, help='token for Github access') - parser.add_argument('--repo', type=str, required=True, help='path to full repository', metavar='PATH') - parser.add_argument('--til', type=str, help='check PRs from HEAD til this commit', metavar='COMMIT') - parser.add_argument('--dry-run', action='store_true', help='do not create or merge any PRs', default=False) - parser.add_argument('--verbose', '-v', action='store_true', help='more verbose output', default=False) - parser.add_argument('--upstream', '-u', type=str, help='remote name of upstream in repository', default='origin') + parser.add_argument( + "--token", type=str, required=True, help="token for Github access" + ) + parser.add_argument( + "--repo", + type=str, + required=True, + help="path to full repository", + metavar="PATH", + ) + parser.add_argument( + "--til", type=str, help="check PRs from HEAD til this commit", metavar="COMMIT" + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="do not create or merge any PRs", + default=False, + ) + parser.add_argument( + "--verbose", + 
"-v", + action="store_true", + help="more verbose output", + default=False, + ) + parser.add_argument( + "--upstream", + "-u", + type=str, + help="remote name of upstream in repository", + default="origin", + ) args = parser.parse_args() if args.verbose: - logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.DEBUG) + logging.basicConfig( + format="%(message)s", stream=sys.stdout, level=logging.DEBUG + ) else: - logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO) + logging.basicConfig(format="%(message)s", stream=sys.stdout, level=logging.INFO) - cherrypick_run = lambda token, pr, branch: CherryPick(token, 'ClickHouse', 'ClickHouse', 'core', pr, branch).execute(args.repo, args.dry_run) - bp = Backport(args.token, 'ClickHouse', 'ClickHouse', 'core') + cherrypick_run = lambda token, pr, branch: CherryPick( + token, "ClickHouse", "ClickHouse", "core", pr, branch + ).execute(args.repo, args.dry_run) + bp = Backport(args.token, "ClickHouse", "ClickHouse", "core") bp.execute(args.repo, args.upstream, args.til, cherrypick_run) diff --git a/tests/ci/cherry_pick_utils/cherrypick.py b/tests/ci/cherry_pick_utils/cherrypick.py index 8bedf54fefa..c6469fa62a9 100644 --- a/tests/ci/cherry_pick_utils/cherrypick.py +++ b/tests/ci/cherry_pick_utils/cherrypick.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -''' +""" Backports changes from PR to release branch. Requires multiple separate runs as part of the implementation. @@ -12,7 +12,7 @@ First run should do the following: Second run checks PR from previous run to be merged or at least being mergeable. If it's not merged then try to merge it. Third run creates PR from backport branch (with merged previous PR) to release branch. -''' +""" try: from clickhouse.utils.github.query import Query as RemoteRepo @@ -29,13 +29,13 @@ import sys class CherryPick: class Status(Enum): - DISCARDED = 'discarded' - NOT_INITIATED = 'not started' - FIRST_MERGEABLE = 'waiting for 1st stage' - FIRST_CONFLICTS = 'conflicts on 1st stage' - SECOND_MERGEABLE = 'waiting for 2nd stage' - SECOND_CONFLICTS = 'conflicts on 2nd stage' - MERGED = 'backported' + DISCARDED = "discarded" + NOT_INITIATED = "not started" + FIRST_MERGEABLE = "waiting for 1st stage" + FIRST_CONFLICTS = "conflicts on 1st stage" + SECOND_MERGEABLE = "waiting for 2nd stage" + SECOND_CONFLICTS = "conflicts on 2nd stage" + MERGED = "backported" def _run(self, args): out = subprocess.check_output(args).rstrip() @@ -50,51 +50,90 @@ class CherryPick: # TODO: check if pull-request is merged. 
- self.merge_commit_oid = self._pr['mergeCommit']['oid'] + self.merge_commit_oid = self._pr["mergeCommit"]["oid"] self.target_branch = target_branch - self.backport_branch = 'backport/{branch}/{pr}'.format(branch=target_branch, pr=pr_number) - self.cherrypick_branch = 'cherrypick/{branch}/{oid}'.format(branch=target_branch, oid=self.merge_commit_oid) + self.backport_branch = "backport/{branch}/{pr}".format( + branch=target_branch, pr=pr_number + ) + self.cherrypick_branch = "cherrypick/{branch}/{oid}".format( + branch=target_branch, oid=self.merge_commit_oid + ) def getCherryPickPullRequest(self): - return self._gh.find_pull_request(base=self.backport_branch, head=self.cherrypick_branch) + return self._gh.find_pull_request( + base=self.backport_branch, head=self.cherrypick_branch + ) def createCherryPickPullRequest(self, repo_path): DESCRIPTION = ( - 'This pull-request is a first step of an automated backporting.\n' - 'It contains changes like after calling a local command `git cherry-pick`.\n' - 'If you intend to continue backporting this changes, then resolve all conflicts if any.\n' - 'Otherwise, if you do not want to backport them, then just close this pull-request.\n' - '\n' - 'The check results does not matter at this step - you can safely ignore them.\n' - 'Also this pull-request will be merged automatically as it reaches the mergeable state, but you always can merge it manually.\n' + "This pull-request is a first step of an automated backporting.\n" + "It contains changes like after calling a local command `git cherry-pick`.\n" + "If you intend to continue backporting this changes, then resolve all conflicts if any.\n" + "Otherwise, if you do not want to backport them, then just close this pull-request.\n" + "\n" + "The check results does not matter at this step - you can safely ignore them.\n" + "Also this pull-request will be merged automatically as it reaches the mergeable state, but you always can merge it manually.\n" ) # FIXME: replace with something better than os.system() - git_prefix = ['git', '-C', repo_path, '-c', 'user.email=robot-clickhouse@yandex-team.ru', '-c', 'user.name=robot-clickhouse'] - base_commit_oid = self._pr['mergeCommit']['parents']['nodes'][0]['oid'] + git_prefix = [ + "git", + "-C", + repo_path, + "-c", + "user.email=robot-clickhouse@yandex-team.ru", + "-c", + "user.name=robot-clickhouse", + ] + base_commit_oid = self._pr["mergeCommit"]["parents"]["nodes"][0]["oid"] # Create separate branch for backporting, and make it look like real cherry-pick. - self._run(git_prefix + ['checkout', '-f', self.target_branch]) - self._run(git_prefix + ['checkout', '-B', self.backport_branch]) - self._run(git_prefix + ['merge', '-s', 'ours', '--no-edit', base_commit_oid]) + self._run(git_prefix + ["checkout", "-f", self.target_branch]) + self._run(git_prefix + ["checkout", "-B", self.backport_branch]) + self._run(git_prefix + ["merge", "-s", "ours", "--no-edit", base_commit_oid]) # Create secondary branch to allow pull request with cherry-picked commit. 
- self._run(git_prefix + ['branch', '-f', self.cherrypick_branch, self.merge_commit_oid]) + self._run( + git_prefix + ["branch", "-f", self.cherrypick_branch, self.merge_commit_oid] + ) - self._run(git_prefix + ['push', '-f', 'origin', '{branch}:{branch}'.format(branch=self.backport_branch)]) - self._run(git_prefix + ['push', '-f', 'origin', '{branch}:{branch}'.format(branch=self.cherrypick_branch)]) + self._run( + git_prefix + + [ + "push", + "-f", + "origin", + "{branch}:{branch}".format(branch=self.backport_branch), + ] + ) + self._run( + git_prefix + + [ + "push", + "-f", + "origin", + "{branch}:{branch}".format(branch=self.cherrypick_branch), + ] + ) # Create pull-request like a local cherry-pick - pr = self._gh.create_pull_request(source=self.cherrypick_branch, target=self.backport_branch, - title='Cherry pick #{number} to {target}: {title}'.format( - number=self._pr['number'], target=self.target_branch, - title=self._pr['title'].replace('"', '\\"')), - description='Original pull-request #{}\n\n{}'.format(self._pr['number'], DESCRIPTION)) + pr = self._gh.create_pull_request( + source=self.cherrypick_branch, + target=self.backport_branch, + title="Cherry pick #{number} to {target}: {title}".format( + number=self._pr["number"], + target=self.target_branch, + title=self._pr["title"].replace('"', '\\"'), + ), + description="Original pull-request #{}\n\n{}".format( + self._pr["number"], DESCRIPTION + ), + ) # FIXME: use `team` to leave a single eligible assignee. - self._gh.add_assignee(pr, self._pr['author']) - self._gh.add_assignee(pr, self._pr['mergedBy']) + self._gh.add_assignee(pr, self._pr["author"]) + self._gh.add_assignee(pr, self._pr["mergedBy"]) self._gh.set_label(pr, "do not test") self._gh.set_label(pr, "pr-cherrypick") @@ -102,36 +141,76 @@ class CherryPick: return pr def mergeCherryPickPullRequest(self, cherrypick_pr): - return self._gh.merge_pull_request(cherrypick_pr['id']) + return self._gh.merge_pull_request(cherrypick_pr["id"]) def getBackportPullRequest(self): - return self._gh.find_pull_request(base=self.target_branch, head=self.backport_branch) + return self._gh.find_pull_request( + base=self.target_branch, head=self.backport_branch + ) def createBackportPullRequest(self, cherrypick_pr, repo_path): DESCRIPTION = ( - 'This pull-request is a last step of an automated backporting.\n' - 'Treat it as a standard pull-request: look at the checks and resolve conflicts.\n' - 'Merge it only if you intend to backport changes to the target branch, otherwise just close it.\n' + "This pull-request is a last step of an automated backporting.\n" + "Treat it as a standard pull-request: look at the checks and resolve conflicts.\n" + "Merge it only if you intend to backport changes to the target branch, otherwise just close it.\n" ) - git_prefix = ['git', '-C', repo_path, '-c', 'user.email=robot-clickhouse@clickhouse.com', '-c', 'user.name=robot-clickhouse'] + git_prefix = [ + "git", + "-C", + repo_path, + "-c", + "user.email=robot-clickhouse@clickhouse.com", + "-c", + "user.name=robot-clickhouse", + ] - pr_title = 'Backport #{number} to {target}: {title}'.format( - number=self._pr['number'], target=self.target_branch, - title=self._pr['title'].replace('"', '\\"')) + pr_title = "Backport #{number} to {target}: {title}".format( + number=self._pr["number"], + target=self.target_branch, + title=self._pr["title"].replace('"', '\\"'), + ) - self._run(git_prefix + ['checkout', '-f', self.backport_branch]) - self._run(git_prefix + ['pull', '--ff-only', 'origin', self.backport_branch]) - 
self._run(git_prefix + ['reset', '--soft', self._run(git_prefix + ['merge-base', 'origin/' + self.target_branch, self.backport_branch])]) - self._run(git_prefix + ['commit', '-a', '--allow-empty', '-m', pr_title]) - self._run(git_prefix + ['push', '-f', 'origin', '{branch}:{branch}'.format(branch=self.backport_branch)]) + self._run(git_prefix + ["checkout", "-f", self.backport_branch]) + self._run(git_prefix + ["pull", "--ff-only", "origin", self.backport_branch]) + self._run( + git_prefix + + [ + "reset", + "--soft", + self._run( + git_prefix + + [ + "merge-base", + "origin/" + self.target_branch, + self.backport_branch, + ] + ), + ] + ) + self._run(git_prefix + ["commit", "-a", "--allow-empty", "-m", pr_title]) + self._run( + git_prefix + + [ + "push", + "-f", + "origin", + "{branch}:{branch}".format(branch=self.backport_branch), + ] + ) - pr = self._gh.create_pull_request(source=self.backport_branch, target=self.target_branch, title=pr_title, - description='Original pull-request #{}\nCherry-pick pull-request #{}\n\n{}'.format(self._pr['number'], cherrypick_pr['number'], DESCRIPTION)) + pr = self._gh.create_pull_request( + source=self.backport_branch, + target=self.target_branch, + title=pr_title, + description="Original pull-request #{}\nCherry-pick pull-request #{}\n\n{}".format( + self._pr["number"], cherrypick_pr["number"], DESCRIPTION + ), + ) # FIXME: use `team` to leave a single eligible assignee. - self._gh.add_assignee(pr, self._pr['author']) - self._gh.add_assignee(pr, self._pr['mergedBy']) + self._gh.add_assignee(pr, self._pr["author"]) + self._gh.add_assignee(pr, self._pr["mergedBy"]) self._gh.set_label(pr, "pr-backport") @@ -142,23 +221,43 @@ class CherryPick: if not pr1: if not dry_run: pr1 = self.createCherryPickPullRequest(repo_path) - logging.debug('Created PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url']) + logging.debug( + "Created PR with cherry-pick of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr1["url"], + ) else: return CherryPick.Status.NOT_INITIATED else: - logging.debug('Found PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url']) + logging.debug( + "Found PR with cherry-pick of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr1["url"], + ) - if not pr1['merged'] and pr1['mergeable'] == 'MERGEABLE' and not pr1['closed']: + if not pr1["merged"] and pr1["mergeable"] == "MERGEABLE" and not pr1["closed"]: if not dry_run: pr1 = self.mergeCherryPickPullRequest(pr1) - logging.debug('Merged PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url']) + logging.debug( + "Merged PR with cherry-pick of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr1["url"], + ) - if not pr1['merged']: - logging.debug('Waiting for PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url']) + if not pr1["merged"]: + logging.debug( + "Waiting for PR with cherry-pick of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr1["url"], + ) - if pr1['closed']: + if pr1["closed"]: return CherryPick.Status.DISCARDED - elif pr1['mergeable'] == 'CONFLICTING': + elif pr1["mergeable"] == "CONFLICTING": return CherryPick.Status.FIRST_CONFLICTS else: return CherryPick.Status.FIRST_MERGEABLE @@ -167,31 +266,58 @@ class CherryPick: if not pr2: if not dry_run: pr2 = self.createBackportPullRequest(pr1, repo_path) - logging.debug('Created PR with backport of %s to %s: %s', self._pr['number'], self.target_branch, 
pr2['url']) + logging.debug( + "Created PR with backport of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr2["url"], + ) else: return CherryPick.Status.FIRST_MERGEABLE else: - logging.debug('Found PR with backport of %s to %s: %s', self._pr['number'], self.target_branch, pr2['url']) + logging.debug( + "Found PR with backport of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr2["url"], + ) - if pr2['merged']: + if pr2["merged"]: return CherryPick.Status.MERGED - elif pr2['closed']: + elif pr2["closed"]: return CherryPick.Status.DISCARDED - elif pr2['mergeable'] == 'CONFLICTING': + elif pr2["mergeable"] == "CONFLICTING": return CherryPick.Status.SECOND_CONFLICTS else: return CherryPick.Status.SECOND_MERGEABLE if __name__ == "__main__": - logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.DEBUG) + logging.basicConfig(format="%(message)s", stream=sys.stdout, level=logging.DEBUG) parser = argparse.ArgumentParser() - parser.add_argument('--token', '-t', type=str, required=True, help='token for Github access') - parser.add_argument('--pr', type=str, required=True, help='PR# to cherry-pick') - parser.add_argument('--branch', '-b', type=str, required=True, help='target branch name for cherry-pick') - parser.add_argument('--repo', '-r', type=str, required=True, help='path to full repository', metavar='PATH') + parser.add_argument( + "--token", "-t", type=str, required=True, help="token for Github access" + ) + parser.add_argument("--pr", type=str, required=True, help="PR# to cherry-pick") + parser.add_argument( + "--branch", + "-b", + type=str, + required=True, + help="target branch name for cherry-pick", + ) + parser.add_argument( + "--repo", + "-r", + type=str, + required=True, + help="path to full repository", + metavar="PATH", + ) args = parser.parse_args() - cp = CherryPick(args.token, 'ClickHouse', 'ClickHouse', 'core', args.pr, args.branch) + cp = CherryPick( + args.token, "ClickHouse", "ClickHouse", "core", args.pr, args.branch + ) cp.execute(args.repo) diff --git a/tests/ci/cherry_pick_utils/local.py b/tests/ci/cherry_pick_utils/local.py index 2ad8d4b8b71..571c9102ba0 100644 --- a/tests/ci/cherry_pick_utils/local.py +++ b/tests/ci/cherry_pick_utils/local.py @@ -20,13 +20,14 @@ class RepositoryBase: return -1 else: return 1 + self.comparator = functools.cmp_to_key(cmp) def get_head_commit(self): return self._repo.commit(self._default) def iterate(self, begin, end): - rev_range = '{}...{}'.format(begin, end) + rev_range = "{}...{}".format(begin, end) for commit in self._repo.iter_commits(rev_range, first_parent=True): yield commit @@ -39,27 +40,35 @@ class Repository(RepositoryBase): self._default = self._remote.refs[default_branch_name] def get_release_branches(self): - ''' + """ Returns sorted list of tuples: * remote branch (git.refs.remote.RemoteReference), * base commit (git.Commit), * head (git.Commit)). List is sorted by commits in ascending order. - ''' + """ release_branches = [] - RE_RELEASE_BRANCH_REF = re.compile(r'^refs/remotes/.+/\d+\.\d+$') + RE_RELEASE_BRANCH_REF = re.compile(r"^refs/remotes/.+/\d+\.\d+$") - for branch in [r for r in self._remote.refs if RE_RELEASE_BRANCH_REF.match(r.path)]: + for branch in [ + r for r in self._remote.refs if RE_RELEASE_BRANCH_REF.match(r.path) + ]: base = self._repo.merge_base(self._default, self._repo.commit(branch)) if not base: - logging.info('Branch %s is not based on branch %s. Ignoring.', branch.path, self._default) + logging.info( + "Branch %s is not based on branch %s. 
Ignoring.", + branch.path, + self._default, + ) elif len(base) > 1: - logging.info('Branch %s has more than one base commit. Ignoring.', branch.path) + logging.info( + "Branch %s has more than one base commit. Ignoring.", branch.path + ) else: release_branches.append((os.path.basename(branch.name), base[0])) - return sorted(release_branches, key=lambda x : self.comparator(x[1])) + return sorted(release_branches, key=lambda x: self.comparator(x[1])) class BareRepository(RepositoryBase): @@ -68,24 +77,32 @@ class BareRepository(RepositoryBase): self._default = self._repo.branches[default_branch_name] def get_release_branches(self): - ''' + """ Returns sorted list of tuples: * branch (git.refs.head?), * base commit (git.Commit), * head (git.Commit)). List is sorted by commits in ascending order. - ''' + """ release_branches = [] - RE_RELEASE_BRANCH_REF = re.compile(r'^refs/heads/\d+\.\d+$') + RE_RELEASE_BRANCH_REF = re.compile(r"^refs/heads/\d+\.\d+$") - for branch in [r for r in self._repo.branches if RE_RELEASE_BRANCH_REF.match(r.path)]: + for branch in [ + r for r in self._repo.branches if RE_RELEASE_BRANCH_REF.match(r.path) + ]: base = self._repo.merge_base(self._default, self._repo.commit(branch)) if not base: - logging.info('Branch %s is not based on branch %s. Ignoring.', branch.path, self._default) + logging.info( + "Branch %s is not based on branch %s. Ignoring.", + branch.path, + self._default, + ) elif len(base) > 1: - logging.info('Branch %s has more than one base commit. Ignoring.', branch.path) + logging.info( + "Branch %s has more than one base commit. Ignoring.", branch.path + ) else: release_branches.append((os.path.basename(branch.name), base[0])) - return sorted(release_branches, key=lambda x : self.comparator(x[1])) + return sorted(release_branches, key=lambda x: self.comparator(x[1])) diff --git a/tests/ci/cherry_pick_utils/parser.py b/tests/ci/cherry_pick_utils/parser.py index 570410ba23d..d8348e6d964 100644 --- a/tests/ci/cherry_pick_utils/parser.py +++ b/tests/ci/cherry_pick_utils/parser.py @@ -1,19 +1,20 @@ # -*- coding: utf-8 -*- + class Description: - '''Parsed description representation - ''' + """Parsed description representation""" + MAP_CATEGORY_TO_LABEL = { - 'New Feature': 'pr-feature', - 'Bug Fix': 'pr-bugfix', - 'Improvement': 'pr-improvement', - 'Performance Improvement': 'pr-performance', + "New Feature": "pr-feature", + "Bug Fix": "pr-bugfix", + "Improvement": "pr-improvement", + "Performance Improvement": "pr-performance", # 'Backward Incompatible Change': doesn't match anything - 'Build/Testing/Packaging Improvement': 'pr-build', - 'Non-significant (changelog entry is not needed)': 'pr-non-significant', - 'Non-significant (changelog entry is not required)': 'pr-non-significant', - 'Non-significant': 'pr-non-significant', - 'Documentation (changelog entry is not required)': 'pr-documentation', + "Build/Testing/Packaging Improvement": "pr-build", + "Non-significant (changelog entry is not needed)": "pr-non-significant", + "Non-significant (changelog entry is not required)": "pr-non-significant", + "Non-significant": "pr-non-significant", + "Documentation (changelog entry is not required)": "pr-documentation", # 'Other': doesn't match anything } @@ -21,7 +22,7 @@ class Description: self.label_name = str() self.legal = False - self._parse(pull_request['bodyText']) + self._parse(pull_request["bodyText"]) def _parse(self, text): lines = text.splitlines() @@ -38,14 +39,17 @@ class Description: category = stripped next_category = False - if stripped == 'I 
hereby agree to the terms of the CLA available at: https://yandex.ru/legal/cla/?lang=en': + if ( + stripped + == "I hereby agree to the terms of the CLA available at: https://yandex.ru/legal/cla/?lang=en" + ): self.legal = True category_headers = ( - 'Category (leave one):', - 'Changelog category (leave one):', - 'Changelog category:', - 'Category:' + "Category (leave one):", + "Changelog category (leave one):", + "Changelog category:", + "Category:", ) if stripped in category_headers: @@ -55,6 +59,6 @@ class Description: self.label_name = Description.MAP_CATEGORY_TO_LABEL[category] else: if not category: - print('Cannot find category in pr description') + print("Cannot find category in pr description") else: - print(('Unknown category: ' + category)) + print(("Unknown category: " + category)) diff --git a/tests/ci/cherry_pick_utils/query.py b/tests/ci/cherry_pick_utils/query.py index a9a8f4f1cd1..40eb5bf3604 100644 --- a/tests/ci/cherry_pick_utils/query.py +++ b/tests/ci/cherry_pick_utils/query.py @@ -5,11 +5,11 @@ import time class Query: - ''' + """ Implements queries to the Github API using GraphQL - ''' + """ - _PULL_REQUEST = ''' + _PULL_REQUEST = """ author {{ ... on User {{ id @@ -47,7 +47,7 @@ class Query: number title url - ''' + """ def __init__(self, token, owner, name, team, max_page_size=100, min_page_size=10): self._PULL_REQUEST = Query._PULL_REQUEST.format(min_page_size=min_page_size) @@ -63,14 +63,14 @@ class Query: self.api_costs = {} repo = self.get_repository() - self._id = repo['id'] - self.ssh_url = repo['sshUrl'] - self.default_branch = repo['defaultBranchRef']['name'] + self._id = repo["id"] + self.ssh_url = repo["sshUrl"] + self.default_branch = repo["defaultBranchRef"]["name"] self.members = set(self.get_members()) def get_repository(self): - _QUERY = ''' + _QUERY = """ repository(owner: "{owner}" name: "{name}") {{ defaultBranchRef {{ name @@ -78,19 +78,19 @@ class Query: id sshUrl }} - ''' + """ query = _QUERY.format(owner=self._owner, name=self._name) - return self._run(query)['repository'] + return self._run(query)["repository"] def get_members(self): - '''Get all team members for organization + """Get all team members for organization Returns: members: a map of members' logins to ids - ''' + """ - _QUERY = ''' + _QUERY = """ organization(login: "{organization}") {{ team(slug: "{team}") {{ members(first: {max_page_size} {next}) {{ @@ -105,43 +105,54 @@ class Query: }} }} }} - ''' + """ members = {} not_end = True - query = _QUERY.format(organization=self._owner, team=self._team, - max_page_size=self._max_page_size, - next='') + query = _QUERY.format( + organization=self._owner, + team=self._team, + max_page_size=self._max_page_size, + next="", + ) while not_end: - result = self._run(query)['organization']['team'] + result = self._run(query)["organization"]["team"] if result is None: break - result = result['members'] - not_end = result['pageInfo']['hasNextPage'] - query = _QUERY.format(organization=self._owner, team=self._team, - max_page_size=self._max_page_size, - next='after: "{}"'.format(result["pageInfo"]["endCursor"])) + result = result["members"] + not_end = result["pageInfo"]["hasNextPage"] + query = _QUERY.format( + organization=self._owner, + team=self._team, + max_page_size=self._max_page_size, + next='after: "{}"'.format(result["pageInfo"]["endCursor"]), + ) - members += dict([(node['login'], node['id']) for node in result['nodes']]) + members += dict([(node["login"], node["id"]) for node in result["nodes"]]) return members def 
get_pull_request(self, number): - _QUERY = ''' + _QUERY = """ repository(owner: "{owner}" name: "{name}") {{ pullRequest(number: {number}) {{ {pull_request_data} }} }} - ''' + """ - query = _QUERY.format(owner=self._owner, name=self._name, number=number, - pull_request_data=self._PULL_REQUEST, min_page_size=self._min_page_size) - return self._run(query)['repository']['pullRequest'] + query = _QUERY.format( + owner=self._owner, + name=self._name, + number=number, + pull_request_data=self._PULL_REQUEST, + min_page_size=self._min_page_size, + ) + return self._run(query)["repository"]["pullRequest"] def find_pull_request(self, base, head): - _QUERY = ''' + _QUERY = """ repository(owner: "{owner}" name: "{name}") {{ pullRequests(first: {min_page_size} baseRefName: "{base}" headRefName: "{head}") {{ nodes {{ @@ -150,21 +161,27 @@ class Query: totalCount }} }} - ''' + """ - query = _QUERY.format(owner=self._owner, name=self._name, base=base, head=head, - pull_request_data=self._PULL_REQUEST, min_page_size=self._min_page_size) - result = self._run(query)['repository']['pullRequests'] - if result['totalCount'] > 0: - return result['nodes'][0] + query = _QUERY.format( + owner=self._owner, + name=self._name, + base=base, + head=head, + pull_request_data=self._PULL_REQUEST, + min_page_size=self._min_page_size, + ) + result = self._run(query)["repository"]["pullRequests"] + if result["totalCount"] > 0: + return result["nodes"][0] else: return {} def find_pull_requests(self, label_name): - ''' + """ Get all pull-requests filtered by label name - ''' - _QUERY = ''' + """ + _QUERY = """ repository(owner: "{owner}" name: "{name}") {{ pullRequests(first: {min_page_size} labels: "{label_name}" states: OPEN) {{ nodes {{ @@ -172,18 +189,23 @@ class Query: }} }} }} - ''' + """ - query = _QUERY.format(owner=self._owner, name=self._name, label_name=label_name, - pull_request_data=self._PULL_REQUEST, min_page_size=self._min_page_size) - return self._run(query)['repository']['pullRequests']['nodes'] + query = _QUERY.format( + owner=self._owner, + name=self._name, + label_name=label_name, + pull_request_data=self._PULL_REQUEST, + min_page_size=self._min_page_size, + ) + return self._run(query)["repository"]["pullRequests"]["nodes"] def get_pull_requests(self, before_commit): - ''' + """ Get all merged pull-requests from the HEAD of default branch to the last commit (excluding) - ''' + """ - _QUERY = ''' + _QUERY = """ repository(owner: "{owner}" name: "{name}") {{ defaultBranchRef {{ target {{ @@ -221,44 +243,60 @@ class Query: }} }} }} - ''' + """ pull_requests = [] not_end = True - query = _QUERY.format(owner=self._owner, name=self._name, - max_page_size=self._max_page_size, - min_page_size=self._min_page_size, - pull_request_data=self._PULL_REQUEST, - next='') + query = _QUERY.format( + owner=self._owner, + name=self._name, + max_page_size=self._max_page_size, + min_page_size=self._min_page_size, + pull_request_data=self._PULL_REQUEST, + next="", + ) while not_end: - result = self._run(query)['repository']['defaultBranchRef']['target']['history'] - not_end = result['pageInfo']['hasNextPage'] - query = _QUERY.format(owner=self._owner, name=self._name, - max_page_size=self._max_page_size, - min_page_size=self._min_page_size, - pull_request_data=self._PULL_REQUEST, - next='after: "{}"'.format(result["pageInfo"]["endCursor"])) + result = self._run(query)["repository"]["defaultBranchRef"]["target"][ + "history" + ] + not_end = result["pageInfo"]["hasNextPage"] + query = _QUERY.format( + owner=self._owner, + 
name=self._name, + max_page_size=self._max_page_size, + min_page_size=self._min_page_size, + pull_request_data=self._PULL_REQUEST, + next='after: "{}"'.format(result["pageInfo"]["endCursor"]), + ) - for commit in result['nodes']: + for commit in result["nodes"]: # FIXME: maybe include `before_commit`? - if str(commit['oid']) == str(before_commit): + if str(commit["oid"]) == str(before_commit): not_end = False break # TODO: fetch all pull-requests that were merged in a single commit. - assert commit['associatedPullRequests']['totalCount'] <= self._min_page_size + assert ( + commit["associatedPullRequests"]["totalCount"] + <= self._min_page_size + ) - for pull_request in commit['associatedPullRequests']['nodes']: - if(pull_request['baseRepository']['nameWithOwner'] == '{}/{}'.format(self._owner, self._name) and - pull_request['baseRefName'] == self.default_branch and - pull_request['mergeCommit']['oid'] == commit['oid']): + for pull_request in commit["associatedPullRequests"]["nodes"]: + if ( + pull_request["baseRepository"]["nameWithOwner"] + == "{}/{}".format(self._owner, self._name) + and pull_request["baseRefName"] == self.default_branch + and pull_request["mergeCommit"]["oid"] == commit["oid"] + ): pull_requests.append(pull_request) return pull_requests - def create_pull_request(self, source, target, title, description="", draft=False, can_modify=True): - _QUERY = ''' + def create_pull_request( + self, source, target, title, description="", draft=False, can_modify=True + ): + _QUERY = """ createPullRequest(input: {{ baseRefName: "{target}", headRefName: "{source}", @@ -272,15 +310,22 @@ class Query: {pull_request_data} }} }} - ''' + """ - query = _QUERY.format(target=target, source=source, id=self._id, title=title, body=description, - draft="true" if draft else "false", modify="true" if can_modify else "false", - pull_request_data=self._PULL_REQUEST) - return self._run(query, is_mutation=True)['createPullRequest']['pullRequest'] + query = _QUERY.format( + target=target, + source=source, + id=self._id, + title=title, + body=description, + draft="true" if draft else "false", + modify="true" if can_modify else "false", + pull_request_data=self._PULL_REQUEST, + ) + return self._run(query, is_mutation=True)["createPullRequest"]["pullRequest"] def merge_pull_request(self, id): - _QUERY = ''' + _QUERY = """ mergePullRequest(input: {{ pullRequestId: "{id}" }}) {{ @@ -288,35 +333,35 @@ class Query: {pull_request_data} }} }} - ''' + """ query = _QUERY.format(id=id, pull_request_data=self._PULL_REQUEST) - return self._run(query, is_mutation=True)['mergePullRequest']['pullRequest'] + return self._run(query, is_mutation=True)["mergePullRequest"]["pullRequest"] # FIXME: figure out how to add more assignees at once def add_assignee(self, pr, assignee): - _QUERY = ''' + _QUERY = """ addAssigneesToAssignable(input: {{ assignableId: "{id1}", assigneeIds: "{id2}" }}) {{ clientMutationId }} - ''' + """ - query = _QUERY.format(id1=pr['id'], id2=assignee['id']) + query = _QUERY.format(id1=pr["id"], id2=assignee["id"]) self._run(query, is_mutation=True) def set_label(self, pull_request, label_name): - ''' + """ Set label by name to the pull request Args: pull_request: JSON object returned by `get_pull_requests()` label_name (string): label name - ''' + """ - _GET_LABEL = ''' + _GET_LABEL = """ repository(owner: "{owner}" name: "{name}") {{ labels(first: {max_page_size} {next} query: "{label_name}") {{ pageInfo {{ @@ -330,36 +375,44 @@ class Query: }} }} }} - ''' + """ - _SET_LABEL = ''' + _SET_LABEL = """ 
addLabelsToLabelable(input: {{ labelableId: "{pr_id}", labelIds: "{label_id}" }}) {{ clientMutationId }} - ''' + """ labels = [] not_end = True - query = _GET_LABEL.format(owner=self._owner, name=self._name, label_name=label_name, - max_page_size=self._max_page_size, - next='') + query = _GET_LABEL.format( + owner=self._owner, + name=self._name, + label_name=label_name, + max_page_size=self._max_page_size, + next="", + ) while not_end: - result = self._run(query)['repository']['labels'] - not_end = result['pageInfo']['hasNextPage'] - query = _GET_LABEL.format(owner=self._owner, name=self._name, label_name=label_name, - max_page_size=self._max_page_size, - next='after: "{}"'.format(result["pageInfo"]["endCursor"])) + result = self._run(query)["repository"]["labels"] + not_end = result["pageInfo"]["hasNextPage"] + query = _GET_LABEL.format( + owner=self._owner, + name=self._name, + label_name=label_name, + max_page_size=self._max_page_size, + next='after: "{}"'.format(result["pageInfo"]["endCursor"]), + ) - labels += [label for label in result['nodes']] + labels += [label for label in result["nodes"]] if not labels: return - query = _SET_LABEL.format(pr_id=pull_request['id'], label_id=labels[0]['id']) + query = _SET_LABEL.format(pr_id=pull_request["id"], label_id=labels[0]["id"]) self._run(query, is_mutation=True) def _run(self, query, is_mutation=False): @@ -385,19 +438,21 @@ class Query: status_forcelist=status_forcelist, ) adapter = HTTPAdapter(max_retries=retry) - session.mount('http://', adapter) - session.mount('https://', adapter) + session.mount("http://", adapter) + session.mount("https://", adapter) return session - headers = {'Authorization': 'bearer {}'.format(self._token)} + headers = {"Authorization": "bearer {}".format(self._token)} if is_mutation: - query = ''' + query = """ mutation {{ {query} }} - '''.format(query=query) + """.format( + query=query + ) else: - query = ''' + query = """ query {{ {query} rateLimit {{ @@ -405,23 +460,38 @@ class Query: remaining }} }} - '''.format(query=query) + """.format( + query=query + ) while True: - request = requests_retry_session().post('https://api.github.com/graphql', json={'query': query}, headers=headers) + request = requests_retry_session().post( + "https://api.github.com/graphql", json={"query": query}, headers=headers + ) if request.status_code == 200: result = request.json() - if 'errors' in result: - raise Exception('Errors occurred: {}\nOriginal query: {}'.format(result["errors"], query)) + if "errors" in result: + raise Exception( + "Errors occurred: {}\nOriginal query: {}".format( + result["errors"], query + ) + ) if not is_mutation: import inspect + caller = inspect.getouterframes(inspect.currentframe(), 2)[1][3] if caller not in list(self.api_costs.keys()): self.api_costs[caller] = 0 - self.api_costs[caller] += result['data']['rateLimit']['cost'] + self.api_costs[caller] += result["data"]["rateLimit"]["cost"] - return result['data'] + return result["data"] else: import json - raise Exception('Query failed with code {code}:\n{json}'.format(code=request.status_code, json=json.dumps(request.json(), indent=4))) + + raise Exception( + "Query failed with code {code}:\n{json}".format( + code=request.status_code, + json=json.dumps(request.json(), indent=4), + ) + ) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index b45a4ce90c6..5c63b3f1ad1 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -14,7 +14,7 @@ CI_CONFIG = { "package_type": "deb", "bundled": "bundled", "splitted": "unsplitted", - 
"alien_pkgs": True, + "additional_pkgs": True, "tidy": "disable", "with_coverage": False, }, @@ -45,7 +45,7 @@ CI_CONFIG = { "package_type": "deb", "bundled": "bundled", "splitted": "unsplitted", - "alien_pkgs": True, + "additional_pkgs": True, "tidy": "disable", "with_coverage": False, }, @@ -349,6 +349,9 @@ CI_CONFIG = { "Stateless tests flaky check (address, actions)": { "required_build": "package_asan", }, + "Stateless tests bugfix validate check (address, actions)": { + "required_build": "package_asan", + }, "ClickHouse Keeper Jepsen (actions)": { "required_build": "binary_release", }, diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py index 0d8aee552f5..7ccbcb4a47e 100644 --- a/tests/ci/clickhouse_helper.py +++ b/tests/ci/clickhouse_helper.py @@ -6,36 +6,31 @@ import json import requests # type: ignore from get_robot_token import get_parameter_from_ssm + class ClickHouseHelper: - def __init__(self, url=None, user=None, password=None): - self.url2 = None - self.auth2 = None - + def __init__(self, url=None): if url is None: - url = get_parameter_from_ssm("clickhouse-test-stat-url") - self.url2 = get_parameter_from_ssm("clickhouse-test-stat-url2") - self.auth2 = { - 'X-ClickHouse-User': get_parameter_from_ssm("clickhouse-test-stat-login2"), - 'X-ClickHouse-Key': '' + self.url = get_parameter_from_ssm("clickhouse-test-stat-url2") + self.auth = { + "X-ClickHouse-User": get_parameter_from_ssm( + "clickhouse-test-stat-login2" + ), + "X-ClickHouse-Key": "", } - self.url = url - self.auth = { - 'X-ClickHouse-User': user if user is not None else get_parameter_from_ssm("clickhouse-test-stat-login"), - 'X-ClickHouse-Key': password if password is not None else get_parameter_from_ssm("clickhouse-test-stat-password") - } - @staticmethod def _insert_json_str_info_impl(url, auth, db, table, json_str): params = { - 'database': db, - 'query': 'INSERT INTO {table} FORMAT JSONEachRow'.format(table=table), - 'date_time_input_format': 'best_effort', - 'send_logs_level': 'warning', + "database": db, + "query": "INSERT INTO {table} FORMAT JSONEachRow".format(table=table), + "date_time_input_format": "best_effort", + "send_logs_level": "warning", } for i in range(5): - response = requests.post(url, params=params, data=json_str, headers=auth, verify=False) + response = requests.post( + url, params=params, data=json_str, headers=auth, verify=False + ) logging.info("Response content '%s'", response.content) @@ -43,16 +38,25 @@ class ClickHouseHelper: break error = ( - "Cannot insert data into clickhouse at try " + str(i) - + ": HTTP code " + str(response.status_code) + ": '" - + str(response.text) + "'") + "Cannot insert data into clickhouse at try " + + str(i) + + ": HTTP code " + + str(response.status_code) + + ": '" + + str(response.text) + + "'" + ) if response.status_code >= 500: # A retriable error time.sleep(1) continue - logging.info("Request headers '%s', body '%s'", response.request.headers, response.request.body) + logging.info( + "Request headers '%s', body '%s'", + response.request.headers, + response.request.body, + ) raise Exception(error) else: @@ -60,8 +64,6 @@ class ClickHouseHelper: def _insert_json_str_info(self, db, table, json_str): self._insert_json_str_info_impl(self.url, self.auth, db, table, json_str) - if self.url2: - self._insert_json_str_info_impl(self.url2, self.auth2, db, table, json_str) def insert_event_into(self, db, table, event): event_str = json.dumps(event) @@ -72,18 +74,20 @@ class ClickHouseHelper: for event in events: 
jsons.append(json.dumps(event)) - self._insert_json_str_info(db, table, ','.join(jsons)) + self._insert_json_str_info(db, table, ",".join(jsons)) def _select_and_get_json_each_row(self, db, query): params = { - 'database': db, - 'query': query, - 'default_format': 'JSONEachRow', + "database": db, + "query": query, + "default_format": "JSONEachRow", } for i in range(5): response = None try: - response = requests.get(self.url, params=params, headers=self.auth, verify=False) + response = requests.get( + self.url, params=params, headers=self.auth, verify=False + ) response.raise_for_status() return response.text except Exception as ex: @@ -97,15 +101,21 @@ class ClickHouseHelper: def select_json_each_row(self, db, query): text = self._select_and_get_json_each_row(db, query) result = [] - for line in text.split('\n'): + for line in text.split("\n"): if line: result.append(json.loads(line)) return result + def prepare_tests_results_for_clickhouse( - pr_info, test_results, - check_status, check_duration, check_start_time, - report_url, check_name): + pr_info, + test_results, + check_status, + check_duration, + check_start_time, + report_url, + check_name, +): pull_request_url = "https://github.com/ClickHouse/ClickHouse/commits/master" base_ref = "master" @@ -147,13 +157,14 @@ def prepare_tests_results_for_clickhouse( test_time = 0 if len(test_result) > 2 and test_result[2]: test_time = test_result[2] - current_row['test_duration_ms'] = int(float(test_time) * 1000) - current_row['test_name'] = test_name - current_row['test_status'] = test_status + current_row["test_duration_ms"] = int(float(test_time) * 1000) + current_row["test_name"] = test_name + current_row["test_status"] = test_status result.append(current_row) return result + def mark_flaky_tests(clickhouse_helper, check_name, test_results): try: query = """ @@ -164,14 +175,16 @@ def mark_flaky_tests(clickhouse_helper, check_name, test_results): AND check_name = '{check_name}' AND (test_status = 'FAIL' OR test_status = 'FLAKY') AND pull_request_number = 0 - """.format(check_name=check_name) + """.format( + check_name=check_name + ) - tests_data = clickhouse_helper.select_json_each_row('gh-data', query) - master_failed_tests = {row['test_name'] for row in tests_data} - logging.info("Found flaky tests: %s", ', '.join(master_failed_tests)) + tests_data = clickhouse_helper.select_json_each_row("gh-data", query) + master_failed_tests = {row["test_name"] for row in tests_data} + logging.info("Found flaky tests: %s", ", ".join(master_failed_tests)) for test_result in test_results: - if test_result[1] == 'FAIL' and test_result[0] in master_failed_tests: - test_result[1] = 'FLAKY' + if test_result[1] == "FAIL" and test_result[0] in master_failed_tests: + test_result[1] = "FLAKY" except Exception as ex: logging.info("Exception happened during flaky tests fetch %s", ex) diff --git a/tests/ci/codebrowser_check.py b/tests/ci/codebrowser_check.py index 97fd58c3235..48c92e9f6ac 100644 --- a/tests/ci/codebrowser_check.py +++ b/tests/ci/codebrowser_check.py @@ -18,13 +18,16 @@ from tee_popen import TeePopen NAME = "Woboq Build (actions)" + def get_run_command(repo_path, output_path, image): - cmd = "docker run " + \ - f"--volume={repo_path}:/repo_folder " \ - f"--volume={output_path}:/test_output " \ - f"-e 'DATA=https://s3.amazonaws.com/clickhouse-test-reports/codebrowser/data' {image}" + cmd = ( + "docker run " + f"--volume={repo_path}:/repo_folder " + f"--volume={output_path}:/test_output " + f"-e 
'DATA=https://s3.amazonaws.com/clickhouse-test-reports/codebrowser/data' {image}" + ) return cmd + if __name__ == "__main__": logging.basicConfig(level=logging.INFO) @@ -37,8 +40,8 @@ if __name__ == "__main__": if not os.path.exists(temp_path): os.makedirs(temp_path) - docker_image = get_image_with_version(IMAGES_PATH, 'clickhouse/codebrowser') - s3_helper = S3Helper('https://s3.amazonaws.com') + docker_image = get_image_with_version(IMAGES_PATH, "clickhouse/codebrowser") + s3_helper = S3Helper("https://s3.amazonaws.com") result_path = os.path.join(temp_path, "result_path") if not os.path.exists(result_path): @@ -62,14 +65,20 @@ if __name__ == "__main__": report_path = os.path.join(result_path, "html_report") logging.info("Report path %s", report_path) s3_path_prefix = "codebrowser" - html_urls = s3_helper.fast_parallel_upload_dir(report_path, s3_path_prefix, 'clickhouse-test-reports') + html_urls = s3_helper.fast_parallel_upload_dir( + report_path, s3_path_prefix, "clickhouse-test-reports" + ) index_html = 'HTML report' test_results = [(index_html, "Look at the report")] - report_url = upload_results(s3_helper, 0, os.getenv("GITHUB_SHA"), test_results, [], NAME) + report_url = upload_results( + s3_helper, 0, os.getenv("GITHUB_SHA"), test_results, [], NAME + ) print(f"::notice ::Report url: {report_url}") - post_commit_status(gh, os.getenv("GITHUB_SHA"), NAME, "Report built", "success", report_url) + post_commit_status( + gh, os.getenv("GITHUB_SHA"), NAME, "Report built", "success", report_url + ) diff --git a/tests/ci/commit_status_helper.py b/tests/ci/commit_status_helper.py index dd57f742ff1..e379c9a2254 100644 --- a/tests/ci/commit_status_helper.py +++ b/tests/ci/commit_status_helper.py @@ -1,15 +1,23 @@ #!/usr/bin/env python3 import time +import os +import csv from env_helper import GITHUB_REPOSITORY from ci_config import CI_CONFIG RETRY = 5 -def override_status(status, check_name): - if CI_CONFIG["tests_config"][check_name].get("force_tests", False): +def override_status(status, check_name, invert=False): + if CI_CONFIG["tests_config"].get(check_name, {}).get("force_tests", False): return "success" + + if invert: + if status == "success": + return "error" + return "success" + return status @@ -43,3 +51,11 @@ def post_commit_status(gh, sha, check_name, description, state, report_url): if i == RETRY - 1: raise ex time.sleep(i) + + +def post_commit_status_to_file(file_path, description, state, report_url): + if os.path.exists(file_path): + raise Exception(f'File "{file_path}" already exists!') + with open(file_path, "w", encoding="utf-8") as f: + out = csv.writer(f, delimiter="\t") + out.writerow([state, report_url, description]) diff --git a/tests/ci/compatibility_check.py b/tests/ci/compatibility_check.py index 72626bd6364..d546fabf231 100644 --- a/tests/ci/compatibility_check.py +++ b/tests/ci/compatibility_check.py @@ -16,34 +16,40 @@ from build_download_helper import download_builds_filter from upload_result_helper import upload_results from docker_pull_helper import get_images_with_versions from commit_status_helper import post_commit_status -from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse +from clickhouse_helper import ( + ClickHouseHelper, + mark_flaky_tests, + prepare_tests_results_for_clickhouse, +) from stopwatch import Stopwatch from rerun_helper import RerunHelper IMAGE_UBUNTU = "clickhouse/test-old-ubuntu" IMAGE_CENTOS = "clickhouse/test-old-centos" -MAX_GLIBC_VERSION = '2.4' +MAX_GLIBC_VERSION = "2.4" 
DOWNLOAD_RETRIES_COUNT = 5 CHECK_NAME = "Compatibility check (actions)" + def process_os_check(log_path): name = os.path.basename(log_path) - with open(log_path, 'r') as log: - line = log.read().split('\n')[0].strip() - if line != 'OK': + with open(log_path, "r") as log: + line = log.read().split("\n")[0].strip() + if line != "OK": return (name, "FAIL") else: return (name, "OK") + def process_glibc_check(log_path): bad_lines = [] - with open(log_path, 'r') as log: + with open(log_path, "r") as log: for line in log: if line.strip(): - columns = line.strip().split(' ') + columns = line.strip().split(" ") symbol_with_glibc = columns[-2] # sysconf@GLIBC_2.2.5 - _, version = symbol_with_glibc.split('@GLIBC_') - if version == 'PRIVATE': + _, version = symbol_with_glibc.split("@GLIBC_") + if version == "PRIVATE": bad_lines.append((symbol_with_glibc, "FAIL")) elif StrictVersion(version) > MAX_GLIBC_VERSION: bad_lines.append((symbol_with_glibc, "FAIL")) @@ -51,6 +57,7 @@ def process_glibc_check(log_path): bad_lines.append(("glibc check", "OK")) return bad_lines + def process_result(result_folder, server_log_folder): summary = process_glibc_check(os.path.join(result_folder, "glibc.log")) @@ -86,16 +93,18 @@ def process_result(result_folder, server_log_folder): return status, description, summary, result_logs -def get_run_commands(build_path, result_folder, server_log_folder, image_centos, image_ubuntu): +def get_run_commands( + build_path, result_folder, server_log_folder, image_centos, image_ubuntu +): return [ f"readelf -s {build_path}/usr/bin/clickhouse | grep '@GLIBC_' > {result_folder}/glibc.log", f"readelf -s {build_path}/usr/bin/clickhouse-odbc-bridge | grep '@GLIBC_' >> {result_folder}/glibc.log", - f"docker run --network=host --volume={build_path}/usr/bin/clickhouse:/clickhouse " \ - f"--volume={build_path}/etc/clickhouse-server:/config " \ - f"--volume={server_log_folder}:/var/log/clickhouse-server {image_ubuntu} > {result_folder}/ubuntu:12.04", - f"docker run --network=host --volume={build_path}/usr/bin/clickhouse:/clickhouse " \ - f"--volume={build_path}/etc/clickhouse-server:/config " \ - f"--volume={server_log_folder}:/var/log/clickhouse-server {image_centos} > {result_folder}/centos:5", + f"docker run --network=host --volume={build_path}/usr/bin/clickhouse:/clickhouse " + f"--volume={build_path}/etc/clickhouse-server:/config " + f"--volume={server_log_folder}:/var/log/clickhouse-server {image_ubuntu} > {result_folder}/ubuntu:12.04", + f"docker run --network=host --volume={build_path}/usr/bin/clickhouse:/clickhouse " + f"--volume={build_path}/etc/clickhouse-server:/config " + f"--volume={server_log_folder}:/var/log/clickhouse-server {image_centos} > {result_folder}/centos:5", ] @@ -124,14 +133,18 @@ if __name__ == "__main__": os.makedirs(packages_path) def url_filter(url): - return url.endswith('.deb') and ('clickhouse-common-static_' in url or 'clickhouse-server_' in url) + return url.endswith(".deb") and ( + "clickhouse-common-static_" in url or "clickhouse-server_" in url + ) download_builds_filter(CHECK_NAME, reports_path, packages_path, url_filter) for f in os.listdir(packages_path): - if '.deb' in f: + if ".deb" in f: full_path = os.path.join(packages_path, f) - subprocess.check_call(f"dpkg -x {full_path} {packages_path} && rm {full_path}", shell=True) + subprocess.check_call( + f"dpkg -x {full_path} {packages_path} && rm {full_path}", shell=True + ) server_log_path = os.path.join(temp_path, "server_log") if not os.path.exists(server_log_path): @@ -141,7 +154,9 @@ if __name__ == 
"__main__": if not os.path.exists(result_path): os.makedirs(result_path) - run_commands = get_run_commands(packages_path, result_path, server_log_path, docker_images[0], docker_images[1]) + run_commands = get_run_commands( + packages_path, result_path, server_log_path, docker_images[0], docker_images[1] + ) state = "success" for run_command in run_commands: @@ -154,15 +169,32 @@ if __name__ == "__main__": subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) - s3_helper = S3Helper('https://s3.amazonaws.com') - state, description, test_results, additional_logs = process_result(result_path, server_log_path) + s3_helper = S3Helper("https://s3.amazonaws.com") + state, description, test_results, additional_logs = process_result( + result_path, server_log_path + ) ch_helper = ClickHouseHelper() mark_flaky_tests(ch_helper, CHECK_NAME, test_results) - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, additional_logs, CHECK_NAME) + report_url = upload_results( + s3_helper, + pr_info.number, + pr_info.sha, + test_results, + additional_logs, + CHECK_NAME, + ) print(f"::notice ::Report url: {report_url}") post_commit_status(gh, pr_info.sha, CHECK_NAME, description, state, report_url) - prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, CHECK_NAME) + prepared_events = prepare_tests_results_for_clickhouse( + pr_info, + test_results, + state, + stopwatch.duration_seconds, + stopwatch.start_time_str, + report_url, + CHECK_NAME, + ) ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/compress_files.py b/tests/ci/compress_files.py index f3d2349408f..53170a4e9e3 100644 --- a/tests/ci/compress_files.py +++ b/tests/ci/compress_files.py @@ -3,20 +3,21 @@ import subprocess import logging import os + def compress_file_fast(path, archive_path): - if os.path.exists('/usr/bin/pigz'): + if os.path.exists("/usr/bin/pigz"): subprocess.check_call("pigz < {} > {}".format(path, archive_path), shell=True) else: subprocess.check_call("gzip < {} > {}".format(path, archive_path), shell=True) def compress_fast(path, archive_path, exclude=None): - pigz_part = '' - if os.path.exists('/usr/bin/pigz'): + pigz_part = "" + if os.path.exists("/usr/bin/pigz"): logging.info("pigz found, will compress and decompress faster") pigz_part = "--use-compress-program='pigz'" else: - pigz_part = '-z' + pigz_part = "-z" logging.info("no pigz, compressing with default tar") if exclude is None: @@ -31,21 +32,36 @@ def compress_fast(path, archive_path, exclude=None): path = os.path.dirname(path) else: path += "/.." 
- cmd = "tar {} {} -cf {} -C {} {}".format(pigz_part, exclude_part, archive_path, path, fname) + cmd = "tar {} {} -cf {} -C {} {}".format( + pigz_part, exclude_part, archive_path, path, fname + ) logging.debug("compress_fast cmd: %s", cmd) subprocess.check_call(cmd, shell=True) def decompress_fast(archive_path, result_path=None): - pigz_part = '' - if os.path.exists('/usr/bin/pigz'): - logging.info("pigz found, will compress and decompress faster ('%s' -> '%s')", archive_path, result_path) + pigz_part = "" + if os.path.exists("/usr/bin/pigz"): + logging.info( + "pigz found, will compress and decompress faster ('%s' -> '%s')", + archive_path, + result_path, + ) pigz_part = "--use-compress-program='pigz'" else: - pigz_part = '-z' - logging.info("no pigz, decompressing with default tar ('%s' -> '%s')", archive_path, result_path) + pigz_part = "-z" + logging.info( + "no pigz, decompressing with default tar ('%s' -> '%s')", + archive_path, + result_path, + ) if result_path is None: - subprocess.check_call("tar {} -xf {}".format(pigz_part, archive_path), shell=True) + subprocess.check_call( + "tar {} -xf {}".format(pigz_part, archive_path), shell=True + ) else: - subprocess.check_call("tar {} -xf {} -C {}".format(pigz_part, archive_path, result_path), shell=True) + subprocess.check_call( + "tar {} -xf {} -C {}".format(pigz_part, archive_path, result_path), + shell=True, + ) diff --git a/tests/ci/docker_images_check.py b/tests/ci/docker_images_check.py index 818478f6430..3d0cc468aec 100644 --- a/tests/ci/docker_images_check.py +++ b/tests/ci/docker_images_check.py @@ -11,7 +11,7 @@ from typing import Dict, List, Optional, Set, Tuple, Union from github import Github -from env_helper import GITHUB_WORKSPACE, RUNNER_TEMP +from env_helper import GITHUB_WORKSPACE, RUNNER_TEMP, GITHUB_RUN_URL from s3_helper import S3Helper from pr_info import PRInfo from get_robot_token import get_best_robot_token, get_parameter_from_ssm @@ -234,6 +234,7 @@ def build_and_push_one_image( with open(build_log, "wb") as bl: cmd = ( "docker buildx build --builder default " + f"--label build-url={GITHUB_RUN_URL} " f"{from_tag_arg}" f"--build-arg BUILDKIT_INLINE_CACHE=1 " f"--tag {image.repo}:{version_string} " diff --git a/tests/ci/docker_pull_helper.py b/tests/ci/docker_pull_helper.py index 50354da6801..ee7f3337cd9 100644 --- a/tests/ci/docker_pull_helper.py +++ b/tests/ci/docker_pull_helper.py @@ -6,23 +6,29 @@ import time import subprocess import logging +from typing import Optional + + class DockerImage: - def __init__(self, name, version=None): + def __init__(self, name, version: Optional[str] = None): self.name = name if version is None: - self.version = 'latest' + self.version = "latest" else: self.version = version def __str__(self): return f"{self.name}:{self.version}" -def get_images_with_versions(reports_path, required_image, pull=True): + +def get_images_with_versions( + reports_path, required_image, pull=True, version: Optional[str] = None +): images_path = None for root, _, files in os.walk(reports_path): for f in files: - if f == 'changed_images.json': - images_path = os.path.join(root, 'changed_images.json') + if f == "changed_images.json": + images_path = os.path.join(root, "changed_images.json") break if not images_path: @@ -32,7 +38,7 @@ def get_images_with_versions(reports_path, required_image, pull=True): if images_path is not None and os.path.exists(images_path): logging.info("Images file exists") - with open(images_path, 'r', encoding='utf-8') as images_fd: + with open(images_path, "r", 
encoding="utf-8") as images_fd: images = json.load(images_fd) logging.info("Got images %s", images) else: @@ -40,7 +46,7 @@ def get_images_with_versions(reports_path, required_image, pull=True): docker_images = [] for image_name in required_image: - docker_image = DockerImage(image_name) + docker_image = DockerImage(image_name, version) if image_name in images: docker_image.version = images[image_name] docker_images.append(docker_image) @@ -50,15 +56,22 @@ def get_images_with_versions(reports_path, required_image, pull=True): for i in range(10): try: logging.info("Pulling image %s", docker_image) - latest_error = subprocess.check_output(f"docker pull {docker_image}", stderr=subprocess.STDOUT, shell=True) + latest_error = subprocess.check_output( + f"docker pull {docker_image}", + stderr=subprocess.STDOUT, + shell=True, + ) break except Exception as ex: time.sleep(i * 3) logging.info("Got execption pulling docker %s", ex) else: - raise Exception(f"Cannot pull dockerhub for image docker pull {docker_image} because of {latest_error}") + raise Exception( + f"Cannot pull dockerhub for image docker pull {docker_image} because of {latest_error}" + ) return docker_images -def get_image_with_version(reports_path, image, pull=True): - return get_images_with_versions(reports_path, [image], pull)[0] + +def get_image_with_version(reports_path, image, pull=True, version=None): + return get_images_with_versions(reports_path, [image], pull, version=version)[0] diff --git a/tests/ci/docker_test.py b/tests/ci/docker_test.py index 27bfe07db53..2b864b6b94c 100644 --- a/tests/ci/docker_test.py +++ b/tests/ci/docker_test.py @@ -4,6 +4,7 @@ import os import unittest from unittest.mock import patch +from env_helper import GITHUB_RUN_URL from pr_info import PRInfo import docker_images_check as di @@ -117,7 +118,8 @@ class TestDockerImageCheck(unittest.TestCase): mock_popen.assert_called_once() mock_machine.assert_not_called() self.assertIn( - "docker buildx build --builder default --build-arg FROM_TAG=version " + f"docker buildx build --builder default --label build-url={GITHUB_RUN_URL} " + "--build-arg FROM_TAG=version " "--build-arg BUILDKIT_INLINE_CACHE=1 --tag name:version --cache-from " "type=registry,ref=name:version --push --progress plain path", mock_popen.call_args.args, @@ -133,7 +135,8 @@ class TestDockerImageCheck(unittest.TestCase): mock_popen.assert_called_once() mock_machine.assert_not_called() self.assertIn( - "docker buildx build --builder default --build-arg FROM_TAG=version2 " + f"docker buildx build --builder default --label build-url={GITHUB_RUN_URL} " + "--build-arg FROM_TAG=version2 " "--build-arg BUILDKIT_INLINE_CACHE=1 --tag name:version2 --cache-from " "type=registry,ref=name:version2 --progress plain path", mock_popen.call_args.args, @@ -149,7 +152,7 @@ class TestDockerImageCheck(unittest.TestCase): mock_popen.assert_called_once() mock_machine.assert_not_called() self.assertIn( - "docker buildx build --builder default " + f"docker buildx build --builder default --label build-url={GITHUB_RUN_URL} " "--build-arg BUILDKIT_INLINE_CACHE=1 --tag name:version2 --cache-from " "type=registry,ref=name:version2 --progress plain path", mock_popen.call_args.args, diff --git a/tests/ci/docs_check.py b/tests/ci/docs_check.py index 23e90aa5b60..58678b160a4 100644 --- a/tests/ci/docs_check.py +++ b/tests/ci/docs_check.py @@ -40,7 +40,9 @@ if __name__ == "__main__": if not pr_info.has_changes_in_documentation(): logging.info("No changes in documentation") commit = get_commit(gh, pr_info.sha) - 
commit.create_status(context=NAME, description="No changes in docs", state="success") + commit.create_status( + context=NAME, description="No changes in docs", state="success" + ) sys.exit(0) logging.info("Has changes in docs") @@ -48,15 +50,15 @@ if __name__ == "__main__": if not os.path.exists(temp_path): os.makedirs(temp_path) - docker_image = get_image_with_version(temp_path, 'clickhouse/docs-check') + docker_image = get_image_with_version(temp_path, "clickhouse/docs-check") - test_output = os.path.join(temp_path, 'docs_check_log') + test_output = os.path.join(temp_path, "docs_check_log") if not os.path.exists(test_output): os.makedirs(test_output) cmd = f"docker run --cap-add=SYS_PTRACE --volume={repo_path}:/repo_path --volume={test_output}:/output_path {docker_image}" - run_log_path = os.path.join(test_output, 'runlog.log') + run_log_path = os.path.join(test_output, "runlog.log") logging.info("Running command: '%s'", cmd) with TeePopen(cmd, run_log_path) as process: @@ -82,10 +84,10 @@ if __name__ == "__main__": for f in files: path = os.path.join(test_output, f) additional_files.append(path) - with open(path, 'r', encoding='utf-8') as check_file: + with open(path, "r", encoding="utf-8") as check_file: for line in check_file: if "ERROR" in line: - lines.append((line.split(':')[-1], "FAIL")) + lines.append((line.split(":")[-1], "FAIL")) if lines: status = "failure" description = "Found errors in docs" @@ -94,12 +96,22 @@ if __name__ == "__main__": else: lines.append(("Non zero exit code", "FAIL")) - s3_helper = S3Helper('https://s3.amazonaws.com') + s3_helper = S3Helper("https://s3.amazonaws.com") ch_helper = ClickHouseHelper() - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, lines, additional_files, NAME) + report_url = upload_results( + s3_helper, pr_info.number, pr_info.sha, lines, additional_files, NAME + ) print("::notice ::Report url: {report_url}") post_commit_status(gh, pr_info.sha, NAME, description, status, report_url) - prepared_events = prepare_tests_results_for_clickhouse(pr_info, lines, status, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, NAME) + prepared_events = prepare_tests_results_for_clickhouse( + pr_info, + lines, + status, + stopwatch.duration_seconds, + stopwatch.start_time_str, + report_url, + NAME, + ) ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/docs_release.py b/tests/ci/docs_release.py index 825bca0b68b..b6d47326f9b 100644 --- a/tests/ci/docs_release.py +++ b/tests/ci/docs_release.py @@ -34,19 +34,23 @@ if __name__ == "__main__": if not os.path.exists(temp_path): os.makedirs(temp_path) - docker_image = get_image_with_version(temp_path, 'clickhouse/docs-release') + docker_image = get_image_with_version(temp_path, "clickhouse/docs-release") - test_output = os.path.join(temp_path, 'docs_release_log') + test_output = os.path.join(temp_path, "docs_release_log") if not os.path.exists(test_output): os.makedirs(test_output) token = CLOUDFLARE_TOKEN - cmd = "docker run --cap-add=SYS_PTRACE --volume=$SSH_AUTH_SOCK:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent " \ - f"-e CLOUDFLARE_TOKEN={token} --volume={repo_path}:/repo_path --volume={test_output}:/output_path {docker_image}" + cmd = ( + "docker run --cap-add=SYS_PTRACE --volume=$SSH_AUTH_SOCK:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent " + f"-e CLOUDFLARE_TOKEN={token} --volume={repo_path}:/repo_path --volume={test_output}:/output_path {docker_image}" + ) - run_log_path = os.path.join(test_output, 'runlog.log') + run_log_path 
= os.path.join(test_output, "runlog.log") - with open(run_log_path, 'w', encoding='utf-8') as log, SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"): + with open(run_log_path, "w", encoding="utf-8") as log, SSHKey( + "ROBOT_CLICKHOUSE_SSH_KEY" + ): with subprocess.Popen(cmd, shell=True, stderr=log, stdout=log) as process: retcode = process.wait() if retcode == 0: @@ -70,10 +74,10 @@ if __name__ == "__main__": for f in files: path = os.path.join(test_output, f) additional_files.append(path) - with open(path, 'r', encoding='utf-8') as check_file: + with open(path, "r", encoding="utf-8") as check_file: for line in check_file: if "ERROR" in line: - lines.append((line.split(':')[-1], "FAIL")) + lines.append((line.split(":")[-1], "FAIL")) if lines: status = "failure" description = "Found errors in docs" @@ -82,9 +86,13 @@ if __name__ == "__main__": else: lines.append(("Non zero exit code", "FAIL")) - s3_helper = S3Helper('https://s3.amazonaws.com') + s3_helper = S3Helper("https://s3.amazonaws.com") - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, lines, additional_files, NAME) + report_url = upload_results( + s3_helper, pr_info.number, pr_info.sha, lines, additional_files, NAME + ) print("::notice ::Report url: {report_url}") commit = get_commit(gh, pr_info.sha) - commit.create_status(context=NAME, description=description, state=status, target_url=report_url) + commit.create_status( + context=NAME, description=description, state=status, target_url=report_url + ) diff --git a/tests/ci/download_previous_release.py b/tests/ci/download_previous_release.py new file mode 100644 index 00000000000..16d0f9e4939 --- /dev/null +++ b/tests/ci/download_previous_release.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 + +########################################################################### +# # +# TODO (@vdimir, @Avogar) # +# Merge with one from https://github.com/ClickHouse/ClickHouse/pull/27928 # +# # +########################################################################### + +import re +import os +import logging + +import requests + +CLICKHOUSE_TAGS_URL = "https://api.github.com/repos/ClickHouse/ClickHouse/tags" + +CLICKHOUSE_COMMON_STATIC_DOWNLOAD_URL = "https://github.com/ClickHouse/ClickHouse/releases/download/v{version}-{type}/clickhouse-common-static_{version}_amd64.deb" +CLICKHOUSE_COMMON_STATIC_DBG_DOWNLOAD_URL = "https://github.com/ClickHouse/ClickHouse/releases/download/v{version}-{type}/clickhouse-common-static-dbg_{version}_amd64.deb" +CLICKHOUSE_SERVER_DOWNLOAD_URL = "https://github.com/ClickHouse/ClickHouse/releases/download/v{version}-{type}/clickhouse-server_{version}_all.deb" +CLICKHOUSE_CLIENT_DOWNLOAD_URL = "https://github.com/ClickHouse/ClickHouse/releases/download/v{version}-{type}/clickhouse-client_{version}_all.deb" + + +CLICKHOUSE_COMMON_STATIC_PACKET_NAME = "clickhouse-common-static_{version}_amd64.deb" +CLICKHOUSE_COMMON_STATIC_DBG_PACKET_NAME = ( + "clickhouse-common-static-dbg_{version}_amd64.deb" +) +CLICKHOUSE_SERVER_PACKET_NAME = "clickhouse-server_{version}_all.deb" +CLICKHOUSE_CLIENT_PACKET_NAME = "clickhouse-client_{version}_all.deb" + +PACKETS_DIR = "previous_release_package_folder/" +VERSION_PATTERN = r"((?:\d+\.)?(?:\d+\.)?(?:\d+\.)?\d+-[a-zA-Z]*)" + + +class Version: + def __init__(self, version): + self.version = version + + def __lt__(self, other): + return list(map(int, self.version.split("."))) < list( + map(int, other.version.split(".")) + ) + + def __str__(self): + return self.version + + +class ReleaseInfo: + def __init__(self, version, release_type): 
+ self.version = version + self.type = release_type + + def __repr__(self): + return f"ReleaseInfo: {self.version}-{self.type}" + + +def find_previous_release(server_version, releases): + releases.sort(key=lambda x: x.version, reverse=True) + + if server_version is None: + return True, releases[0] + + for release in releases: + if release.version < server_version: + return True, release + + return False, None + + +def get_previous_release(server_version=None): + page = 1 + found = False + while not found: + response = requests.get(CLICKHOUSE_TAGS_URL, {"page": page, "per_page": 100}) + if not response.ok: + raise Exception( + "Cannot load the list of tags from github: " + response.reason + ) + + releases_str = set(re.findall(VERSION_PATTERN, response.text)) + if len(releases_str) == 0: + raise Exception( + "Cannot find previous release for " + + str(server_version) + + " server version" + ) + + releases = list( + map( + lambda x: ReleaseInfo(Version(x.split("-")[0]), x.split("-")[1]), + releases_str, + ) + ) + found, previous_release = find_previous_release(server_version, releases) + page += 1 + + return previous_release + + +def download_packet(url, out_path): + """ + TODO: use dowload_build_with_progress from build_download_helper.py + """ + + response = requests.get(url) + logging.info("Downloading %s", url) + if response.ok: + open(out_path, "wb").write(response.content) + + +def download_packets(release, dest_path=PACKETS_DIR): + if not os.path.exists(dest_path): + os.makedirs(dest_path) + + logging.info("Will download %s", release) + + download_packet( + CLICKHOUSE_COMMON_STATIC_DOWNLOAD_URL.format( + version=release.version, type=release.type + ), + out_path=os.path.join( + dest_path, + CLICKHOUSE_COMMON_STATIC_PACKET_NAME.format(version=release.version), + ), + ) + + download_packet( + CLICKHOUSE_COMMON_STATIC_DBG_DOWNLOAD_URL.format( + version=release.version, type=release.type + ), + out_path=os.path.join( + dest_path, + CLICKHOUSE_COMMON_STATIC_DBG_PACKET_NAME.format(version=release.version), + ), + ) + + download_packet( + CLICKHOUSE_SERVER_DOWNLOAD_URL.format( + version=release.version, type=release.type + ), + out_path=os.path.join( + dest_path, CLICKHOUSE_SERVER_PACKET_NAME.format(version=release.version) + ), + ) + + download_packet( + CLICKHOUSE_CLIENT_DOWNLOAD_URL.format( + version=release.version, type=release.type + ), + out_path=os.path.join( + dest_path, CLICKHOUSE_CLIENT_PACKET_NAME.format(version=release.version) + ), + ) + + +def download_previous_release(dest_path): + current_release = get_previous_release(None) + download_packets(current_release, dest_path=dest_path) + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + server_version = Version(input()) + previous_release = get_previous_release(server_version) + download_packets(previous_release) diff --git a/tests/ci/env_helper.py b/tests/ci/env_helper.py index 90178e5c56a..c34162ba51a 100644 --- a/tests/ci/env_helper.py +++ b/tests/ci/env_helper.py @@ -7,9 +7,10 @@ CACHES_PATH = os.getenv("CACHES_PATH", TEMP_PATH) CLOUDFLARE_TOKEN = os.getenv("CLOUDFLARE_TOKEN") GITHUB_EVENT_PATH = os.getenv("GITHUB_EVENT_PATH") GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse") -GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID") +GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID", "0") GITHUB_SERVER_URL = os.getenv("GITHUB_SERVER_URL", "https://github.com") GITHUB_WORKSPACE = os.getenv("GITHUB_WORKSPACE", os.path.abspath("../../")) +GITHUB_RUN_URL = 
f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/actions/runs/{GITHUB_RUN_ID}" IMAGES_PATH = os.getenv("IMAGES_PATH") REPORTS_PATH = os.getenv("REPORTS_PATH", "./reports") REPO_COPY = os.getenv("REPO_COPY", os.path.abspath("../../")) diff --git a/tests/ci/finish_check.py b/tests/ci/finish_check.py index 72f26daf4cd..289e32406ef 100644 --- a/tests/ci/finish_check.py +++ b/tests/ci/finish_check.py @@ -2,12 +2,12 @@ import logging from github import Github -from env_helper import GITHUB_SERVER_URL, GITHUB_REPOSITORY, GITHUB_RUN_ID +from env_helper import GITHUB_RUN_URL from pr_info import PRInfo from get_robot_token import get_best_robot_token from commit_status_helper import get_commit -NAME = 'Run Check (actions)' +NAME = "Run Check (actions)" def filter_statuses(statuses): @@ -33,7 +33,12 @@ if __name__ == "__main__": gh = Github(get_best_robot_token()) commit = get_commit(gh, pr_info.sha) - url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/actions/runs/{GITHUB_RUN_ID}" + url = GITHUB_RUN_URL statuses = filter_statuses(list(commit.get_statuses())) if NAME in statuses and statuses[NAME].state == "pending": - commit.create_status(context=NAME, description="All checks finished", state="success", target_url=url) + commit.create_status( + context=NAME, + description="All checks finished", + state="success", + target_url=url, + ) diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index da08ff28a0f..52ec5a0f8e9 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -1,9 +1,10 @@ #!/usr/bin/env python3 +import argparse import csv import logging -import subprocess import os +import subprocess import sys from github import Github @@ -13,25 +14,38 @@ from s3_helper import S3Helper from get_robot_token import get_best_robot_token from pr_info import PRInfo from build_download_helper import download_all_deb_packages +from download_previous_release import download_previous_release from upload_result_helper import upload_results from docker_pull_helper import get_image_with_version -from commit_status_helper import post_commit_status, get_commit, override_status -from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse +from commit_status_helper import ( + post_commit_status, + get_commit, + override_status, + post_commit_status_to_file, +) +from clickhouse_helper import ( + ClickHouseHelper, + mark_flaky_tests, + prepare_tests_results_for_clickhouse, +) from stopwatch import Stopwatch from rerun_helper import RerunHelper from tee_popen import TeePopen +NO_CHANGES_MSG = "Nothing to run" + + def get_additional_envs(check_name, run_by_hash_num, run_by_hash_total): result = [] - if 'DatabaseReplicated' in check_name: + if "DatabaseReplicated" in check_name: result.append("USE_DATABASE_REPLICATED=1") - if 'DatabaseOrdinary' in check_name: + if "DatabaseOrdinary" in check_name: result.append("USE_DATABASE_ORDINARY=1") - if 'wide parts enabled' in check_name: + if "wide parts enabled" in check_name: result.append("USE_POLYMORPHIC_PARTS=1") - #temporary - if 's3 storage' in check_name: + # temporary + if "s3 storage" in check_name: result.append("USE_S3_STORAGE_FOR_MERGE_TREE=1") if run_by_hash_total != 0: @@ -40,36 +54,55 @@ def get_additional_envs(check_name, run_by_hash_num, run_by_hash_total): return result + def get_image_name(check_name): - if 'stateless' in check_name.lower(): - return 'clickhouse/stateless-test' - if 'stateful' in check_name.lower(): - return 'clickhouse/stateful-test' + if 
"stateless" in check_name.lower(): + return "clickhouse/stateless-test" + if "stateful" in check_name.lower(): + return "clickhouse/stateful-test" else: raise Exception(f"Cannot deduce image name based on check name {check_name}") -def get_run_command(builds_path, repo_tests_path, result_path, server_log_path, kill_timeout, additional_envs, image, flaky_check, tests_to_run): - additional_options = ['--hung-check'] - additional_options.append('--print-time') + +def get_run_command( + builds_path, + repo_tests_path, + result_path, + server_log_path, + kill_timeout, + additional_envs, + image, + flaky_check, + tests_to_run, +): + additional_options = ["--hung-check"] + additional_options.append("--print-time") if tests_to_run: additional_options += tests_to_run - additional_options_str = '-e ADDITIONAL_OPTIONS="' + ' '.join(additional_options) + '"' + additional_options_str = ( + '-e ADDITIONAL_OPTIONS="' + " ".join(additional_options) + '"' + ) - envs = [f'-e MAX_RUN_TIME={int(0.9 * kill_timeout)}', '-e S3_URL="https://clickhouse-datasets.s3.amazonaws.com"'] + envs = [ + f"-e MAX_RUN_TIME={int(0.9 * kill_timeout)}", + '-e S3_URL="https://clickhouse-datasets.s3.amazonaws.com"', + ] if flaky_check: - envs += ['-e NUM_TRIES=100', '-e MAX_RUN_TIME=1800'] + envs += ["-e NUM_TRIES=100", "-e MAX_RUN_TIME=1800"] - envs += [f'-e {e}' for e in additional_envs] + envs += [f"-e {e}" for e in additional_envs] - env_str = ' '.join(envs) + env_str = " ".join(envs) - return f"docker run --volume={builds_path}:/package_folder " \ - f"--volume={repo_tests_path}:/usr/share/clickhouse-test " \ - f"--volume={result_path}:/test_output --volume={server_log_path}:/var/log/clickhouse-server " \ + return ( + f"docker run --volume={builds_path}:/package_folder " + f"--volume={repo_tests_path}:/usr/share/clickhouse-test " + f"--volume={result_path}:/test_output --volume={server_log_path}:/var/log/clickhouse-server " f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image}" + ) def get_tests_to_run(pr_info): @@ -79,32 +112,43 @@ def get_tests_to_run(pr_info): return [] for fpath in pr_info.changed_files: - if 'tests/queries/0_stateless/0' in fpath: - logging.info('File %s changed and seems like stateless test', fpath) - fname = fpath.split('/')[3] + if "tests/queries/0_stateless/0" in fpath: + logging.info("File %s changed and seems like stateless test", fpath) + fname = fpath.split("/")[3] fname_without_ext = os.path.splitext(fname)[0] - result.add(fname_without_ext + '.') + result.add(fname_without_ext + ".") return list(result) + def process_results(result_folder, server_log_path): test_results = [] additional_files = [] # Just upload all files from result_folder. # If task provides processed results, then it's responsible for content of result_folder. 
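Aside: the process_results helpers in these check scripts all consume the same pair of tab-separated artifacts produced by the test run: check_status.tsv with a single state/description row, and test_results.tsv with one test per row. A small sketch of that contract (paths and column layout are a best-effort illustration, not taken verbatim from the patch):

    import csv
    import os

    def read_check_artifacts(result_folder: str):
        # check_status.tsv is expected to hold exactly one row: <state>\t<description>
        with open(os.path.join(result_folder, "check_status.tsv"), encoding="utf-8") as f:
            status_rows = list(csv.reader(f, delimiter="\t"))
        if len(status_rows) != 1 or len(status_rows[0]) != 2:
            return "error", "Invalid check_status.tsv", []
        state, description = status_rows[0]

        # test_results.tsv holds one test per row, e.g. <test name>\t<OK|FAIL|...>[\t<time, sec.>]
        with open(os.path.join(result_folder, "test_results.tsv"), encoding="utf-8") as f:
            test_results = list(csv.reader(f, delimiter="\t"))
        return state, description, test_results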
if os.path.exists(result_folder): - test_files = [f for f in os.listdir(result_folder) if os.path.isfile(os.path.join(result_folder, f))] + test_files = [ + f + for f in os.listdir(result_folder) + if os.path.isfile(os.path.join(result_folder, f)) + ] additional_files = [os.path.join(result_folder, f) for f in test_files] if os.path.exists(server_log_path): - server_log_files = [f for f in os.listdir(server_log_path) if os.path.isfile(os.path.join(server_log_path, f))] - additional_files = additional_files + [os.path.join(server_log_path, f) for f in server_log_files] + server_log_files = [ + f + for f in os.listdir(server_log_path) + if os.path.isfile(os.path.join(server_log_path, f)) + ] + additional_files = additional_files + [ + os.path.join(server_log_path, f) for f in server_log_files + ] status = [] status_path = os.path.join(result_folder, "check_status.tsv") if os.path.exists(status_path): logging.info("Found test_results.tsv") - with open(status_path, 'r', encoding='utf-8') as status_file: - status = list(csv.reader(status_file, delimiter='\t')) + with open(status_path, "r", encoding="utf-8") as status_file: + status = list(csv.reader(status_file, delimiter="\t")) if len(status) != 1 or len(status[0]) != 2: logging.info("Files in result folder %s", os.listdir(result_folder)) @@ -119,14 +163,32 @@ def process_results(result_folder, server_log_path): logging.info("Files in result folder %s", os.listdir(result_folder)) return "error", "Not found test_results.tsv", test_results, additional_files - with open(results_path, 'r', encoding='utf-8') as results_file: - test_results = list(csv.reader(results_file, delimiter='\t')) + with open(results_path, "r", encoding="utf-8") as results_file: + test_results = list(csv.reader(results_file, delimiter="\t")) if len(test_results) == 0: return "error", "Empty test_results.tsv", test_results, additional_files return state, description, test_results, additional_files +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("check_name") + parser.add_argument("kill_timeout", type=int) + parser.add_argument( + "--validate-bugfix", + action="store_true", + help="Check that added tests failed on latest stable", + ) + parser.add_argument( + "--post-commit-status", + default="commit_status", + choices=["commit_status", "file"], + help="Where to public post commit status", + ) + return parser.parse_args() + + if __name__ == "__main__": logging.basicConfig(level=logging.INFO) @@ -136,18 +198,38 @@ if __name__ == "__main__": repo_path = REPO_COPY reports_path = REPORTS_PATH - check_name = sys.argv[1] - kill_timeout = int(sys.argv[2]) + args = parse_args() + check_name = args.check_name + kill_timeout = args.kill_timeout + validate_bugix_check = args.validate_bugfix - flaky_check = 'flaky' in check_name.lower() + flaky_check = "flaky" in check_name.lower() + + run_changed_tests = flaky_check or validate_bugix_check gh = Github(get_best_robot_token()) - pr_info = PRInfo(need_changed_files=flaky_check) + pr_info = PRInfo(need_changed_files=run_changed_tests) - if 'RUN_BY_HASH_NUM' in os.environ: - run_by_hash_num = int(os.getenv('RUN_BY_HASH_NUM')) - run_by_hash_total = int(os.getenv('RUN_BY_HASH_TOTAL')) - check_name_with_group = check_name + f' [{run_by_hash_num + 1}/{run_by_hash_total}]' + if not os.path.exists(temp_path): + os.makedirs(temp_path) + + if validate_bugix_check and "pr-bugfix" not in pr_info.labels: + if args.post_commit_status == "file": + post_commit_status_to_file( + os.path.join(temp_path, "post_commit_status.tsv"), 
+ "Skipped (no pr-bugfix)", + "success", + "null", + ) + logging.info("Skipping '%s' (no pr-bugfix)", check_name) + sys.exit(0) + + if "RUN_BY_HASH_NUM" in os.environ: + run_by_hash_num = int(os.getenv("RUN_BY_HASH_NUM")) + run_by_hash_total = int(os.getenv("RUN_BY_HASH_TOTAL")) + check_name_with_group = ( + check_name + f" [{run_by_hash_num + 1}/{run_by_hash_total}]" + ) else: run_by_hash_num = 0 run_by_hash_total = 0 @@ -158,15 +240,23 @@ if __name__ == "__main__": logging.info("Check is already finished according to github status, exiting") sys.exit(0) - if not os.path.exists(temp_path): - os.makedirs(temp_path) - tests_to_run = [] - if flaky_check: + if run_changed_tests: tests_to_run = get_tests_to_run(pr_info) if not tests_to_run: commit = get_commit(gh, pr_info.sha) - commit.create_status(context=check_name_with_group, description='Not found changed stateless tests', state='success') + state = override_status("success", check_name, validate_bugix_check) + if args.post_commit_status == "commit_status": + commit.create_status( + context=check_name_with_group, + description=NO_CHANGES_MSG, + state=state, + ) + elif args.post_commit_status == "file": + fpath = os.path.join(temp_path, "post_commit_status.tsv") + post_commit_status_to_file( + fpath, description=NO_CHANGES_MSG, state=state, report_url="null" + ) sys.exit(0) image_name = get_image_name(check_name) @@ -178,7 +268,10 @@ if __name__ == "__main__": if not os.path.exists(packages_path): os.makedirs(packages_path) - download_all_deb_packages(check_name, reports_path, packages_path) + if validate_bugix_check: + download_previous_release(packages_path) + else: + download_all_deb_packages(check_name, reports_path, packages_path) server_log_path = os.path.join(temp_path, "server_log") if not os.path.exists(server_log_path): @@ -190,8 +283,23 @@ if __name__ == "__main__": run_log_path = os.path.join(result_path, "runlog.log") - additional_envs = get_additional_envs(check_name, run_by_hash_num, run_by_hash_total) - run_command = get_run_command(packages_path, repo_tests_path, result_path, server_log_path, kill_timeout, additional_envs, docker_image, flaky_check, tests_to_run) + additional_envs = get_additional_envs( + check_name, run_by_hash_num, run_by_hash_total + ) + if validate_bugix_check: + additional_envs.append("GLOBAL_TAGS=no-random-settings") + + run_command = get_run_command( + packages_path, + repo_tests_path, + result_path, + server_log_path, + kill_timeout, + additional_envs, + docker_image, + flaky_check, + tests_to_run, + ) logging.info("Going to run func tests: %s", run_command) with TeePopen(run_command, run_log_path) as process: @@ -203,24 +311,55 @@ if __name__ == "__main__": subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) - s3_helper = S3Helper('https://s3.amazonaws.com') + s3_helper = S3Helper("https://s3.amazonaws.com") - state, description, test_results, additional_logs = process_results(result_path, server_log_path) - state = override_status(state, check_name) + state, description, test_results, additional_logs = process_results( + result_path, server_log_path + ) + state = override_status(state, check_name, validate_bugix_check) ch_helper = ClickHouseHelper() mark_flaky_tests(ch_helper, check_name, test_results) - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [run_log_path] + additional_logs, check_name_with_group) + report_url = upload_results( + s3_helper, + pr_info.number, + pr_info.sha, + test_results, + [run_log_path] + additional_logs, + 
check_name_with_group, + ) - print(f"::notice ::Report url: {report_url}") - post_commit_status(gh, pr_info.sha, check_name_with_group, description, state, report_url) + print(f"::notice:: {check_name} Report url: {report_url}") + if args.post_commit_status == "commit_status": + post_commit_status( + gh, pr_info.sha, check_name_with_group, description, state, report_url + ) + elif args.post_commit_status == "file": + post_commit_status_to_file( + os.path.join(temp_path, "post_commit_status.tsv"), + description, + state, + report_url, + ) + else: + raise Exception( + f'Unknown post_commit_status option "{args.post_commit_status}"' + ) - prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, check_name_with_group) + prepared_events = prepare_tests_results_for_clickhouse( + pr_info, + test_results, + state, + stopwatch.duration_seconds, + stopwatch.start_time_str, + report_url, + check_name_with_group, + ) ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) - if state != 'success': - if 'force-tests' in pr_info.labels: + if state != "success": + if "force-tests" in pr_info.labels: print("'force-tests' enabled, will report success") else: sys.exit(1) diff --git a/tests/ci/get_robot_token.py b/tests/ci/get_robot_token.py index fae277fe319..cb79d9ae01a 100644 --- a/tests/ci/get_robot_token.py +++ b/tests/ci/get_robot_token.py @@ -2,13 +2,15 @@ import boto3 # type: ignore from github import Github # type: ignore + def get_parameter_from_ssm(name, decrypt=True, client=None): if not client: - client = boto3.client('ssm', region_name='us-east-1') - return client.get_parameter(Name=name, WithDecryption=decrypt)['Parameter']['Value'] + client = boto3.client("ssm", region_name="us-east-1") + return client.get_parameter(Name=name, WithDecryption=decrypt)["Parameter"]["Value"] + def get_best_robot_token(token_prefix_env_name="github_robot_token_", total_tokens=4): - client = boto3.client('ssm', region_name='us-east-1') + client = boto3.client("ssm", region_name="us-east-1") tokens = {} for i in range(1, total_tokens + 1): token_name = token_prefix_env_name + str(i) diff --git a/tests/ci/integration_test_check.py b/tests/ci/integration_test_check.py index 786521db418..30009414d6e 100644 --- a/tests/ci/integration_test_check.py +++ b/tests/ci/integration_test_check.py @@ -1,11 +1,12 @@ #!/usr/bin/env python3 -import os -import logging -import sys -import json -import subprocess +import argparse import csv +import json +import logging +import os +import subprocess +import sys from github import Github @@ -14,10 +15,19 @@ from s3_helper import S3Helper from get_robot_token import get_best_robot_token from pr_info import PRInfo from build_download_helper import download_all_deb_packages +from download_previous_release import download_previous_release from upload_result_helper import upload_results from docker_pull_helper import get_images_with_versions -from commit_status_helper import post_commit_status -from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse +from commit_status_helper import ( + post_commit_status, + override_status, + post_commit_status_to_file, +) +from clickhouse_helper import ( + ClickHouseHelper, + mark_flaky_tests, + prepare_tests_results_for_clickhouse, +) from stopwatch import Stopwatch from rerun_helper import RerunHelper from tee_popen import TeePopen @@ -39,24 +49,28 @@ IMAGES = [ "clickhouse/dotnet-client", 
] -def get_json_params_dict(check_name, pr_info, docker_images, run_by_hash_total, run_by_hash_num): + +def get_json_params_dict( + check_name, pr_info, docker_images, run_by_hash_total, run_by_hash_num +): return { - 'context_name': check_name, - 'commit': pr_info.sha, - 'pull_request': pr_info.number, - 'pr_info': {'changed_files' : list(pr_info.changed_files)}, - 'docker_images_with_versions': docker_images, - 'shuffle_test_groups': False, - 'use_tmpfs': False, - 'disable_net_host': True, - 'run_by_hash_total': run_by_hash_total, - 'run_by_hash_num': run_by_hash_num, + "context_name": check_name, + "commit": pr_info.sha, + "pull_request": pr_info.number, + "pr_info": {"changed_files": list(pr_info.changed_files)}, + "docker_images_with_versions": docker_images, + "shuffle_test_groups": False, + "use_tmpfs": False, + "disable_net_host": True, + "run_by_hash_total": run_by_hash_total, + "run_by_hash_num": run_by_hash_num, } + def get_env_for_runner(build_path, repo_path, result_path, work_path): - binary_path = os.path.join(build_path, 'clickhouse') - odbc_bridge_path = os.path.join(build_path, 'clickhouse-odbc-bridge') - library_bridge_path = os.path.join(build_path, 'clickhouse-library-bridge') + binary_path = os.path.join(build_path, "clickhouse") + odbc_bridge_path = os.path.join(build_path, "clickhouse-odbc-bridge") + library_bridge_path = os.path.join(build_path, "clickhouse-library-bridge") my_env = os.environ.copy() my_env["CLICKHOUSE_TESTS_BUILD_PATH"] = build_path @@ -68,25 +82,30 @@ def get_env_for_runner(build_path, repo_path, result_path, work_path): my_env["CLICKHOUSE_TESTS_RESULT_PATH"] = result_path my_env["CLICKHOUSE_TESTS_BASE_CONFIG_DIR"] = f"{repo_path}/programs/server" my_env["CLICKHOUSE_TESTS_JSON_PARAMS_PATH"] = os.path.join(work_path, "params.json") - my_env["CLICKHOUSE_TESTS_RUNNER_RESTART_DOCKER"] = '0' + my_env["CLICKHOUSE_TESTS_RUNNER_RESTART_DOCKER"] = "0" return my_env + def process_results(result_folder): test_results = [] additional_files = [] # Just upload all files from result_folder. # If task provides processed results, then it's responsible for content of result_folder. 
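Aside: the integration check hands its configuration to the test runner through a params.json file, serialized a bit further down from the get_json_params_dict helper above. Roughly what that payload looks like, expressed as the Python dict being dumped; every value (and the exact shape of the docker images field) is invented here purely for illustration:

    # Hypothetical contents of params.json before json.dumps().
    params = {
        "context_name": "Integration tests (asan) [1/3]",
        "commit": "0123456789abcdef0123456789abcdef01234567",
        "pull_request": 12345,
        "pr_info": {"changed_files": ["tests/integration/test_storage_s3/test.py"]},
        "docker_images_with_versions": {"clickhouse/integration-test": "latest"},  # shape assumed
        "shuffle_test_groups": False,
        "use_tmpfs": False,
        "disable_net_host": True,
        "run_by_hash_total": 3,
        "run_by_hash_num": 0,
    }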
if os.path.exists(result_folder): - test_files = [f for f in os.listdir(result_folder) if os.path.isfile(os.path.join(result_folder, f))] + test_files = [ + f + for f in os.listdir(result_folder) + if os.path.isfile(os.path.join(result_folder, f)) + ] additional_files = [os.path.join(result_folder, f) for f in test_files] status = [] status_path = os.path.join(result_folder, "check_status.tsv") if os.path.exists(status_path): logging.info("Found test_results.tsv") - with open(status_path, 'r', encoding='utf-8') as status_file: - status = list(csv.reader(status_file, delimiter='\t')) + with open(status_path, "r", encoding="utf-8") as status_file: + status = list(csv.reader(status_file, delimiter="\t")) if len(status) != 1 or len(status[0]) != 2: logging.info("Files in result folder %s", os.listdir(result_folder)) @@ -95,13 +114,31 @@ def process_results(result_folder): results_path = os.path.join(result_folder, "test_results.tsv") if os.path.exists(results_path): - with open(results_path, 'r', encoding='utf-8') as results_file: - test_results = list(csv.reader(results_file, delimiter='\t')) + with open(results_path, "r", encoding="utf-8") as results_file: + test_results = list(csv.reader(results_file, delimiter="\t")) if len(test_results) == 0: return "error", "Empty test_results.tsv", test_results, additional_files return state, description, test_results, additional_files + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("check_name") + parser.add_argument( + "--validate-bugfix", + action="store_true", + help="Check that added tests failed on latest stable", + ) + parser.add_argument( + "--post-commit-status", + default="commit_status", + choices=["commit_status", "file"], + help="Where to public post commit status", + ) + return parser.parse_args() + + if __name__ == "__main__": logging.basicConfig(level=logging.INFO) @@ -111,12 +148,16 @@ if __name__ == "__main__": repo_path = REPO_COPY reports_path = REPORTS_PATH - check_name = sys.argv[1] + args = parse_args() + check_name = args.check_name + validate_bugix_check = args.validate_bugfix - if 'RUN_BY_HASH_NUM' in os.environ: - run_by_hash_num = int(os.getenv('RUN_BY_HASH_NUM')) - run_by_hash_total = int(os.getenv('RUN_BY_HASH_TOTAL')) - check_name_with_group = check_name + f' [{run_by_hash_num + 1}/{run_by_hash_total}]' + if "RUN_BY_HASH_NUM" in os.environ: + run_by_hash_num = int(os.getenv("RUN_BY_HASH_NUM")) + run_by_hash_total = int(os.getenv("RUN_BY_HASH_TOTAL")) + check_name_with_group = ( + check_name + f" [{run_by_hash_num + 1}/{run_by_hash_total}]" + ) else: run_by_hash_num = 0 run_by_hash_total = 0 @@ -125,8 +166,19 @@ if __name__ == "__main__": if not os.path.exists(temp_path): os.makedirs(temp_path) - is_flaky_check = 'flaky' in check_name - pr_info = PRInfo(need_changed_files=is_flaky_check) + is_flaky_check = "flaky" in check_name + pr_info = PRInfo(need_changed_files=is_flaky_check or validate_bugix_check) + + if validate_bugix_check and "pr-bugfix" not in pr_info.labels: + if args.post_commit_status == "file": + post_commit_status_to_file( + os.path.join(temp_path, "post_commit_status.tsv"), + "Skipped (no pr-bugfix)", + "success", + "null", + ) + logging.info("Skipping '%s' (no pr-bugfix)", check_name) + sys.exit(0) gh = Github(get_best_robot_token()) @@ -149,13 +201,26 @@ if __name__ == "__main__": if not os.path.exists(build_path): os.makedirs(build_path) - download_all_deb_packages(check_name, reports_path, build_path) + if validate_bugix_check: + download_previous_release(build_path) + 
else: + download_all_deb_packages(check_name, reports_path, build_path) my_env = get_env_for_runner(build_path, repo_path, result_path, work_path) - json_path = os.path.join(work_path, 'params.json') - with open(json_path, 'w', encoding='utf-8') as json_params: - json_params.write(json.dumps(get_json_params_dict(check_name, pr_info, images_with_versions, run_by_hash_total, run_by_hash_num))) + json_path = os.path.join(work_path, "params.json") + with open(json_path, "w", encoding="utf-8") as json_params: + json_params.write( + json.dumps( + get_json_params_dict( + check_name, + pr_info, + images_with_versions, + run_by_hash_total, + run_by_hash_num, + ) + ) + ) output_path_log = os.path.join(result_path, "main_script_log.txt") @@ -172,14 +237,46 @@ if __name__ == "__main__": subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) state, description, test_results, additional_logs = process_results(result_path) + state = override_status(state, check_name, validate_bugix_check) ch_helper = ClickHouseHelper() mark_flaky_tests(ch_helper, check_name, test_results) - s3_helper = S3Helper('https://s3.amazonaws.com') - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [output_path_log] + additional_logs, check_name_with_group, False) - print(f"::notice ::Report url: {report_url}") - post_commit_status(gh, pr_info.sha, check_name_with_group, description, state, report_url) + s3_helper = S3Helper("https://s3.amazonaws.com") + report_url = upload_results( + s3_helper, + pr_info.number, + pr_info.sha, + test_results, + [output_path_log] + additional_logs, + check_name_with_group, + False, + ) - prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, check_name_with_group) + print(f"::notice:: {check_name} Report url: {report_url}") + if args.post_commit_status == "commit_status": + post_commit_status( + gh, pr_info.sha, check_name_with_group, description, state, report_url + ) + elif args.post_commit_status == "file": + post_commit_status_to_file( + os.path.join(temp_path, "post_commit_status.tsv"), + description, + state, + report_url, + ) + else: + raise Exception( + f'Unknown post_commit_status option "{args.post_commit_status}"' + ) + + prepared_events = prepare_tests_results_for_clickhouse( + pr_info, + test_results, + state, + stopwatch.duration_seconds, + stopwatch.start_time_str, + report_url, + check_name_with_group, + ) ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/keeper_jepsen_check.py b/tests/ci/keeper_jepsen_check.py index 70d1ced6fe8..24d720e67ab 100644 --- a/tests/ci/keeper_jepsen_check.py +++ b/tests/ci/keeper_jepsen_check.py @@ -24,10 +24,10 @@ from ssh import SSHKey from build_download_helper import get_build_name_for_check from rerun_helper import RerunHelper -JEPSEN_GROUP_NAME = 'jepsen_group' +JEPSEN_GROUP_NAME = "jepsen_group" DESIRED_INSTANCE_COUNT = 3 -IMAGE_NAME = 'clickhouse/keeper-jepsen-test' -CHECK_NAME = 'ClickHouse Keeper Jepsen (actions)' +IMAGE_NAME = "clickhouse/keeper-jepsen-test" +CHECK_NAME = "ClickHouse Keeper Jepsen (actions)" SUCCESSFUL_TESTS_ANCHOR = "# Successful tests" @@ -35,45 +35,58 @@ INTERMINATE_TESTS_ANCHOR = "# Indeterminate tests" CRASHED_TESTS_ANCHOR = "# Crashed tests" FAILED_TESTS_ANCHOR = "# Failed tests" + def _parse_jepsen_output(path): test_results = [] - current_type = '' - with open(path, 'r') as f: + current_type = "" + with open(path, 
"r") as f: for line in f: if SUCCESSFUL_TESTS_ANCHOR in line: - current_type = 'OK' + current_type = "OK" elif INTERMINATE_TESTS_ANCHOR in line or CRASHED_TESTS_ANCHOR in line: - current_type = 'ERROR' + current_type = "ERROR" elif FAILED_TESTS_ANCHOR in line: - current_type = 'FAIL' + current_type = "FAIL" - if (line.startswith('store/clickhouse-keeper') or line.startswith('clickhouse-keeper')) and current_type: + if ( + line.startswith("store/clickhouse-keeper") + or line.startswith("clickhouse-keeper") + ) and current_type: test_results.append((line.strip(), current_type)) return test_results + def get_autoscaling_group_instances_ids(asg_client, group_name): - group_description = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=[group_name]) - our_group = group_description['AutoScalingGroups'][0] + group_description = asg_client.describe_auto_scaling_groups( + AutoScalingGroupNames=[group_name] + ) + our_group = group_description["AutoScalingGroups"][0] instance_ids = [] - for instance in our_group['Instances']: - if instance['LifecycleState'] == 'InService' and instance['HealthStatus'] == 'Healthy': - instance_ids.append(instance['InstanceId']) + for instance in our_group["Instances"]: + if ( + instance["LifecycleState"] == "InService" + and instance["HealthStatus"] == "Healthy" + ): + instance_ids.append(instance["InstanceId"]) return instance_ids + def get_instances_addresses(ec2_client, instance_ids): - ec2_response = ec2_client.describe_instances(InstanceIds = instance_ids) + ec2_response = ec2_client.describe_instances(InstanceIds=instance_ids) instance_ips = [] - for instances in ec2_response['Reservations']: - for ip in instances['Instances']: - instance_ips.append(ip['PrivateIpAddress']) + for instances in ec2_response["Reservations"]: + for ip in instances["Instances"]: + instance_ips.append(ip["PrivateIpAddress"]) return instance_ips def prepare_autoscaling_group_and_get_hostnames(): - asg_client = boto3.client('autoscaling', region_name='us-east-1') - asg_client.set_desired_capacity(AutoScalingGroupName=JEPSEN_GROUP_NAME, DesiredCapacity=DESIRED_INSTANCE_COUNT) + asg_client = boto3.client("autoscaling", region_name="us-east-1") + asg_client.set_desired_capacity( + AutoScalingGroupName=JEPSEN_GROUP_NAME, DesiredCapacity=DESIRED_INSTANCE_COUNT + ) instances = get_autoscaling_group_instances_ids(asg_client, JEPSEN_GROUP_NAME) counter = 0 @@ -84,13 +97,15 @@ def prepare_autoscaling_group_and_get_hostnames(): if counter > 30: raise Exception("Cannot wait autoscaling group") - ec2_client = boto3.client('ec2', region_name='us-east-1') + ec2_client = boto3.client("ec2", region_name="us-east-1") return get_instances_addresses(ec2_client, instances) def clear_autoscaling_group(): - asg_client = boto3.client('autoscaling', region_name='us-east-1') - asg_client.set_desired_capacity(AutoScalingGroupName=JEPSEN_GROUP_NAME, DesiredCapacity=0) + asg_client = boto3.client("autoscaling", region_name="us-east-1") + asg_client.set_desired_capacity( + AutoScalingGroupName=JEPSEN_GROUP_NAME, DesiredCapacity=0 + ) instances = get_autoscaling_group_instances_ids(asg_client, JEPSEN_GROUP_NAME) counter = 0 while len(instances) > 0: @@ -103,15 +118,28 @@ def clear_autoscaling_group(): def save_nodes_to_file(instances, temp_path): nodes_path = os.path.join(temp_path, "nodes.txt") - with open(nodes_path, 'w') as f: + with open(nodes_path, "w") as f: f.write("\n".join(instances)) f.flush() return nodes_path -def get_run_command(ssh_auth_sock, ssh_sock_dir, pr_info, nodes_path, repo_path, 
build_url, result_path, docker_image): - return f"docker run --network=host -v '{ssh_sock_dir}:{ssh_sock_dir}' -e SSH_AUTH_SOCK={ssh_auth_sock} " \ - f"-e PR_TO_TEST={pr_info.number} -e SHA_TO_TEST={pr_info.sha} -v '{nodes_path}:/nodes.txt' -v {result_path}:/test_output " \ - f"-e 'CLICKHOUSE_PACKAGE={build_url}' -v '{repo_path}:/ch' -e 'CLICKHOUSE_REPO_PATH=/ch' -e NODES_USERNAME=ubuntu {docker_image}" + +def get_run_command( + ssh_auth_sock, + ssh_sock_dir, + pr_info, + nodes_path, + repo_path, + build_url, + result_path, + docker_image, +): + return ( + f"docker run --network=host -v '{ssh_sock_dir}:{ssh_sock_dir}' -e SSH_AUTH_SOCK={ssh_auth_sock} " + f"-e PR_TO_TEST={pr_info.number} -e SHA_TO_TEST={pr_info.sha} -v '{nodes_path}:/nodes.txt' -v {result_path}:/test_output " + f"-e 'CLICKHOUSE_PACKAGE={build_url}' -v '{repo_path}:/ch' -e 'CLICKHOUSE_REPO_PATH=/ch' -e NODES_USERNAME=ubuntu {docker_image}" + ) + if __name__ == "__main__": logging.basicConfig(level=logging.INFO) @@ -120,9 +148,14 @@ if __name__ == "__main__": pr_info = PRInfo() - logging.info("Start at PR number %s, commit sha %s labels %s", pr_info.number, pr_info.sha, pr_info.labels) + logging.info( + "Start at PR number %s, commit sha %s labels %s", + pr_info.number, + pr_info.sha, + pr_info.labels, + ) - if pr_info.number != 0 and 'jepsen-test' not in pr_info.labels: + if pr_info.number != 0 and "jepsen-test" not in pr_info.labels: logging.info("Not jepsen test label in labels list, skipping") sys.exit(0) @@ -167,13 +200,22 @@ if __name__ == "__main__": head = requests.head(build_url) counter += 1 if counter >= 180: - post_commit_status(gh, pr_info.sha, CHECK_NAME, "Cannot fetch build to run", "error", "") - raise Exception("Cannot fetch build") + logging.warning("Cannot fetch build in 30 minutes, exiting") + sys.exit(0) - with SSHKey(key_value=get_parameter_from_ssm("jepsen_ssh_key") + '\n'): - ssh_auth_sock = os.environ['SSH_AUTH_SOCK'] + with SSHKey(key_value=get_parameter_from_ssm("jepsen_ssh_key") + "\n"): + ssh_auth_sock = os.environ["SSH_AUTH_SOCK"] auth_sock_dir = os.path.dirname(ssh_auth_sock) - cmd = get_run_command(ssh_auth_sock, auth_sock_dir, pr_info, nodes_path, REPO_COPY, build_url, result_path, docker_image) + cmd = get_run_command( + ssh_auth_sock, + auth_sock_dir, + pr_info, + nodes_path, + REPO_COPY, + build_url, + result_path, + docker_image, + ) logging.info("Going to run jepsen: %s", cmd) run_log_path = os.path.join(TEMP_PATH, "runlog.log") @@ -185,31 +227,49 @@ if __name__ == "__main__": else: logging.info("Run failed") - status = 'success' - description = 'No invalid analysis found ヽ(‘ー`)ノ' - jepsen_log_path = os.path.join(result_path, 'jepsen_run_all_tests.log') + status = "success" + description = "No invalid analysis found ヽ(‘ー`)ノ" + jepsen_log_path = os.path.join(result_path, "jepsen_run_all_tests.log") additional_data = [] try: test_result = _parse_jepsen_output(jepsen_log_path) - if any(r[1] == 'FAIL' for r in test_result): - status = 'failure' - description = 'Found invalid analysis (ノಥ益ಥ)ノ ┻━┻' + if any(r[1] == "FAIL" for r in test_result): + status = "failure" + description = "Found invalid analysis (ノಥ益ಥ)ノ ┻━┻" - compress_fast(os.path.join(result_path, 'store'), os.path.join(result_path, 'jepsen_store.tar.gz')) - additional_data.append(os.path.join(result_path, 'jepsen_store.tar.gz')) + compress_fast( + os.path.join(result_path, "store"), + os.path.join(result_path, "jepsen_store.tar.gz"), + ) + additional_data.append(os.path.join(result_path, "jepsen_store.tar.gz")) except Exception as 
ex: print("Exception", ex) - status = 'failure' - description = 'No Jepsen output log' - test_result = [('No Jepsen output log', 'FAIL')] + status = "failure" + description = "No Jepsen output log" + test_result = [("No Jepsen output log", "FAIL")] - s3_helper = S3Helper('https://s3.amazonaws.com') - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_result, [run_log_path] + additional_data, CHECK_NAME) + s3_helper = S3Helper("https://s3.amazonaws.com") + report_url = upload_results( + s3_helper, + pr_info.number, + pr_info.sha, + test_result, + [run_log_path] + additional_data, + CHECK_NAME, + ) print(f"::notice ::Report url: {report_url}") post_commit_status(gh, pr_info.sha, CHECK_NAME, description, status, report_url) ch_helper = ClickHouseHelper() - prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_result, status, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, CHECK_NAME) + prepared_events = prepare_tests_results_for_clickhouse( + pr_info, + test_result, + status, + stopwatch.duration_seconds, + stopwatch.start_time_str, + report_url, + CHECK_NAME, + ) ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) clear_autoscaling_group() diff --git a/tests/ci/performance_comparison_check.py b/tests/ci/performance_comparison_check.py index 2700c747b12..c6ce86b2ce1 100644 --- a/tests/ci/performance_comparison_check.py +++ b/tests/ci/performance_comparison_check.py @@ -11,6 +11,7 @@ import re from github import Github +from env_helper import GITHUB_RUN_URL from pr_info import PRInfo from s3_helper import S3Helper from get_robot_token import get_best_robot_token @@ -19,14 +20,26 @@ from commit_status_helper import get_commit, post_commit_status from tee_popen import TeePopen from rerun_helper import RerunHelper -IMAGE_NAME = 'clickhouse/performance-comparison' +IMAGE_NAME = "clickhouse/performance-comparison" -def get_run_command(workspace, result_path, repo_tests_path, pr_to_test, sha_to_test, additional_env, image): - return f"docker run --privileged --volume={workspace}:/workspace --volume={result_path}:/output " \ - f"--volume={repo_tests_path}:/usr/share/clickhouse-test " \ - f"--cap-add syslog --cap-add sys_admin --cap-add sys_rawio " \ - f"-e PR_TO_TEST={pr_to_test} -e SHA_TO_TEST={sha_to_test} {additional_env} " \ + +def get_run_command( + workspace, + result_path, + repo_tests_path, + pr_to_test, + sha_to_test, + additional_env, + image, +): + return ( + f"docker run --privileged --volume={workspace}:/workspace --volume={result_path}:/output " + f"--volume={repo_tests_path}:/usr/share/clickhouse-test " + f"--cap-add syslog --cap-add sys_admin --cap-add sys_rawio " + f"-e PR_TO_TEST={pr_to_test} -e SHA_TO_TEST={sha_to_test} {additional_env} " f"{image}" + ) + class RamDrive: def __init__(self, path, size): @@ -37,11 +50,14 @@ class RamDrive: if not os.path.exists(self.path): os.makedirs(self.path) - subprocess.check_call(f"sudo mount -t tmpfs -o rw,size={self.size} tmpfs {self.path}", shell=True) + subprocess.check_call( + f"sudo mount -t tmpfs -o rw,size={self.size} tmpfs {self.path}", shell=True + ) def __exit__(self, exc_type, exc_val, exc_tb): subprocess.check_call(f"sudo umount {self.path}", shell=True) + if __name__ == "__main__": logging.basicConfig(level=logging.INFO) temp_path = os.getenv("TEMP_PATH", os.path.abspath(".")) @@ -49,7 +65,7 @@ if __name__ == "__main__": repo_tests_path = os.path.join(repo_path, "tests") ramdrive_path = os.getenv("RAMDRIVE_PATH", os.path.join(temp_path, 
"ramdrive")) # currently unused, doesn't make tests more stable - ramdrive_size = os.getenv("RAMDRIVE_SIZE", '0G') + ramdrive_size = os.getenv("RAMDRIVE_SIZE", "0G") reports_path = os.getenv("REPORTS_PATH", "./reports") check_name = sys.argv[1] @@ -57,14 +73,14 @@ if __name__ == "__main__": if not os.path.exists(temp_path): os.makedirs(temp_path) - with open(os.getenv('GITHUB_EVENT_PATH'), 'r', encoding='utf-8') as event_file: + with open(os.getenv("GITHUB_EVENT_PATH"), "r", encoding="utf-8") as event_file: event = json.load(event_file) gh = Github(get_best_robot_token()) pr_info = PRInfo(event) commit = get_commit(gh, pr_info.sha) - docker_env = '' + docker_env = "" docker_env += " -e S3_URL=https://s3.amazonaws.com/clickhouse-builds" @@ -73,15 +89,18 @@ if __name__ == "__main__": else: pr_link = f"https://github.com/ClickHouse/ClickHouse/pull/{pr_info.number}" - task_url = f"https://github.com/ClickHouse/ClickHouse/actions/runs/{os.getenv('GITHUB_RUN_ID')}" - docker_env += ' -e CHPC_ADD_REPORT_LINKS="Job (actions) Tested commit"'.format( - task_url, pr_link) + docker_env += ( + f' -e CHPC_ADD_REPORT_LINKS="' + f'Job (actions) Tested commit"' + ) - if 'RUN_BY_HASH_TOTAL' in os.environ: - run_by_hash_total = int(os.getenv('RUN_BY_HASH_TOTAL')) - run_by_hash_num = int(os.getenv('RUN_BY_HASH_NUM')) - docker_env += f' -e CHPC_TEST_RUN_BY_HASH_TOTAL={run_by_hash_total} -e CHPC_TEST_RUN_BY_HASH_NUM={run_by_hash_num}' - check_name_with_group = check_name + f' [{run_by_hash_num + 1}/{run_by_hash_total}]' + if "RUN_BY_HASH_TOTAL" in os.environ: + run_by_hash_total = int(os.getenv("RUN_BY_HASH_TOTAL")) + run_by_hash_num = int(os.getenv("RUN_BY_HASH_NUM")) + docker_env += f" -e CHPC_TEST_RUN_BY_HASH_TOTAL={run_by_hash_total} -e CHPC_TEST_RUN_BY_HASH_NUM={run_by_hash_num}" + check_name_with_group = ( + check_name + f" [{run_by_hash_num + 1}/{run_by_hash_total}]" + ) else: check_name_with_group = check_name @@ -92,12 +111,20 @@ if __name__ == "__main__": docker_image = get_image_with_version(reports_path, IMAGE_NAME) - #with RamDrive(ramdrive_path, ramdrive_size): + # with RamDrive(ramdrive_path, ramdrive_size): result_path = ramdrive_path if not os.path.exists(result_path): os.makedirs(result_path) - run_command = get_run_command(result_path, result_path, repo_tests_path, pr_info.number, pr_info.sha, docker_env, docker_image) + run_command = get_run_command( + result_path, + result_path, + repo_tests_path, + pr_info.number, + pr_info.sha, + docker_env, + docker_image, + ) logging.info("Going to run command %s", run_command) run_log_path = os.path.join(temp_path, "runlog.log") with TeePopen(run_command, run_log_path) as process: @@ -110,74 +137,83 @@ if __name__ == "__main__": subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) paths = { - 'compare.log': os.path.join(result_path, 'compare.log'), - 'output.7z': os.path.join(result_path, 'output.7z'), - 'report.html': os.path.join(result_path, 'report.html'), - 'all-queries.html': os.path.join(result_path, 'all-queries.html'), - 'queries.rep': os.path.join(result_path, 'queries.rep'), - 'all-query-metrics.tsv': os.path.join(result_path, 'report/all-query-metrics.tsv'), - 'runlog.log': run_log_path, + "compare.log": os.path.join(result_path, "compare.log"), + "output.7z": os.path.join(result_path, "output.7z"), + "report.html": os.path.join(result_path, "report.html"), + "all-queries.html": os.path.join(result_path, "all-queries.html"), + "queries.rep": os.path.join(result_path, "queries.rep"), + "all-query-metrics.tsv": 
os.path.join( + result_path, "report/all-query-metrics.tsv" + ), + "runlog.log": run_log_path, } - check_name_prefix = check_name_with_group.lower().replace(' ', '_').replace('(', '_').replace(')', '_').replace(',', '_') - s3_prefix = f'{pr_info.number}/{pr_info.sha}/{check_name_prefix}/' - s3_helper = S3Helper('https://s3.amazonaws.com') + check_name_prefix = ( + check_name_with_group.lower() + .replace(" ", "_") + .replace("(", "_") + .replace(")", "_") + .replace(",", "_") + ) + s3_prefix = f"{pr_info.number}/{pr_info.sha}/{check_name_prefix}/" + s3_helper = S3Helper("https://s3.amazonaws.com") for file in paths: try: - paths[file] = s3_helper.upload_test_report_to_s3(paths[file], - s3_prefix + file) + paths[file] = s3_helper.upload_test_report_to_s3( + paths[file], s3_prefix + file + ) except Exception: - paths[file] = '' + paths[file] = "" traceback.print_exc() # Upload all images and flamegraphs to S3 try: s3_helper.upload_test_folder_to_s3( - os.path.join(result_path, 'images'), - s3_prefix + 'images' + os.path.join(result_path, "images"), s3_prefix + "images" ) except Exception: traceback.print_exc() # Try to fetch status from the report. - status = '' - message = '' + status = "" + message = "" try: - report_text = open(os.path.join(result_path, 'report.html'), 'r').read() - status_match = re.search('', report_text) - message_match = re.search('', report_text) + report_text = open(os.path.join(result_path, "report.html"), "r").read() + status_match = re.search("", report_text) + message_match = re.search("", report_text) if status_match: status = status_match.group(1).strip() if message_match: message = message_match.group(1).strip() # TODO: Remove me, always green mode for the first time - status = 'success' + status = "success" except Exception: traceback.print_exc() - status = 'failure' - message = 'Failed to parse the report.' + status = "failure" + message = "Failed to parse the report." if not status: - status = 'failure' - message = 'No status in report.' + status = "failure" + message = "No status in report." elif not message: - status = 'failure' - message = 'No message in report.' + status = "failure" + message = "No message in report." 
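Aside: a quick worked example of the S3 prefix sanitization a few lines above; the PR number, commit sha and check name are made up:

    # Hypothetical inputs, for illustration only.
    pr_number, sha = 12345, "0123456789abcdef"
    check_name_with_group = "Performance Comparison (actions)"

    check_name_prefix = (
        check_name_with_group.lower()
        .replace(" ", "_")
        .replace("(", "_")
        .replace(")", "_")
        .replace(",", "_")
    )
    s3_prefix = f"{pr_number}/{sha}/{check_name_prefix}/"
    print(s3_prefix)  # 12345/0123456789abcdef/performance_comparison__actions_/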
- report_url = task_url + report_url = GITHUB_RUN_URL - if paths['runlog.log']: - report_url = paths['runlog.log'] + if paths["runlog.log"]: + report_url = paths["runlog.log"] - if paths['compare.log']: - report_url = paths['compare.log'] + if paths["compare.log"]: + report_url = paths["compare.log"] - if paths['output.7z']: - report_url = paths['output.7z'] + if paths["output.7z"]: + report_url = paths["output.7z"] - if paths['report.html']: - report_url = paths['report.html'] + if paths["report.html"]: + report_url = paths["report.html"] - - post_commit_status(gh, pr_info.sha, check_name_with_group, message, status, report_url) + post_commit_status( + gh, pr_info.sha, check_name_with_group, message, status, report_url + ) diff --git a/tests/ci/pr_info.py b/tests/ci/pr_info.py index 378804874aa..ee4399792ae 100644 --- a/tests/ci/pr_info.py +++ b/tests/ci/pr_info.py @@ -8,7 +8,7 @@ from build_download_helper import get_with_retries from env_helper import ( GITHUB_REPOSITORY, GITHUB_SERVER_URL, - GITHUB_RUN_ID, + GITHUB_RUN_URL, GITHUB_EVENT_PATH, ) @@ -78,7 +78,7 @@ class PRInfo: else: github_event = PRInfo.default_event.copy() self.event = github_event - self.changed_files = set([]) + self.changed_files = set() self.body = "" ref = github_event.get("ref", "refs/head/master") if ref and ref.startswith("refs/heads/"): @@ -111,7 +111,7 @@ class PRInfo: self.sha = github_event["pull_request"]["head"]["sha"] repo_prefix = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}" - self.task_url = f"{repo_prefix}/actions/runs/{GITHUB_RUN_ID or '0'}" + self.task_url = GITHUB_RUN_URL self.repo_full_name = GITHUB_REPOSITORY self.commit_html_url = f"{repo_prefix}/commits/{self.sha}" @@ -142,7 +142,7 @@ class PRInfo: self.sha = github_event["after"] pull_request = get_pr_for_commit(self.sha, github_event["ref"]) repo_prefix = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}" - self.task_url = f"{repo_prefix}/actions/runs/{GITHUB_RUN_ID or '0'}" + self.task_url = GITHUB_RUN_URL self.commit_html_url = f"{repo_prefix}/commits/{self.sha}" self.repo_full_name = GITHUB_REPOSITORY if pull_request is None or pull_request["state"] == "closed": @@ -180,7 +180,7 @@ class PRInfo: self.number = 0 self.labels = {} repo_prefix = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}" - self.task_url = f"{repo_prefix}/actions/runs/{GITHUB_RUN_ID or '0'}" + self.task_url = GITHUB_RUN_URL self.commit_html_url = f"{repo_prefix}/commits/{self.sha}" self.repo_full_name = GITHUB_REPOSITORY self.pr_html_url = f"{repo_prefix}/commits/{ref}" @@ -209,6 +209,7 @@ class PRInfo: else: diff_object = PatchSet(response.text) self.changed_files = {f.path for f in diff_object} + print("Fetched info about %d changed files", len(self.changed_files)) def get_dict(self): return { diff --git a/tests/ci/report.py b/tests/ci/report.py index c8ba46da106..c79a5406998 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -92,16 +92,27 @@ HTML_TEST_PART = """ """ -BASE_HEADERS = ['Test name', 'Test status'] +BASE_HEADERS = ["Test name", "Test status"] + + +class ReportColorTheme: + class ReportColor: + yellow = "#FFB400" + red = "#F00" + green = "#0A0" + blue = "#00B4FF" + + default = (ReportColor.green, ReportColor.red, ReportColor.yellow) + bugfixcheck = (ReportColor.yellow, ReportColor.blue, ReportColor.blue) def _format_header(header, branch_name, branch_url=None): - result = ' '.join([w.capitalize() for w in header.split(' ')]) + result = " ".join([w.capitalize() for w in header.split(" ")]) result = result.replace("Clickhouse", "ClickHouse") result = 
result.replace("clickhouse", "ClickHouse") - if 'ClickHouse' not in result: - result = 'ClickHouse ' + result - result += ' for ' + if "ClickHouse" not in result: + result = "ClickHouse " + result + result += " for " if branch_url: result += '{name}'.format(url=branch_url, name=branch_name) else: @@ -109,22 +120,28 @@ def _format_header(header, branch_name, branch_url=None): return result -def _get_status_style(status): +def _get_status_style(status, colortheme=None): + ok_statuses = ("OK", "success", "PASSED") + fail_statuses = ("FAIL", "failure", "error", "FAILED", "Timeout") + + if colortheme is None: + colortheme = ReportColorTheme.default + style = "font-weight: bold;" - if status in ('OK', 'success', 'PASSED'): - style += 'color: #0A0;' - elif status in ('FAIL', 'failure', 'error', 'FAILED', 'Timeout'): - style += 'color: #F00;' + if status in ok_statuses: + style += f"color: {colortheme[0]};" + elif status in fail_statuses: + style += f"color: {colortheme[1]};" else: - style += 'color: #FFB400;' + style += f"color: {colortheme[2]};" return style def _get_html_url_name(url): if isinstance(url, str): - return os.path.basename(url).replace('%2B', '+').replace('%20', ' ') + return os.path.basename(url).replace("%2B", "+").replace("%20", " ") if isinstance(url, tuple): - return url[1].replace('%2B', '+').replace('%20', ' ') + return url[1].replace("%2B", "+").replace("%20", " ") return None @@ -136,11 +153,24 @@ def _get_html_url(url): if isinstance(url, tuple): href, name = url[0], _get_html_url_name(url) if href and name: - return '{name}'.format(href=href, name=_get_html_url_name(url)) - return '' + return '{name}'.format( + href=href, name=_get_html_url_name(url) + ) + return "" -def create_test_html_report(header, test_result, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls=None, with_raw_logs=False): +def create_test_html_report( + header, + test_result, + raw_log_url, + task_url, + branch_url, + branch_name, + commit_url, + additional_urls=None, + with_raw_logs=False, + statuscolors=None, +): if additional_urls is None: additional_urls = [] @@ -164,11 +194,11 @@ def create_test_html_report(header, test_result, raw_log_url, task_url, branch_u has_test_logs = True row = "" - is_fail = test_status in ('FAIL', 'FLAKY') + is_fail = test_status in ("FAIL", "FLAKY") if is_fail and with_raw_logs and test_logs is not None: - row = "" + row = '' row += "" + test_name + "" - style = _get_status_style(test_status) + style = _get_status_style(test_status, colortheme=statuscolors) # Allow to quickly scroll to the first failure. is_fail_id = "" @@ -176,7 +206,13 @@ def create_test_html_report(header, test_result, raw_log_url, task_url, branch_u num_fails = num_fails + 1 is_fail_id = 'id="fail' + str(num_fails) + '" ' - row += ''.format(style) + test_status + "" + row += ( + "'.format(style) + + test_status + + "" + ) if test_time is not None: row += "" + test_time + "" @@ -188,24 +224,26 @@ def create_test_html_report(header, test_result, raw_log_url, task_url, branch_u row += "" rows_part += row if test_logs is not None and with_raw_logs: - row = "" + row = '' # TODO: compute colspan too - row += "

" + test_logs + "
" + row += '
' + test_logs + "
" row += "" rows_part += row headers = BASE_HEADERS if has_test_time: - headers.append('Test time, sec.') + headers.append("Test time, sec.") if has_test_logs and not with_raw_logs: - headers.append('Logs') + headers.append("Logs") - headers = ''.join(['' + h + '' for h in headers]) + headers = "".join(["" + h + "" for h in headers]) test_part = HTML_TEST_PART.format(headers=headers, rows=rows_part) else: test_part = "" - additional_html_urls = ' '.join([_get_html_url(url) for url in sorted(additional_urls, key=_get_html_url_name)]) + additional_html_urls = " ".join( + [_get_html_url(url) for url in sorted(additional_urls, key=_get_html_url_name)] + ) result = HTML_BASE_TEST_TEMPLATE.format( title=_format_header(header, branch_name), @@ -216,7 +254,7 @@ def create_test_html_report(header, test_result, raw_log_url, task_url, branch_u test_part=test_part, branch_name=branch_name, commit_url=commit_url, - additional_urls=additional_html_urls + additional_urls=additional_html_urls, ) return result @@ -280,9 +318,20 @@ tr:hover td {{filter: brightness(95%);}} LINK_TEMPLATE = '{text}' -def create_build_html_report(header, build_results, build_logs_urls, artifact_urls_list, task_url, branch_url, branch_name, commit_url): +def create_build_html_report( + header, + build_results, + build_logs_urls, + artifact_urls_list, + task_url, + branch_url, + branch_name, + commit_url, +): rows = "" - for (build_result, build_log_url, artifact_urls) in zip(build_results, build_logs_urls, artifact_urls_list): + for (build_result, build_log_url, artifact_urls) in zip( + build_results, build_logs_urls, artifact_urls_list + ): row = "" row += "{}".format(build_result.compiler) if build_result.build_type: @@ -309,18 +358,20 @@ def create_build_html_report(header, build_results, build_logs_urls, artifact_ur if build_result.elapsed_seconds: delta = datetime.timedelta(seconds=build_result.elapsed_seconds) else: - delta = 'unknown' + delta = "unknown" - row += '{}'.format(str(delta)) + row += "{}".format(str(delta)) links = "" link_separator = "
" if artifact_urls: for artifact_url in artifact_urls: - links += LINK_TEMPLATE.format(text=_get_html_url_name(artifact_url), url=artifact_url) + links += LINK_TEMPLATE.format( + text=_get_html_url_name(artifact_url), url=artifact_url + ) links += link_separator if links: - links = links[:-len(link_separator)] + links = links[: -len(link_separator)] row += "{}".format(links) row += "" @@ -331,4 +382,5 @@ def create_build_html_report(header, build_results, build_logs_urls, artifact_ur rows=rows, task_url=task_url, branch_name=branch_name, - commit_url=commit_url) + commit_url=commit_url, + ) diff --git a/tests/ci/rerun_helper.py b/tests/ci/rerun_helper.py index 0ba50334d28..35363593db6 100644 --- a/tests/ci/rerun_helper.py +++ b/tests/ci/rerun_helper.py @@ -2,6 +2,7 @@ from commit_status_helper import get_commit + def _filter_statuses(statuses): """ Squash statuses to latest state @@ -19,7 +20,6 @@ def _filter_statuses(statuses): class RerunHelper: - def __init__(self, gh, pr_info, check_name): self.gh = gh self.pr_info = pr_info @@ -30,6 +30,9 @@ class RerunHelper: def is_already_finished_by_status(self): # currently we agree even for failed statuses for status in self.statuses: - if self.check_name in status.context and status.state in ('success', 'failure'): + if self.check_name in status.context and status.state in ( + "success", + "failure", + ): return True return False diff --git a/tests/ci/run_check.py b/tests/ci/run_check.py index 5b89082532d..9c7ba13f8e4 100644 --- a/tests/ci/run_check.py +++ b/tests/ci/run_check.py @@ -5,7 +5,7 @@ import re from typing import Tuple from github import Github -from env_helper import GITHUB_RUN_ID, GITHUB_REPOSITORY, GITHUB_SERVER_URL +from env_helper import GITHUB_RUN_URL, GITHUB_REPOSITORY, GITHUB_SERVER_URL from pr_info import PRInfo from get_robot_token import get_best_robot_token from commit_status_helper import get_commit @@ -231,7 +231,7 @@ if __name__ == "__main__": ) sys.exit(1) - url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/actions/runs/{GITHUB_RUN_ID}" + url = GITHUB_RUN_URL if not can_run: print("::notice ::Cannot run") commit.create_status( diff --git a/tests/ci/s3_helper.py b/tests/ci/s3_helper.py index 902b97fdb95..91e67135f6f 100644 --- a/tests/ci/s3_helper.py +++ b/tests/ci/s3_helper.py @@ -34,30 +34,59 @@ def _flatten_list(lst): class S3Helper: def __init__(self, host): - self.session = boto3.session.Session(region_name='us-east-1') - self.client = self.session.client('s3', endpoint_url=host) + self.session = boto3.session.Session(region_name="us-east-1") + self.client = self.session.client("s3", endpoint_url=host) def _upload_file_to_s3(self, bucket_name, file_path, s3_path): - logging.debug("Start uploading %s to bucket=%s path=%s", file_path, bucket_name, s3_path) + logging.debug( + "Start uploading %s to bucket=%s path=%s", file_path, bucket_name, s3_path + ) metadata = {} if os.path.getsize(file_path) < 64 * 1024 * 1024: - if s3_path.endswith("txt") or s3_path.endswith("log") or s3_path.endswith("err") or s3_path.endswith("out"): - metadata['ContentType'] = "text/plain; charset=utf-8" - logging.info("Content type %s for file path %s", "text/plain; charset=utf-8", file_path) + if ( + s3_path.endswith("txt") + or s3_path.endswith("log") + or s3_path.endswith("err") + or s3_path.endswith("out") + ): + metadata["ContentType"] = "text/plain; charset=utf-8" + logging.info( + "Content type %s for file path %s", + "text/plain; charset=utf-8", + file_path, + ) elif s3_path.endswith("html"): - metadata['ContentType'] = "text/html; 
charset=utf-8" - logging.info("Content type %s for file path %s", "text/html; charset=utf-8", file_path) + metadata["ContentType"] = "text/html; charset=utf-8" + logging.info( + "Content type %s for file path %s", + "text/html; charset=utf-8", + file_path, + ) elif s3_path.endswith("css"): - metadata['ContentType'] = "text/css; charset=utf-8" - logging.info("Content type %s for file path %s", "text/css; charset=utf-8", file_path) + metadata["ContentType"] = "text/css; charset=utf-8" + logging.info( + "Content type %s for file path %s", + "text/css; charset=utf-8", + file_path, + ) elif s3_path.endswith("js"): - metadata['ContentType'] = "text/javascript; charset=utf-8" - logging.info("Content type %s for file path %s", "text/css; charset=utf-8", file_path) + metadata["ContentType"] = "text/javascript; charset=utf-8" + logging.info( + "Content type %s for file path %s", + "text/css; charset=utf-8", + file_path, + ) else: logging.info("No content type provied for %s", file_path) else: - if re.search(r'\.(txt|log|err|out)$', s3_path) or re.search(r'\.log\..*(?{result_folder}/{RESULT_LOG_NAME}" + return ( + f"docker run --network=host --volume={build_path}:/package_folder" + f" --volume={server_log_folder}:/var/log/clickhouse-server" + f" --volume={result_folder}:/test_output" + f" {docker_image} >{result_folder}/{RESULT_LOG_NAME}" + ) if __name__ == "__main__": @@ -76,8 +85,8 @@ if __name__ == "__main__": for root, _, files in os.walk(reports_path): for f in files: - if f == 'changed_images.json': - images_path = os.path.join(root, 'changed_images.json') + if f == "changed_images.json": + images_path = os.path.join(root, "changed_images.json") break docker_image = get_image_with_version(reports_path, DOCKER_IMAGE) @@ -96,7 +105,9 @@ if __name__ == "__main__": if not os.path.exists(result_path): os.makedirs(result_path) - run_command = get_run_command(packages_path, result_path, server_log_path, docker_image) + run_command = get_run_command( + packages_path, result_path, server_log_path, docker_image + ) logging.info("Going to run command %s", run_command) with subprocess.Popen(run_command, shell=True) as process: @@ -110,13 +121,30 @@ if __name__ == "__main__": print("Result path", os.listdir(result_path)) print("Server log path", os.listdir(server_log_path)) - state, description, test_results, additional_logs = process_result(result_path, server_log_path) + state, description, test_results, additional_logs = process_result( + result_path, server_log_path + ) ch_helper = ClickHouseHelper() - s3_helper = S3Helper('https://s3.amazonaws.com') - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, additional_logs, CHECK_NAME) + s3_helper = S3Helper("https://s3.amazonaws.com") + report_url = upload_results( + s3_helper, + pr_info.number, + pr_info.sha, + test_results, + additional_logs, + CHECK_NAME, + ) print(f"::notice ::Report url: {report_url}") post_commit_status(gh, pr_info.sha, CHECK_NAME, description, state, report_url) - prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, CHECK_NAME) + prepared_events = prepare_tests_results_for_clickhouse( + pr_info, + test_results, + state, + stopwatch.duration_seconds, + stopwatch.start_time_str, + report_url, + CHECK_NAME, + ) ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/ssh.py b/tests/ci/ssh.py index f6309e31d0f..275f26fd65f 100644 --- a/tests/ci/ssh.py +++ 
b/tests/ci/ssh.py @@ -27,15 +27,19 @@ class SSHAgent: self._env_backup["SSH_OPTIONS"] = os.environ.get("SSH_OPTIONS") # set ENV from stdout of ssh-agent - for line in self._run(['ssh-agent']).splitlines(): + for line in self._run(["ssh-agent"]).splitlines(): name, _, value = line.partition(b"=") if _ == b"=": value = value.split(b";", 1)[0] self._env[name.decode()] = value.decode() os.environ[name.decode()] = value.decode() - ssh_options = "," + os.environ["SSH_OPTIONS"] if os.environ.get("SSH_OPTIONS") else "" - os.environ["SSH_OPTIONS"] = f"{ssh_options}UserKnownHostsFile=/dev/null,StrictHostKeyChecking=no" + ssh_options = ( + "," + os.environ["SSH_OPTIONS"] if os.environ.get("SSH_OPTIONS") else "" + ) + os.environ[ + "SSH_OPTIONS" + ] = f"{ssh_options}UserKnownHostsFile=/dev/null,StrictHostKeyChecking=no" def add(self, key): key_pub = self._key_pub(key) @@ -89,7 +93,13 @@ class SSHAgent: @staticmethod def _run(cmd, stdin=None): shell = isinstance(cmd, str) - with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE if stdin else None, shell=shell) as p: + with subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE if stdin else None, + shell=shell, + ) as p: stdout, stderr = p.communicate(stdin) if stdout.strip().decode() == "The agent has no identities.": @@ -101,6 +111,7 @@ class SSHAgent: return stdout + class SSHKey: def __init__(self, key_name=None, key_value=None): if key_name is None and key_value is None: diff --git a/tests/ci/stopwatch.py b/tests/ci/stopwatch.py index b6ae8674df1..db174550c03 100644 --- a/tests/ci/stopwatch.py +++ b/tests/ci/stopwatch.py @@ -2,7 +2,8 @@ import datetime -class Stopwatch(): + +class Stopwatch: def __init__(self): self.start_time = datetime.datetime.utcnow() self.start_time_str_value = self.start_time.strftime("%Y-%m-%d %H:%M:%S") diff --git a/tests/ci/termination_lambda/app.py b/tests/ci/termination_lambda/app.py index 5de3d1531f2..14a0b2d1250 100644 --- a/tests/ci/termination_lambda/app.py +++ b/tests/ci/termination_lambda/app.py @@ -8,18 +8,19 @@ import json import time from collections import namedtuple + def get_key_and_app_from_aws(): import boto3 + secret_name = "clickhouse_github_secret_key" session = boto3.session.Session() client = session.client( - service_name='secretsmanager', + service_name="secretsmanager", ) - get_secret_value_response = client.get_secret_value( - SecretId=secret_name - ) - data = json.loads(get_secret_value_response['SecretString']) - return data['clickhouse-app-key'], int(data['clickhouse-app-id']) + get_secret_value_response = client.get_secret_value(SecretId=secret_name) + data = json.loads(get_secret_value_response["SecretString"]) + return data["clickhouse-app-key"], int(data["clickhouse-app-id"]) + def get_installation_id(jwt_token): headers = { @@ -29,117 +30,152 @@ def get_installation_id(jwt_token): response = requests.get("https://api.github.com/app/installations", headers=headers) response.raise_for_status() data = response.json() - return data[0]['id'] + return data[0]["id"] + def get_access_token(jwt_token, installation_id): headers = { "Authorization": f"Bearer {jwt_token}", "Accept": "application/vnd.github.v3+json", } - response = requests.post(f"https://api.github.com/app/installations/{installation_id}/access_tokens", headers=headers) + response = requests.post( + f"https://api.github.com/app/installations/{installation_id}/access_tokens", + headers=headers, + ) response.raise_for_status() data = response.json() 
- return data['token'] + return data["token"] -RunnerDescription = namedtuple('RunnerDescription', ['id', 'name', 'tags', 'offline', 'busy']) +RunnerDescription = namedtuple( + "RunnerDescription", ["id", "name", "tags", "offline", "busy"] +) + def list_runners(access_token): headers = { "Authorization": f"token {access_token}", "Accept": "application/vnd.github.v3+json", } - response = requests.get("https://api.github.com/orgs/ClickHouse/actions/runners?per_page=100", headers=headers) + response = requests.get( + "https://api.github.com/orgs/ClickHouse/actions/runners?per_page=100", + headers=headers, + ) response.raise_for_status() data = response.json() - total_runners = data['total_count'] - runners = data['runners'] + total_runners = data["total_count"] + runners = data["runners"] total_pages = int(total_runners / 100 + 1) for i in range(2, total_pages + 1): - response = requests.get(f"https://api.github.com/orgs/ClickHouse/actions/runners?page={i}&per_page=100", headers=headers) + response = requests.get( + f"https://api.github.com/orgs/ClickHouse/actions/runners?page={i}&per_page=100", + headers=headers, + ) response.raise_for_status() data = response.json() - runners += data['runners'] + runners += data["runners"] print("Total runners", len(runners)) result = [] for runner in runners: - tags = [tag['name'] for tag in runner['labels']] - desc = RunnerDescription(id=runner['id'], name=runner['name'], tags=tags, - offline=runner['status']=='offline', busy=runner['busy']) + tags = [tag["name"] for tag in runner["labels"]] + desc = RunnerDescription( + id=runner["id"], + name=runner["name"], + tags=tags, + offline=runner["status"] == "offline", + busy=runner["busy"], + ) result.append(desc) return result + def push_metrics_to_cloudwatch(listed_runners, namespace): import boto3 - client = boto3.client('cloudwatch') + + client = boto3.client("cloudwatch") metrics_data = [] busy_runners = sum(1 for runner in listed_runners if runner.busy) - metrics_data.append({ - 'MetricName': 'BusyRunners', - 'Value': busy_runners, - 'Unit': 'Count', - }) + metrics_data.append( + { + "MetricName": "BusyRunners", + "Value": busy_runners, + "Unit": "Count", + } + ) total_active_runners = sum(1 for runner in listed_runners if not runner.offline) - metrics_data.append({ - 'MetricName': 'ActiveRunners', - 'Value': total_active_runners, - 'Unit': 'Count', - }) + metrics_data.append( + { + "MetricName": "ActiveRunners", + "Value": total_active_runners, + "Unit": "Count", + } + ) total_runners = len(listed_runners) - metrics_data.append({ - 'MetricName': 'TotalRunners', - 'Value': total_runners, - 'Unit': 'Count', - }) + metrics_data.append( + { + "MetricName": "TotalRunners", + "Value": total_runners, + "Unit": "Count", + } + ) if total_active_runners == 0: busy_ratio = 100 else: busy_ratio = busy_runners / total_active_runners * 100 - metrics_data.append({ - 'MetricName': 'BusyRunnersRatio', - 'Value': busy_ratio, - 'Unit': 'Percent', - }) + metrics_data.append( + { + "MetricName": "BusyRunnersRatio", + "Value": busy_ratio, + "Unit": "Percent", + } + ) - client.put_metric_data(Namespace='RunnersMetrics', MetricData=metrics_data) + client.put_metric_data(Namespace="RunnersMetrics", MetricData=metrics_data) def how_many_instances_to_kill(event_data): - data_array = event_data['CapacityToTerminate'] + data_array = event_data["CapacityToTerminate"] to_kill_by_zone = {} for av_zone in data_array: - zone_name = av_zone['AvailabilityZone'] - to_kill = av_zone['Capacity'] + zone_name = av_zone["AvailabilityZone"] + 
to_kill = av_zone["Capacity"] if zone_name not in to_kill_by_zone: to_kill_by_zone[zone_name] = 0 to_kill_by_zone[zone_name] += to_kill return to_kill_by_zone + def get_candidates_to_be_killed(event_data): - data_array = event_data['Instances'] + data_array = event_data["Instances"] instances_by_zone = {} for instance in data_array: - zone_name = instance['AvailabilityZone'] - instance_id = instance['InstanceId'] + zone_name = instance["AvailabilityZone"] + instance_id = instance["InstanceId"] if zone_name not in instances_by_zone: instances_by_zone[zone_name] = [] instances_by_zone[zone_name].append(instance_id) return instances_by_zone + def delete_runner(access_token, runner): headers = { "Authorization": f"token {access_token}", "Accept": "application/vnd.github.v3+json", } - response = requests.delete(f"https://api.github.com/orgs/ClickHouse/actions/runners/{runner.id}", headers=headers) + response = requests.delete( + f"https://api.github.com/orgs/ClickHouse/actions/runners/{runner.id}", + headers=headers, + ) response.raise_for_status() - print(f"Response code deleting {runner.name} with id {runner.id} is {response.status_code}") + print( + f"Response code deleting {runner.name} with id {runner.id} is {response.status_code}" + ) return response.status_code == 204 @@ -166,12 +202,16 @@ def main(github_secret_key, github_app_id, event): num_to_kill = to_kill_by_zone[zone] candidates = instances_by_zone[zone] if num_to_kill > len(candidates): - raise Exception(f"Required to kill {num_to_kill}, but have only {len(candidates)} candidates in AV {zone}") + raise Exception( + f"Required to kill {num_to_kill}, but have only {len(candidates)} candidates in AV {zone}" + ) delete_for_av = [] for candidate in candidates: if candidate not in set([runner.name for runner in runners]): - print(f"Candidate {candidate} was not in runners list, simply delete it") + print( + f"Candidate {candidate} was not in runners list, simply delete it" + ) instances_to_kill.append(candidate) for candidate in candidates: @@ -183,57 +223,76 @@ def main(github_secret_key, github_app_id, event): for runner in runners: if runner.name == candidate: if not runner.busy: - print(f"Runner {runner.name} is not busy and can be deleted from AV {zone}") + print( + f"Runner {runner.name} is not busy and can be deleted from AV {zone}" + ) delete_for_av.append(runner) else: print(f"Runner {runner.name} is busy, not going to delete it") break if len(delete_for_av) < num_to_kill: - print(f"Checked all candidates for av {zone}, get to delete {len(delete_for_av)}, but still cannot get required {num_to_kill}") + print( + f"Checked all candidates for av {zone}, get to delete {len(delete_for_av)}, but still cannot get required {num_to_kill}" + ) to_delete_runners += delete_for_av - print("Got instances to kill: ", ', '.join(instances_to_kill)) - print("Going to delete runners:", ', '.join([runner.name for runner in to_delete_runners])) + print("Got instances to kill: ", ", ".join(instances_to_kill)) + print( + "Going to delete runners:", + ", ".join([runner.name for runner in to_delete_runners]), + ) for runner in to_delete_runners: if delete_runner(access_token, runner): - print(f"Runner with name {runner.name} and id {runner.id} successfuly deleted from github") + print( + f"Runner with name {runner.name} and id {runner.id} successfuly deleted from github" + ) instances_to_kill.append(runner.name) else: print(f"Cannot delete {runner.name} from github") ## push metrics - #runners = list_runners(access_token) - 
#push_metrics_to_cloudwatch(runners, 'RunnersMetrics') + # runners = list_runners(access_token) + # push_metrics_to_cloudwatch(runners, 'RunnersMetrics') - response = { - "InstanceIDs": instances_to_kill - } + response = {"InstanceIDs": instances_to_kill} print(response) return response + def handler(event, context): private_key, app_id = get_key_and_app_from_aws() return main(private_key, app_id, event) + if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Get list of runners and their states') - parser.add_argument('-p', '--private-key-path', help='Path to file with private key') - parser.add_argument('-k', '--private-key', help='Private key') - parser.add_argument('-a', '--app-id', type=int, help='GitHub application ID', required=True) + parser = argparse.ArgumentParser(description="Get list of runners and their states") + parser.add_argument( + "-p", "--private-key-path", help="Path to file with private key" + ) + parser.add_argument("-k", "--private-key", help="Private key") + parser.add_argument( + "-a", "--app-id", type=int, help="GitHub application ID", required=True + ) args = parser.parse_args() if not args.private_key_path and not args.private_key: - print("Either --private-key-path or --private-key must be specified", file=sys.stderr) + print( + "Either --private-key-path or --private-key must be specified", + file=sys.stderr, + ) if args.private_key_path and args.private_key: - print("Either --private-key-path or --private-key must be specified", file=sys.stderr) + print( + "Either --private-key-path or --private-key must be specified", + file=sys.stderr, + ) if args.private_key: private_key = args.private_key else: - with open(args.private_key_path, 'r') as key_file: + with open(args.private_key_path, "r") as key_file: private_key = key_file.read() sample_event = { @@ -243,41 +302,41 @@ if __name__ == "__main__": { "AvailabilityZone": "us-east-1b", "Capacity": 1, - "InstanceMarketOption": "OnDemand" + "InstanceMarketOption": "OnDemand", }, { "AvailabilityZone": "us-east-1c", "Capacity": 2, - "InstanceMarketOption": "OnDemand" - } + "InstanceMarketOption": "OnDemand", + }, ], "Instances": [ { "AvailabilityZone": "us-east-1b", "InstanceId": "i-08d0b3c1a137e02a5", "InstanceType": "t2.nano", - "InstanceMarketOption": "OnDemand" + "InstanceMarketOption": "OnDemand", }, { "AvailabilityZone": "us-east-1c", "InstanceId": "ip-172-31-45-253.eu-west-1.compute.internal", "InstanceType": "t2.nano", - "InstanceMarketOption": "OnDemand" + "InstanceMarketOption": "OnDemand", }, { "AvailabilityZone": "us-east-1c", "InstanceId": "ip-172-31-27-227.eu-west-1.compute.internal", "InstanceType": "t2.nano", - "InstanceMarketOption": "OnDemand" + "InstanceMarketOption": "OnDemand", }, { "AvailabilityZone": "us-east-1c", "InstanceId": "ip-172-31-45-253.eu-west-1.compute.internal", "InstanceType": "t2.nano", - "InstanceMarketOption": "OnDemand" - } + "InstanceMarketOption": "OnDemand", + }, ], - "Cause": "SCALE_IN" + "Cause": "SCALE_IN", } main(private_key, args.app_id, sample_event) diff --git a/tests/ci/token_lambda/app.py b/tests/ci/token_lambda/app.py index 731d6c040de..e3b768fca36 100644 --- a/tests/ci/token_lambda/app.py +++ b/tests/ci/token_lambda/app.py @@ -7,6 +7,7 @@ import sys import json import time + def get_installation_id(jwt_token): headers = { "Authorization": f"Bearer {jwt_token}", @@ -15,40 +16,48 @@ def get_installation_id(jwt_token): response = requests.get("https://api.github.com/app/installations", headers=headers) response.raise_for_status() data = 
response.json() - return data[0]['id'] + return data[0]["id"] + def get_access_token(jwt_token, installation_id): headers = { "Authorization": f"Bearer {jwt_token}", "Accept": "application/vnd.github.v3+json", } - response = requests.post(f"https://api.github.com/app/installations/{installation_id}/access_tokens", headers=headers) + response = requests.post( + f"https://api.github.com/app/installations/{installation_id}/access_tokens", + headers=headers, + ) response.raise_for_status() data = response.json() - return data['token'] + return data["token"] + def get_runner_registration_token(access_token): headers = { "Authorization": f"token {access_token}", "Accept": "application/vnd.github.v3+json", } - response = requests.post("https://api.github.com/orgs/ClickHouse/actions/runners/registration-token", headers=headers) + response = requests.post( + "https://api.github.com/orgs/ClickHouse/actions/runners/registration-token", + headers=headers, + ) response.raise_for_status() data = response.json() - return data['token'] + return data["token"] + def get_key_and_app_from_aws(): import boto3 + secret_name = "clickhouse_github_secret_key" session = boto3.session.Session() client = session.client( - service_name='secretsmanager', + service_name="secretsmanager", ) - get_secret_value_response = client.get_secret_value( - SecretId=secret_name - ) - data = json.loads(get_secret_value_response['SecretString']) - return data['clickhouse-app-key'], int(data['clickhouse-app-id']) + get_secret_value_response = client.get_secret_value(SecretId=secret_name) + data = json.loads(get_secret_value_response["SecretString"]) + return data["clickhouse-app-key"], int(data["clickhouse-app-id"]) def main(github_secret_key, github_app_id, push_to_ssm, ssm_parameter_name): @@ -67,40 +76,65 @@ def main(github_secret_key, github_app_id, push_to_ssm, ssm_parameter_name): import boto3 print("Trying to put params into ssm manager") - client = boto3.client('ssm') + client = boto3.client("ssm") client.put_parameter( Name=ssm_parameter_name, Value=runner_registration_token, - Type='SecureString', - Overwrite=True) + Type="SecureString", + Overwrite=True, + ) else: - print("Not push token to AWS Parameter Store, just print:", runner_registration_token) + print( + "Not push token to AWS Parameter Store, just print:", + runner_registration_token, + ) def handler(event, context): private_key, app_id = get_key_and_app_from_aws() - main(private_key, app_id, True, 'github_runner_registration_token') + main(private_key, app_id, True, "github_runner_registration_token") + if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Get new token from github to add runners') - parser.add_argument('-p', '--private-key-path', help='Path to file with private key') - parser.add_argument('-k', '--private-key', help='Private key') - parser.add_argument('-a', '--app-id', type=int, help='GitHub application ID', required=True) - parser.add_argument('--push-to-ssm', action='store_true', help='Store received token in parameter store') - parser.add_argument('--ssm-parameter-name', default='github_runner_registration_token', help='AWS paramater store parameter name') + parser = argparse.ArgumentParser( + description="Get new token from github to add runners" + ) + parser.add_argument( + "-p", "--private-key-path", help="Path to file with private key" + ) + parser.add_argument("-k", "--private-key", help="Private key") + parser.add_argument( + "-a", "--app-id", type=int, help="GitHub application ID", required=True + ) + 
parser.add_argument( + "--push-to-ssm", + action="store_true", + help="Store received token in parameter store", + ) + parser.add_argument( + "--ssm-parameter-name", + default="github_runner_registration_token", + help="AWS paramater store parameter name", + ) args = parser.parse_args() if not args.private_key_path and not args.private_key: - print("Either --private-key-path or --private-key must be specified", file=sys.stderr) + print( + "Either --private-key-path or --private-key must be specified", + file=sys.stderr, + ) if args.private_key_path and args.private_key: - print("Either --private-key-path or --private-key must be specified", file=sys.stderr) + print( + "Either --private-key-path or --private-key must be specified", + file=sys.stderr, + ) if args.private_key: private_key = args.private_key else: - with open(args.private_key_path, 'r') as key_file: + with open(args.private_key_path, "r") as key_file: private_key = key_file.read() main(private_key, args.app_id, args.push_to_ssm, args.ssm_parameter_name) diff --git a/tests/ci/unit_tests_check.py b/tests/ci/unit_tests_check.py index 06faa5704af..84c4faa822d 100644 --- a/tests/ci/unit_tests_check.py +++ b/tests/ci/unit_tests_check.py @@ -15,32 +15,38 @@ from build_download_helper import download_unit_tests from upload_result_helper import upload_results from docker_pull_helper import get_image_with_version from commit_status_helper import post_commit_status -from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse +from clickhouse_helper import ( + ClickHouseHelper, + mark_flaky_tests, + prepare_tests_results_for_clickhouse, +) from stopwatch import Stopwatch from rerun_helper import RerunHelper from tee_popen import TeePopen -IMAGE_NAME = 'clickhouse/unit-test' +IMAGE_NAME = "clickhouse/unit-test" + def get_test_name(line): - elements = reversed(line.split(' ')) + elements = reversed(line.split(" ")) for element in elements: - if '(' not in element and ')' not in element: + if "(" not in element and ")" not in element: return element raise Exception(f"No test name in line '{line}'") + def process_result(result_folder): - OK_SIGN = 'OK ]' - FAILED_SIGN = 'FAILED ]' - SEGFAULT = 'Segmentation fault' - SIGNAL = 'received signal SIG' - PASSED = 'PASSED' + OK_SIGN = "OK ]" + FAILED_SIGN = "FAILED ]" + SEGFAULT = "Segmentation fault" + SIGNAL = "received signal SIG" + PASSED = "PASSED" summary = [] total_counter = 0 failed_counter = 0 - result_log_path = f'{result_folder}/test_result.txt' + result_log_path = f"{result_folder}/test_result.txt" if not os.path.exists(result_log_path): logging.info("No output log on path %s", result_log_path) return "error", "No output log", summary, [] @@ -48,7 +54,7 @@ def process_result(result_folder): status = "success" description = "" passed = False - with open(result_log_path, 'r', encoding='utf-8') as test_result: + with open(result_log_path, "r", encoding="utf-8") as test_result: for line in test_result: if OK_SIGN in line: logging.info("Found ok line: '%s'", line) @@ -56,7 +62,7 @@ def process_result(result_folder): logging.info("Test name: '%s'", test_name) summary.append((test_name, "OK")) total_counter += 1 - elif FAILED_SIGN in line and 'listed below' not in line and 'ms)' in line: + elif FAILED_SIGN in line and "listed below" not in line and "ms)" in line: logging.info("Found fail line: '%s'", line) test_name = get_test_name(line.strip()) logging.info("Test name: '%s'", test_name) @@ -85,7 +91,9 @@ def process_result(result_folder): status = 
"failure" if not description: - description += f"fail: {failed_counter}, passed: {total_counter - failed_counter}" + description += ( + f"fail: {failed_counter}, passed: {total_counter - failed_counter}" + ) return status, description, summary, [result_log_path] @@ -139,15 +147,30 @@ if __name__ == "__main__": subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True) - s3_helper = S3Helper('https://s3.amazonaws.com') + s3_helper = S3Helper("https://s3.amazonaws.com") state, description, test_results, additional_logs = process_result(test_output) ch_helper = ClickHouseHelper() mark_flaky_tests(ch_helper, check_name, test_results) - report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [run_log_path] + additional_logs, check_name) + report_url = upload_results( + s3_helper, + pr_info.number, + pr_info.sha, + test_results, + [run_log_path] + additional_logs, + check_name, + ) print(f"::notice ::Report url: {report_url}") post_commit_status(gh, pr_info.sha, check_name, description, state, report_url) - prepared_events = prepare_tests_results_for_clickhouse(pr_info, test_results, state, stopwatch.duration_seconds, stopwatch.start_time_str, report_url, check_name) + prepared_events = prepare_tests_results_for_clickhouse( + pr_info, + test_results, + state, + stopwatch.duration_seconds, + stopwatch.start_time_str, + report_url, + check_name, + ) ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) diff --git a/tests/ci/upload_result_helper.py b/tests/ci/upload_result_helper.py index 5a5e8d3f36a..289fc4b3184 100644 --- a/tests/ci/upload_result_helper.py +++ b/tests/ci/upload_result_helper.py @@ -2,11 +2,13 @@ import os import logging import ast -from env_helper import GITHUB_SERVER_URL, GITHUB_REPOSITORY, GITHUB_RUN_ID -from report import create_test_html_report +from env_helper import GITHUB_SERVER_URL, GITHUB_REPOSITORY, GITHUB_RUN_URL +from report import ReportColorTheme, create_test_html_report -def process_logs(s3_client, additional_logs, s3_path_prefix, test_results, with_raw_logs): +def process_logs( + s3_client, additional_logs, s3_path_prefix, test_results, with_raw_logs +): processed_logs = {} # Firstly convert paths of logs from test_results to urls to s3. 
for test_result in test_results: @@ -21,8 +23,8 @@ def process_logs(s3_client, additional_logs, s3_path_prefix, test_results, with_ test_log_urls.append(processed_logs[log_path]) elif log_path: url = s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path)) + log_path, s3_path_prefix + "/" + os.path.basename(log_path) + ) test_log_urls.append(url) processed_logs[log_path] = url @@ -33,15 +35,29 @@ def process_logs(s3_client, additional_logs, s3_path_prefix, test_results, with_ if log_path: additional_urls.append( s3_client.upload_test_report_to_s3( - log_path, - s3_path_prefix + "/" + os.path.basename(log_path))) + log_path, s3_path_prefix + "/" + os.path.basename(log_path) + ) + ) return additional_urls -def upload_results(s3_client, pr_number, commit_sha, test_results, additional_files, check_name, with_raw_logs=True): - s3_path_prefix = f"{pr_number}/{commit_sha}/" + check_name.lower().replace(' ', '_').replace('(', '_').replace(')', '_').replace(',', '_') - additional_urls = process_logs(s3_client, additional_files, s3_path_prefix, test_results, with_raw_logs) +def upload_results( + s3_client, + pr_number, + commit_sha, + test_results, + additional_files, + check_name, + with_raw_logs=True, + statuscolors=None, +): + s3_path_prefix = f"{pr_number}/{commit_sha}/" + check_name.lower().replace( + " ", "_" + ).replace("(", "_").replace(")", "_").replace(",", "_") + additional_urls = process_logs( + s3_client, additional_files, s3_path_prefix, test_results, with_raw_logs + ) branch_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/commits/master" branch_name = "master" @@ -50,7 +66,7 @@ def upload_results(s3_client, pr_number, commit_sha, test_results, additional_fi branch_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/pull/{pr_number}" commit_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/commit/{commit_sha}" - task_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/actions/runs/{GITHUB_RUN_ID}" + task_url = GITHUB_RUN_URL if additional_urls: raw_log_url = additional_urls[0] @@ -58,10 +74,25 @@ def upload_results(s3_client, pr_number, commit_sha, test_results, additional_fi else: raw_log_url = task_url - html_report = create_test_html_report(check_name, test_results, raw_log_url, task_url, branch_url, branch_name, commit_url, additional_urls, with_raw_logs) - with open('report.html', 'w', encoding='utf-8') as f: + statuscolors = ( + ReportColorTheme.bugfixcheck if "bugfix validate check" in check_name else None + ) + + html_report = create_test_html_report( + check_name, + test_results, + raw_log_url, + task_url, + branch_url, + branch_name, + commit_url, + additional_urls, + with_raw_logs, + statuscolors=statuscolors, + ) + with open("report.html", "w", encoding="utf-8") as f: f.write(html_report) - url = s3_client.upload_test_report_to_s3('report.html', s3_path_prefix + ".html") + url = s3_client.upload_test_report_to_s3("report.html", s3_path_prefix + ".html") logging.info("Search result in url %s", url) return url diff --git a/tests/ci/version_helper.py b/tests/ci/version_helper.py index 02e22ee0c4d..3bb547333e7 100755 --- a/tests/ci/version_helper.py +++ b/tests/ci/version_helper.py @@ -238,7 +238,7 @@ def _update_dockerfile(repo_path: str, version: ClickHouseVersion): def update_version_local(repo_path, version, version_type="testing"): update_contributors() version.with_description(version_type) - update_cmake_version(version, version_type) + update_cmake_version(version) _update_changelog(repo_path, version) _update_dockerfile(repo_path, 
version) diff --git a/tests/ci/workflow_approve_rerun_lambda/app.py b/tests/ci/workflow_approve_rerun_lambda/app.py index 50b9d9bfedc..b650d1651fe 100644 --- a/tests/ci/workflow_approve_rerun_lambda/app.py +++ b/tests/ci/workflow_approve_rerun_lambda/app.py @@ -379,12 +379,16 @@ def check_need_to_rerun(workflow_description): def rerun_workflow(workflow_description, token): print("Going to rerun workflow") - _exec_post_with_retry(workflow_description.rerun_url, token) + try: + _exec_post_with_retry(f"{workflow_description.rerun_url}-failed-jobs", token) + except Exception: + _exec_post_with_retry(workflow_description.rerun_url, token) def main(event): token = get_token_from_aws() event_data = json.loads(event["body"]) + print("The body received:", event_data) workflow_description = get_workflow_description_from_event(event_data) print("Got workflow description", workflow_description) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index b5e09a21f92..863e061085a 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -373,6 +373,11 @@ class SettingsRandomizer: "priority": lambda: int(abs(random.gauss(0, 2))), "output_format_parallel_formatting": lambda: random.randint(0, 1), "input_format_parallel_parsing": lambda: random.randint(0, 1), + "min_chunk_bytes_for_parallel_parsing": lambda: max(1024, int(random.gauss(10 * 1024 * 1024, 5 * 1000 * 1000))), + "max_read_buffer_size": lambda: random.randint(500000, 1048576), + "prefer_localhost_replica": lambda: random.randint(0, 1), + "max_block_size": lambda: random.randint(8000, 100000), + "max_threads": lambda: random.randint(1, 64), } @staticmethod @@ -463,9 +468,11 @@ class TestCase: return testcase_args - def add_random_settings(self, client_options): + def add_random_settings(self, args, client_options): if self.tags and 'no-random-settings' in self.tags: return client_options + if args.no_random_settings: + return client_options if len(self.base_url_params) == 0: os.environ['CLICKHOUSE_URL_PARAMS'] = '&'.join(self.random_settings) @@ -480,9 +487,11 @@ class TestCase: os.environ['CLICKHOUSE_URL_PARAMS'] = self.base_url_params os.environ['CLICKHOUSE_CLIENT_OPT'] = self.base_client_options - def add_info_about_settings(self, description): + def add_info_about_settings(self, args, description): if self.tags and 'no-random-settings' in self.tags: return description + if args.no_random_settings: + return description return description + "\n" + "Settings used in the test: " + "--" + " --".join(self.random_settings) + "\n" @@ -490,6 +499,9 @@ class TestCase: self.case: str = case # case file name self.tags: Set[str] = suite.all_tags[case] if case in suite.all_tags else set() + for tag in os.getenv("GLOBAL_TAGS", "").split(","): + self.tags.add(tag.strip()) + self.case_file: str = os.path.join(suite.suite_path, case) (self.name, self.ext) = os.path.splitext(case) @@ -780,13 +792,13 @@ class TestCase: self.runs_count += 1 self.testcase_args = self.configure_testcase_args(args, self.case_file, suite.suite_tmp_path) - client_options = self.add_random_settings(client_options) + client_options = self.add_random_settings(args, client_options) proc, stdout, stderr, total_time = self.run_single_test(server_logs_level, client_options) result = self.process_result_impl(proc, stdout, stderr, total_time) result.check_if_need_retry(args, stdout, stderr, self.runs_count) if result.status == TestStatus.FAIL: - result.description = self.add_info_about_settings(result.description) + result.description = self.add_info_about_settings(args, 
result.description) return result except KeyboardInterrupt as e: raise e @@ -794,12 +806,12 @@ class TestCase: return TestResult(self.name, TestStatus.FAIL, FailureReason.INTERNAL_QUERY_FAIL, 0., - self.add_info_about_settings(self.get_description_from_exception_info(sys.exc_info()))) + self.add_info_about_settings(args, self.get_description_from_exception_info(sys.exc_info()))) except (ConnectionRefusedError, ConnectionResetError): return TestResult(self.name, TestStatus.FAIL, FailureReason.SERVER_DIED, 0., - self.add_info_about_settings(self.get_description_from_exception_info(sys.exc_info()))) + self.add_info_about_settings(args, self.get_description_from_exception_info(sys.exc_info()))) except: return TestResult(self.name, TestStatus.UNKNOWN, FailureReason.INTERNAL_ERROR, @@ -1493,6 +1505,7 @@ if __name__ == '__main__': parser.add_argument('--print-time', action='store_true', dest='print_time', help='Print test time') parser.add_argument('--check-zookeeper-session', action='store_true', help='Check ZooKeeper session uptime to determine if failed test should be retried') parser.add_argument('--s3-storage', action='store_true', default=False, help='Run tests over s3 storage') + parser.add_argument('--no-random-settings', action='store_true', default=False, help='Disable settings randomization') parser.add_argument('--run-by-hash-num', type=int, help='Run tests matching crc32(test_name) % run_by_hash_total == run_by_hash_num') parser.add_argument('--run-by-hash-total', type=int, help='Total test groups for crc32(test_name) % run_by_hash_total == run_by_hash_num') diff --git a/tests/config/config.d/named_collection.xml b/tests/config/config.d/named_collection.xml index f3b7074e1ce..bc75461d664 100644 --- a/tests/config/config.d/named_collection.xml +++ b/tests/config/config.d/named_collection.xml @@ -14,5 +14,13 @@ default s
+ + +
+ X-ClickHouse-Format + JSONEachRow +
+
+
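The `named_collection.xml` change above adds an `X-ClickHouse-Format: JSONEachRow` HTTP header to one of the test named collections, presumably so that requests issued through that collection ask the endpoint for JSONEachRow output. As background, ClickHouse's own HTTP interface honors the same header; the short sketch below illustrates the effect (the server address and query are placeholders, not taken from the tests).

```python
import requests

# Hypothetical local server; 8123 is ClickHouse's default HTTP port.
CLICKHOUSE_URL = "http://localhost:8123/"

# Request JSONEachRow output via the header instead of appending
# "FORMAT JSONEachRow" to the query text.
response = requests.post(
    CLICKHOUSE_URL,
    params={"query": "SELECT 1 AS x, 'hello' AS s"},
    headers={"X-ClickHouse-Format": "JSONEachRow"},
)
response.raise_for_status()
print(response.text)  # expected: {"x":1,"s":"hello"}
```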
diff --git a/tests/config/config.d/zookeeper.xml b/tests/config/config.d/zookeeper.xml index 4fa529a6180..63057224ef9 100644 --- a/tests/config/config.d/zookeeper.xml +++ b/tests/config/config.d/zookeeper.xml @@ -1,5 +1,7 @@ + + random localhost 9181 diff --git a/tests/fuzz/all.dict b/tests/fuzz/all.dict index bf25f1fa484..1863cd20bdd 100644 --- a/tests/fuzz/all.dict +++ b/tests/fuzz/all.dict @@ -1459,7 +1459,7 @@ "xor" "xxHash32" "xxHash64" -"yandexConsistentHash" +"kostikConsistentHash" "YEAR" "yearweek" "yesterday" diff --git a/tests/fuzz/dictionaries/functions.dict b/tests/fuzz/dictionaries/functions.dict index 722e931dc09..3f393aa6846 100644 --- a/tests/fuzz/dictionaries/functions.dict +++ b/tests/fuzz/dictionaries/functions.dict @@ -26,7 +26,7 @@ "toUnixTimestamp64Nano" "toUnixTimestamp64Micro" "jumpConsistentHash" -"yandexConsistentHash" +"kostikConsistentHash" "addressToSymbol" "toJSONString" "JSON_VALUE" diff --git a/tests/integration/ci-runner.py b/tests/integration/ci-runner.py index 8f228d91e9e..05e56d2a910 100755 --- a/tests/integration/ci-runner.py +++ b/tests/integration/ci-runner.py @@ -21,19 +21,21 @@ CLICKHOUSE_BINARY_PATH = "usr/bin/clickhouse" CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH = "usr/bin/clickhouse-odbc-bridge" CLICKHOUSE_LIBRARY_BRIDGE_BINARY_PATH = "usr/bin/clickhouse-library-bridge" -TRIES_COUNT = 10 +FLAKY_TRIES_COUNT = 10 MAX_TIME_SECONDS = 3600 MAX_TIME_IN_SANDBOX = 20 * 60 # 20 minutes TASK_TIMEOUT = 8 * 60 * 60 # 8 hours +NO_CHANGES_MSG = "Nothing to run" + def stringhash(s): return zlib.crc32(s.encode("utf-8")) -def get_tests_to_run(pr_info): - result = set([]) +def get_changed_tests_to_run(pr_info, repo_path): + result = set() changed_files = pr_info["changed_files"] if changed_files is None: @@ -43,7 +45,7 @@ def get_tests_to_run(pr_info): if "tests/integration/test_" in fpath: logging.info("File %s changed and seems like integration test", fpath) result.add(fpath.split("/")[2]) - return list(result) + return filter_existing_tests(result, repo_path) def filter_existing_tests(tests_to_run, repo_path): @@ -207,6 +209,9 @@ class ClickhouseIntegrationTestsRunner: self.image_versions = self.params["docker_images_with_versions"] self.shuffle_groups = self.params["shuffle_test_groups"] self.flaky_check = "flaky check" in self.params["context_name"] + self.bugfix_validate_check = ( + "bugfix validate check" in self.params["context_name"] + ) # if use_tmpfs is not set we assume it to be true, otherwise check self.use_tmpfs = "use_tmpfs" not in self.params or self.params["use_tmpfs"] self.disable_net_host = ( @@ -703,14 +708,13 @@ class ClickhouseIntegrationTestsRunner: return counters, tests_times, log_paths - def run_flaky_check(self, repo_path, build_path): + def run_flaky_check(self, repo_path, build_path, should_fail=False): pr_info = self.params["pr_info"] - # pytest swears, if we require to run some tests which was renamed or deleted - tests_to_run = filter_existing_tests(get_tests_to_run(pr_info), repo_path) + tests_to_run = get_changed_tests_to_run(pr_info, repo_path) if not tests_to_run: logging.info("No tests to run found") - return "success", "Nothing to run", [("Nothing to run", "OK")], "" + return "success", NO_CHANGES_MSG, [(NO_CHANGES_MSG, "OK")], "" self._install_clickhouse(build_path) logging.info("Found '%s' tests to run", " ".join(tests_to_run)) @@ -720,26 +724,29 @@ class ClickhouseIntegrationTestsRunner: logging.info("Starting check with retries") final_retry = 0 logs = [] - for i in range(TRIES_COUNT): + tires_num = 1 if should_fail else 
FLAKY_TRIES_COUNT + for i in range(tires_num): final_retry += 1 logging.info("Running tests for the %s time", i) counters, tests_times, log_paths = self.try_run_test_group( - repo_path, "flaky", tests_to_run, 1, 1 + repo_path, "bugfix" if should_fail else "flaky", tests_to_run, 1, 1 ) logs += log_paths if counters["FAILED"]: logging.info("Found failed tests: %s", " ".join(counters["FAILED"])) - description_prefix = "Flaky tests found: " + description_prefix = "Failed tests found: " result_state = "failure" - break + if not should_fail: + break if counters["ERROR"]: - description_prefix = "Flaky tests found: " + description_prefix = "Failed tests found: " logging.info("Found error tests: %s", " ".join(counters["ERROR"])) # NOTE "error" result state will restart the whole test task, # so we use "failure" here result_state = "failure" - break - assert len(counters["FLAKY"]) == 0 + if not should_fail: + break + assert len(counters["FLAKY"]) == 0 or should_fail logging.info("Try is OK, all tests passed, going to clear env") clear_ip_tables_and_restart_daemons() logging.info("And going to sleep for some time") @@ -774,8 +781,10 @@ class ClickhouseIntegrationTestsRunner: return result_state, status_text, test_result, logs def run_impl(self, repo_path, build_path): - if self.flaky_check: - return self.run_flaky_check(repo_path, build_path) + if self.flaky_check or self.bugfix_validate_check: + return self.run_flaky_check( + repo_path, build_path, should_fail=self.bugfix_validate_check + ) self._install_clickhouse(build_path) logging.info( diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 4b0a9a2835b..9dd10ce9b52 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -5,23 +5,34 @@ import os from helpers.test_tools import TSV from helpers.network import _NetworkManager + @pytest.fixture(autouse=True, scope="session") def cleanup_environment(): try: if int(os.environ.get("PYTEST_CLEANUP_CONTAINERS", 0)) == 1: logging.debug(f"Cleaning all iptables rules") _NetworkManager.clean_all_user_iptables_rules() - result = run_and_check(['docker ps | wc -l'], shell=True) + result = run_and_check(["docker ps | wc -l"], shell=True) if int(result) > 1: if int(os.environ.get("PYTEST_CLEANUP_CONTAINERS", 0)) != 1: - logging.warning(f"Docker containters({int(result)}) are running before tests run. They can be left from previous pytest run and cause test failures.\n"\ - "You can set env PYTEST_CLEANUP_CONTAINERS=1 or use runner with --cleanup-containers argument to enable automatic containers cleanup.") + logging.warning( + f"Docker containters({int(result)}) are running before tests run. They can be left from previous pytest run and cause test failures.\n" + "You can set env PYTEST_CLEANUP_CONTAINERS=1 or use runner with --cleanup-containers argument to enable automatic containers cleanup." 
+ ) else: logging.debug("Trying to kill unstopped containers...") - run_and_check([f'docker kill $(docker container list --all --quiet)'], shell=True, nothrow=True) - run_and_check([f'docker rm $docker container list --all --quiet)'], shell=True, nothrow=True) + run_and_check( + [f"docker kill $(docker container list --all --quiet)"], + shell=True, + nothrow=True, + ) + run_and_check( + [f"docker rm $docker container list --all --quiet)"], + shell=True, + nothrow=True, + ) logging.debug("Unstopped containers killed") - r = run_and_check(['docker-compose', 'ps', '--services', '--all']) + r = run_and_check(["docker-compose", "ps", "--services", "--all"]) logging.debug(f"Docker ps before start:{r.stdout}") else: logging.debug(f"No running containers") @@ -31,8 +42,14 @@ def cleanup_environment(): yield + def pytest_addoption(parser): - parser.addoption("--run-id", default="", help="run-id is used as postfix in _instances_{} directory") + parser.addoption( + "--run-id", + default="", + help="run-id is used as postfix in _instances_{} directory", + ) + def pytest_configure(config): - os.environ['INTEGRATION_TESTS_RUN_ID'] = config.option.run_id + os.environ["INTEGRATION_TESTS_RUN_ID"] = config.option.run_id diff --git a/tests/integration/helpers/client.py b/tests/integration/helpers/client.py index b0e764bf174..af49408abee 100644 --- a/tests/integration/helpers/client.py +++ b/tests/integration/helpers/client.py @@ -6,79 +6,117 @@ from threading import Timer class Client: - def __init__(self, host, port=9000, command='/usr/bin/clickhouse-client'): + def __init__(self, host, port=9000, command="/usr/bin/clickhouse-client"): self.host = host self.port = port self.command = [command] - if os.path.basename(command) == 'clickhouse': - self.command.append('client') + if os.path.basename(command) == "clickhouse": + self.command.append("client") - self.command += ['--host', self.host, '--port', str(self.port), '--stacktrace'] + self.command += ["--host", self.host, "--port", str(self.port), "--stacktrace"] - def query(self, sql, - stdin=None, - timeout=None, - settings=None, - user=None, - password=None, - database=None, - ignore_error=False, - query_id=None): - return self.get_query_request(sql, - stdin=stdin, - timeout=timeout, - settings=settings, - user=user, - password=password, - database=database, - ignore_error=ignore_error, - query_id=query_id).get_answer() + def query( + self, + sql, + stdin=None, + timeout=None, + settings=None, + user=None, + password=None, + database=None, + ignore_error=False, + query_id=None, + ): + return self.get_query_request( + sql, + stdin=stdin, + timeout=timeout, + settings=settings, + user=user, + password=password, + database=database, + ignore_error=ignore_error, + query_id=query_id, + ).get_answer() - def get_query_request(self, sql, - stdin=None, - timeout=None, - settings=None, - user=None, - password=None, - database=None, - ignore_error=False, - query_id=None): + def get_query_request( + self, + sql, + stdin=None, + timeout=None, + settings=None, + user=None, + password=None, + database=None, + ignore_error=False, + query_id=None, + ): command = self.command[:] if stdin is None: - command += ['--multiquery', '--testmode'] + command += ["--multiquery", "--testmode"] stdin = sql else: - command += ['--query', sql] + command += ["--query", sql] if settings is not None: for setting, value in settings.items(): - command += ['--' + setting, str(value)] + command += ["--" + setting, str(value)] if user is not None: - command += ['--user', user] + command += 
["--user", user] if password is not None: - command += ['--password', password] + command += ["--password", password] if database is not None: - command += ['--database', database] + command += ["--database", database] if query_id is not None: - command += ['--query_id', query_id] + command += ["--query_id", query_id] return CommandRequest(command, stdin, timeout, ignore_error) - def query_and_get_error(self, sql, stdin=None, timeout=None, settings=None, user=None, password=None, - database=None): - return self.get_query_request(sql, stdin=stdin, timeout=timeout, settings=settings, user=user, - password=password, database=database).get_error() + def query_and_get_error( + self, + sql, + stdin=None, + timeout=None, + settings=None, + user=None, + password=None, + database=None, + ): + return self.get_query_request( + sql, + stdin=stdin, + timeout=timeout, + settings=settings, + user=user, + password=password, + database=database, + ).get_error() - def query_and_get_answer_with_error(self, sql, stdin=None, timeout=None, settings=None, user=None, password=None, - database=None): - return self.get_query_request(sql, stdin=stdin, timeout=timeout, settings=settings, user=user, - password=password, database=database).get_answer_and_error() + def query_and_get_answer_with_error( + self, + sql, + stdin=None, + timeout=None, + settings=None, + user=None, + password=None, + database=None, + ): + return self.get_query_request( + sql, + stdin=stdin, + timeout=timeout, + settings=settings, + user=user, + password=password, + database=database, + ).get_answer_and_error() class QueryTimeoutExceedException(Exception): @@ -95,7 +133,7 @@ class QueryRuntimeException(Exception): class CommandRequest: def __init__(self, command, stdin=None, timeout=None, ignore_error=False): # Write data to tmp file to avoid PIPEs and execution blocking - stdin_file = tempfile.TemporaryFile(mode='w+') + stdin_file = tempfile.TemporaryFile(mode="w+") stdin_file.write(stdin) stdin_file.seek(0) self.stdout_file = tempfile.TemporaryFile() @@ -108,11 +146,19 @@ class CommandRequest: # can print some debug information there env = {} env["TSAN_OPTIONS"] = "verbosity=0" - self.process = sp.Popen(command, stdin=stdin_file, stdout=self.stdout_file, stderr=self.stderr_file, env=env, universal_newlines=True) + self.process = sp.Popen( + command, + stdin=stdin_file, + stdout=self.stdout_file, + stderr=self.stderr_file, + env=env, + universal_newlines=True, + ) self.timer = None self.process_finished_before_timeout = True if timeout is not None: + def kill_process(): if self.process.poll() is None: self.process_finished_before_timeout = False @@ -126,16 +172,25 @@ class CommandRequest: self.stdout_file.seek(0) self.stderr_file.seek(0) - stdout = self.stdout_file.read().decode('utf-8', errors='replace') - stderr = self.stderr_file.read().decode('utf-8', errors='replace') + stdout = self.stdout_file.read().decode("utf-8", errors="replace") + stderr = self.stderr_file.read().decode("utf-8", errors="replace") - if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error: + if ( + self.timer is not None + and not self.process_finished_before_timeout + and not self.ignore_error + ): logging.debug(f"Timed out. Last stdout:{stdout}, stderr:{stderr}") - raise QueryTimeoutExceedException('Client timed out!') + raise QueryTimeoutExceedException("Client timed out!") if (self.process.returncode != 0 or stderr) and not self.ignore_error: raise QueryRuntimeException( - 'Client failed! 
Return code: {}, stderr: {}'.format(self.process.returncode, stderr), self.process.returncode, stderr) + "Client failed! Return code: {}, stderr: {}".format( + self.process.returncode, stderr + ), + self.process.returncode, + stderr, + ) return stdout @@ -144,14 +199,22 @@ class CommandRequest: self.stdout_file.seek(0) self.stderr_file.seek(0) - stdout = self.stdout_file.read().decode('utf-8', errors='replace') - stderr = self.stderr_file.read().decode('utf-8', errors='replace') + stdout = self.stdout_file.read().decode("utf-8", errors="replace") + stderr = self.stderr_file.read().decode("utf-8", errors="replace") - if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error: - raise QueryTimeoutExceedException('Client timed out!') + if ( + self.timer is not None + and not self.process_finished_before_timeout + and not self.ignore_error + ): + raise QueryTimeoutExceedException("Client timed out!") - if (self.process.returncode == 0): - raise QueryRuntimeException('Client expected to be failed but succeeded! stdout: {}'.format(stdout), self.process.returncode, stderr) + if self.process.returncode == 0: + raise QueryRuntimeException( + "Client expected to be failed but succeeded! stdout: {}".format(stdout), + self.process.returncode, + stderr, + ) return stderr @@ -160,10 +223,14 @@ class CommandRequest: self.stdout_file.seek(0) self.stderr_file.seek(0) - stdout = self.stdout_file.read().decode('utf-8', errors='replace') - stderr = self.stderr_file.read().decode('utf-8', errors='replace') + stdout = self.stdout_file.read().decode("utf-8", errors="replace") + stderr = self.stderr_file.read().decode("utf-8", errors="replace") - if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error: - raise QueryTimeoutExceedException('Client timed out!') + if ( + self.timer is not None + and not self.process_finished_before_timeout + and not self.ignore_error + ): + raise QueryTimeoutExceedException("Client timed out!") return (stdout, stderr) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 627e3725232..dffd09ae849 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -16,20 +16,28 @@ import traceback import urllib.parse import shlex import urllib3 - -from cassandra.policies import RoundRobinPolicy -import cassandra.cluster -import psycopg2 -import pymongo -import pymysql import requests -from confluent_kafka.avro.cached_schema_registry_client import \ - CachedSchemaRegistryClient + +try: + # Please, add modules that required for specific tests only here. + # So contributors will be able to run most tests locally + # without installing tons of unneeded packages that may be not so easy to install. 
+ from cassandra.policies import RoundRobinPolicy + import cassandra.cluster + import psycopg2 + from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT + import pymongo + import pymysql + from confluent_kafka.avro.cached_schema_registry_client import ( + CachedSchemaRegistryClient, + ) +except Exception as e: + logging.warning(f"Cannot import some modules, some tests may not work: {e}") + from dict2xml import dict2xml from kazoo.client import KazooClient from kazoo.exceptions import KazooException from minio import Minio -from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry from helpers import pytest_xdist_logging_to_separate_files @@ -42,28 +50,48 @@ from .hdfs_api import HDFSApi HELPERS_DIR = p.dirname(__file__) CLICKHOUSE_ROOT_DIR = p.join(p.dirname(__file__), "../../..") -LOCAL_DOCKER_COMPOSE_DIR = p.join(CLICKHOUSE_ROOT_DIR, "docker/test/integration/runner/compose/") -DEFAULT_ENV_NAME = '.env' +LOCAL_DOCKER_COMPOSE_DIR = p.join( + CLICKHOUSE_ROOT_DIR, "docker/test/integration/runner/compose/" +) +DEFAULT_ENV_NAME = ".env" SANITIZER_SIGN = "==================" # to create docker-compose env file def _create_env_file(path, variables): logging.debug(f"Env {variables} stored in {path}") - with open(path, 'w') as f: + with open(path, "w") as f: for var, value in list(variables.items()): f.write("=".join([var, value]) + "\n") return path -def run_and_check(args, env=None, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=300, nothrow=False, detach=False): + +def run_and_check( + args, + env=None, + shell=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + timeout=300, + nothrow=False, + detach=False, +): if detach: - subprocess.Popen(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, env=env, shell=shell) + subprocess.Popen( + args, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + env=env, + shell=shell, + ) return logging.debug(f"Command:{args}") - res = subprocess.run(args, stdout=stdout, stderr=stderr, env=env, shell=shell, timeout=timeout) - out = res.stdout.decode('utf-8') - err = res.stderr.decode('utf-8') + res = subprocess.run( + args, stdout=stdout, stderr=stderr, env=env, shell=shell, timeout=timeout + ) + out = res.stdout.decode("utf-8") + err = res.stderr.decode("utf-8") # check_call(...) from subprocess does not print stderr, so we do it manually for outline in out.splitlines(): logging.debug(f"Stdout:{outline}") @@ -74,18 +102,22 @@ def run_and_check(args, env=None, shell=False, stdout=subprocess.PIPE, stderr=su if env: logging.debug(f"Env:{env}") if not nothrow: - raise Exception(f"Command {args} return non-zero code {res.returncode}: {res.stderr.decode('utf-8')}") + raise Exception( + f"Command {args} return non-zero code {res.returncode}: {res.stderr.decode('utf-8')}" + ) return out + # Based on https://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python/2838309#2838309 def get_free_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.bind(("",0)) + s.bind(("", 0)) s.listen(1) port = s.getsockname()[1] s.close() return port + def retry_exception(num, delay, func, exception=Exception, *args, **kwargs): """ Retry if `func()` throws, `num` times. 
@@ -100,87 +132,109 @@ def retry_exception(num, delay, func, exception=Exception, *args, **kwargs): try: func(*args, **kwargs) time.sleep(delay) - except exception: # pylint: disable=broad-except + except exception: # pylint: disable=broad-except i += 1 continue return - raise StopIteration('Function did not finished successfully') + raise StopIteration("Function did not finished successfully") + def subprocess_check_call(args, detach=False, nothrow=False): # Uncomment for debugging - #logging.info('run:' + ' '.join(args)) + # logging.info('run:' + ' '.join(args)) return run_and_check(args, detach=detach, nothrow=nothrow) def get_odbc_bridge_path(): - path = os.environ.get('CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH') + path = os.environ.get("CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH") if path is None: - server_path = os.environ.get('CLICKHOUSE_TESTS_SERVER_BIN_PATH') + server_path = os.environ.get("CLICKHOUSE_TESTS_SERVER_BIN_PATH") if server_path is not None: - return os.path.join(os.path.dirname(server_path), 'clickhouse-odbc-bridge') + return os.path.join(os.path.dirname(server_path), "clickhouse-odbc-bridge") else: - return '/usr/bin/clickhouse-odbc-bridge' + return "/usr/bin/clickhouse-odbc-bridge" return path + def get_library_bridge_path(): - path = os.environ.get('CLICKHOUSE_TESTS_LIBRARY_BRIDGE_BIN_PATH') + path = os.environ.get("CLICKHOUSE_TESTS_LIBRARY_BRIDGE_BIN_PATH") if path is None: - server_path = os.environ.get('CLICKHOUSE_TESTS_SERVER_BIN_PATH') + server_path = os.environ.get("CLICKHOUSE_TESTS_SERVER_BIN_PATH") if server_path is not None: - return os.path.join(os.path.dirname(server_path), 'clickhouse-library-bridge') + return os.path.join( + os.path.dirname(server_path), "clickhouse-library-bridge" + ) else: - return '/usr/bin/clickhouse-library-bridge' + return "/usr/bin/clickhouse-library-bridge" return path + def get_docker_compose_path(): - compose_path = os.environ.get('DOCKER_COMPOSE_DIR') + compose_path = os.environ.get("DOCKER_COMPOSE_DIR") if compose_path is not None: return os.path.dirname(compose_path) else: - if os.path.exists(os.path.dirname('/compose/')): - return os.path.dirname('/compose/') # default in docker runner container + if os.path.exists(os.path.dirname("/compose/")): + return os.path.dirname("/compose/") # default in docker runner container else: - logging.debug(f"Fallback docker_compose_path to LOCAL_DOCKER_COMPOSE_DIR: {LOCAL_DOCKER_COMPOSE_DIR}") + logging.debug( + f"Fallback docker_compose_path to LOCAL_DOCKER_COMPOSE_DIR: {LOCAL_DOCKER_COMPOSE_DIR}" + ) return LOCAL_DOCKER_COMPOSE_DIR + def check_kafka_is_available(kafka_id, kafka_port): - p = subprocess.Popen(('docker', - 'exec', - '-i', - kafka_id, - '/usr/bin/kafka-broker-api-versions', - '--bootstrap-server', - f'INSIDE://localhost:{kafka_port}'), - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p = subprocess.Popen( + ( + "docker", + "exec", + "-i", + kafka_id, + "/usr/bin/kafka-broker-api-versions", + "--bootstrap-server", + f"INSIDE://localhost:{kafka_port}", + ), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) p.communicate() return p.returncode == 0 + def check_rabbitmq_is_available(rabbitmq_id): - p = subprocess.Popen(('docker', - 'exec', - '-i', - rabbitmq_id, - 'rabbitmqctl', - 'await_startup'), - stdout=subprocess.PIPE) + p = subprocess.Popen( + ("docker", "exec", "-i", rabbitmq_id, "rabbitmqctl", "await_startup"), + stdout=subprocess.PIPE, + ) p.communicate() return p.returncode == 0 + def enable_consistent_hash_plugin(rabbitmq_id): - p = subprocess.Popen(('docker', - 
'exec', - '-i', - rabbitmq_id, - "rabbitmq-plugins", "enable", "rabbitmq_consistent_hash_exchange"), - stdout=subprocess.PIPE) + p = subprocess.Popen( + ( + "docker", + "exec", + "-i", + rabbitmq_id, + "rabbitmq-plugins", + "enable", + "rabbitmq_consistent_hash_exchange", + ), + stdout=subprocess.PIPE, + ) p.communicate() return p.returncode == 0 + def get_instances_dir(): - if 'INTEGRATION_TESTS_RUN_ID' in os.environ and os.environ['INTEGRATION_TESTS_RUN_ID']: - return '_instances_' + shlex.quote(os.environ['INTEGRATION_TESTS_RUN_ID']) + if ( + "INTEGRATION_TESTS_RUN_ID" in os.environ + and os.environ["INTEGRATION_TESTS_RUN_ID"] + ): + return "_instances_" + shlex.quote(os.environ["INTEGRATION_TESTS_RUN_ID"]) else: - return '_instances' + return "_instances" class ClickHouseCluster: @@ -192,53 +246,87 @@ class ClickHouseCluster: these directories will contain logs, database files, docker-compose config, ClickHouse configs etc. """ - def __init__(self, base_path, name=None, base_config_dir=None, server_bin_path=None, client_bin_path=None, - odbc_bridge_bin_path=None, library_bridge_bin_path=None, zookeeper_config_path=None, custom_dockerd_host=None, - zookeeper_keyfile=None, zookeeper_certfile=None): + def __init__( + self, + base_path, + name=None, + base_config_dir=None, + server_bin_path=None, + client_bin_path=None, + odbc_bridge_bin_path=None, + library_bridge_bin_path=None, + zookeeper_config_path=None, + custom_dockerd_host=None, + zookeeper_keyfile=None, + zookeeper_certfile=None, + ): for param in list(os.environ.keys()): logging.debug("ENV %40s %s" % (param, os.environ[param])) self.base_path = base_path self.base_dir = p.dirname(base_path) - self.name = name if name is not None else '' + self.name = name if name is not None else "" - self.base_config_dir = base_config_dir or os.environ.get('CLICKHOUSE_TESTS_BASE_CONFIG_DIR', - '/etc/clickhouse-server/') + self.base_config_dir = base_config_dir or os.environ.get( + "CLICKHOUSE_TESTS_BASE_CONFIG_DIR", "/etc/clickhouse-server/" + ) self.server_bin_path = p.realpath( - server_bin_path or os.environ.get('CLICKHOUSE_TESTS_SERVER_BIN_PATH', '/usr/bin/clickhouse')) - self.odbc_bridge_bin_path = p.realpath(odbc_bridge_bin_path or get_odbc_bridge_path()) - self.library_bridge_bin_path = p.realpath(library_bridge_bin_path or get_library_bridge_path()) + server_bin_path + or os.environ.get("CLICKHOUSE_TESTS_SERVER_BIN_PATH", "/usr/bin/clickhouse") + ) + self.odbc_bridge_bin_path = p.realpath( + odbc_bridge_bin_path or get_odbc_bridge_path() + ) + self.library_bridge_bin_path = p.realpath( + library_bridge_bin_path or get_library_bridge_path() + ) self.client_bin_path = p.realpath( - client_bin_path or os.environ.get('CLICKHOUSE_TESTS_CLIENT_BIN_PATH', '/usr/bin/clickhouse-client')) - self.zookeeper_config_path = p.join(self.base_dir, zookeeper_config_path) if zookeeper_config_path else p.join( - HELPERS_DIR, 'zookeeper_config.xml') + client_bin_path + or os.environ.get( + "CLICKHOUSE_TESTS_CLIENT_BIN_PATH", "/usr/bin/clickhouse-client" + ) + ) + self.zookeeper_config_path = ( + p.join(self.base_dir, zookeeper_config_path) + if zookeeper_config_path + else p.join(HELPERS_DIR, "zookeeper_config.xml") + ) - project_name = pwd.getpwuid(os.getuid()).pw_name + p.basename(self.base_dir) + self.name + project_name = ( + pwd.getpwuid(os.getuid()).pw_name + p.basename(self.base_dir) + self.name + ) # docker-compose removes everything non-alphanumeric from project names so we do it too. 
- self.project_name = re.sub(r'[^a-z0-9]', '', project_name.lower()) - instances_dir_name = '_instances' + self.project_name = re.sub(r"[^a-z0-9]", "", project_name.lower()) + instances_dir_name = "_instances" if self.name: - instances_dir_name += '_' + self.name + instances_dir_name += "_" + self.name - if 'INTEGRATION_TESTS_RUN_ID' in os.environ and os.environ['INTEGRATION_TESTS_RUN_ID']: - instances_dir_name += '_' + shlex.quote(os.environ['INTEGRATION_TESTS_RUN_ID']) + if ( + "INTEGRATION_TESTS_RUN_ID" in os.environ + and os.environ["INTEGRATION_TESTS_RUN_ID"] + ): + instances_dir_name += "_" + shlex.quote( + os.environ["INTEGRATION_TESTS_RUN_ID"] + ) self.instances_dir = p.join(self.base_dir, instances_dir_name) - self.docker_logs_path = p.join(self.instances_dir, 'docker.log') + self.docker_logs_path = p.join(self.instances_dir, "docker.log") self.env_file = p.join(self.instances_dir, DEFAULT_ENV_NAME) self.env_variables = {} self.env_variables["TSAN_OPTIONS"] = "second_deadlock_stack=1" self.env_variables["CLICKHOUSE_WATCHDOG_ENABLE"] = "0" self.up_called = False - custom_dockerd_host = custom_dockerd_host or os.environ.get('CLICKHOUSE_TESTS_DOCKERD_HOST') + custom_dockerd_host = custom_dockerd_host or os.environ.get( + "CLICKHOUSE_TESTS_DOCKERD_HOST" + ) self.docker_api_version = os.environ.get("DOCKER_API_VERSION") self.docker_base_tag = os.environ.get("DOCKER_BASE_TAG", "latest") - self.base_cmd = ['docker-compose'] + self.base_cmd = ["docker-compose"] if custom_dockerd_host: - self.base_cmd += ['--host', custom_dockerd_host] - self.base_cmd += ['--env-file', self.env_file] - self.base_cmd += ['--project-name', self.project_name] + self.base_cmd += ["--host", custom_dockerd_host] + self.base_cmd += ["--env-file", self.env_file] + self.base_cmd += ["--project-name", self.project_name] self.base_zookeeper_cmd = None self.base_mysql_cmd = [] @@ -275,7 +363,7 @@ class ClickHouseCluster: self.with_minio = False self.minio_dir = os.path.join(self.instances_dir, "minio") - self.minio_certs_dir = None # source for certificates + self.minio_certs_dir = None # source for certificates self.minio_host = "minio1" self.minio_ip = None self.minio_bucket = "root" @@ -295,14 +383,16 @@ class ClickHouseCluster: self.hdfs_data_port = 50075 self.hdfs_dir = p.abspath(p.join(self.instances_dir, "hdfs")) self.hdfs_logs_dir = os.path.join(self.hdfs_dir, "logs") - self.hdfs_api = None # also for kerberized hdfs + self.hdfs_api = None # also for kerberized hdfs # available when with_kerberized_hdfs == True self.hdfs_kerberized_host = "kerberizedhdfs1" self.hdfs_kerberized_ip = None self.hdfs_kerberized_name_port = 50070 self.hdfs_kerberized_data_port = 1006 - self.hdfs_kerberized_dir = p.abspath(p.join(self.instances_dir, "kerberized_hdfs")) + self.hdfs_kerberized_dir = p.abspath( + p.join(self.instances_dir, "kerberized_hdfs") + ) self.hdfs_kerberized_logs_dir = os.path.join(self.hdfs_kerberized_dir, "logs") # available when with_kafka == True @@ -316,7 +406,9 @@ class ClickHouseCluster: # available when with_kerberozed_kafka == True self.kerberized_kafka_host = "kerberized_kafka1" self.kerberized_kafka_port = get_free_port() - self.kerberized_kafka_docker_id = self.get_instance_docker_id(self.kerberized_kafka_host) + self.kerberized_kafka_docker_id = self.get_instance_docker_id( + self.kerberized_kafka_host + ) # available when with_mongo == True self.mongo_host = "mongo1" @@ -388,7 +480,6 @@ class ClickHouseCluster: self.mysql_cluster_dir = p.abspath(p.join(self.instances_dir, "mysql")) 
self.mysql_cluster_logs_dir = os.path.join(self.mysql_dir, "logs") - # available when with_mysql8 == True self.mysql8_host = "mysql80" self.mysql8_port = 3306 @@ -404,7 +495,9 @@ class ClickHouseCluster: # available when with_zookeper == True self.use_keeper = True self.zookeeper_port = 2181 - self.keeper_instance_dir_prefix = p.join(p.abspath(self.instances_dir), "keeper") # if use_keeper = True + self.keeper_instance_dir_prefix = p.join( + p.abspath(self.instances_dir), "keeper" + ) # if use_keeper = True self.zookeeper_instance_dir_prefix = p.join(self.instances_dir, "zk") self.zookeeper_dirs_to_create = [] @@ -421,7 +514,11 @@ class ClickHouseCluster: logging.debug(f"CLUSTER INIT base_config_dir:{self.base_config_dir}") def cleanup(self): - if os.environ and 'DISABLE_CLEANUP' in os.environ and os.environ['DISABLE_CLEANUP'] == "1": + if ( + os.environ + and "DISABLE_CLEANUP" in os.environ + and os.environ["DISABLE_CLEANUP"] == "1" + ): logging.warning("Cleanup is disabled") return @@ -429,10 +526,12 @@ class ClickHouseCluster: try: unstopped_containers = self.get_running_containers() if unstopped_containers: - logging.debug(f"Trying to kill unstopped containers: {unstopped_containers}") + logging.debug( + f"Trying to kill unstopped containers: {unstopped_containers}" + ) for id in unstopped_containers: - run_and_check(f'docker kill {id}', shell=True, nothrow=True) - run_and_check(f'docker rm {id}', shell=True, nothrow=True) + run_and_check(f"docker kill {id}", shell=True, nothrow=True) + run_and_check(f"docker rm {id}", shell=True, nothrow=True) unstopped_containers = self.get_running_containers() if unstopped_containers: logging.debug(f"Left unstopped containers: {unstopped_containers}") @@ -465,9 +564,9 @@ class ClickHouseCluster: try: logging.debug("Trying to prune unused volumes...") - result = run_and_check(['docker volume ls | wc -l'], shell=True) - if int(result>0): - run_and_check(['docker', 'volume', 'prune', '-f']) + result = run_and_check(["docker volume ls | wc -l"], shell=True) + if int(result > 0): + run_and_check(["docker", "volume", "prune", "-f"]) logging.debug(f"Volumes pruned: {result}") except: pass @@ -485,7 +584,7 @@ class ClickHouseCluster: def get_client_cmd(self): cmd = self.client_bin_path - if p.basename(cmd) == 'clickhouse': + if p.basename(cmd) == "clickhouse": cmd += " client" return cmd @@ -495,310 +594,605 @@ class ClickHouseCluster: # container_name = project_name + '_' + instance_name + '_1' # We need to have "^/" and "$" in the "--filter name" option below to filter by exact name of the container, see # https://stackoverflow.com/questions/48767760/how-to-make-docker-container-ls-f-name-filter-by-exact-name - filter_name = f'^/{self.project_name}_.*_1$' + filter_name = f"^/{self.project_name}_.*_1$" # We want the command "docker container list" to show only containers' ID and their names, separated by colon. 
- format = '{{.ID}}:{{.Names}}' - containers = run_and_check(f"docker container list --all --filter name='{filter_name}' --format '{format}'", shell=True) - containers = dict(line.split(':', 1) for line in containers.decode('utf8').splitlines()) + format = "{{.ID}}:{{.Names}}" + containers = run_and_check( + f"docker container list --all --filter name='{filter_name}' --format '{format}'", + shell=True, + ) + containers = dict( + line.split(":", 1) for line in containers.decode("utf8").splitlines() + ) return containers - def copy_file_from_container_to_container(self, src_node, src_path, dst_node, dst_path): + def copy_file_from_container_to_container( + self, src_node, src_path, dst_node, dst_path + ): fname = os.path.basename(src_path) - run_and_check([f"docker cp {src_node.docker_id}:{src_path} {self.instances_dir}"], shell=True) - run_and_check([f"docker cp {self.instances_dir}/{fname} {dst_node.docker_id}:{dst_path}"], shell=True) + run_and_check( + [f"docker cp {src_node.docker_id}:{src_path} {self.instances_dir}"], + shell=True, + ) + run_and_check( + [f"docker cp {self.instances_dir}/{fname} {dst_node.docker_id}:{dst_path}"], + shell=True, + ) - def setup_zookeeper_secure_cmd(self, instance, env_variables, docker_compose_yml_dir): - logging.debug('Setup ZooKeeper Secure') - zookeeper_docker_compose_path = p.join(docker_compose_yml_dir, 'docker_compose_zookeeper_secure.yml') - env_variables['ZOO_SECURE_CLIENT_PORT'] = str(self.zookeeper_secure_port) - env_variables['ZK_FS'] = 'bind' + def setup_zookeeper_secure_cmd( + self, instance, env_variables, docker_compose_yml_dir + ): + logging.debug("Setup ZooKeeper Secure") + zookeeper_docker_compose_path = p.join( + docker_compose_yml_dir, "docker_compose_zookeeper_secure.yml" + ) + env_variables["ZOO_SECURE_CLIENT_PORT"] = str(self.zookeeper_secure_port) + env_variables["ZK_FS"] = "bind" for i in range(1, 4): - zk_data_path = os.path.join(self.zookeeper_instance_dir_prefix + str(i), "data") - zk_log_path = os.path.join(self.zookeeper_instance_dir_prefix + str(i), "log") - env_variables['ZK_DATA' + str(i)] = zk_data_path - env_variables['ZK_DATA_LOG' + str(i)] = zk_log_path + zk_data_path = os.path.join( + self.zookeeper_instance_dir_prefix + str(i), "data" + ) + zk_log_path = os.path.join( + self.zookeeper_instance_dir_prefix + str(i), "log" + ) + env_variables["ZK_DATA" + str(i)] = zk_data_path + env_variables["ZK_DATA_LOG" + str(i)] = zk_log_path self.zookeeper_dirs_to_create += [zk_data_path, zk_log_path] logging.debug(f"DEBUG ZK: {self.zookeeper_dirs_to_create}") self.with_zookeeper_secure = True - self.base_cmd.extend(['--file', zookeeper_docker_compose_path]) - self.base_zookeeper_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', zookeeper_docker_compose_path] + self.base_cmd.extend(["--file", zookeeper_docker_compose_path]) + self.base_zookeeper_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + zookeeper_docker_compose_path, + ] return self.base_zookeeper_cmd def setup_zookeeper_cmd(self, instance, env_variables, docker_compose_yml_dir): - logging.debug('Setup ZooKeeper') - zookeeper_docker_compose_path = p.join(docker_compose_yml_dir, 'docker_compose_zookeeper.yml') + logging.debug("Setup ZooKeeper") + zookeeper_docker_compose_path = p.join( + docker_compose_yml_dir, "docker_compose_zookeeper.yml" + ) - env_variables['ZK_FS'] = 'bind' + env_variables["ZK_FS"] = "bind" for i in range(1, 4): - zk_data_path 
= os.path.join(self.zookeeper_instance_dir_prefix + str(i), "data") - zk_log_path = os.path.join(self.zookeeper_instance_dir_prefix + str(i), "log") - env_variables['ZK_DATA' + str(i)] = zk_data_path - env_variables['ZK_DATA_LOG' + str(i)] = zk_log_path + zk_data_path = os.path.join( + self.zookeeper_instance_dir_prefix + str(i), "data" + ) + zk_log_path = os.path.join( + self.zookeeper_instance_dir_prefix + str(i), "log" + ) + env_variables["ZK_DATA" + str(i)] = zk_data_path + env_variables["ZK_DATA_LOG" + str(i)] = zk_log_path self.zookeeper_dirs_to_create += [zk_data_path, zk_log_path] logging.debug(f"DEBUG ZK: {self.zookeeper_dirs_to_create}") self.with_zookeeper = True - self.base_cmd.extend(['--file', zookeeper_docker_compose_path]) - self.base_zookeeper_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', zookeeper_docker_compose_path] + self.base_cmd.extend(["--file", zookeeper_docker_compose_path]) + self.base_zookeeper_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + zookeeper_docker_compose_path, + ] return self.base_zookeeper_cmd def setup_keeper_cmd(self, instance, env_variables, docker_compose_yml_dir): - logging.debug('Setup Keeper') - keeper_docker_compose_path = p.join(docker_compose_yml_dir, 'docker_compose_keeper.yml') + logging.debug("Setup Keeper") + keeper_docker_compose_path = p.join( + docker_compose_yml_dir, "docker_compose_keeper.yml" + ) binary_path = self.server_bin_path - if binary_path.endswith('-server'): - binary_path = binary_path[:-len('-server')] + if binary_path.endswith("-server"): + binary_path = binary_path[: -len("-server")] - env_variables['keeper_binary'] = binary_path - env_variables['image'] = "clickhouse/integration-test:" + self.docker_base_tag - env_variables['user'] = str(os.getuid()) - env_variables['keeper_fs'] = 'bind' + env_variables["keeper_binary"] = binary_path + env_variables["image"] = "clickhouse/integration-test:" + self.docker_base_tag + env_variables["user"] = str(os.getuid()) + env_variables["keeper_fs"] = "bind" for i in range(1, 4): keeper_instance_dir = self.keeper_instance_dir_prefix + f"{i}" logs_dir = os.path.join(keeper_instance_dir, "log") configs_dir = os.path.join(keeper_instance_dir, "config") coordination_dir = os.path.join(keeper_instance_dir, "coordination") - env_variables[f'keeper_logs_dir{i}'] = logs_dir - env_variables[f'keeper_config_dir{i}'] = configs_dir - env_variables[f'keeper_db_dir{i}'] = coordination_dir + env_variables[f"keeper_logs_dir{i}"] = logs_dir + env_variables[f"keeper_config_dir{i}"] = configs_dir + env_variables[f"keeper_db_dir{i}"] = coordination_dir self.zookeeper_dirs_to_create += [logs_dir, configs_dir, coordination_dir] logging.debug(f"DEBUG KEEPER: {self.zookeeper_dirs_to_create}") - self.with_zookeeper = True - self.base_cmd.extend(['--file', keeper_docker_compose_path]) - self.base_zookeeper_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', keeper_docker_compose_path] + self.base_cmd.extend(["--file", keeper_docker_compose_path]) + self.base_zookeeper_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + keeper_docker_compose_path, + ] return self.base_zookeeper_cmd def setup_mysql_client_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_mysql_client = True - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 
'docker_compose_mysql_client.yml')]) - self.base_mysql_client_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_mysql_client.yml')] + self.base_cmd.extend( + [ + "--file", + p.join(docker_compose_yml_dir, "docker_compose_mysql_client.yml"), + ] + ) + self.base_mysql_client_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_mysql_client.yml"), + ] return self.base_mysql_client_cmd def setup_mysql_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_mysql = True - env_variables['MYSQL_HOST'] = self.mysql_host - env_variables['MYSQL_PORT'] = str(self.mysql_port) - env_variables['MYSQL_ROOT_HOST'] = '%' - env_variables['MYSQL_LOGS'] = self.mysql_logs_dir - env_variables['MYSQL_LOGS_FS'] = "bind" - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_mysql.yml')]) - self.base_mysql_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_mysql.yml')] + env_variables["MYSQL_HOST"] = self.mysql_host + env_variables["MYSQL_PORT"] = str(self.mysql_port) + env_variables["MYSQL_ROOT_HOST"] = "%" + env_variables["MYSQL_LOGS"] = self.mysql_logs_dir + env_variables["MYSQL_LOGS_FS"] = "bind" + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_mysql.yml")] + ) + self.base_mysql_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_mysql.yml"), + ] return self.base_mysql_cmd def setup_mysql8_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_mysql8 = True - env_variables['MYSQL8_HOST'] = self.mysql8_host - env_variables['MYSQL8_PORT'] = str(self.mysql8_port) - env_variables['MYSQL8_ROOT_HOST'] = '%' - env_variables['MYSQL8_LOGS'] = self.mysql8_logs_dir - env_variables['MYSQL8_LOGS_FS'] = "bind" - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_mysql_8_0.yml')]) - self.base_mysql8_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_mysql_8_0.yml')] + env_variables["MYSQL8_HOST"] = self.mysql8_host + env_variables["MYSQL8_PORT"] = str(self.mysql8_port) + env_variables["MYSQL8_ROOT_HOST"] = "%" + env_variables["MYSQL8_LOGS"] = self.mysql8_logs_dir + env_variables["MYSQL8_LOGS_FS"] = "bind" + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_mysql_8_0.yml")] + ) + self.base_mysql8_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_mysql_8_0.yml"), + ] return self.base_mysql8_cmd def setup_mysql_cluster_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_mysql_cluster = True - env_variables['MYSQL_CLUSTER_PORT'] = str(self.mysql_port) - env_variables['MYSQL_CLUSTER_ROOT_HOST'] = '%' - env_variables['MYSQL_CLUSTER_LOGS'] = self.mysql_cluster_logs_dir - env_variables['MYSQL_CLUSTER_LOGS_FS'] = "bind" + env_variables["MYSQL_CLUSTER_PORT"] = str(self.mysql_port) + env_variables["MYSQL_CLUSTER_ROOT_HOST"] = "%" + env_variables["MYSQL_CLUSTER_LOGS"] = self.mysql_cluster_logs_dir + 
env_variables["MYSQL_CLUSTER_LOGS_FS"] = "bind" - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_mysql_cluster.yml')]) - self.base_mysql_cluster_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_mysql_cluster.yml')] + self.base_cmd.extend( + [ + "--file", + p.join(docker_compose_yml_dir, "docker_compose_mysql_cluster.yml"), + ] + ) + self.base_mysql_cluster_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_mysql_cluster.yml"), + ] return self.base_mysql_cluster_cmd def setup_postgres_cmd(self, instance, env_variables, docker_compose_yml_dir): - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_postgres.yml')]) - env_variables['POSTGRES_PORT'] = str(self.postgres_port) - env_variables['POSTGRES_DIR'] = self.postgres_logs_dir - env_variables['POSTGRES_LOGS_FS'] = "bind" + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_postgres.yml")] + ) + env_variables["POSTGRES_PORT"] = str(self.postgres_port) + env_variables["POSTGRES_DIR"] = self.postgres_logs_dir + env_variables["POSTGRES_LOGS_FS"] = "bind" self.with_postgres = True - self.base_postgres_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_postgres.yml')] + self.base_postgres_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_postgres.yml"), + ] return self.base_postgres_cmd - def setup_postgres_cluster_cmd(self, instance, env_variables, docker_compose_yml_dir): + def setup_postgres_cluster_cmd( + self, instance, env_variables, docker_compose_yml_dir + ): self.with_postgres_cluster = True - env_variables['POSTGRES_PORT'] = str(self.postgres_port) - env_variables['POSTGRES2_DIR'] = self.postgres2_logs_dir - env_variables['POSTGRES3_DIR'] = self.postgres3_logs_dir - env_variables['POSTGRES4_DIR'] = self.postgres4_logs_dir - env_variables['POSTGRES_LOGS_FS'] = "bind" - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_postgres_cluster.yml')]) - self.base_postgres_cluster_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_postgres_cluster.yml')] + env_variables["POSTGRES_PORT"] = str(self.postgres_port) + env_variables["POSTGRES2_DIR"] = self.postgres2_logs_dir + env_variables["POSTGRES3_DIR"] = self.postgres3_logs_dir + env_variables["POSTGRES4_DIR"] = self.postgres4_logs_dir + env_variables["POSTGRES_LOGS_FS"] = "bind" + self.base_cmd.extend( + [ + "--file", + p.join(docker_compose_yml_dir, "docker_compose_postgres_cluster.yml"), + ] + ) + self.base_postgres_cluster_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_postgres_cluster.yml"), + ] def setup_hdfs_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_hdfs = True - env_variables['HDFS_HOST'] = self.hdfs_host - env_variables['HDFS_NAME_PORT'] = str(self.hdfs_name_port) - env_variables['HDFS_DATA_PORT'] = str(self.hdfs_data_port) - env_variables['HDFS_LOGS'] = self.hdfs_logs_dir - 
env_variables['HDFS_FS'] = "bind" - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_hdfs.yml')]) - self.base_hdfs_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_hdfs.yml')] + env_variables["HDFS_HOST"] = self.hdfs_host + env_variables["HDFS_NAME_PORT"] = str(self.hdfs_name_port) + env_variables["HDFS_DATA_PORT"] = str(self.hdfs_data_port) + env_variables["HDFS_LOGS"] = self.hdfs_logs_dir + env_variables["HDFS_FS"] = "bind" + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_hdfs.yml")] + ) + self.base_hdfs_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_hdfs.yml"), + ] logging.debug("HDFS BASE CMD:{self.base_hdfs_cmd)}") return self.base_hdfs_cmd - def setup_kerberized_hdfs_cmd(self, instance, env_variables, docker_compose_yml_dir): + def setup_kerberized_hdfs_cmd( + self, instance, env_variables, docker_compose_yml_dir + ): self.with_kerberized_hdfs = True - env_variables['KERBERIZED_HDFS_HOST'] = self.hdfs_kerberized_host - env_variables['KERBERIZED_HDFS_NAME_PORT'] = str(self.hdfs_kerberized_name_port) - env_variables['KERBERIZED_HDFS_DATA_PORT'] = str(self.hdfs_kerberized_data_port) - env_variables['KERBERIZED_HDFS_LOGS'] = self.hdfs_kerberized_logs_dir - env_variables['KERBERIZED_HDFS_FS'] = "bind" - env_variables['KERBERIZED_HDFS_DIR'] = instance.path + '/' - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_kerberized_hdfs.yml')]) - self.base_kerberized_hdfs_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_kerberized_hdfs.yml')] + env_variables["KERBERIZED_HDFS_HOST"] = self.hdfs_kerberized_host + env_variables["KERBERIZED_HDFS_NAME_PORT"] = str(self.hdfs_kerberized_name_port) + env_variables["KERBERIZED_HDFS_DATA_PORT"] = str(self.hdfs_kerberized_data_port) + env_variables["KERBERIZED_HDFS_LOGS"] = self.hdfs_kerberized_logs_dir + env_variables["KERBERIZED_HDFS_FS"] = "bind" + env_variables["KERBERIZED_HDFS_DIR"] = instance.path + "/" + self.base_cmd.extend( + [ + "--file", + p.join(docker_compose_yml_dir, "docker_compose_kerberized_hdfs.yml"), + ] + ) + self.base_kerberized_hdfs_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_kerberized_hdfs.yml"), + ] return self.base_kerberized_hdfs_cmd def setup_kafka_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_kafka = True - env_variables['KAFKA_HOST'] = self.kafka_host - env_variables['KAFKA_EXTERNAL_PORT'] = str(self.kafka_port) - env_variables['SCHEMA_REGISTRY_EXTERNAL_PORT'] = str(self.schema_registry_port) - env_variables['SCHEMA_REGISTRY_INTERNAL_PORT'] = "8081" - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_kafka.yml')]) - self.base_kafka_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_kafka.yml')] + env_variables["KAFKA_HOST"] = self.kafka_host + env_variables["KAFKA_EXTERNAL_PORT"] = str(self.kafka_port) + env_variables["SCHEMA_REGISTRY_EXTERNAL_PORT"] = str(self.schema_registry_port) + env_variables["SCHEMA_REGISTRY_INTERNAL_PORT"] = "8081" 
+ self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_kafka.yml")] + ) + self.base_kafka_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_kafka.yml"), + ] return self.base_kafka_cmd - def setup_kerberized_kafka_cmd(self, instance, env_variables, docker_compose_yml_dir): + def setup_kerberized_kafka_cmd( + self, instance, env_variables, docker_compose_yml_dir + ): self.with_kerberized_kafka = True - env_variables['KERBERIZED_KAFKA_DIR'] = instance.path + '/' - env_variables['KERBERIZED_KAFKA_HOST'] = self.kerberized_kafka_host - env_variables['KERBERIZED_KAFKA_EXTERNAL_PORT'] = str(self.kerberized_kafka_port) - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_kerberized_kafka.yml')]) - self.base_kerberized_kafka_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_kerberized_kafka.yml')] + env_variables["KERBERIZED_KAFKA_DIR"] = instance.path + "/" + env_variables["KERBERIZED_KAFKA_HOST"] = self.kerberized_kafka_host + env_variables["KERBERIZED_KAFKA_EXTERNAL_PORT"] = str( + self.kerberized_kafka_port + ) + self.base_cmd.extend( + [ + "--file", + p.join(docker_compose_yml_dir, "docker_compose_kerberized_kafka.yml"), + ] + ) + self.base_kerberized_kafka_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_kerberized_kafka.yml"), + ] return self.base_kerberized_kafka_cmd def setup_redis_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_redis = True - env_variables['REDIS_HOST'] = self.redis_host - env_variables['REDIS_EXTERNAL_PORT'] = str(self.redis_port) - env_variables['REDIS_INTERNAL_PORT'] = "6379" + env_variables["REDIS_HOST"] = self.redis_host + env_variables["REDIS_EXTERNAL_PORT"] = str(self.redis_port) + env_variables["REDIS_INTERNAL_PORT"] = "6379" - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_redis.yml')]) - self.base_redis_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_redis.yml')] + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_redis.yml")] + ) + self.base_redis_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_redis.yml"), + ] return self.base_redis_cmd def setup_rabbitmq_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_rabbitmq = True - env_variables['RABBITMQ_HOST'] = self.rabbitmq_host - env_variables['RABBITMQ_PORT'] = str(self.rabbitmq_port) - env_variables['RABBITMQ_LOGS'] = self.rabbitmq_logs_dir - env_variables['RABBITMQ_LOGS_FS'] = "bind" + env_variables["RABBITMQ_HOST"] = self.rabbitmq_host + env_variables["RABBITMQ_PORT"] = str(self.rabbitmq_port) + env_variables["RABBITMQ_LOGS"] = self.rabbitmq_logs_dir + env_variables["RABBITMQ_LOGS_FS"] = "bind" - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_rabbitmq.yml')]) - self.base_rabbitmq_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_rabbitmq.yml')] + 
self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_rabbitmq.yml")] + ) + self.base_rabbitmq_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_rabbitmq.yml"), + ] return self.base_rabbitmq_cmd def setup_mongo_secure_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_mongo = self.with_mongo_secure = True - env_variables['MONGO_HOST'] = self.mongo_host - env_variables['MONGO_EXTERNAL_PORT'] = str(self.mongo_port) - env_variables['MONGO_INTERNAL_PORT'] = "27017" - env_variables['MONGO_CONFIG_PATH'] = HELPERS_DIR - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_mongo_secure.yml')]) - self.base_mongo_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_mongo_secure.yml')] + env_variables["MONGO_HOST"] = self.mongo_host + env_variables["MONGO_EXTERNAL_PORT"] = str(self.mongo_port) + env_variables["MONGO_INTERNAL_PORT"] = "27017" + env_variables["MONGO_CONFIG_PATH"] = HELPERS_DIR + self.base_cmd.extend( + [ + "--file", + p.join(docker_compose_yml_dir, "docker_compose_mongo_secure.yml"), + ] + ) + self.base_mongo_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_mongo_secure.yml"), + ] return self.base_mongo_cmd def setup_mongo_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_mongo = True - env_variables['MONGO_HOST'] = self.mongo_host - env_variables['MONGO_EXTERNAL_PORT'] = str(self.mongo_port) - env_variables['MONGO_INTERNAL_PORT'] = "27017" - env_variables['MONGO_NO_CRED_EXTERNAL_PORT'] = str(self.mongo_no_cred_port) - env_variables['MONGO_NO_CRED_INTERNAL_PORT'] = "27017" - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_mongo.yml')]) - self.base_mongo_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_mongo.yml')] + env_variables["MONGO_HOST"] = self.mongo_host + env_variables["MONGO_EXTERNAL_PORT"] = str(self.mongo_port) + env_variables["MONGO_INTERNAL_PORT"] = "27017" + env_variables["MONGO_NO_CRED_EXTERNAL_PORT"] = str(self.mongo_no_cred_port) + env_variables["MONGO_NO_CRED_INTERNAL_PORT"] = "27017" + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_mongo.yml")] + ) + self.base_mongo_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_mongo.yml"), + ] return self.base_mongo_cmd def setup_minio_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_minio = True cert_d = p.join(self.minio_dir, "certs") - env_variables['MINIO_CERTS_DIR'] = cert_d - env_variables['MINIO_PORT'] = str(self.minio_port) - env_variables['SSL_CERT_FILE'] = p.join(self.base_dir, cert_d, 'public.crt') + env_variables["MINIO_CERTS_DIR"] = cert_d + env_variables["MINIO_PORT"] = str(self.minio_port) + env_variables["SSL_CERT_FILE"] = p.join(self.base_dir, cert_d, "public.crt") - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_minio.yml')]) - self.base_minio_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', 
p.join(docker_compose_yml_dir, 'docker_compose_minio.yml')] + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_minio.yml")] + ) + self.base_minio_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_minio.yml"), + ] return self.base_minio_cmd def setup_azurite_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_azurite = True - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_azurite.yml')]) - self.base_azurite_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_azurite.yml')] + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_azurite.yml")] + ) + self.base_azurite_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_azurite.yml"), + ] return self.base_azurite_cmd def setup_cassandra_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_cassandra = True - env_variables['CASSANDRA_PORT'] = str(self.cassandra_port) - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_cassandra.yml')]) - self.base_cassandra_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_cassandra.yml')] + env_variables["CASSANDRA_PORT"] = str(self.cassandra_port) + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_cassandra.yml")] + ) + self.base_cassandra_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_cassandra.yml"), + ] return self.base_cassandra_cmd def setup_jdbc_bridge_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_jdbc_bridge = True - env_variables['JDBC_DRIVER_LOGS'] = self.jdbc_driver_logs_dir - env_variables['JDBC_DRIVER_FS'] = "bind" - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_jdbc_bridge.yml')]) - self.base_jdbc_bridge_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_jdbc_bridge.yml')] + env_variables["JDBC_DRIVER_LOGS"] = self.jdbc_driver_logs_dir + env_variables["JDBC_DRIVER_FS"] = "bind" + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_jdbc_bridge.yml")] + ) + self.base_jdbc_bridge_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_jdbc_bridge.yml"), + ] return self.base_jdbc_bridge_cmd def setup_nginx_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_nginx = True - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_nginx.yml')]) - self.base_nginx_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_nginx.yml')] + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_nginx.yml")] + ) + self.base_nginx_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + 
self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_nginx.yml"), + ] return self.base_nginx_cmd def setup_hive(self, instance, env_variables, docker_compose_yml_dir): self.with_hive = True - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_hive.yml')]) - self.base_hive_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name, - '--file', p.join(docker_compose_yml_dir, 'docker_compose_hive.yml')] + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_hive.yml")] + ) + self.base_hive_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_hive.yml"), + ] return self.base_hive_cmd - def add_instance(self, name, base_config_dir=None, main_configs=None, user_configs=None, dictionaries=None, - macros=None, with_zookeeper=False, with_zookeeper_secure=False, - with_mysql_client=False, with_mysql=False, with_mysql8=False, with_mysql_cluster=False, - with_kafka=False, with_kerberized_kafka=False, with_rabbitmq=False, clickhouse_path_dir=None, - with_odbc_drivers=False, with_postgres=False, with_postgres_cluster=False, with_hdfs=False, - with_kerberized_hdfs=False, with_mongo=False, with_mongo_secure=False, with_nginx=False, - with_redis=False, with_minio=False, with_azurite=False, with_cassandra=False, with_jdbc_bridge=False, with_hive=False, - hostname=None, env_variables=None, image="clickhouse/integration-test", tag=None, - stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, external_dirs=None, tmpfs=None, - zookeeper_docker_compose_path=None, minio_certs_dir=None, use_keeper=True, - main_config_name="config.xml", users_config_name="users.xml", copy_common_configs=True, config_root_name="clickhouse", extra_configs=[]) -> 'ClickHouseInstance': + def add_instance( + self, + name, + base_config_dir=None, + main_configs=None, + user_configs=None, + dictionaries=None, + macros=None, + with_zookeeper=False, + with_zookeeper_secure=False, + with_mysql_client=False, + with_mysql=False, + with_mysql8=False, + with_mysql_cluster=False, + with_kafka=False, + with_kerberized_kafka=False, + with_rabbitmq=False, + clickhouse_path_dir=None, + with_odbc_drivers=False, + with_postgres=False, + with_postgres_cluster=False, + with_hdfs=False, + with_kerberized_hdfs=False, + with_mongo=False, + with_mongo_secure=False, + with_nginx=False, + with_redis=False, + with_minio=False, + with_azurite=False, + with_cassandra=False, + with_jdbc_bridge=False, + with_hive=False, + hostname=None, + env_variables=None, + image="clickhouse/integration-test", + tag=None, + stay_alive=False, + ipv4_address=None, + ipv6_address=None, + with_installed_binary=False, + external_dirs=None, + tmpfs=None, + zookeeper_docker_compose_path=None, + minio_certs_dir=None, + use_keeper=True, + main_config_name="config.xml", + users_config_name="users.xml", + copy_common_configs=True, + config_root_name="clickhouse", + extra_configs=[], + ) -> "ClickHouseInstance": """Add an instance to the cluster. @@ -812,10 +1206,13 @@ class ClickHouseCluster: """ if self.is_up: - raise Exception("Can\'t add instance %s: cluster is already up!" % name) + raise Exception("Can't add instance %s: cluster is already up!" % name) if name in self.instances: - raise Exception("Can\'t add instance `%s': there is already an instance with the same name!" 
% name) + raise Exception( + "Can't add instance `%s': there is already an instance with the same name!" + % name + ) if tag is None: tag = self.docker_base_tag @@ -826,13 +1223,17 @@ class ClickHouseCluster: # Code coverage files will be placed in database directory # (affect only WITH_COVERAGE=1 build) - env_variables['LLVM_PROFILE_FILE'] = '/var/lib/clickhouse/server_%h_%p_%m.profraw' + env_variables[ + "LLVM_PROFILE_FILE" + ] = "/var/lib/clickhouse/server_%h_%p_%m.profraw" instance = ClickHouseInstance( cluster=self, base_path=self.base_dir, name=name, - base_config_dir=base_config_dir if base_config_dir else self.base_config_dir, + base_config_dir=base_config_dir + if base_config_dir + else self.base_config_dir, custom_main_configs=main_configs or [], custom_user_configs=user_configs or [], custom_dictionaries=dictionaries or [], @@ -854,7 +1255,7 @@ class ClickHouseCluster: with_azurite=with_azurite, with_cassandra=with_cassandra, with_jdbc_bridge=with_jdbc_bridge, - with_hive = with_hive, + with_hive=with_hive, server_bin_path=self.server_bin_path, odbc_bridge_bin_path=self.odbc_bridge_bin_path, library_bridge_bin_path=self.library_bridge_bin_path, @@ -876,89 +1277,164 @@ class ClickHouseCluster: external_dirs=external_dirs, tmpfs=tmpfs or [], config_root_name=config_root_name, - extra_configs = extra_configs) + extra_configs=extra_configs, + ) docker_compose_yml_dir = get_docker_compose_path() self.instances[name] = instance if ipv4_address is not None or ipv6_address is not None: self.with_net_trics = True - self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_net.yml')]) + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_net.yml")] + ) - self.base_cmd.extend(['--file', instance.docker_compose_path]) + self.base_cmd.extend(["--file", instance.docker_compose_path]) cmds = [] if with_zookeeper_secure and not self.with_zookeeper_secure: - cmds.append(self.setup_zookeeper_secure_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_zookeeper_secure_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if with_zookeeper and not self.with_zookeeper: if self.use_keeper: - cmds.append(self.setup_keeper_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_keeper_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) else: - cmds.append(self.setup_zookeeper_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_zookeeper_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if with_mysql_client and not self.with_mysql_client: - cmds.append(self.setup_mysql_client_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_mysql_client_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if with_mysql and not self.with_mysql: - cmds.append(self.setup_mysql_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_mysql_cmd(instance, env_variables, docker_compose_yml_dir) + ) if with_mysql8 and not self.with_mysql8: - cmds.append(self.setup_mysql8_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_mysql8_cmd(instance, env_variables, docker_compose_yml_dir) + ) if with_mysql_cluster and not self.with_mysql_cluster: - cmds.append(self.setup_mysql_cluster_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_mysql_cluster_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if with_postgres and not 
self.with_postgres: - cmds.append(self.setup_postgres_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_postgres_cmd(instance, env_variables, docker_compose_yml_dir) + ) if with_postgres_cluster and not self.with_postgres_cluster: - cmds.append(self.setup_postgres_cluster_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_postgres_cluster_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if with_odbc_drivers and not self.with_odbc_drivers: self.with_odbc_drivers = True if not self.with_mysql: - cmds.append(self.setup_mysql_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_mysql_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if not self.with_postgres: - cmds.append(self.setup_postgres_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_postgres_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if with_kafka and not self.with_kafka: - cmds.append(self.setup_kafka_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_kafka_cmd(instance, env_variables, docker_compose_yml_dir) + ) if with_kerberized_kafka and not self.with_kerberized_kafka: - cmds.append(self.setup_kerberized_kafka_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_kerberized_kafka_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if with_rabbitmq and not self.with_rabbitmq: - cmds.append(self.setup_rabbitmq_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_rabbitmq_cmd(instance, env_variables, docker_compose_yml_dir) + ) if with_nginx and not self.with_nginx: - cmds.append(self.setup_nginx_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_nginx_cmd(instance, env_variables, docker_compose_yml_dir) + ) if with_hdfs and not self.with_hdfs: - cmds.append(self.setup_hdfs_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_hdfs_cmd(instance, env_variables, docker_compose_yml_dir) + ) if with_kerberized_hdfs and not self.with_kerberized_hdfs: - cmds.append(self.setup_kerberized_hdfs_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_kerberized_hdfs_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) - if (with_mongo or with_mongo_secure) and not (self.with_mongo or self.with_mongo_secure): + if (with_mongo or with_mongo_secure) and not ( + self.with_mongo or self.with_mongo_secure + ): if with_mongo_secure: - cmds.append(self.setup_mongo_secure_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_mongo_secure_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) else: - cmds.append(self.setup_mongo_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_mongo_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if self.with_net_trics: for cmd in cmds: - cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_net.yml')]) + cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_net.yml")] + ) if with_redis and not self.with_redis: - cmds.append(self.setup_redis_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_redis_cmd(instance, env_variables, docker_compose_yml_dir) + ) if with_minio and not self.with_minio: - cmds.append(self.setup_minio_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + 
self.setup_minio_cmd(instance, env_variables, docker_compose_yml_dir) + ) if with_azurite and not self.with_azurite: - cmds.append(self.setup_azurite_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_azurite_cmd(instance, env_variables, docker_compose_yml_dir) + ) if minio_certs_dir is not None: if self.minio_certs_dir is None: @@ -967,31 +1443,49 @@ class ClickHouseCluster: raise Exception("Overwriting minio certs dir") if with_cassandra and not self.with_cassandra: - cmds.append(self.setup_cassandra_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_cassandra_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if with_jdbc_bridge and not self.with_jdbc_bridge: - cmds.append(self.setup_jdbc_bridge_cmd(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_jdbc_bridge_cmd( + instance, env_variables, docker_compose_yml_dir + ) + ) if with_hive: - cmds.append(self.setup_hive(instance, env_variables, docker_compose_yml_dir)) + cmds.append( + self.setup_hive(instance, env_variables, docker_compose_yml_dir) + ) - logging.debug("Cluster name:{} project_name:{}. Added instance name:{} tag:{} base_cmd:{} docker_compose_yml_dir:{}".format( - self.name, self.project_name, name, tag, self.base_cmd, docker_compose_yml_dir)) + logging.debug( + "Cluster name:{} project_name:{}. Added instance name:{} tag:{} base_cmd:{} docker_compose_yml_dir:{}".format( + self.name, + self.project_name, + name, + tag, + self.base_cmd, + docker_compose_yml_dir, + ) + ) return instance def get_instance_docker_id(self, instance_name): # According to how docker-compose names containers. - return self.project_name + '_' + instance_name + '_1' + return self.project_name + "_" + instance_name + "_1" def _replace(self, path, what, to): - with open(path, 'r') as p: + with open(path, "r") as p: data = p.read() data = data.replace(what, to) - with open(path, 'w') as p: + with open(path, "w") as p: p.write(data) def restart_instance_with_ip_change(self, node, new_ip): - if '::' in new_ip: + if "::" in new_ip: if node.ipv6_address is None: raise Exception("You should specity ipv6_address in add_node method") self._replace(node.docker_compose_path, node.ipv6_address, new_ip) @@ -1003,7 +1497,9 @@ class ClickHouseCluster: node.ipv4_address = new_ip run_and_check(self.base_cmd + ["stop", node.name]) run_and_check(self.base_cmd + ["rm", "--force", "--stop", node.name]) - run_and_check(self.base_cmd + ["up", "--force-recreate", "--no-deps", "-d", node.name]) + run_and_check( + self.base_cmd + ["up", "--force-recreate", "--no-deps", "-d", node.name] + ) node.ip_address = self.get_instance_ip(node.name) node.client = Client(node.ip_address, command=self.client_bin_path) @@ -1024,9 +1520,11 @@ class ClickHouseCluster: logging.debug("get_instance_ip instance_name={}".format(instance_name)) docker_id = self.get_instance_docker_id(instance_name) # for cont in self.docker_client.containers.list(): - # logging.debug("CONTAINERS LIST: ID={} NAME={} STATUS={}".format(cont.id, cont.name, cont.status)) + # logging.debug("CONTAINERS LIST: ID={} NAME={} STATUS={}".format(cont.id, cont.name, cont.status)) handle = self.docker_client.containers.get(docker_id) - return list(handle.attrs['NetworkSettings']['Networks'].values())[0]['IPAddress'] + return list(handle.attrs["NetworkSettings"]["Networks"].values())[0][ + "IPAddress" + ] def get_container_id(self, instance_name): return self.get_instance_docker_id(instance_name) @@ -1038,31 +1536,40 @@ class 
ClickHouseCluster: container_id = self.get_container_id(instance_name) return self.docker_client.api.logs(container_id).decode() - def exec_in_container(self, container_id, cmd, detach=False, nothrow=False, use_cli=True, **kwargs): + def exec_in_container( + self, container_id, cmd, detach=False, nothrow=False, use_cli=True, **kwargs + ): if use_cli: - logging.debug(f"run container_id:{container_id} detach:{detach} nothrow:{nothrow} cmd: {cmd}") + logging.debug( + f"run container_id:{container_id} detach:{detach} nothrow:{nothrow} cmd: {cmd}" + ) exec_cmd = ["docker", "exec"] - if 'user' in kwargs: - exec_cmd += ['-u', kwargs['user']] - result = subprocess_check_call(exec_cmd + [container_id] + cmd, detach=detach, nothrow=nothrow) + if "user" in kwargs: + exec_cmd += ["-u", kwargs["user"]] + result = subprocess_check_call( + exec_cmd + [container_id] + cmd, detach=detach, nothrow=nothrow + ) return result else: exec_id = self.docker_client.api.exec_create(container_id, cmd, **kwargs) output = self.docker_client.api.exec_start(exec_id, detach=detach) - exit_code = self.docker_client.api.exec_inspect(exec_id)['ExitCode'] + exit_code = self.docker_client.api.exec_inspect(exec_id)["ExitCode"] if exit_code: container_info = self.docker_client.api.inspect_container(container_id) - image_id = container_info.get('Image') + image_id = container_info.get("Image") image_info = self.docker_client.api.inspect_image(image_id) logging.debug(("Command failed in container {}: ".format(container_id))) pprint.pprint(container_info) logging.debug("") - logging.debug(("Container {} uses image {}: ".format(container_id, image_id))) + logging.debug( + ("Container {} uses image {}: ".format(container_id, image_id)) + ) pprint.pprint(image_info) logging.debug("") - message = 'Cmd "{}" failed in container {}. Return code {}. Output: {}'.format(' '.join(cmd), container_id, - exit_code, output) + message = 'Cmd "{}" failed in container {}. Return code {}. 
Output: {}'.format( + " ".join(cmd), container_id, exit_code, output + ) if nothrow: logging.debug(message) else: @@ -1076,12 +1583,20 @@ class ClickHouseCluster: data = fdata.read() encodedBytes = base64.b64encode(data.encode("utf-8")) encodedStr = str(encodedBytes, "utf-8") - self.exec_in_container(container_id, - ["bash", "-c", "echo {} | base64 --decode > {}".format(encodedStr, dest_path)], - user='root') + self.exec_in_container( + container_id, + [ + "bash", + "-c", + "echo {} | base64 --decode > {}".format(encodedStr, dest_path), + ], + user="root", + ) - def wait_for_url(self, url="http://localhost:8123/ping", conn_timeout=2, interval=2, timeout=60): - if not url.startswith('http'): + def wait_for_url( + self, url="http://localhost:8123/ping", conn_timeout=2, interval=2, timeout=60 + ): + if not url.startswith("http"): url = "http://" + url if interval <= 0: interval = 2 @@ -1093,29 +1608,44 @@ class ClickHouseCluster: start = time.time() while time.time() - start < timeout: try: - requests.get(url, allow_redirects=True, timeout=conn_timeout, verify=False).raise_for_status() - logging.debug("{} is available after {} seconds".format(url, time.time() - start)) + requests.get( + url, allow_redirects=True, timeout=conn_timeout, verify=False + ).raise_for_status() + logging.debug( + "{} is available after {} seconds".format(url, time.time() - start) + ) return except Exception as ex: - logging.debug("{} Attempt {} failed, retrying in {} seconds".format(ex, attempts, interval)) + logging.debug( + "{} Attempt {} failed, retrying in {} seconds".format( + ex, attempts, interval + ) + ) attempts += 1 errors += [str(ex)] time.sleep(interval) - run_and_check(['docker', 'ps', '--all']) + run_and_check(["docker", "ps", "--all"]) logging.error("Can't connect to URL:{}".format(errors)) - raise Exception("Cannot wait URL {}(interval={}, timeout={}, attempts={})".format( - url, interval, timeout, attempts)) + raise Exception( + "Cannot wait URL {}(interval={}, timeout={}, attempts={})".format( + url, interval, timeout, attempts + ) + ) def wait_mysql_client_to_start(self, timeout=180): start = time.time() errors = [] - self.mysql_client_container = self.get_docker_handle(self.get_instance_docker_id(self.mysql_client_host)) + self.mysql_client_container = self.get_docker_handle( + self.get_instance_docker_id(self.mysql_client_host) + ) while time.time() - start < timeout: try: - info = self.mysql_client_container.client.api.inspect_container(self.mysql_client_container.name) - if info['State']['Health']['Status'] == 'healthy': + info = self.mysql_client_container.client.api.inspect_container( + self.mysql_client_container.name + ) + if info["State"]["Health"]["Status"] == "healthy": logging.debug("Mysql Client Container Started") return time.sleep(1) @@ -1123,17 +1653,22 @@ class ClickHouseCluster: errors += [str(ex)] time.sleep(1) - run_and_check(['docker', 'ps', '--all']) + run_and_check(["docker", "ps", "--all"]) logging.error("Can't connect to MySQL Client:{}".format(errors)) raise Exception("Cannot wait MySQL Client container") def wait_mysql_to_start(self, timeout=180): - self.mysql_ip = self.get_instance_ip('mysql57') + self.mysql_ip = self.get_instance_ip("mysql57") start = time.time() errors = [] while time.time() - start < timeout: try: - conn = pymysql.connect(user='root', password='clickhouse', host=self.mysql_ip, port=self.mysql_port) + conn = pymysql.connect( + user="root", + password="clickhouse", + host=self.mysql_ip, + port=self.mysql_port, + ) conn.close() logging.debug("Mysql 
Started") return @@ -1141,16 +1676,21 @@ class ClickHouseCluster: errors += [str(ex)] time.sleep(0.5) - run_and_check(['docker-compose', 'ps', '--services', '--all']) + run_and_check(["docker-compose", "ps", "--services", "--all"]) logging.error("Can't connect to MySQL:{}".format(errors)) raise Exception("Cannot wait MySQL container") def wait_mysql8_to_start(self, timeout=180): - self.mysql8_ip = self.get_instance_ip('mysql80') + self.mysql8_ip = self.get_instance_ip("mysql80") start = time.time() while time.time() - start < timeout: try: - conn = pymysql.connect(user='root', password='clickhouse', host=self.mysql8_ip, port=self.mysql8_port) + conn = pymysql.connect( + user="root", + password="clickhouse", + host=self.mysql8_ip, + port=self.mysql8_port, + ) conn.close() logging.debug("Mysql 8 Started") return @@ -1158,7 +1698,7 @@ class ClickHouseCluster: logging.debug("Can't connect to MySQL 8 " + str(ex)) time.sleep(0.5) - run_and_check(['docker-compose', 'ps', '--services', '--all']) + run_and_check(["docker-compose", "ps", "--services", "--all"]) raise Exception("Cannot wait MySQL 8 container") def wait_mysql_cluster_to_start(self, timeout=180): @@ -1170,7 +1710,12 @@ class ClickHouseCluster: while time.time() - start < timeout: try: for ip in [self.mysql2_ip, self.mysql3_ip, self.mysql4_ip]: - conn = pymysql.connect(user='root', password='clickhouse', host=ip, port=self.mysql_port) + conn = pymysql.connect( + user="root", + password="clickhouse", + host=ip, + port=self.mysql_port, + ) conn.close() logging.debug(f"Mysql Started {ip}") return @@ -1178,7 +1723,7 @@ class ClickHouseCluster: errors += [str(ex)] time.sleep(0.5) - run_and_check(['docker-compose', 'ps', '--services', '--all']) + run_and_check(["docker-compose", "ps", "--services", "--all"]) logging.error("Can't connect to MySQL:{}".format(errors)) raise Exception("Cannot wait MySQL container") @@ -1187,7 +1732,13 @@ class ClickHouseCluster: start = time.time() while time.time() - start < timeout: try: - self.postgres_conn = psycopg2.connect(host=self.postgres_ip, port=self.postgres_port, database='postgres', user='postgres', password='mysecretpassword') + self.postgres_conn = psycopg2.connect( + host=self.postgres_ip, + port=self.postgres_port, + database="postgres", + user="postgres", + password="mysecretpassword", + ) self.postgres_conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) self.postgres_conn.autocommit = True logging.debug("Postgres Started") @@ -1205,7 +1756,13 @@ class ClickHouseCluster: start = time.time() while time.time() - start < timeout: try: - self.postgres2_conn = psycopg2.connect(host=self.postgres2_ip, port=self.postgres_port, database='postgres', user='postgres', password='mysecretpassword') + self.postgres2_conn = psycopg2.connect( + host=self.postgres2_ip, + port=self.postgres_port, + database="postgres", + user="postgres", + password="mysecretpassword", + ) self.postgres2_conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) self.postgres2_conn.autocommit = True logging.debug("Postgres Cluster host 2 started") @@ -1215,7 +1772,13 @@ class ClickHouseCluster: time.sleep(0.5) while time.time() - start < timeout: try: - self.postgres3_conn = psycopg2.connect(host=self.postgres3_ip, port=self.postgres_port, database='postgres', user='postgres', password='mysecretpassword') + self.postgres3_conn = psycopg2.connect( + host=self.postgres3_ip, + port=self.postgres_port, + database="postgres", + user="postgres", + password="mysecretpassword", + ) 
self.postgres3_conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) self.postgres3_conn.autocommit = True logging.debug("Postgres Cluster host 3 started") @@ -1225,7 +1788,13 @@ class ClickHouseCluster: time.sleep(0.5) while time.time() - start < timeout: try: - self.postgres4_conn = psycopg2.connect(host=self.postgres4_ip, port=self.postgres_port, database='postgres', user='postgres', password='mysecretpassword') + self.postgres4_conn = psycopg2.connect( + host=self.postgres4_ip, + port=self.postgres_port, + database="postgres", + user="postgres", + password="mysecretpassword", + ) self.postgres4_conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) self.postgres4_conn.autocommit = True logging.debug("Postgres Cluster host 4 started") @@ -1261,10 +1830,15 @@ class ClickHouseCluster: start = time.time() while time.time() - start < timeout: try: - self.exec_in_container(self.nginx_id, ["curl", "-X", "PUT", "-d", "Test", "http://test.com/test.txt"]) - res = self.exec_in_container(self.nginx_id, ["curl", "-X", "GET", "http://test.com/test.txt"]) - assert(res == 'Test') - print('nginx static files server is available') + self.exec_in_container( + self.nginx_id, + ["curl", "-X", "PUT", "-d", "Test", "http://test.com/test.txt"], + ) + res = self.exec_in_container( + self.nginx_id, ["curl", "-X", "GET", "http://test.com/test.txt"] + ) + assert res == "Test" + print("nginx static files server is available") return except Exception as ex: print("Can't connect to nginx: " + str(ex)) @@ -1275,9 +1849,9 @@ class ClickHouseCluster: start = time.time() while time.time() - start < timeout: try: - for instance in ['zoo1', 'zoo2', 'zoo3']: + for instance in ["zoo1", "zoo2", "zoo3"]: conn = self.get_kazoo_client(instance) - conn.get_children('/') + conn.get_children("/") conn.stop() logging.debug("All instances of ZooKeeper Secure started") return @@ -1292,9 +1866,9 @@ class ClickHouseCluster: start = time.time() while time.time() - start < timeout: try: - for instance in ['zoo1', 'zoo2', 'zoo3']: + for instance in ["zoo1", "zoo2", "zoo3"]: conn = self.get_kazoo_client(instance) - conn.get_children('/') + conn.get_children("/") conn.stop() logging.debug("All instances of ZooKeeper started") return @@ -1306,26 +1880,38 @@ class ClickHouseCluster: def make_hdfs_api(self, timeout=180, kerberized=False): if kerberized: - keytab = p.abspath(p.join(self.instances['node1'].path, "secrets/clickhouse.keytab")) - krb_conf = p.abspath(p.join(self.instances['node1'].path, "secrets/krb_long.conf")) + keytab = p.abspath( + p.join(self.instances["node1"].path, "secrets/clickhouse.keytab") + ) + krb_conf = p.abspath( + p.join(self.instances["node1"].path, "secrets/krb_long.conf") + ) self.hdfs_kerberized_ip = self.get_instance_ip(self.hdfs_kerberized_host) - kdc_ip = self.get_instance_ip('hdfskerberos') + kdc_ip = self.get_instance_ip("hdfskerberos") - self.hdfs_api = HDFSApi(user="root", - timeout=timeout, - kerberized=True, - principal="root@TEST.CLICKHOUSE.TECH", - keytab=keytab, - krb_conf=krb_conf, - host=self.hdfs_kerberized_host, - protocol="http", - proxy_port=self.hdfs_kerberized_name_port, - data_port=self.hdfs_kerberized_data_port, - hdfs_ip=self.hdfs_kerberized_ip, - kdc_ip=kdc_ip) + self.hdfs_api = HDFSApi( + user="root", + timeout=timeout, + kerberized=True, + principal="root@TEST.CLICKHOUSE.TECH", + keytab=keytab, + krb_conf=krb_conf, + host=self.hdfs_kerberized_host, + protocol="http", + proxy_port=self.hdfs_kerberized_name_port, + data_port=self.hdfs_kerberized_data_port, + hdfs_ip=self.hdfs_kerberized_ip, 
+ kdc_ip=kdc_ip, + ) else: self.hdfs_ip = self.get_instance_ip(self.hdfs_host) - self.hdfs_api = HDFSApi(user="root", host=self.hdfs_host, data_port=self.hdfs_data_port, proxy_port=self.hdfs_name_port, hdfs_ip=self.hdfs_ip) + self.hdfs_api = HDFSApi( + user="root", + host=self.hdfs_host, + data_port=self.hdfs_data_port, + proxy_port=self.hdfs_name_port, + hdfs_ip=self.hdfs_ip, + ) def wait_kafka_is_available(self, kafka_docker_id, kafka_port, max_retries=50): retries = 0 @@ -1350,16 +1936,19 @@ class ClickHouseCluster: return except Exception as ex: - logging.exception("Can't connect to HDFS or preparations are not done yet " + str(ex)) + logging.exception( + "Can't connect to HDFS or preparations are not done yet " + str(ex) + ) time.sleep(1) raise Exception("Can't wait HDFS to start") def wait_mongo_to_start(self, timeout=30, secure=False): - connection_str = 'mongodb://{user}:{password}@{host}:{port}'.format( - host='localhost', port=self.mongo_port, user='root', password='clickhouse') + connection_str = "mongodb://{user}:{password}@{host}:{port}".format( + host="localhost", port=self.mongo_port, user="root", password="clickhouse" + ) if secure: - connection_str += '/?tls=true&tlsAllowInvalidCertificates=true' + connection_str += "/?tls=true&tlsAllowInvalidCertificates=true" connection = pymongo.MongoClient(connection_str) start = time.time() while time.time() - start < timeout: @@ -1375,13 +1964,16 @@ class ClickHouseCluster: self.minio_ip = self.get_instance_ip(self.minio_host) self.minio_redirect_ip = self.get_instance_ip(self.minio_redirect_host) - - os.environ['SSL_CERT_FILE'] = p.join(self.base_dir, self.minio_dir, 'certs', 'public.crt') - minio_client = Minio(f'{self.minio_ip}:{self.minio_port}', - access_key='minio', - secret_key='minio123', - secure=secure, - http_client=urllib3.PoolManager(cert_reqs='CERT_NONE')) # disable SSL check as we test ClickHouse and not Python library + os.environ["SSL_CERT_FILE"] = p.join( + self.base_dir, self.minio_dir, "certs", "public.crt" + ) + minio_client = Minio( + f"{self.minio_ip}:{self.minio_port}", + access_key="minio", + secret_key="minio123", + secure=secure, + http_client=urllib3.PoolManager(cert_reqs="CERT_NONE"), + ) # disable SSL check as we test ClickHouse and not Python library start = time.time() while time.time() - start < timeout: try: @@ -1414,12 +2006,15 @@ class ClickHouseCluster: def wait_azurite_to_start(self, timeout=180): from azure.storage.blob import BlobServiceClient + connection_string = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;" time.sleep(1) start = time.time() while time.time() - start < timeout: try: - blob_service_client = BlobServiceClient.from_connection_string(connection_string) + blob_service_client = BlobServiceClient.from_connection_string( + connection_string + ) logging.debug(blob_service_client.get_account_information()) self.blob_service_client = blob_service_client return @@ -1430,7 +2025,9 @@ class ClickHouseCluster: raise Exception("Can't wait Azurite to start") def wait_schema_registry_to_start(self, timeout=180): - sr_client = CachedSchemaRegistryClient({"url":'http://localhost:{}'.format(self.schema_registry_port)}) + sr_client = CachedSchemaRegistryClient( + {"url": "http://localhost:{}".format(self.schema_registry_port)} + ) start = time.time() while time.time() - start < timeout: try: @@ -1445,12 +2042,26 @@ class 
ClickHouseCluster: def wait_cassandra_to_start(self, timeout=180): self.cassandra_ip = self.get_instance_ip(self.cassandra_host) - cass_client = cassandra.cluster.Cluster([self.cassandra_ip], port=self.cassandra_port, load_balancing_policy=RoundRobinPolicy()) + cass_client = cassandra.cluster.Cluster( + [self.cassandra_ip], + port=self.cassandra_port, + load_balancing_policy=RoundRobinPolicy(), + ) start = time.time() while time.time() - start < timeout: try: - logging.info(f"Check Cassandra Online {self.cassandra_id} {self.cassandra_ip} {self.cassandra_port}") - check = self.exec_in_container(self.cassandra_id, ["bash", "-c", f"/opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e 'describe keyspaces' {self.cassandra_ip} {self.cassandra_port}"], user='root') + logging.info( + f"Check Cassandra Online {self.cassandra_id} {self.cassandra_ip} {self.cassandra_port}" + ) + check = self.exec_in_container( + self.cassandra_id, + [ + "bash", + "-c", + f"/opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e 'describe keyspaces' {self.cassandra_ip} {self.cassandra_port}", + ], + user="root", + ) logging.info("Cassandra Online") cass_client.connect() logging.info("Connected Clients to Cassandra") @@ -1465,7 +2076,11 @@ class ClickHouseCluster: pytest_xdist_logging_to_separate_files.setup() logging.info("Running tests in {}".format(self.base_path)) - logging.debug("Cluster start called. is_up={}, destroy_dirs={}".format(self.is_up, destroy_dirs)) + logging.debug( + "Cluster start called. is_up={}, destroy_dirs={}".format( + self.is_up, destroy_dirs + ) + ) if self.is_up: return @@ -1484,15 +2099,25 @@ class ClickHouseCluster: shutil.rmtree(self.instances_dir) for instance in list(self.instances.values()): - logging.debug(('Setup directory for instance: {} destroy_dirs: {}'.format(instance.name, destroy_dirs))) + logging.debug( + ( + "Setup directory for instance: {} destroy_dirs: {}".format( + instance.name, destroy_dirs + ) + ) + ) instance.create_dir(destroy_dir=destroy_dirs) _create_env_file(os.path.join(self.env_file), self.env_variables) - self.docker_client = docker.DockerClient(base_url='unix:///var/run/docker.sock', version=self.docker_api_version, timeout=600) + self.docker_client = docker.DockerClient( + base_url="unix:///var/run/docker.sock", + version=self.docker_api_version, + timeout=600, + ) - common_opts = ['--verbose', 'up', '-d'] + common_opts = ["--verbose", "up", "-d"] - images_pull_cmd = self.base_cmd + ['pull'] + images_pull_cmd = self.base_cmd + ["pull"] # sometimes dockerhub/proxy can be flaky for i in range(5): try: @@ -1505,9 +2130,11 @@ class ClickHouseCluster: time.sleep(i * 3) if self.with_zookeeper_secure and self.base_zookeeper_cmd: - logging.debug('Setup ZooKeeper Secure') - logging.debug(f'Creating internal ZooKeeper dirs: {self.zookeeper_dirs_to_create}') - for i in range(1,3): + logging.debug("Setup ZooKeeper Secure") + logging.debug( + f"Creating internal ZooKeeper dirs: {self.zookeeper_dirs_to_create}" + ) + for i in range(1, 3): if os.path.exists(self.zookeeper_instance_dir_prefix + f"{i}"): shutil.rmtree(self.zookeeper_instance_dir_prefix + f"{i}") for dir in self.zookeeper_dirs_to_create: @@ -1520,23 +2147,30 @@ class ClickHouseCluster: self.run_kazoo_commands_with_retries(command, repeats=5) if self.with_zookeeper and self.base_zookeeper_cmd: - logging.debug('Setup ZooKeeper') - logging.debug(f'Creating internal ZooKeeper dirs: {self.zookeeper_dirs_to_create}') + logging.debug("Setup ZooKeeper") + logging.debug( + f"Creating internal ZooKeeper dirs: 
{self.zookeeper_dirs_to_create}" + ) if self.use_keeper: - for i in range(1,4): + for i in range(1, 4): if os.path.exists(self.keeper_instance_dir_prefix + f"{i}"): shutil.rmtree(self.keeper_instance_dir_prefix + f"{i}") else: - for i in range(1,3): + for i in range(1, 3): if os.path.exists(self.zookeeper_instance_dir_prefix + f"{i}"): shutil.rmtree(self.zookeeper_instance_dir_prefix + f"{i}") for dir in self.zookeeper_dirs_to_create: os.makedirs(dir) - if self.use_keeper: # TODO: remove hardcoded paths from here - for i in range(1,4): - shutil.copy(os.path.join(HELPERS_DIR, f'keeper_config{i}.xml'), os.path.join(self.keeper_instance_dir_prefix + f"{i}", "config" )) + if self.use_keeper: # TODO: remove hardcoded paths from here + for i in range(1, 4): + shutil.copy( + os.path.join(HELPERS_DIR, f"keeper_config{i}.xml"), + os.path.join( + self.keeper_instance_dir_prefix + f"{i}", "config" + ), + ) run_and_check(self.base_zookeeper_cmd + common_opts, env=self.env) self.up_called = True @@ -1546,12 +2180,12 @@ class ClickHouseCluster: self.run_kazoo_commands_with_retries(command, repeats=5) if self.with_mysql_client and self.base_mysql_client_cmd: - logging.debug('Setup MySQL Client') + logging.debug("Setup MySQL Client") subprocess_check_call(self.base_mysql_client_cmd + common_opts) self.wait_mysql_client_to_start() if self.with_mysql and self.base_mysql_cmd: - logging.debug('Setup MySQL') + logging.debug("Setup MySQL") if os.path.exists(self.mysql_dir): shutil.rmtree(self.mysql_dir) os.makedirs(self.mysql_logs_dir) @@ -1561,7 +2195,7 @@ class ClickHouseCluster: self.wait_mysql_to_start() if self.with_mysql8 and self.base_mysql8_cmd: - logging.debug('Setup MySQL 8') + logging.debug("Setup MySQL 8") if os.path.exists(self.mysql8_dir): shutil.rmtree(self.mysql8_dir) os.makedirs(self.mysql8_logs_dir) @@ -1570,7 +2204,7 @@ class ClickHouseCluster: self.wait_mysql8_to_start() if self.with_mysql_cluster and self.base_mysql_cluster_cmd: - print('Setup MySQL') + print("Setup MySQL") if os.path.exists(self.mysql_cluster_dir): shutil.rmtree(self.mysql_cluster_dir) os.makedirs(self.mysql_cluster_logs_dir) @@ -1581,7 +2215,7 @@ class ClickHouseCluster: self.wait_mysql_cluster_to_start() if self.with_postgres and self.base_postgres_cmd: - logging.debug('Setup Postgres') + logging.debug("Setup Postgres") if os.path.exists(self.postgres_dir): shutil.rmtree(self.postgres_dir) os.makedirs(self.postgres_logs_dir) @@ -1592,7 +2226,7 @@ class ClickHouseCluster: self.wait_postgres_to_start() if self.with_postgres_cluster and self.base_postgres_cluster_cmd: - print('Setup Postgres') + print("Setup Postgres") os.makedirs(self.postgres2_logs_dir) os.chmod(self.postgres2_logs_dir, stat.S_IRWXU | stat.S_IRWXO) os.makedirs(self.postgres3_logs_dir) @@ -1604,33 +2238,43 @@ class ClickHouseCluster: self.wait_postgres_cluster_to_start() if self.with_kafka and self.base_kafka_cmd: - logging.debug('Setup Kafka') - subprocess_check_call(self.base_kafka_cmd + common_opts + ['--renew-anon-volumes']) + logging.debug("Setup Kafka") + subprocess_check_call( + self.base_kafka_cmd + common_opts + ["--renew-anon-volumes"] + ) self.up_called = True self.wait_kafka_is_available(self.kafka_docker_id, self.kafka_port) self.wait_schema_registry_to_start() if self.with_kerberized_kafka and self.base_kerberized_kafka_cmd: - logging.debug('Setup kerberized kafka') - run_and_check(self.base_kerberized_kafka_cmd + common_opts + ['--renew-anon-volumes']) + logging.debug("Setup kerberized kafka") + run_and_check( + 
self.base_kerberized_kafka_cmd + + common_opts + + ["--renew-anon-volumes"] + ) self.up_called = True - self.wait_kafka_is_available(self.kerberized_kafka_docker_id, self.kerberized_kafka_port, 100) + self.wait_kafka_is_available( + self.kerberized_kafka_docker_id, self.kerberized_kafka_port, 100 + ) if self.with_rabbitmq and self.base_rabbitmq_cmd: - logging.debug('Setup RabbitMQ') + logging.debug("Setup RabbitMQ") os.makedirs(self.rabbitmq_logs_dir) os.chmod(self.rabbitmq_logs_dir, stat.S_IRWXU | stat.S_IRWXO) for i in range(5): - subprocess_check_call(self.base_rabbitmq_cmd + common_opts + ['--renew-anon-volumes']) + subprocess_check_call( + self.base_rabbitmq_cmd + common_opts + ["--renew-anon-volumes"] + ) self.up_called = True - self.rabbitmq_docker_id = self.get_instance_docker_id('rabbitmq1') + self.rabbitmq_docker_id = self.get_instance_docker_id("rabbitmq1") logging.debug(f"RabbitMQ checking container try: {i}") - if self.wait_rabbitmq_to_start(throw=(i==4)): + if self.wait_rabbitmq_to_start(throw=(i == 4)): break if self.with_hdfs and self.base_hdfs_cmd: - logging.debug('Setup HDFS') + logging.debug("Setup HDFS") os.makedirs(self.hdfs_logs_dir) os.chmod(self.hdfs_logs_dir, stat.S_IRWXU | stat.S_IRWXO) subprocess_check_call(self.base_hdfs_cmd + common_opts) @@ -1639,7 +2283,7 @@ class ClickHouseCluster: self.wait_hdfs_to_start() if self.with_kerberized_hdfs and self.base_kerberized_hdfs_cmd: - logging.debug('Setup kerberized HDFS') + logging.debug("Setup kerberized HDFS") os.makedirs(self.hdfs_kerberized_logs_dir) os.chmod(self.hdfs_kerberized_logs_dir, stat.S_IRWXU | stat.S_IRWXO) run_and_check(self.base_kerberized_hdfs_cmd + common_opts) @@ -1648,26 +2292,28 @@ class ClickHouseCluster: self.wait_hdfs_to_start(check_marker=True) if self.with_nginx and self.base_nginx_cmd: - logging.debug('Setup nginx') - subprocess_check_call(self.base_nginx_cmd + common_opts + ['--renew-anon-volumes']) + logging.debug("Setup nginx") + subprocess_check_call( + self.base_nginx_cmd + common_opts + ["--renew-anon-volumes"] + ) self.up_called = True - self.nginx_docker_id = self.get_instance_docker_id('nginx') + self.nginx_docker_id = self.get_instance_docker_id("nginx") self.wait_nginx_to_start() if self.with_mongo and self.base_mongo_cmd: - logging.debug('Setup Mongo') + logging.debug("Setup Mongo") run_and_check(self.base_mongo_cmd + common_opts) self.up_called = True self.wait_mongo_to_start(30, secure=self.with_mongo_secure) if self.with_redis and self.base_redis_cmd: - logging.debug('Setup Redis') + logging.debug("Setup Redis") subprocess_check_call(self.base_redis_cmd + common_opts) self.up_called = True time.sleep(10) if self.with_hive and self.base_hive_cmd: - logging.debug('Setup hive') + logging.debug("Setup hive") subprocess_check_call(self.base_hive_cmd + common_opts) self.up_called = True time.sleep(300) @@ -1676,13 +2322,19 @@ class ClickHouseCluster: # Copy minio certificates to minio/certs os.mkdir(self.minio_dir) if self.minio_certs_dir is None: - os.mkdir(os.path.join(self.minio_dir, 'certs')) + os.mkdir(os.path.join(self.minio_dir, "certs")) else: - shutil.copytree(os.path.join(self.base_dir, self.minio_certs_dir), os.path.join(self.minio_dir, 'certs')) + shutil.copytree( + os.path.join(self.base_dir, self.minio_certs_dir), + os.path.join(self.minio_dir, "certs"), + ) minio_start_cmd = self.base_minio_cmd + common_opts - logging.info("Trying to create Minio instance by command %s", ' '.join(map(str, minio_start_cmd))) + logging.info( + "Trying to create Minio instance by command 
%s", + " ".join(map(str, minio_start_cmd)), + ) run_and_check(minio_start_cmd) self.up_called = True logging.info("Trying to connect to Minio...") @@ -1690,14 +2342,17 @@ class ClickHouseCluster: if self.with_azurite and self.base_azurite_cmd: azurite_start_cmd = self.base_azurite_cmd + common_opts - logging.info("Trying to create Azurite instance by command %s", ' '.join(map(str, azurite_start_cmd))) + logging.info( + "Trying to create Azurite instance by command %s", + " ".join(map(str, azurite_start_cmd)), + ) run_and_check(azurite_start_cmd) self.up_called = True logging.info("Trying to connect to Azurite") self.wait_azurite_to_start() if self.with_cassandra and self.base_cassandra_cmd: - subprocess_check_call(self.base_cassandra_cmd + ['up', '-d']) + subprocess_check_call(self.base_cassandra_cmd + ["up", "-d"]) self.up_called = True self.wait_cassandra_to_start() @@ -1705,13 +2360,20 @@ class ClickHouseCluster: os.makedirs(self.jdbc_driver_logs_dir) os.chmod(self.jdbc_driver_logs_dir, stat.S_IRWXU | stat.S_IRWXO) - subprocess_check_call(self.base_jdbc_bridge_cmd + ['up', '-d']) + subprocess_check_call(self.base_jdbc_bridge_cmd + ["up", "-d"]) self.up_called = True self.jdbc_bridge_ip = self.get_instance_ip(self.jdbc_bridge_host) - self.wait_for_url(f"http://{self.jdbc_bridge_ip}:{self.jdbc_bridge_port}/ping") + self.wait_for_url( + f"http://{self.jdbc_bridge_ip}:{self.jdbc_bridge_port}/ping" + ) - clickhouse_start_cmd = self.base_cmd + ['up', '-d', '--no-recreate'] - logging.debug(("Trying to create ClickHouse instance by command %s", ' '.join(map(str, clickhouse_start_cmd)))) + clickhouse_start_cmd = self.base_cmd + ["up", "-d", "--no-recreate"] + logging.debug( + ( + "Trying to create ClickHouse instance by command %s", + " ".join(map(str, clickhouse_start_cmd)), + ) + ) self.up_called = True run_and_check(clickhouse_start_cmd) logging.debug("ClickHouse instance created") @@ -1721,11 +2383,15 @@ class ClickHouseCluster: instance.docker_client = self.docker_client instance.ip_address = self.get_instance_ip(instance.name) - logging.debug(f"Waiting for ClickHouse start in {instance.name}, ip: {instance.ip_address}...") + logging.debug( + f"Waiting for ClickHouse start in {instance.name}, ip: {instance.ip_address}..." + ) instance.wait_for_start(start_timeout) logging.debug(f"ClickHouse {instance.name} started") - instance.client = Client(instance.ip_address, command=self.client_bin_path) + instance.client = Client( + instance.ip_address, command=self.client_bin_path + ) self.is_up = True @@ -1743,43 +2409,59 @@ class ClickHouseCluster: if self.up_called: with open(self.docker_logs_path, "w+") as f: try: - subprocess.check_call(self.base_cmd + ['logs'], stdout=f) # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL + subprocess.check_call( # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL + self.base_cmd + ["logs"], stdout=f + ) except Exception as e: logging.debug("Unable to get logs from docker.") f.seek(0) for line in f: if SANITIZER_SIGN in line: - sanitizer_assert_instance = line.split('|')[0].strip() + sanitizer_assert_instance = line.split("|")[0].strip() break if kill: try: - run_and_check(self.base_cmd + ['stop', '--timeout', '20']) + run_and_check(self.base_cmd + ["stop", "--timeout", "20"]) except Exception as e: - logging.debug("Kill command failed during shutdown. {}".format(repr(e))) + logging.debug( + "Kill command failed during shutdown. 
{}".format(repr(e)) + ) logging.debug("Trying to kill forcefully") - run_and_check(self.base_cmd + ['kill']) + run_and_check(self.base_cmd + ["kill"]) # Check server logs for Fatal messages and sanitizer failures. # NOTE: we cannot do this via docker since in case of Fatal message container may already die. for name, instance in self.instances.items(): if instance.contains_in_log(SANITIZER_SIGN, from_host=True): - sanitizer_assert_instance = instance.grep_in_log(SANITIZER_SIGN, from_host=True, filename='stderr.log') - logging.error("Sanitizer in instance %s log %s", name, sanitizer_assert_instance) + sanitizer_assert_instance = instance.grep_in_log( + SANITIZER_SIGN, from_host=True, filename="stderr.log" + ) + logging.error( + "Sanitizer in instance %s log %s", + name, + sanitizer_assert_instance, + ) - if not ignore_fatal and instance.contains_in_log("Fatal", from_host=True): + if not ignore_fatal and instance.contains_in_log( + "Fatal", from_host=True + ): fatal_log = instance.grep_in_log("Fatal", from_host=True) - if 'Child process was terminated by signal 9 (KILL)' in fatal_log: + if "Child process was terminated by signal 9 (KILL)" in fatal_log: fatal_log = None continue logging.error("Crash in instance %s fatal log %s", name, fatal_log) try: - subprocess_check_call(self.base_cmd + ['down', '--volumes']) + subprocess_check_call(self.base_cmd + ["down", "--volumes"]) except Exception as e: - logging.debug("Down + remove orphans failed during shutdown. {}".format(repr(e))) + logging.debug( + "Down + remove orphans failed during shutdown. {}".format(repr(e)) + ) else: - logging.warning("docker-compose up was not called. Trying to export docker.log for running containers") + logging.warning( + "docker-compose up was not called. Trying to export docker.log for running containers" + ) self.cleanup() @@ -1794,23 +2476,25 @@ class ClickHouseCluster: if sanitizer_assert_instance is not None: raise Exception( - "Sanitizer assert found in {} for instance {}".format(self.docker_logs_path, sanitizer_assert_instance)) + "Sanitizer assert found in {} for instance {}".format( + self.docker_logs_path, sanitizer_assert_instance + ) + ) if fatal_log is not None: raise Exception("Fatal messages found: {}".format(fatal_log)) - def pause_container(self, instance_name): - subprocess_check_call(self.base_cmd + ['pause', instance_name]) + subprocess_check_call(self.base_cmd + ["pause", instance_name]) # subprocess_check_call(self.base_cmd + ['kill', '-s SIGSTOP', instance_name]) def unpause_container(self, instance_name): - subprocess_check_call(self.base_cmd + ['unpause', instance_name]) + subprocess_check_call(self.base_cmd + ["unpause", instance_name]) # subprocess_check_call(self.base_cmd + ['kill', '-s SIGCONT', instance_name]) def open_bash_shell(self, instance_name): - os.system(' '.join(self.base_cmd + ['exec', instance_name, '/bin/bash'])) + os.system(" ".join(self.base_cmd + ["exec", instance_name, "/bin/bash"])) def get_kazoo_client(self, zoo_instance_name): use_ssl = False @@ -1823,15 +2507,26 @@ class ClickHouseCluster: raise Exception("Cluster has no ZooKeeper") ip = self.get_instance_ip(zoo_instance_name) - logging.debug(f"get_kazoo_client: {zoo_instance_name}, ip:{ip}, port:{port}, use_ssl:{use_ssl}") - zk = KazooClient(hosts=f"{ip}:{port}", use_ssl=use_ssl, verify_certs=False, certfile=self.zookeeper_certfile, - keyfile=self.zookeeper_keyfile) + logging.debug( + f"get_kazoo_client: {zoo_instance_name}, ip:{ip}, port:{port}, use_ssl:{use_ssl}" + ) + zk = KazooClient( + hosts=f"{ip}:{port}", + 
use_ssl=use_ssl, + verify_certs=False, + certfile=self.zookeeper_certfile, + keyfile=self.zookeeper_keyfile, + ) zk.start() return zk - def run_kazoo_commands_with_retries(self, kazoo_callback, zoo_instance_name='zoo1', repeats=1, sleep_for=1): + def run_kazoo_commands_with_retries( + self, kazoo_callback, zoo_instance_name="zoo1", repeats=1, sleep_for=1 + ): zk = self.get_kazoo_client(zoo_instance_name) - logging.debug(f"run_kazoo_commands_with_retries: {zoo_instance_name}, {kazoo_callback}") + logging.debug( + f"run_kazoo_commands_with_retries: {zoo_instance_name}, {kazoo_callback}" + ) for i in range(repeats - 1): try: kazoo_callback(zk) @@ -1856,14 +2551,18 @@ class ClickHouseCluster: subprocess_check_call(self.base_zookeeper_cmd + ["start", n]) -CLICKHOUSE_START_COMMAND = "clickhouse server --config-file=/etc/clickhouse-server/{main_config_file}" \ - " --log-file=/var/log/clickhouse-server/clickhouse-server.log " \ - " --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log" +CLICKHOUSE_START_COMMAND = ( + "clickhouse server --config-file=/etc/clickhouse-server/{main_config_file}" + " --log-file=/var/log/clickhouse-server/clickhouse-server.log " + " --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log" +) -CLICKHOUSE_STAY_ALIVE_COMMAND = 'bash -c "trap \'pkill tail\' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!"'.format(CLICKHOUSE_START_COMMAND) +CLICKHOUSE_STAY_ALIVE_COMMAND = "bash -c \"trap 'pkill tail' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!\"".format( + CLICKHOUSE_START_COMMAND +) # /run/xtables.lock passed inside for correct iptables --wait -DOCKER_COMPOSE_TEMPLATE = ''' +DOCKER_COMPOSE_TEMPLATE = """ version: '2.3' services: {name}: @@ -1906,22 +2605,62 @@ services: {ipv6_address} {net_aliases} {net_alias1} -''' +""" class ClickHouseInstance: - def __init__( - self, cluster, base_path, name, base_config_dir, custom_main_configs, custom_user_configs, - custom_dictionaries, - macros, with_zookeeper, zookeeper_config_path, with_mysql_client, with_mysql, with_mysql8, with_mysql_cluster, with_kafka, with_kerberized_kafka, - with_rabbitmq, with_nginx, with_kerberized_hdfs, with_mongo, with_redis, with_minio, with_azurite, with_jdbc_bridge, with_hive, - with_cassandra, server_bin_path, odbc_bridge_bin_path, library_bridge_bin_path, clickhouse_path_dir, with_odbc_drivers, with_postgres, with_postgres_cluster, - clickhouse_start_command=CLICKHOUSE_START_COMMAND, - main_config_name="config.xml", users_config_name="users.xml", copy_common_configs=True, - hostname=None, env_variables=None, - image="clickhouse/integration-test", tag="latest", - stay_alive=False, ipv4_address=None, ipv6_address=None, with_installed_binary=False, external_dirs=None, tmpfs=None, config_root_name="clickhouse", extra_configs=[]): + self, + cluster, + base_path, + name, + base_config_dir, + custom_main_configs, + custom_user_configs, + custom_dictionaries, + macros, + with_zookeeper, + zookeeper_config_path, + with_mysql_client, + with_mysql, + with_mysql8, + with_mysql_cluster, + with_kafka, + with_kerberized_kafka, + with_rabbitmq, + with_nginx, + with_kerberized_hdfs, + with_mongo, + with_redis, + with_minio, + with_azurite, + with_jdbc_bridge, + with_hive, + with_cassandra, + server_bin_path, + odbc_bridge_bin_path, + library_bridge_bin_path, + clickhouse_path_dir, + with_odbc_drivers, + with_postgres, + with_postgres_cluster, + clickhouse_start_command=CLICKHOUSE_START_COMMAND, + main_config_name="config.xml", + users_config_name="users.xml", + 
copy_common_configs=True, + hostname=None, + env_variables=None, + image="clickhouse/integration-test", + tag="latest", + stay_alive=False, + ipv4_address=None, + ipv6_address=None, + with_installed_binary=False, + external_dirs=None, + tmpfs=None, + config_root_name="clickhouse", + extra_configs=[], + ): self.name = name self.base_cmd = cluster.base_cmd @@ -1931,13 +2670,27 @@ class ClickHouseInstance: self.external_dirs = external_dirs self.tmpfs = tmpfs or [] - self.base_config_dir = p.abspath(p.join(base_path, base_config_dir)) if base_config_dir else None - self.custom_main_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_main_configs] - self.custom_user_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_user_configs] - self.custom_dictionaries_paths = [p.abspath(p.join(base_path, c)) for c in custom_dictionaries] - self.custom_extra_config_paths = [p.abspath(p.join(base_path,c)) for c in extra_configs] - self.clickhouse_path_dir = p.abspath(p.join(base_path, clickhouse_path_dir)) if clickhouse_path_dir else None - self.kerberos_secrets_dir = p.abspath(p.join(base_path, 'secrets')) + self.base_config_dir = ( + p.abspath(p.join(base_path, base_config_dir)) if base_config_dir else None + ) + self.custom_main_config_paths = [ + p.abspath(p.join(base_path, c)) for c in custom_main_configs + ] + self.custom_user_config_paths = [ + p.abspath(p.join(base_path, c)) for c in custom_user_configs + ] + self.custom_dictionaries_paths = [ + p.abspath(p.join(base_path, c)) for c in custom_dictionaries + ] + self.custom_extra_config_paths = [ + p.abspath(p.join(base_path, c)) for c in extra_configs + ] + self.clickhouse_path_dir = ( + p.abspath(p.join(base_path, clickhouse_path_dir)) + if clickhouse_path_dir + else None + ) + self.kerberos_secrets_dir = p.abspath(p.join(base_path, "secrets")) self.macros = macros if macros is not None else {} self.with_zookeeper = with_zookeeper self.zookeeper_config_path = zookeeper_config_path @@ -1969,10 +2722,12 @@ class ClickHouseInstance: self.users_config_name = users_config_name self.copy_common_configs = copy_common_configs - self.clickhouse_start_command = clickhouse_start_command.replace("{main_config_file}", self.main_config_name) + self.clickhouse_start_command = clickhouse_start_command.replace( + "{main_config_file}", self.main_config_name + ) self.path = p.join(self.cluster.instances_dir, name) - self.docker_compose_path = p.join(self.path, 'docker-compose.yml') + self.docker_compose_path = p.join(self.path, "docker-compose.yml") self.env_variables = env_variables or {} self.env_file = self.cluster.env_file if with_odbc_drivers: @@ -1982,8 +2737,16 @@ class ClickHouseInstance: self.odbc_ini_path = "" if with_kerberized_kafka or with_kerberized_hdfs: - self.keytab_path = '- ' + os.path.dirname(self.docker_compose_path) + "/secrets:/tmp/keytab" - self.krb5_conf = '- ' + os.path.dirname(self.docker_compose_path) + "/secrets/krb.conf:/etc/krb5.conf:ro" + self.keytab_path = ( + "- " + + os.path.dirname(self.docker_compose_path) + + "/secrets:/tmp/keytab" + ) + self.krb5_conf = ( + "- " + + os.path.dirname(self.docker_compose_path) + + "/secrets/krb.conf:/etc/krb5.conf:ro" + ) else: self.keytab_path = "" self.krb5_conf = "" @@ -2000,54 +2763,81 @@ class ClickHouseInstance: self.is_up = False self.config_root_name = config_root_name - - def is_built_with_sanitizer(self, sanitizer_name=''): - build_opts = self.query("SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'") + def is_built_with_sanitizer(self, 
sanitizer_name=""): + build_opts = self.query( + "SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'" + ) return "-fsanitize={}".format(sanitizer_name) in build_opts def is_debug_build(self): - build_opts = self.query("SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'") - return 'NDEBUG' not in build_opts + build_opts = self.query( + "SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'" + ) + return "NDEBUG" not in build_opts def is_built_with_thread_sanitizer(self): - return self.is_built_with_sanitizer('thread') + return self.is_built_with_sanitizer("thread") def is_built_with_address_sanitizer(self): - return self.is_built_with_sanitizer('address') + return self.is_built_with_sanitizer("address") def is_built_with_memory_sanitizer(self): - return self.is_built_with_sanitizer('memory') + return self.is_built_with_sanitizer("memory") # Connects to the instance via clickhouse-client, sends a query (1st argument) and returns the answer - def query(self, sql, - stdin=None, - timeout=None, - settings=None, - user=None, - password=None, - database=None, - ignore_error=False, - query_id=None): + def query( + self, + sql, + stdin=None, + timeout=None, + settings=None, + user=None, + password=None, + database=None, + ignore_error=False, + query_id=None, + ): logging.debug("Executing query %s on %s", sql, self.name) - return self.client.query(sql, - stdin=stdin, - timeout=timeout, - settings=settings, - user=user, - password=password, - database=database, - ignore_error=ignore_error, - query_id=query_id) + return self.client.query( + sql, + stdin=stdin, + timeout=timeout, + settings=settings, + user=user, + password=password, + database=database, + ignore_error=ignore_error, + query_id=query_id, + ) - def query_with_retry(self, sql, stdin=None, timeout=None, settings=None, user=None, password=None, database=None, - ignore_error=False, - retry_count=20, sleep_time=0.5, check_callback=lambda x: True): + def query_with_retry( + self, + sql, + stdin=None, + timeout=None, + settings=None, + user=None, + password=None, + database=None, + ignore_error=False, + retry_count=20, + sleep_time=0.5, + check_callback=lambda x: True, + ): logging.debug(f"Executing query {sql} on {self.name}") result = None for i in range(retry_count): try: - result = self.query(sql, stdin=stdin, timeout=timeout, settings=settings, user=user, password=password, - database=database, ignore_error=ignore_error) + result = self.query( + sql, + stdin=stdin, + timeout=timeout, + settings=settings, + user=user, + password=password, + database=database, + ignore_error=ignore_error, + ) if check_callback(result): return result time.sleep(sleep_time) @@ -2065,22 +2855,62 @@ class ClickHouseInstance: return self.client.get_query_request(sql, *args, **kwargs) # Connects to the instance via clickhouse-client, sends a query (1st argument), expects an error and return its code - def query_and_get_error(self, sql, stdin=None, timeout=None, settings=None, user=None, password=None, - database=None): + def query_and_get_error( + self, + sql, + stdin=None, + timeout=None, + settings=None, + user=None, + password=None, + database=None, + ): logging.debug(f"Executing query {sql} on {self.name}") - return self.client.query_and_get_error(sql, stdin=stdin, timeout=timeout, settings=settings, user=user, - password=password, database=database) + return self.client.query_and_get_error( + sql, + stdin=stdin, + timeout=timeout, + settings=settings, + user=user, + password=password, + database=database, + ) # The same as 
query_and_get_error but ignores successful query. - def query_and_get_answer_with_error(self, sql, stdin=None, timeout=None, settings=None, user=None, password=None, - database=None): + def query_and_get_answer_with_error( + self, + sql, + stdin=None, + timeout=None, + settings=None, + user=None, + password=None, + database=None, + ): logging.debug(f"Executing query {sql} on {self.name}") - return self.client.query_and_get_answer_with_error(sql, stdin=stdin, timeout=timeout, settings=settings, - user=user, password=password, database=database) + return self.client.query_and_get_answer_with_error( + sql, + stdin=stdin, + timeout=timeout, + settings=settings, + user=user, + password=password, + database=database, + ) # Connects to the instance via HTTP interface, sends a query and returns the answer - def http_query(self, sql, data=None, params=None, user=None, password=None, expect_fail_and_get_error=False, - port=8123, timeout=None, retry_strategy=None): + def http_query( + self, + sql, + data=None, + params=None, + user=None, + password=None, + expect_fail_and_get_error=False, + port=8123, + timeout=None, + retry_strategy=None, + ): logging.debug(f"Executing query {sql} on {self.name} via HTTP interface") if params is None: params = {} @@ -2093,7 +2923,7 @@ class ClickHouseInstance: if user and password: auth = requests.auth.HTTPBasicAuth(user, password) elif user: - auth = requests.auth.HTTPBasicAuth(user, '') + auth = requests.auth.HTTPBasicAuth(user, "") url = f"http://{self.ip_address}:{port}/?" + urllib.parse.urlencode(params) if retry_strategy is None: @@ -2114,35 +2944,57 @@ class ClickHouseInstance: if expect_fail_and_get_error: if r.ok: - raise Exception("ClickHouse HTTP server is expected to fail, but succeeded: " + r.text) + raise Exception( + "ClickHouse HTTP server is expected to fail, but succeeded: " + + r.text + ) return http_code_and_message() else: if not r.ok: - raise Exception("ClickHouse HTTP server returned " + http_code_and_message()) + raise Exception( + "ClickHouse HTTP server returned " + http_code_and_message() + ) return r.text # Connects to the instance via HTTP interface, sends a query and returns the answer - def http_request(self, url, method='GET', params=None, data=None, headers=None): + def http_request(self, url, method="GET", params=None, data=None, headers=None): logging.debug(f"Sending HTTP request {url} to {self.name}") url = "http://" + self.ip_address + ":8123/" + url - return requests.request(method=method, url=url, params=params, data=data, headers=headers) + return requests.request( + method=method, url=url, params=params, data=data, headers=headers + ) # Connects to the instance via HTTP interface, sends a query, expects an error and return the error message - def http_query_and_get_error(self, sql, data=None, params=None, user=None, password=None): + def http_query_and_get_error( + self, sql, data=None, params=None, user=None, password=None + ): logging.debug(f"Executing query {sql} on {self.name} via HTTP interface") - return self.http_query(sql=sql, data=data, params=params, user=user, password=password, - expect_fail_and_get_error=True) + return self.http_query( + sql=sql, + data=data, + params=params, + user=user, + password=password, + expect_fail_and_get_error=True, + ) def stop_clickhouse(self, stop_wait_sec=30, kill=False): if not self.stay_alive: - raise Exception("clickhouse can be stopped only with stay_alive=True instance") + raise Exception( + "clickhouse can be stopped only with stay_alive=True instance" + ) try: - ps_clickhouse 
= self.exec_in_container(["bash", "-c", "ps -C clickhouse"], nothrow=True, user='root') - if ps_clickhouse == " PID TTY STAT TIME COMMAND" : + ps_clickhouse = self.exec_in_container( + ["bash", "-c", "ps -C clickhouse"], nothrow=True, user="root" + ) + if ps_clickhouse == " PID TTY STAT TIME COMMAND": logging.warning("ClickHouse process already stopped") return - self.exec_in_container(["bash", "-c", "pkill {} clickhouse".format("-9" if kill else "")], user='root') + self.exec_in_container( + ["bash", "-c", "pkill {} clickhouse".format("-9" if kill else "")], + user="root", + ) start_time = time.time() stopped = False @@ -2157,19 +3009,34 @@ class ClickHouseInstance: if not stopped: pid = self.get_process_pid("clickhouse") if pid is not None: - logging.warning(f"Force kill clickhouse in stop_clickhouse. ps:{pid}") - self.exec_in_container(["bash", "-c", f"gdb -batch -ex 'thread apply all bt full' -p {pid} > {os.path.join(self.path, 'logs/stdout.log')}"], user='root') + logging.warning( + f"Force kill clickhouse in stop_clickhouse. ps:{pid}" + ) + self.exec_in_container( + [ + "bash", + "-c", + f"gdb -batch -ex 'thread apply all bt full' -p {pid} > {os.path.join(self.path, 'logs/stdout.log')}", + ], + user="root", + ) self.stop_clickhouse(kill=True) else: - ps_all = self.exec_in_container(["bash", "-c", "ps aux"], nothrow=True, user='root') - logging.warning(f"We want force stop clickhouse, but no clickhouse-server is running\n{ps_all}") + ps_all = self.exec_in_container( + ["bash", "-c", "ps aux"], nothrow=True, user="root" + ) + logging.warning( + f"We want force stop clickhouse, but no clickhouse-server is running\n{ps_all}" + ) return except Exception as e: logging.warning(f"Stop ClickHouse raised an error {e}") def start_clickhouse(self, start_wait_sec=60): if not self.stay_alive: - raise Exception("ClickHouse can be started again only with stay_alive=True instance") + raise Exception( + "ClickHouse can be started again only with stay_alive=True instance" + ) start_time = time.time() time_to_sleep = 0.5 @@ -2179,7 +3046,10 @@ class ClickHouseInstance: pid = self.get_process_pid("clickhouse") if pid is None: logging.debug("No clickhouse process running. Start new one.") - self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) + self.exec_in_container( + ["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], + user=str(os.getuid()), + ) time.sleep(1) continue else: @@ -2188,13 +3058,16 @@ class ClickHouseInstance: self.wait_start(start_wait_sec + start_time - time.time()) return except Exception as e: - logging.warning(f"Current start attempt failed. Will kill {pid} just in case.") - self.exec_in_container(["bash", "-c", f"kill -9 {pid}"], user='root', nothrow=True) + logging.warning( + f"Current start attempt failed. Will kill {pid} just in case." + ) + self.exec_in_container( + ["bash", "-c", f"kill -9 {pid}"], user="root", nothrow=True + ) time.sleep(time_to_sleep) raise Exception("Cannot start ClickHouse, see additional info in logs") - def wait_start(self, start_wait_sec): start_time = time.time() last_err = None @@ -2203,7 +3076,7 @@ class ClickHouseInstance: pid = self.get_process_pid("clickhouse") if pid is None: raise Exception("ClickHouse server is not running. 
Check logs.") - exec_query_with_retry(self, 'select 20', retry_count = 10, silent=True) + exec_query_with_retry(self, "select 20", retry_count=10, silent=True) return except QueryRuntimeException as err: last_err = err @@ -2214,12 +3087,19 @@ class ClickHouseInstance: raise Exception("ClickHouse server is not running. Check logs.") if time.time() > start_time + start_wait_sec: break - logging.error(f"No time left to start. But process is still running. Will dump threads.") - ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], nothrow=True, user='root') + logging.error( + f"No time left to start. But process is still running. Will dump threads." + ) + ps_clickhouse = self.exec_in_container( + ["bash", "-c", "ps -C clickhouse"], nothrow=True, user="root" + ) logging.info(f"PS RESULT:\n{ps_clickhouse}") pid = self.get_process_pid("clickhouse") if pid is not None: - self.exec_in_container(["bash", "-c", f"gdb -batch -ex 'thread apply all bt full' -p {pid}"], user='root') + self.exec_in_container( + ["bash", "-c", f"gdb -batch -ex 'thread apply all bt full' -p {pid}"], + user="root", + ) if last_err is not None: raise last_err @@ -2228,82 +3108,164 @@ class ClickHouseInstance: self.start_clickhouse(stop_start_wait_sec) def exec_in_container(self, cmd, detach=False, nothrow=False, **kwargs): - return self.cluster.exec_in_container(self.docker_id, cmd, detach, nothrow, **kwargs) + return self.cluster.exec_in_container( + self.docker_id, cmd, detach, nothrow, **kwargs + ) def rotate_logs(self): - self.exec_in_container(["bash", "-c", f"kill -HUP {self.get_process_pid('clickhouse server')}"], user='root') + self.exec_in_container( + ["bash", "-c", f"kill -HUP {self.get_process_pid('clickhouse server')}"], + user="root", + ) - def contains_in_log(self, substring, from_host=False, filename='clickhouse-server.log'): + def contains_in_log( + self, substring, from_host=False, filename="clickhouse-server.log" + ): if from_host: # We check fist file exists but want to look for all rotated logs as well - result = subprocess_check_call(["bash", "-c", - f'[ -f {self.logs_dir}/{filename} ] && zgrep -aH "{substring}" {self.logs_dir}/{filename}* || true' - ]) + result = subprocess_check_call( + [ + "bash", + "-c", + f'[ -f {self.logs_dir}/{filename} ] && zgrep -aH "{substring}" {self.logs_dir}/{filename}* || true', + ] + ) else: - result = self.exec_in_container(["bash", "-c", - f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -aH "{substring}" /var/log/clickhouse-server/{filename} || true' - ]) + result = self.exec_in_container( + [ + "bash", + "-c", + f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -aH "{substring}" /var/log/clickhouse-server/{filename} || true', + ] + ) return len(result) > 0 - def grep_in_log(self, substring, from_host=False, filename='clickhouse-server.log'): + def grep_in_log(self, substring, from_host=False, filename="clickhouse-server.log"): logging.debug(f"grep in log called %s", substring) if from_host: # We check fist file exists but want to look for all rotated logs as well - result = subprocess_check_call(["bash", "-c", - f'[ -f {self.logs_dir}/{filename} ] && zgrep -a "{substring}" {self.logs_dir}/{filename}* || true' - ]) + result = subprocess_check_call( + [ + "bash", + "-c", + f'[ -f {self.logs_dir}/{filename} ] && zgrep -a "{substring}" {self.logs_dir}/{filename}* || true', + ] + ) else: - result = self.exec_in_container(["bash", "-c", - f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -a "{substring}" 
/var/log/clickhouse-server/{filename}* || true' - ]) + result = self.exec_in_container( + [ + "bash", + "-c", + f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -a "{substring}" /var/log/clickhouse-server/{filename}* || true', + ] + ) logging.debug("grep result %s", result) return result def count_in_log(self, substring): result = self.exec_in_container( - ["bash", "-c", 'grep -a "{}" /var/log/clickhouse-server/clickhouse-server.log | wc -l'.format(substring)]) + [ + "bash", + "-c", + 'grep -a "{}" /var/log/clickhouse-server/clickhouse-server.log | wc -l'.format( + substring + ), + ] + ) return result - def wait_for_log_line(self, regexp, filename='/var/log/clickhouse-server/clickhouse-server.log', timeout=30, repetitions=1, look_behind_lines=100): + def wait_for_log_line( + self, + regexp, + filename="/var/log/clickhouse-server/clickhouse-server.log", + timeout=30, + repetitions=1, + look_behind_lines=100, + ): start_time = time.time() result = self.exec_in_container( - ["bash", "-c", 'timeout {} tail -Fn{} "{}" | grep -Em {} {}'.format(timeout, look_behind_lines, filename, repetitions, shlex.quote(regexp))]) + [ + "bash", + "-c", + 'timeout {} tail -Fn{} "{}" | grep -Em {} {}'.format( + timeout, + look_behind_lines, + filename, + repetitions, + shlex.quote(regexp), + ), + ] + ) # if repetitions>1 grep will return success even if not enough lines were collected, - if repetitions>1 and len(result.splitlines()) < repetitions: - logging.debug("wait_for_log_line: those lines were found during {} seconds:".format(timeout)) + if repetitions > 1 and len(result.splitlines()) < repetitions: + logging.debug( + "wait_for_log_line: those lines were found during {} seconds:".format( + timeout + ) + ) logging.debug(result) - raise Exception("wait_for_log_line: Not enough repetitions: {} found, while {} expected".format(len(result.splitlines()), repetitions)) + raise Exception( + "wait_for_log_line: Not enough repetitions: {} found, while {} expected".format( + len(result.splitlines()), repetitions + ) + ) wait_duration = time.time() - start_time - logging.debug('{} log line(s) matching "{}" appeared in a {:.3f} seconds'.format(repetitions, regexp, wait_duration)) + logging.debug( + '{} log line(s) matching "{}" appeared in a {:.3f} seconds'.format( + repetitions, regexp, wait_duration + ) + ) return wait_duration def path_exists(self, path): - return self.exec_in_container( - ["bash", "-c", "echo $(if [ -e '{}' ]; then echo 'yes'; else echo 'no'; fi)".format(path)]) == 'yes\n' + return ( + self.exec_in_container( + [ + "bash", + "-c", + "echo $(if [ -e '{}' ]; then echo 'yes'; else echo 'no'; fi)".format( + path + ), + ] + ) + == "yes\n" + ) def copy_file_to_container(self, local_path, dest_path): - return self.cluster.copy_file_to_container(self.docker_id, local_path, dest_path) + return self.cluster.copy_file_to_container( + self.docker_id, local_path, dest_path + ) def get_process_pid(self, process_name): - output = self.exec_in_container(["bash", "-c", - "ps ax | grep '{}' | grep -v 'grep' | grep -v 'coproc' | grep -v 'bash -c' | awk '{{print $1}}'".format( - process_name)]) + output = self.exec_in_container( + [ + "bash", + "-c", + "ps ax | grep '{}' | grep -v 'grep' | grep -v 'coproc' | grep -v 'bash -c' | awk '{{print $1}}'".format( + process_name + ), + ] + ) if output: try: - pid = int(output.split('\n')[0].strip()) + pid = int(output.split("\n")[0].strip()) return pid except: return None return None - def restart_with_original_version(self, stop_start_wait_sec=300, 
callback_onstop=None, signal=15): + def restart_with_original_version( + self, stop_start_wait_sec=300, callback_onstop=None, signal=15 + ): begin_time = time.time() if not self.stay_alive: raise Exception("Cannot restart not stay alive container") - self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root') + self.exec_in_container( + ["bash", "-c", "pkill -{} clickhouse".format(signal)], user="root" + ) retries = int(stop_start_wait_sec / 0.5) local_counter = 0 # wait stop @@ -2316,18 +3278,41 @@ class ClickHouseInstance: # force kill if server hangs if self.get_process_pid("clickhouse server"): # server can die before kill, so don't throw exception, it's expected - self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(9)], nothrow=True, user='root') + self.exec_in_container( + ["bash", "-c", "pkill -{} clickhouse".format(9)], + nothrow=True, + user="root", + ) if callback_onstop: callback_onstop(self) - self.exec_in_container(["bash", "-c", "echo 'restart_with_original_version: From version' && /usr/bin/clickhouse server --version && echo 'To version' && /usr/share/clickhouse_original server --version"]) self.exec_in_container( - ["bash", "-c", "cp /usr/share/clickhouse_original /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"], - user='root') - self.exec_in_container(["bash", "-c", - "cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse"], - user='root') - self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) + [ + "bash", + "-c", + "echo 'restart_with_original_version: From version' && /usr/bin/clickhouse server --version && echo 'To version' && /usr/share/clickhouse_original server --version", + ] + ) + self.exec_in_container( + [ + "bash", + "-c", + "cp /usr/share/clickhouse_original /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse", + ], + user="root", + ) + self.exec_in_container( + [ + "bash", + "-c", + "cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse", + ], + user="root", + ) + self.exec_in_container( + ["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], + user=str(os.getuid()), + ) # wait start time_left = begin_time + stop_start_wait_sec - time.time() @@ -2336,11 +3321,15 @@ class ClickHouseInstance: else: self.wait_start(time_left) - def restart_with_latest_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15): + def restart_with_latest_version( + self, stop_start_wait_sec=300, callback_onstop=None, signal=15 + ): begin_time = time.time() if not self.stay_alive: raise Exception("Cannot restart not stay alive container") - self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root') + self.exec_in_container( + ["bash", "-c", "pkill -{} clickhouse".format(signal)], user="root" + ) retries = int(stop_start_wait_sec / 0.5) local_counter = 0 # wait stop @@ -2353,21 +3342,45 @@ class ClickHouseInstance: # force kill if server hangs if self.get_process_pid("clickhouse server"): # server can die before kill, so don't throw exception, it's expected - self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(9)], nothrow=True, user='root') + self.exec_in_container( + ["bash", "-c", "pkill -{} clickhouse".format(9)], + nothrow=True, + user="root", + ) if callback_onstop: callback_onstop(self) self.exec_in_container( ["bash", "-c", "cp /usr/bin/clickhouse 
/usr/share/clickhouse_original"], - user='root') + user="root", + ) self.exec_in_container( - ["bash", "-c", "cp /usr/share/clickhouse_fresh /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"], - user='root') - self.exec_in_container(["bash", "-c", "echo 'restart_with_latest_version: From version' && /usr/share/clickhouse_original server --version && echo 'To version' /usr/share/clickhouse_fresh server --version"]) - self.exec_in_container(["bash", "-c", - "cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse"], - user='root') - self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) + [ + "bash", + "-c", + "cp /usr/share/clickhouse_fresh /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse", + ], + user="root", + ) + self.exec_in_container( + [ + "bash", + "-c", + "echo 'restart_with_latest_version: From version' && /usr/share/clickhouse_original server --version && echo 'To version' /usr/share/clickhouse_fresh server --version", + ] + ) + self.exec_in_container( + [ + "bash", + "-c", + "cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse", + ], + user="root", + ) + self.exec_in_container( + ["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], + user=str(os.getuid()), + ) # wait start time_left = begin_time + stop_start_wait_sec - time.time() @@ -2392,8 +3405,11 @@ class ClickHouseInstance: raise Exception("Invalid timeout: {}".format(start_timeout)) if connection_timeout is not None and connection_timeout < start_timeout: - raise Exception("Connection timeout {} should be grater then start timeout {}" - .format(connection_timeout, start_timeout)) + raise Exception( + "Connection timeout {} should be grater then start timeout {}".format( + connection_timeout, start_timeout + ) + ) start_time = time.time() prev_rows_in_log = 0 @@ -2411,19 +3427,23 @@ class ClickHouseInstance: while True: handle.reload() status = handle.status - if status == 'exited': - raise Exception(f"Instance `{self.name}' failed to start. Container status: {status}, logs: {handle.logs().decode('utf-8')}") + if status == "exited": + raise Exception( + f"Instance `{self.name}' failed to start. Container status: {status}, logs: {handle.logs().decode('utf-8')}" + ) deadline = start_time + start_timeout # It is possible that server starts slowly. # If container is running, and there is some progress in log, check connection_timeout. - if connection_timeout and status == 'running' and has_new_rows_in_log(): + if connection_timeout and status == "running" and has_new_rows_in_log(): deadline = start_time + connection_timeout current_time = time.time() if current_time >= deadline: - raise Exception(f"Timed out while waiting for instance `{self.name}' with ip address {self.ip_address} to start. " \ - f"Container status: {status}, logs: {handle.logs().decode('utf-8')}") + raise Exception( + f"Timed out while waiting for instance `{self.name}' with ip address {self.ip_address} to start. 
" + f"Container status: {status}, logs: {handle.logs().decode('utf-8')}" + ) socket_timeout = min(start_timeout, deadline - current_time) @@ -2438,7 +3458,11 @@ class ClickHouseInstance: except socket.timeout: continue except socket.error as e: - if e.errno == errno.ECONNREFUSED or e.errno == errno.EHOSTUNREACH or e.errno == errno.ENETUNREACH: + if ( + e.errno == errno.ECONNREFUSED + or e.errno == errno.EHOSTUNREACH + or e.errno == errno.ENETUNREACH + ): time.sleep(0.1) else: raise @@ -2446,7 +3470,9 @@ class ClickHouseInstance: sock.close() def dict_to_xml(self, dictionary): - xml_str = dict2xml(dictionary, wrap=self.config_root_name, indent=" ", newlines=True) + xml_str = dict2xml( + dictionary, wrap=self.config_root_name, indent=" ", newlines=True + ) return xml_str @property @@ -2481,13 +3507,13 @@ class ClickHouseInstance: "Driver": "/usr/lib/x86_64-linux-gnu/odbc/psqlodbca.so", "Setup": "/usr/lib/x86_64-linux-gnu/odbc/libodbcpsqlS.so", "ConnSettings": "", - } + }, } else: return {} def _create_odbc_config_file(self): - with open(self.odbc_ini_path.split(':')[0], 'w') as f: + with open(self.odbc_ini_path.split(":")[0], "w") as f: for driver_setup in list(self.odbc_drivers.values()): f.write("[{}]\n".format(driver_setup["DSN"])) for key, value in list(driver_setup.items()): @@ -2495,10 +3521,14 @@ class ClickHouseInstance: f.write(key + "=" + value + "\n") def replace_config(self, path_to_config, replacement): - self.exec_in_container(["bash", "-c", "echo '{}' > {}".format(replacement, path_to_config)]) + self.exec_in_container( + ["bash", "-c", "echo '{}' > {}".format(replacement, path_to_config)] + ) def replace_in_config(self, path_to_config, replace, replacement): - self.exec_in_container(["bash", "-c", f"sed -i 's/{replace}/{replacement}/g' {path_to_config}"]) + self.exec_in_container( + ["bash", "-c", f"sed -i 's/{replace}/{replacement}/g' {path_to_config}"] + ) def create_dir(self, destroy_dir=True): """Create the instance directory and all the needed files there.""" @@ -2510,54 +3540,64 @@ class ClickHouseInstance: os.makedirs(self.path) - instance_config_dir = p.abspath(p.join(self.path, 'configs')) + instance_config_dir = p.abspath(p.join(self.path, "configs")) os.makedirs(instance_config_dir) - print(f"Copy common default production configuration from {self.base_config_dir}. Files: {self.main_config_name}, {self.users_config_name}") + print( + f"Copy common default production configuration from {self.base_config_dir}. 
Files: {self.main_config_name}, {self.users_config_name}" + ) - shutil.copyfile(p.join(self.base_config_dir, self.main_config_name), p.join(instance_config_dir, self.main_config_name)) - shutil.copyfile(p.join(self.base_config_dir, self.users_config_name), p.join(instance_config_dir, self.users_config_name)) + shutil.copyfile( + p.join(self.base_config_dir, self.main_config_name), + p.join(instance_config_dir, self.main_config_name), + ) + shutil.copyfile( + p.join(self.base_config_dir, self.users_config_name), + p.join(instance_config_dir, self.users_config_name), + ) logging.debug("Create directory for configuration generated in this helper") # used by all utils with any config - conf_d_dir = p.abspath(p.join(instance_config_dir, 'conf.d')) + conf_d_dir = p.abspath(p.join(instance_config_dir, "conf.d")) os.mkdir(conf_d_dir) logging.debug("Create directory for common tests configuration") # used by server with main config.xml - self.config_d_dir = p.abspath(p.join(instance_config_dir, 'config.d')) + self.config_d_dir = p.abspath(p.join(instance_config_dir, "config.d")) os.mkdir(self.config_d_dir) - users_d_dir = p.abspath(p.join(instance_config_dir, 'users.d')) + users_d_dir = p.abspath(p.join(instance_config_dir, "users.d")) os.mkdir(users_d_dir) - dictionaries_dir = p.abspath(p.join(instance_config_dir, 'dictionaries')) + dictionaries_dir = p.abspath(p.join(instance_config_dir, "dictionaries")) os.mkdir(dictionaries_dir) - extra_conf_dir = p.abspath(p.join(instance_config_dir, 'extra_conf.d')) + extra_conf_dir = p.abspath(p.join(instance_config_dir, "extra_conf.d")) os.mkdir(extra_conf_dir) def write_embedded_config(name, dest_dir, fix_log_level=False): - with open(p.join(HELPERS_DIR, name), 'r') as f: + with open(p.join(HELPERS_DIR, name), "r") as f: data = f.read() - data = data.replace('clickhouse', self.config_root_name) + data = data.replace("clickhouse", self.config_root_name) if fix_log_level: - data = data.replace('test', 'trace') - with open(p.join(dest_dir, name), 'w') as r: + data = data.replace("test", "trace") + with open(p.join(dest_dir, name), "w") as r: r.write(data) logging.debug("Copy common configuration from helpers") # The file is named with 0_ prefix to be processed before other configuration overloads. 
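For reference, the per-instance configs/config.d and configs/users.d directories created in this method are populated from the arguments a test passes to ClickHouseCluster.add_instance; the sketch below shows a typical call site. It is an illustrative sketch only, not code from this patch: the instance name and config file names are hypothetical, and keyword defaults should be checked against helpers/cluster.py.

import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    "node",
    main_configs=["configs/custom_config.xml"],  # copied into configs/config.d
    user_configs=["configs/custom_users.xml"],   # copied into configs/users.d
    stay_alive=True,  # required by the stop/start/restart_* helpers above
)

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
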
if self.copy_common_configs: - need_fix_log_level = self.tag != 'latest' - write_embedded_config('0_common_instance_config.xml', self.config_d_dir, need_fix_log_level) + need_fix_log_level = self.tag != "latest" + write_embedded_config( + "0_common_instance_config.xml", self.config_d_dir, need_fix_log_level + ) - write_embedded_config('0_common_instance_users.xml', users_d_dir) + write_embedded_config("0_common_instance_users.xml", users_d_dir) if len(self.custom_dictionaries_paths): - write_embedded_config('0_common_enable_dictionaries.xml', self.config_d_dir) + write_embedded_config("0_common_enable_dictionaries.xml", self.config_d_dir) logging.debug("Generate and write macros file") macros = self.macros.copy() - macros['instance'] = self.name - with open(p.join(conf_d_dir, 'macros.xml'), 'w') as macros_config: + macros["instance"] = self.name + with open(p.join(conf_d_dir, "macros.xml"), "w") as macros_config: macros_config.write(self.dict_to_xml({"macros": macros})) # Put ZooKeeper config @@ -2565,10 +3605,14 @@ class ClickHouseInstance: shutil.copy(self.zookeeper_config_path, conf_d_dir) if self.with_kerberized_kafka or self.with_kerberized_hdfs: - shutil.copytree(self.kerberos_secrets_dir, p.abspath(p.join(self.path, 'secrets'))) + shutil.copytree( + self.kerberos_secrets_dir, p.abspath(p.join(self.path, "secrets")) + ) # Copy config.d configs - logging.debug(f"Copy custom test config files {self.custom_main_config_paths} to {self.config_d_dir}") + logging.debug( + f"Copy custom test config files {self.custom_main_config_paths} to {self.config_d_dir}" + ) for path in self.custom_main_config_paths: shutil.copy(path, self.config_d_dir) @@ -2582,16 +3626,18 @@ class ClickHouseInstance: for path in self.custom_extra_config_paths: shutil.copy(path, extra_conf_dir) - db_dir = p.abspath(p.join(self.path, 'database')) + db_dir = p.abspath(p.join(self.path, "database")) logging.debug(f"Setup database dir {db_dir}") if self.clickhouse_path_dir is not None: logging.debug(f"Database files taken from {self.clickhouse_path_dir}") shutil.copytree(self.clickhouse_path_dir, db_dir) - logging.debug(f"Database copied from {self.clickhouse_path_dir} to {db_dir}") + logging.debug( + f"Database copied from {self.clickhouse_path_dir} to {db_dir}" + ) else: os.mkdir(db_dir) - logs_dir = p.abspath(p.join(self.path, 'logs')) + logs_dir = p.abspath(p.join(self.path, "logs")) logging.debug(f"Setup logs dir {logs_dir}") os.mkdir(logs_dir) self.logs_dir = logs_dir @@ -2647,19 +3693,29 @@ class ClickHouseInstance: odbc_ini_path = "" if self.odbc_ini_path: self._create_odbc_config_file() - odbc_ini_path = '- ' + self.odbc_ini_path + odbc_ini_path = "- " + self.odbc_ini_path entrypoint_cmd = self.clickhouse_start_command if self.stay_alive: - entrypoint_cmd = CLICKHOUSE_STAY_ALIVE_COMMAND.replace("{main_config_file}", self.main_config_name) + entrypoint_cmd = CLICKHOUSE_STAY_ALIVE_COMMAND.replace( + "{main_config_file}", self.main_config_name + ) else: - entrypoint_cmd = '[' + ', '.join(map(lambda x: '"' + x + '"', entrypoint_cmd.split())) + ']' + entrypoint_cmd = ( + "[" + + ", ".join(map(lambda x: '"' + x + '"', entrypoint_cmd.split())) + + "]" + ) logging.debug("Entrypoint cmd: {}".format(entrypoint_cmd)) networks = app_net = ipv4_address = ipv6_address = net_aliases = net_alias1 = "" - if self.ipv4_address is not None or self.ipv6_address is not None or self.hostname != self.name: + if ( + self.ipv4_address is not None + or self.ipv6_address is not None + or self.hostname != self.name + ): networks = "networks:" 
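With an instance created that way, tests drive the ClickHouseInstance helpers reformatted earlier in this file (exec_in_container, wait_for_log_line, contains_in_log, stop_clickhouse/start_clickhouse). A hedged usage sketch follows; the "node" instance name and the log marker are purely illustrative:

def test_instance_helpers(started_cluster):
    node = started_cluster.instances["node"]

    # Run an arbitrary command inside the instance's container.
    node.exec_in_container(
        ["bash", "-c", "echo 'marker line' >> /var/log/clickhouse-server/clickhouse-server.log"],
        user="root",
    )

    # Wait (up to `timeout` seconds) for the marker to appear in the server log.
    node.wait_for_log_line("marker line", timeout=10)

    # Boolean check against the server log.
    assert node.contains_in_log("marker line")

    # Restart cycle; only valid for instances started with stay_alive=True.
    node.stop_clickhouse(kill=False)
    node.start_clickhouse(start_wait_sec=60)
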
app_net = "default:" if self.ipv4_address is not None: @@ -2672,51 +3728,70 @@ class ClickHouseInstance: if not self.with_installed_binary: binary_volume = "- " + self.server_bin_path + ":/usr/bin/clickhouse" - odbc_bridge_volume = "- " + self.odbc_bridge_bin_path + ":/usr/bin/clickhouse-odbc-bridge" - library_bridge_volume = "- " + self.library_bridge_bin_path + ":/usr/bin/clickhouse-library-bridge" + odbc_bridge_volume = ( + "- " + self.odbc_bridge_bin_path + ":/usr/bin/clickhouse-odbc-bridge" + ) + library_bridge_volume = ( + "- " + + self.library_bridge_bin_path + + ":/usr/bin/clickhouse-library-bridge" + ) else: binary_volume = "- " + self.server_bin_path + ":/usr/share/clickhouse_fresh" - odbc_bridge_volume = "- " + self.odbc_bridge_bin_path + ":/usr/share/clickhouse-odbc-bridge_fresh" - library_bridge_volume = "- " + self.library_bridge_bin_path + ":/usr/share/clickhouse-library-bridge_fresh" + odbc_bridge_volume = ( + "- " + + self.odbc_bridge_bin_path + + ":/usr/share/clickhouse-odbc-bridge_fresh" + ) + library_bridge_volume = ( + "- " + + self.library_bridge_bin_path + + ":/usr/share/clickhouse-library-bridge_fresh" + ) external_dirs_volumes = "" if self.external_dirs: for external_dir in self.external_dirs: - external_dir_abs_path = p.abspath(p.join(self.path, external_dir.lstrip('/'))) - logging.info(f'external_dir_abs_path={external_dir_abs_path}') + external_dir_abs_path = p.abspath( + p.join(self.path, external_dir.lstrip("/")) + ) + logging.info(f"external_dir_abs_path={external_dir_abs_path}") os.mkdir(external_dir_abs_path) - external_dirs_volumes += "- " + external_dir_abs_path + ":" + external_dir + "\n" + external_dirs_volumes += ( + "- " + external_dir_abs_path + ":" + external_dir + "\n" + ) - - with open(self.docker_compose_path, 'w') as docker_compose: - docker_compose.write(DOCKER_COMPOSE_TEMPLATE.format( - image=self.image, - tag=self.tag, - name=self.name, - hostname=self.hostname, - binary_volume=binary_volume, - odbc_bridge_volume=odbc_bridge_volume, - library_bridge_volume=library_bridge_volume, - instance_config_dir=instance_config_dir, - config_d_dir=self.config_d_dir, - db_dir=db_dir, - external_dirs_volumes=external_dirs_volumes, - tmpfs=str(self.tmpfs), - logs_dir=logs_dir, - depends_on=str(depends_on), - user=os.getuid(), - env_file=self.env_file, - odbc_ini_path=odbc_ini_path, - keytab_path=self.keytab_path, - krb5_conf=self.krb5_conf, - entrypoint_cmd=entrypoint_cmd, - networks=networks, - app_net=app_net, - ipv4_address=ipv4_address, - ipv6_address=ipv6_address, - net_aliases=net_aliases, - net_alias1=net_alias1, - )) + with open(self.docker_compose_path, "w") as docker_compose: + docker_compose.write( + DOCKER_COMPOSE_TEMPLATE.format( + image=self.image, + tag=self.tag, + name=self.name, + hostname=self.hostname, + binary_volume=binary_volume, + odbc_bridge_volume=odbc_bridge_volume, + library_bridge_volume=library_bridge_volume, + instance_config_dir=instance_config_dir, + config_d_dir=self.config_d_dir, + db_dir=db_dir, + external_dirs_volumes=external_dirs_volumes, + tmpfs=str(self.tmpfs), + logs_dir=logs_dir, + depends_on=str(depends_on), + user=os.getuid(), + env_file=self.env_file, + odbc_ini_path=odbc_ini_path, + keytab_path=self.keytab_path, + krb5_conf=self.krb5_conf, + entrypoint_cmd=entrypoint_cmd, + networks=networks, + app_net=app_net, + ipv4_address=ipv4_address, + ipv6_address=ipv6_address, + net_aliases=net_aliases, + net_alias1=net_alias1, + ) + ) def destroy_dir(self): if p.exists(self.path): @@ -2730,11 +3805,21 @@ class 
ClickHouseInstance: time.sleep(1) def get_backuped_s3_objects(self, disk, backup_name): - path = f'/var/lib/clickhouse/disks/{disk}/shadow/{backup_name}/store' + path = f"/var/lib/clickhouse/disks/{disk}/shadow/{backup_name}/store" self.wait_for_path_exists(path, 10) - command = ['find', path, '-type', 'f', - '-exec', 'grep', '-o', 'r[01]\\{64\\}-file-[[:lower:]]\\{32\\}', '{}', ';'] - return self.exec_in_container(command).split('\n') + command = [ + "find", + path, + "-type", + "f", + "-exec", + "grep", + "-o", + "r[01]\\{64\\}-file-[[:lower:]]\\{32\\}", + "{}", + ";", + ] + return self.exec_in_container(command).split("\n") class ClickHouseKiller(object): diff --git a/tests/integration/helpers/corrupt_part_data_on_disk.py b/tests/integration/helpers/corrupt_part_data_on_disk.py index 1a6f384da9e..676511ebbdf 100644 --- a/tests/integration/helpers/corrupt_part_data_on_disk.py +++ b/tests/integration/helpers/corrupt_part_data_on_disk.py @@ -1,14 +1,29 @@ def corrupt_part_data_on_disk(node, table, part_name): - part_path = node.query("SELECT path FROM system.parts WHERE table = '{}' and name = '{}'" - .format(table, part_name)).strip() + part_path = node.query( + "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format( + table, part_name + ) + ).strip() corrupt_part_data_by_path(node, part_path) + def corrupt_part_data_by_path(node, part_path): print("Corrupting part", part_path, "at", node.name) - print("Will corrupt: ", - node.exec_in_container(['bash', '-c', 'cd {p} && ls *.bin | head -n 1'.format(p=part_path)])) + print( + "Will corrupt: ", + node.exec_in_container( + ["bash", "-c", "cd {p} && ls *.bin | head -n 1".format(p=part_path)] + ), + ) - node.exec_in_container(['bash', '-c', - 'cd {p} && ls *.bin | head -n 1 | xargs -I{{}} sh -c \'echo "1" >> $1\' -- {{}}'.format( - p=part_path)], privileged=True) + node.exec_in_container( + [ + "bash", + "-c", + "cd {p} && ls *.bin | head -n 1 | xargs -I{{}} sh -c 'echo \"1\" >> $1' -- {{}}".format( + p=part_path + ), + ], + privileged=True, + ) diff --git a/tests/integration/helpers/dictionary.py b/tests/integration/helpers/dictionary.py index 99c4e3a5a5d..aaa1b00a8a6 100644 --- a/tests/integration/helpers/dictionary.py +++ b/tests/integration/helpers/dictionary.py @@ -4,18 +4,18 @@ import copy class Layout(object): LAYOUTS_STR_DICT = { - 'flat': '', - 'hashed': '', - 'cache': '128', - 'ssd_cache': '/etc/clickhouse-server/dictionaries/all', - 'complex_key_hashed': '', - 'complex_key_hashed_one_key': '', - 'complex_key_hashed_two_keys': '', - 'complex_key_cache': '128', - 'complex_key_ssd_cache': '/etc/clickhouse-server/dictionaries/all', - 'range_hashed': '', - 'direct': '', - 'complex_key_direct': '' + "flat": "", + "hashed": "", + "cache": "128", + "ssd_cache": "/etc/clickhouse-server/dictionaries/all", + "complex_key_hashed": "", + "complex_key_hashed_one_key": "", + "complex_key_hashed_two_keys": "", + "complex_key_cache": "128", + "complex_key_ssd_cache": "/etc/clickhouse-server/dictionaries/all", + "range_hashed": "", + "direct": "", + "complex_key_direct": "", } def __init__(self, name): @@ -23,14 +23,14 @@ class Layout(object): self.is_complex = False self.is_simple = False self.is_ranged = False - if self.name.startswith('complex'): - self.layout_type = 'complex' + if self.name.startswith("complex"): + self.layout_type = "complex" self.is_complex = True - elif name.startswith('range'): - self.layout_type = 'ranged' + elif name.startswith("range"): + self.layout_type = "ranged" self.is_ranged = True else: - 
self.layout_type = 'simple' + self.layout_type = "simple" self.is_simple = True def get_str(self): @@ -38,8 +38,8 @@ class Layout(object): def get_key_block_name(self): if self.is_complex: - return 'key' - return 'id' + return "key" + return "id" class Row(object): @@ -59,8 +59,17 @@ class Row(object): class Field(object): - def __init__(self, name, field_type, is_key=False, is_range_key=False, default=None, hierarchical=False, - range_hash_type=None, default_value_for_get=None): + def __init__( + self, + name, + field_type, + is_key=False, + is_range_key=False, + default=None, + hierarchical=False, + range_hash_type=None, + default_value_for_get=None, + ): self.name = name self.field_type = field_type self.is_key = is_key @@ -72,30 +81,32 @@ class Field(object): self.default_value_for_get = default_value_for_get def get_attribute_str(self): - return ''' + return """ {name} {field_type} {default} {hierarchical} - '''.format( + """.format( name=self.name, field_type=self.field_type, - default=self.default if self.default else '', - hierarchical='true' if self.hierarchical else 'false', + default=self.default if self.default else "", + hierarchical="true" if self.hierarchical else "false", ) def get_simple_index_str(self): - return '{name}'.format(name=self.name) + return "{name}".format(name=self.name) def get_range_hash_str(self): if not self.range_hash_type: raise Exception("Field {} is not range hashed".format(self.name)) - return ''' + return """ {name} - '''.format(type=self.range_hash_type, name=self.name) + """.format( + type=self.range_hash_type, name=self.name + ) class DictionaryStructure(object): @@ -125,9 +136,14 @@ class DictionaryStructure(object): if not self.layout.is_complex and len(self.keys) > 1: raise Exception( - "More than one key {} field in non complex layout {}".format(len(self.keys), self.layout.name)) + "More than one key {} field in non complex layout {}".format( + len(self.keys), self.layout.name + ) + ) - if self.layout.is_ranged and (not self.range_key or len(self.range_fields) != 2): + if self.layout.is_ranged and ( + not self.range_key or len(self.range_fields) != 2 + ): raise Exception("Inconsistent configuration of ranged dictionary") def get_structure_str(self): @@ -148,7 +164,7 @@ class DictionaryStructure(object): for range_field in self.range_fields: ranged_strs.append(range_field.get_range_hash_str()) - return ''' + return """ {layout_str} @@ -158,12 +174,12 @@ class DictionaryStructure(object): {range_strs} {attributes_str} - '''.format( + """.format( layout_str=self.layout.get_str(), key_block_name=self.layout.get_key_block_name(), - key_str='\n'.join(key_strs), - attributes_str='\n'.join(fields_strs), - range_strs='\n'.join(ranged_strs), + key_str="\n".join(key_strs), + attributes_str="\n".join(fields_strs), + range_strs="\n".join(ranged_strs), ) def get_ordered_names(self): @@ -179,15 +195,19 @@ class DictionaryStructure(object): def get_all_fields(self): return self.keys + self.range_fields + self.ordinary_fields - def _get_dict_get_common_expression(self, dict_name, field, row, or_default, with_type, has): + def _get_dict_get_common_expression( + self, dict_name, field, row, or_default, with_type, has + ): if field in self.keys: - raise Exception("Trying to receive key field {} from dictionary".format(field.name)) + raise Exception( + "Trying to receive key field {} from dictionary".format(field.name) + ) if not self.layout.is_complex: if not or_default: - key_expr = ', toUInt64({})'.format(row.data[self.keys[0].name]) + key_expr = ", 
toUInt64({})".format(row.data[self.keys[0].name]) else: - key_expr = ', toUInt64({})'.format(self.keys[0].default_value_for_get) + key_expr = ", toUInt64({})".format(self.keys[0].default_value_for_get) else: key_exprs_strs = [] for key in self.keys: @@ -197,48 +217,57 @@ class DictionaryStructure(object): val = key.default_value_for_get if isinstance(val, str): val = "'" + val + "'" - key_exprs_strs.append('to{type}({value})'.format(type=key.field_type, value=val)) - key_expr = ', tuple(' + ','.join(key_exprs_strs) + ')' + key_exprs_strs.append( + "to{type}({value})".format(type=key.field_type, value=val) + ) + key_expr = ", tuple(" + ",".join(key_exprs_strs) + ")" - date_expr = '' + date_expr = "" if self.layout.is_ranged: val = row.data[self.range_key.name] if isinstance(val, str): val = "'" + val + "'" val = "to{type}({val})".format(type=self.range_key.field_type, val=val) - date_expr = ', ' + val + date_expr = ", " + val if or_default: - raise Exception("Can create 'dictGetOrDefault' query for ranged dictionary") + raise Exception( + "Can create 'dictGetOrDefault' query for ranged dictionary" + ) if or_default: - or_default_expr = 'OrDefault' + or_default_expr = "OrDefault" if field.default_value_for_get is None: raise Exception( - "Can create 'dictGetOrDefault' query for field {} without default_value_for_get".format(field.name)) + "Can create 'dictGetOrDefault' query for field {} without default_value_for_get".format( + field.name + ) + ) val = field.default_value_for_get if isinstance(val, str): val = "'" + val + "'" - default_value_for_get = ', to{type}({value})'.format(type=field.field_type, value=val) + default_value_for_get = ", to{type}({value})".format( + type=field.field_type, value=val + ) else: - or_default_expr = '' - default_value_for_get = '' + or_default_expr = "" + default_value_for_get = "" if with_type: field_type = field.field_type else: - field_type = '' + field_type = "" field_name = ", '" + field.name + "'" if has: what = "Has" - field_type = '' - or_default = '' - field_name = '' - date_expr = '' - def_for_get = '' + field_type = "" + or_default = "" + field_name = "" + date_expr = "" + def_for_get = "" else: what = "Get" @@ -255,28 +284,38 @@ class DictionaryStructure(object): def get_get_expressions(self, dict_name, field, row): return [ - self._get_dict_get_common_expression(dict_name, field, row, or_default=False, with_type=False, has=False), - self._get_dict_get_common_expression(dict_name, field, row, or_default=False, with_type=True, has=False), + self._get_dict_get_common_expression( + dict_name, field, row, or_default=False, with_type=False, has=False + ), + self._get_dict_get_common_expression( + dict_name, field, row, or_default=False, with_type=True, has=False + ), ] def get_get_or_default_expressions(self, dict_name, field, row): if not self.layout.is_ranged: return [ - self._get_dict_get_common_expression(dict_name, field, row, or_default=True, with_type=False, - has=False), - self._get_dict_get_common_expression(dict_name, field, row, or_default=True, with_type=True, has=False), + self._get_dict_get_common_expression( + dict_name, field, row, or_default=True, with_type=False, has=False + ), + self._get_dict_get_common_expression( + dict_name, field, row, or_default=True, with_type=True, has=False + ), ] return [] def get_has_expressions(self, dict_name, field, row): if not self.layout.is_ranged: - return [self._get_dict_get_common_expression(dict_name, field, row, or_default=False, with_type=False, - has=True)] + return [ + 
self._get_dict_get_common_expression( + dict_name, field, row, or_default=False, with_type=False, has=True + ) + ] return [] def get_hierarchical_expressions(self, dict_name, row): if self.layout.is_simple: - key_expr = 'toUInt64({})'.format(row.data[self.keys[0].name]) + key_expr = "toUInt64({})".format(row.data[self.keys[0].name]) return [ "dictGetHierarchy('{dict_name}', {key})".format( dict_name=dict_name, @@ -288,21 +327,31 @@ class DictionaryStructure(object): def get_is_in_expressions(self, dict_name, row, parent_row): if self.layout.is_simple: - child_key_expr = 'toUInt64({})'.format(row.data[self.keys[0].name]) - parent_key_expr = 'toUInt64({})'.format(parent_row.data[self.keys[0].name]) + child_key_expr = "toUInt64({})".format(row.data[self.keys[0].name]) + parent_key_expr = "toUInt64({})".format(parent_row.data[self.keys[0].name]) return [ "dictIsIn('{dict_name}', {child_key}, {parent_key})".format( dict_name=dict_name, child_key=child_key_expr, - parent_key=parent_key_expr, ) + parent_key=parent_key_expr, + ) ] return [] class Dictionary(object): - def __init__(self, name, structure, source, config_path, - table_name, fields, min_lifetime=3, max_lifetime=5): + def __init__( + self, + name, + structure, + source, + config_path, + table_name, + fields, + min_lifetime=3, + max_lifetime=5, + ): self.name = name self.structure = copy.deepcopy(structure) self.source = copy.deepcopy(source) @@ -313,9 +362,10 @@ class Dictionary(object): self.max_lifetime = max_lifetime def generate_config(self): - with open(self.config_path, 'w') as result: - if 'direct' not in self.structure.layout.get_str(): - result.write(''' + with open(self.config_path, "w") as result: + if "direct" not in self.structure.layout.get_str(): + result.write( + """ @@ -329,15 +379,17 @@ class Dictionary(object): - '''.format( - min_lifetime=self.min_lifetime, - max_lifetime=self.max_lifetime, - name=self.name, - structure=self.structure.get_structure_str(), - source=self.source.get_source_str(self.table_name), - )) + """.format( + min_lifetime=self.min_lifetime, + max_lifetime=self.max_lifetime, + name=self.name, + structure=self.structure.get_structure_str(), + source=self.source.get_source_str(self.table_name), + ) + ) else: - result.write(''' + result.write( + """ {name} @@ -347,38 +399,59 @@ class Dictionary(object): - '''.format( - min_lifetime=self.min_lifetime, - max_lifetime=self.max_lifetime, - name=self.name, - structure=self.structure.get_structure_str(), - source=self.source.get_source_str(self.table_name), - )) + """.format( + min_lifetime=self.min_lifetime, + max_lifetime=self.max_lifetime, + name=self.name, + structure=self.structure.get_structure_str(), + source=self.source.get_source_str(self.table_name), + ) + ) def prepare_source(self, cluster): self.source.prepare(self.structure, self.table_name, cluster) def load_data(self, data): if not self.source.prepared: - raise Exception("Cannot load data for dictionary {}, source is not prepared".format(self.name)) + raise Exception( + "Cannot load data for dictionary {}, source is not prepared".format( + self.name + ) + ) self.source.load_data(data, self.table_name) def get_select_get_queries(self, field, row): - return ['select {}'.format(expr) for expr in self.structure.get_get_expressions(self.name, field, row)] + return [ + "select {}".format(expr) + for expr in self.structure.get_get_expressions(self.name, field, row) + ] def get_select_get_or_default_queries(self, field, row): - return ['select {}'.format(expr) for expr in - 
self.structure.get_get_or_default_expressions(self.name, field, row)] + return [ + "select {}".format(expr) + for expr in self.structure.get_get_or_default_expressions( + self.name, field, row + ) + ] def get_select_has_queries(self, field, row): - return ['select {}'.format(expr) for expr in self.structure.get_has_expressions(self.name, field, row)] + return [ + "select {}".format(expr) + for expr in self.structure.get_has_expressions(self.name, field, row) + ] def get_hierarchical_queries(self, row): - return ['select {}'.format(expr) for expr in self.structure.get_hierarchical_expressions(self.name, row)] + return [ + "select {}".format(expr) + for expr in self.structure.get_hierarchical_expressions(self.name, row) + ] def get_is_in_queries(self, row, parent_row): - return ['select {}'.format(expr) for expr in self.structure.get_is_in_expressions(self.name, row, parent_row)] + return [ + "select {}".format(expr) + for expr in self.structure.get_is_in_expressions(self.name, row, parent_row) + ] def is_complex(self): return self.structure.layout.is_complex diff --git a/tests/integration/helpers/external_sources.py b/tests/integration/helpers/external_sources.py index 93247e7b617..fd086fc4526 100644 --- a/tests/integration/helpers/external_sources.py +++ b/tests/integration/helpers/external_sources.py @@ -11,9 +11,18 @@ import pymysql.cursors import redis import logging + class ExternalSource(object): - def __init__(self, name, internal_hostname, internal_port, - docker_hostname, docker_port, user, password): + def __init__( + self, + name, + internal_hostname, + internal_port, + docker_hostname, + docker_port, + user, + password, + ): self.name = name self.internal_hostname = internal_hostname self.internal_port = int(internal_port) @@ -23,17 +32,26 @@ class ExternalSource(object): self.password = password def get_source_str(self, table_name): - raise NotImplementedError("Method {} is not implemented for {}".format( - "get_source_config_part", self.__class__.__name__)) + raise NotImplementedError( + "Method {} is not implemented for {}".format( + "get_source_config_part", self.__class__.__name__ + ) + ) def prepare(self, structure, table_name, cluster): - raise NotImplementedError("Method {} is not implemented for {}".format( - "prepare_remote_source", self.__class__.__name__)) + raise NotImplementedError( + "Method {} is not implemented for {}".format( + "prepare_remote_source", self.__class__.__name__ + ) + ) # data is banch of Row def load_data(self, data): - raise NotImplementedError("Method {} is not implemented for {}".format( - "prepare_remote_source", self.__class__.__name__)) + raise NotImplementedError( + "Method {} is not implemented for {}".format( + "prepare_remote_source", self.__class__.__name__ + ) + ) def compatible_with_layout(self, layout): return True @@ -41,29 +59,32 @@ class ExternalSource(object): class SourceMySQL(ExternalSource): TYPE_MAPPING = { - 'UInt8': 'tinyint unsigned', - 'UInt16': 'smallint unsigned', - 'UInt32': 'int unsigned', - 'UInt64': 'bigint unsigned', - 'Int8': 'tinyint', - 'Int16': 'smallint', - 'Int32': 'int', - 'Int64': 'bigint', - 'UUID': 'varchar(36)', - 'Date': 'date', - 'DateTime': 'datetime', - 'String': 'text', - 'Float32': 'float', - 'Float64': 'double' + "UInt8": "tinyint unsigned", + "UInt16": "smallint unsigned", + "UInt32": "int unsigned", + "UInt64": "bigint unsigned", + "Int8": "tinyint", + "Int16": "smallint", + "Int32": "int", + "Int64": "bigint", + "UUID": "varchar(36)", + "Date": "date", + "DateTime": "datetime", + "String": 
"text", + "Float32": "float", + "Float64": "double", } def create_mysql_conn(self): - logging.debug(f"pymysql connect {self.user}, {self.password}, {self.internal_hostname}, {self.internal_port}") + logging.debug( + f"pymysql connect {self.user}, {self.password}, {self.internal_hostname}, {self.internal_port}" + ) self.connection = pymysql.connect( user=self.user, password=self.password, host=self.internal_hostname, - port=self.internal_port) + port=self.internal_port, + ) def execute_mysql_query(self, query): with warnings.catch_warnings(): @@ -73,7 +94,7 @@ class SourceMySQL(ExternalSource): self.connection.commit() def get_source_str(self, table_name): - return ''' + return """ 1 @@ -89,7 +110,7 @@ class SourceMySQL(ExternalSource): {password} test {tbl}
-
'''.format( + """.format( hostname=self.docker_hostname, port=self.docker_port, user=self.user, @@ -101,14 +122,20 @@ class SourceMySQL(ExternalSource): if self.internal_hostname is None: self.internal_hostname = cluster.mysql_ip self.create_mysql_conn() - self.execute_mysql_query("create database if not exists test default character set 'utf8'") + self.execute_mysql_query( + "create database if not exists test default character set 'utf8'" + ) self.execute_mysql_query("drop table if exists test.{}".format(table_name)) fields_strs = [] - for field in structure.keys + structure.ordinary_fields + structure.range_fields: - fields_strs.append(field.name + ' ' + self.TYPE_MAPPING[field.field_type]) - create_query = '''create table test.{table_name} ( + for field in ( + structure.keys + structure.ordinary_fields + structure.range_fields + ): + fields_strs.append(field.name + " " + self.TYPE_MAPPING[field.field_type]) + create_query = """create table test.{table_name} ( {fields_str}); - '''.format(table_name=table_name, fields_str=','.join(fields_strs)) + """.format( + table_name=table_name, fields_str=",".join(fields_strs) + ) self.execute_mysql_query(create_query) self.ordered_names = structure.get_ordered_names() self.prepared = True @@ -126,18 +153,16 @@ class SourceMySQL(ExternalSource): else: data = str(data) sorted_row.append(data) - values_strs.append('(' + ','.join(sorted_row) + ')') - query = 'insert into test.{} ({}) values {}'.format( - table_name, - ','.join(self.ordered_names), - ','.join(values_strs)) + values_strs.append("(" + ",".join(sorted_row) + ")") + query = "insert into test.{} ({}) values {}".format( + table_name, ",".join(self.ordered_names), ",".join(values_strs) + ) self.execute_mysql_query(query) class SourceMongo(ExternalSource): - def get_source_str(self, table_name): - return ''' + return """ {host} {port} @@ -146,7 +171,7 @@ class SourceMongo(ExternalSource): test {tbl} - '''.format( + """.format( host=self.docker_hostname, port=self.docker_port, user=self.user, @@ -155,22 +180,29 @@ class SourceMongo(ExternalSource): ) def prepare(self, structure, table_name, cluster): - connection_str = 'mongodb://{user}:{password}@{host}:{port}'.format( - host=self.internal_hostname, port=self.internal_port, - user=self.user, password=self.password) + connection_str = "mongodb://{user}:{password}@{host}:{port}".format( + host=self.internal_hostname, + port=self.internal_port, + user=self.user, + password=self.password, + ) self.connection = pymongo.MongoClient(connection_str) self.converters = {} for field in structure.get_all_fields(): if field.field_type == "Date": - self.converters[field.name] = lambda x: datetime.datetime.strptime(x, "%Y-%m-%d") + self.converters[field.name] = lambda x: datetime.datetime.strptime( + x, "%Y-%m-%d" + ) elif field.field_type == "DateTime": + def converter(x): - return datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S') + return datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S") + self.converters[field.name] = converter else: self.converters[field.name] = lambda x: x - self.db = self.connection['test'] + self.db = self.connection["test"] self.db.add_user(self.user, self.password) self.prepared = True @@ -191,15 +223,15 @@ class SourceMongoURI(SourceMongo): def compatible_with_layout(self, layout): # It is enough to test one layout for this dictionary, since we're # only testing that the connection with URI works. 
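Every Source* class in external_sources.py implements the same small contract declared on ExternalSource: get_source_str() returns the XML fragment describing the dictionary source, prepare() sets up the backing storage, and load_data() inserts a batch of Row objects. A minimal, purely illustrative subclass (the class name and its trivial behaviour are invented for this sketch):

class SourceDevNull(ExternalSource):
    """Toy source: an executable that always yields an empty TSV stream."""

    def get_source_str(self, table_name):
        return """
            <executable>
                <command>true</command>
                <format>TabSeparated</format>
            </executable>
        """

    def prepare(self, structure, table_name, cluster):
        self.ordered_names = structure.get_ordered_names()
        self.prepared = True

    def load_data(self, data, table_name):
        pass  # nothing is stored, the dictionary will simply stay empty

    def compatible_with_layout(self, layout):
        # Cache/direct layouts query the source on demand, which makes little
        # sense for an always-empty stream, so restrict to bulk-loaded layouts.
        return "cache" not in layout.name and "direct" not in layout.name
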
- return layout.name == 'flat' + return layout.name == "flat" def get_source_str(self, table_name): - return ''' + return """ mongodb://{user}:{password}@{host}:{port}/test {tbl} - '''.format( + """.format( host=self.docker_hostname, port=self.docker_port, user=self.user, @@ -209,9 +241,8 @@ class SourceMongoURI(SourceMongo): class SourceClickHouse(ExternalSource): - def get_source_str(self, table_name): - return ''' + return """ {host} {port} @@ -220,7 +251,7 @@ class SourceClickHouse(ExternalSource): test {tbl}
- '''.format( + """.format( host=self.docker_hostname, port=self.docker_port, user=self.user, @@ -232,11 +263,15 @@ class SourceClickHouse(ExternalSource): self.node = cluster.instances[self.docker_hostname] self.node.query("CREATE DATABASE IF NOT EXISTS test") fields_strs = [] - for field in structure.keys + structure.ordinary_fields + structure.range_fields: - fields_strs.append(field.name + ' ' + field.field_type) - create_query = '''CREATE TABLE test.{table_name} ( + for field in ( + structure.keys + structure.ordinary_fields + structure.range_fields + ): + fields_strs.append(field.name + " " + field.field_type) + create_query = """CREATE TABLE test.{table_name} ( {fields_str}) ENGINE MergeTree ORDER BY tuple(); - '''.format(table_name=table_name, fields_str=','.join(fields_strs)) + """.format( + table_name=table_name, fields_str=",".join(fields_strs) + ) self.node.query(create_query) self.ordered_names = structure.get_ordered_names() self.prepared = True @@ -254,31 +289,31 @@ class SourceClickHouse(ExternalSource): else: row_data = str(row_data) sorted_row.append(row_data) - values_strs.append('(' + ','.join(sorted_row) + ')') - query = 'INSERT INTO test.{} ({}) values {}'.format( - table_name, - ','.join(self.ordered_names), - ','.join(values_strs)) + values_strs.append("(" + ",".join(sorted_row) + ")") + query = "INSERT INTO test.{} ({}) values {}".format( + table_name, ",".join(self.ordered_names), ",".join(values_strs) + ) self.node.query(query) class SourceFile(ExternalSource): - def get_source_str(self, table_name): table_path = "/" + table_name + ".tsv" - return ''' + return """ {path} TabSeparated - '''.format( + """.format( path=table_path, ) def prepare(self, structure, table_name, cluster): self.node = cluster.instances[self.docker_hostname] path = "/" + table_name + ".tsv" - self.node.exec_in_container(["bash", "-c", "touch {}".format(path)], user="root") + self.node.exec_in_container( + ["bash", "-c", "touch {}".format(path)], user="root" + ) self.ordered_names = structure.get_ordered_names() self.prepared = True @@ -291,35 +326,45 @@ class SourceFile(ExternalSource): for name in self.ordered_names: sorted_row.append(str(row.data[name])) - str_data = '\t'.join(sorted_row) - self.node.exec_in_container(["bash", "-c", "echo \"{row}\" >> {fname}".format(row=str_data, fname=path)], - user="root") + str_data = "\t".join(sorted_row) + self.node.exec_in_container( + [ + "bash", + "-c", + 'echo "{row}" >> {fname}'.format(row=str_data, fname=path), + ], + user="root", + ) def compatible_with_layout(self, layout): - return 'cache' not in layout.name and 'direct' not in layout.name + return "cache" not in layout.name and "direct" not in layout.name class _SourceExecutableBase(ExternalSource): - def _get_cmd(self, path): - raise NotImplementedError("Method {} is not implemented for {}".format( - "_get_cmd", self.__class__.__name__)) + raise NotImplementedError( + "Method {} is not implemented for {}".format( + "_get_cmd", self.__class__.__name__ + ) + ) def get_source_str(self, table_name): table_path = "/" + table_name + ".tsv" - return ''' + return """ {cmd} TabSeparated - '''.format( + """.format( cmd=self._get_cmd(table_path), ) def prepare(self, structure, table_name, cluster): self.node = cluster.instances[self.docker_hostname] path = "/" + table_name + ".tsv" - self.node.exec_in_container(["bash", "-c", "touch {}".format(path)], user="root") + self.node.exec_in_container( + ["bash", "-c", "touch {}".format(path)], user="root" + ) self.ordered_names = 
structure.get_ordered_names() self.prepared = True @@ -332,27 +377,31 @@ class _SourceExecutableBase(ExternalSource): for name in self.ordered_names: sorted_row.append(str(row.data[name])) - str_data = '\t'.join(sorted_row) - self.node.exec_in_container(["bash", "-c", "echo \"{row}\" >> {fname}".format(row=str_data, fname=path)], - user='root') + str_data = "\t".join(sorted_row) + self.node.exec_in_container( + [ + "bash", + "-c", + 'echo "{row}" >> {fname}'.format(row=str_data, fname=path), + ], + user="root", + ) class SourceExecutableHashed(_SourceExecutableBase): - def _get_cmd(self, path): return "cat {}".format(path) def compatible_with_layout(self, layout): - return 'hashed' in layout.name + return "hashed" in layout.name class SourceExecutableCache(_SourceExecutableBase): - def _get_cmd(self, path): return "cat - >/dev/null;cat {}".format(path) def compatible_with_layout(self, layout): - return 'cache' in layout.name + return "cache" in layout.name class SourceHTTPBase(ExternalSource): @@ -360,10 +409,11 @@ class SourceHTTPBase(ExternalSource): def get_source_str(self, table_name): self.http_port = SourceHTTPBase.PORT_COUNTER - url = "{schema}://{host}:{port}/".format(schema=self._get_schema(), host=self.docker_hostname, - port=self.http_port) + url = "{schema}://{host}:{port}/".format( + schema=self._get_schema(), host=self.docker_hostname, port=self.http_port + ) SourceHTTPBase.PORT_COUNTER += 1 - return ''' + return """ {url} TabSeparated @@ -378,22 +428,37 @@ class SourceHTTPBase(ExternalSource): - '''.format(url=url) + """.format( + url=url + ) def prepare(self, structure, table_name, cluster): self.node = cluster.instances[self.docker_hostname] path = "/" + table_name + ".tsv" - self.node.exec_in_container(["bash", "-c", "touch {}".format(path)], user='root') + self.node.exec_in_container( + ["bash", "-c", "touch {}".format(path)], user="root" + ) script_dir = os.path.dirname(os.path.realpath(__file__)) - self.node.copy_file_to_container(os.path.join(script_dir, './http_server.py'), '/http_server.py') - self.node.copy_file_to_container(os.path.join(script_dir, './fake_cert.pem'), '/fake_cert.pem') - self.node.exec_in_container([ - "bash", - "-c", - "python3 /http_server.py --data-path={tbl} --schema={schema} --host={host} --port={port} --cert-path=/fake_cert.pem".format( - tbl=path, schema=self._get_schema(), host=self.docker_hostname, port=self.http_port) - ], detach=True) + self.node.copy_file_to_container( + os.path.join(script_dir, "./http_server.py"), "/http_server.py" + ) + self.node.copy_file_to_container( + os.path.join(script_dir, "./fake_cert.pem"), "/fake_cert.pem" + ) + self.node.exec_in_container( + [ + "bash", + "-c", + "python3 /http_server.py --data-path={tbl} --schema={schema} --host={host} --port={port} --cert-path=/fake_cert.pem".format( + tbl=path, + schema=self._get_schema(), + host=self.docker_hostname, + port=self.http_port, + ), + ], + detach=True, + ) self.ordered_names = structure.get_ordered_names() self.prepared = True @@ -406,9 +471,15 @@ class SourceHTTPBase(ExternalSource): for name in self.ordered_names: sorted_row.append(str(row.data[name])) - str_data = '\t'.join(sorted_row) - self.node.exec_in_container(["bash", "-c", "echo \"{row}\" >> {fname}".format(row=str_data, fname=path)], - user='root') + str_data = "\t".join(sorted_row) + self.node.exec_in_container( + [ + "bash", + "-c", + 'echo "{row}" >> {fname}'.format(row=str_data, fname=path), + ], + user="root", + ) class SourceHTTP(SourceHTTPBase): @@ -423,29 +494,46 @@ class 
SourceHTTPS(SourceHTTPBase): class SourceCassandra(ExternalSource): TYPE_MAPPING = { - 'UInt8': 'tinyint', - 'UInt16': 'smallint', - 'UInt32': 'int', - 'UInt64': 'bigint', - 'Int8': 'tinyint', - 'Int16': 'smallint', - 'Int32': 'int', - 'Int64': 'bigint', - 'UUID': 'uuid', - 'Date': 'date', - 'DateTime': 'timestamp', - 'String': 'text', - 'Float32': 'float', - 'Float64': 'double' + "UInt8": "tinyint", + "UInt16": "smallint", + "UInt32": "int", + "UInt64": "bigint", + "Int8": "tinyint", + "Int16": "smallint", + "Int32": "int", + "Int64": "bigint", + "UUID": "uuid", + "Date": "date", + "DateTime": "timestamp", + "String": "text", + "Float32": "float", + "Float64": "double", } - def __init__(self, name, internal_hostname, internal_port, docker_hostname, docker_port, user, password): - ExternalSource.__init__(self, name, internal_hostname, internal_port, docker_hostname, docker_port, user, - password) + def __init__( + self, + name, + internal_hostname, + internal_port, + docker_hostname, + docker_port, + user, + password, + ): + ExternalSource.__init__( + self, + name, + internal_hostname, + internal_port, + docker_hostname, + docker_port, + user, + password, + ) self.structure = dict() def get_source_str(self, table_name): - return ''' + return """ {host} {port} @@ -454,7 +542,7 @@ class SourceCassandra(ExternalSource): 1 "Int64_" < 1000000000000000000 - '''.format( + """.format( host=self.docker_hostname, port=self.docker_port, table=table_name, @@ -464,49 +552,79 @@ class SourceCassandra(ExternalSource): if self.internal_hostname is None: self.internal_hostname = cluster.cassandra_ip - self.client = cassandra.cluster.Cluster([self.internal_hostname], port=self.internal_port) + self.client = cassandra.cluster.Cluster( + [self.internal_hostname], port=self.internal_port + ) self.session = self.client.connect() self.session.execute( - "create keyspace if not exists test with replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};") + "create keyspace if not exists test with replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};" + ) self.session.execute('drop table if exists test."{}"'.format(table_name)) self.structure[table_name] = structure - columns = ['"' + col.name + '" ' + self.TYPE_MAPPING[col.field_type] for col in structure.get_all_fields()] + columns = [ + '"' + col.name + '" ' + self.TYPE_MAPPING[col.field_type] + for col in structure.get_all_fields() + ] keys = ['"' + col.name + '"' for col in structure.keys] query = 'create table test."{name}" ({columns}, primary key ({pk}));'.format( - name=table_name, columns=', '.join(columns), pk=', '.join(keys)) + name=table_name, columns=", ".join(columns), pk=", ".join(keys) + ) self.session.execute(query) self.prepared = True def get_value_to_insert(self, value, type): - if type == 'UUID': + if type == "UUID": return uuid.UUID(value) - elif type == 'DateTime': - return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S') + elif type == "DateTime": + return datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S") return value def load_data(self, data, table_name): - names_and_types = [(field.name, field.field_type) for field in self.structure[table_name].get_all_fields()] + names_and_types = [ + (field.name, field.field_type) + for field in self.structure[table_name].get_all_fields() + ] columns = ['"' + col[0] + '"' for col in names_and_types] insert = 'insert into test."{table}" ({columns}) values ({args})'.format( - table=table_name, columns=','.join(columns), args=','.join(['%s'] * len(columns))) + 
table=table_name, + columns=",".join(columns), + args=",".join(["%s"] * len(columns)), + ) for row in data: - values = [self.get_value_to_insert(row.get_value_by_name(col[0]), col[1]) for col in names_and_types] + values = [ + self.get_value_to_insert(row.get_value_by_name(col[0]), col[1]) + for col in names_and_types + ] self.session.execute(insert, values) class SourceRedis(ExternalSource): def __init__( - self, name, internal_hostname, internal_port, docker_hostname, docker_port, user, password, db_index, - storage_type + self, + name, + internal_hostname, + internal_port, + docker_hostname, + docker_port, + user, + password, + db_index, + storage_type, ): super(SourceRedis, self).__init__( - name, internal_hostname, internal_port, docker_hostname, docker_port, user, password + name, + internal_hostname, + internal_port, + docker_hostname, + docker_port, + user, + password, ) self.storage_type = storage_type self.db_index = db_index def get_source_str(self, table_name): - return ''' + return """ {host} {port} @@ -514,7 +632,7 @@ class SourceRedis(ExternalSource): {db_index} {storage_type} - '''.format( + """.format( host=self.docker_hostname, port=self.docker_port, password=self.password, @@ -523,8 +641,12 @@ class SourceRedis(ExternalSource): ) def prepare(self, structure, table_name, cluster): - self.client = redis.StrictRedis(host=self.internal_hostname, port=self.internal_port, db=self.db_index, - password=self.password or None) + self.client = redis.StrictRedis( + host=self.internal_hostname, + port=self.internal_port, + db=self.db_index, + password=self.password or None, + ) self.prepared = True self.ordered_names = structure.get_ordered_names() @@ -540,33 +662,52 @@ class SourceRedis(ExternalSource): self.client.hset(*values) def compatible_with_layout(self, layout): - return layout.is_simple and self.storage_type == "simple" or layout.is_complex and self.storage_type == "hash_map" + return ( + layout.is_simple + and self.storage_type == "simple" + or layout.is_complex + and self.storage_type == "hash_map" + ) class SourceAerospike(ExternalSource): - def __init__(self, name, internal_hostname, internal_port, - docker_hostname, docker_port, user, password): - ExternalSource.__init__(self, name, internal_hostname, internal_port, - docker_hostname, docker_port, user, password) + def __init__( + self, + name, + internal_hostname, + internal_port, + docker_hostname, + docker_port, + user, + password, + ): + ExternalSource.__init__( + self, + name, + internal_hostname, + internal_port, + docker_hostname, + docker_port, + user, + password, + ) self.namespace = "test" self.set = "test_set" def get_source_str(self, table_name): print("AEROSPIKE get source str") - return ''' + return """ {host} {port} - '''.format( + """.format( host=self.docker_hostname, port=self.docker_port, ) def prepare(self, structure, table_name, cluster): - config = { - 'hosts': [(self.internal_hostname, self.internal_port)] - } + config = {"hosts": [(self.internal_hostname, self.internal_port)]} self.client = aerospike.client(config).connect() self.prepared = True print("PREPARED AEROSPIKE") @@ -601,10 +742,14 @@ class SourceAerospike(ExternalSource): for value in values: key = (self.namespace, self.set, value[0]) print(key) - self.client.put(key, {"bin_value": value[1]}, policy={"key": aerospike.POLICY_KEY_SEND}) + self.client.put( + key, + {"bin_value": value[1]}, + policy={"key": aerospike.POLICY_KEY_SEND}, + ) assert self.client.exists(key) else: - assert ("VALUES SIZE != 2") + assert "VALUES SIZE != 2" # 
print(values) diff --git a/tests/integration/helpers/hdfs_api.py b/tests/integration/helpers/hdfs_api.py index 3d2d647d0ed..5739496cb50 100644 --- a/tests/integration/helpers/hdfs_api.py +++ b/tests/integration/helpers/hdfs_api.py @@ -10,27 +10,44 @@ import socket import tempfile import logging import os + + class mk_krb_conf(object): def __init__(self, krb_conf, kdc_ip): self.krb_conf = krb_conf self.kdc_ip = kdc_ip self.amended_krb_conf = None + def __enter__(self): with open(self.krb_conf) as f: content = f.read() - amended_content = content.replace('hdfskerberos', self.kdc_ip) + amended_content = content.replace("hdfskerberos", self.kdc_ip) self.amended_krb_conf = tempfile.NamedTemporaryFile(delete=False, mode="w+") self.amended_krb_conf.write(amended_content) self.amended_krb_conf.close() return self.amended_krb_conf.name + def __exit__(self, type, value, traceback): if self.amended_krb_conf is not None: self.amended_krb_conf.close() + class HDFSApi(object): - def __init__(self, user, host, proxy_port, data_port, timeout=100, kerberized=False, principal=None, - keytab=None, krb_conf=None, - protocol = "http", hdfs_ip = None, kdc_ip = None): + def __init__( + self, + user, + host, + proxy_port, + data_port, + timeout=100, + kerberized=False, + principal=None, + keytab=None, + krb_conf=None, + protocol="http", + hdfs_ip=None, + kdc_ip=None, + ): self.host = host self.protocol = protocol self.proxy_port = proxy_port @@ -55,7 +72,11 @@ class HDFSApi(object): if kerberized: self._run_kinit() - self.kerberos_auth = reqkerb.HTTPKerberosAuth(mutual_authentication=reqkerb.DISABLED, hostname_override=self.host, principal=self.principal) + self.kerberos_auth = reqkerb.HTTPKerberosAuth( + mutual_authentication=reqkerb.DISABLED, + hostname_override=self.host, + principal=self.principal, + ) if self.kerberos_auth is None: print("failed to obtain kerberos_auth") else: @@ -70,7 +91,11 @@ class HDFSApi(object): os.environ["KRB5_CONFIG"] = instantiated_krb_conf - cmd = "(kinit -R -t {keytab} -k {principal} || (sleep 5 && kinit -R -t {keytab} -k {principal})) ; klist".format(instantiated_krb_conf=instantiated_krb_conf, keytab=self.keytab, principal=self.principal) + cmd = "(kinit -R -t {keytab} -k {principal} || (sleep 5 && kinit -R -t {keytab} -k {principal})) ; klist".format( + instantiated_krb_conf=instantiated_krb_conf, + keytab=self.keytab, + principal=self.principal, + ) start = time.time() @@ -79,10 +104,18 @@ class HDFSApi(object): res = subprocess.run(cmd, shell=True) if res.returncode != 0: # check_call(...) 
from subprocess does not print stderr, so we do it manually - logging.debug('Stderr:\n{}\n'.format(res.stderr.decode('utf-8'))) - logging.debug('Stdout:\n{}\n'.format(res.stdout.decode('utf-8'))) - logging.debug('Env:\n{}\n'.format(env)) - raise Exception('Command {} return non-zero code {}: {}'.format(args, res.returncode, res.stderr.decode('utf-8'))) + logging.debug( + "Stderr:\n{}\n".format(res.stderr.decode("utf-8")) + ) + logging.debug( + "Stdout:\n{}\n".format(res.stdout.decode("utf-8")) + ) + logging.debug("Env:\n{}\n".format(env)) + raise Exception( + "Command {} return non-zero code {}: {}".format( + args, res.returncode, res.stderr.decode("utf-8") + ) + ) logging.debug("KDC started, kinit successfully run") return @@ -97,28 +130,60 @@ class HDFSApi(object): for i in range(0, cnt): logging.debug(f"CALL: {str(kwargs)}") response_data = func(**kwargs) - logging.debug(f"response_data:{response_data.content} headers:{response_data.headers}") + logging.debug( + f"response_data:{response_data.content} headers:{response_data.headers}" + ) if response_data.status_code == expected_code: return response_data else: - logging.error(f"unexpected response_data.status_code {response_data.status_code} != {expected_code}") + logging.error( + f"unexpected response_data.status_code {response_data.status_code} != {expected_code}" + ) time.sleep(1) response_data.raise_for_status() - def read_data(self, path, universal_newlines=True): - logging.debug("read_data protocol:{} host:{} ip:{} proxy port:{} data port:{} path: {}".format(self.protocol, self.host, self.hdfs_ip, self.proxy_port, self.data_port, path)) - response = self.req_wrapper(requests.get, 307, url="{protocol}://{ip}:{port}/webhdfs/v1{path}?op=OPEN".format(protocol=self.protocol, ip=self.hdfs_ip, port=self.proxy_port, path=path), headers={'host': str(self.hdfs_ip)}, allow_redirects=False, verify=False, auth=self.kerberos_auth) + logging.debug( + "read_data protocol:{} host:{} ip:{} proxy port:{} data port:{} path: {}".format( + self.protocol, + self.host, + self.hdfs_ip, + self.proxy_port, + self.data_port, + path, + ) + ) + response = self.req_wrapper( + requests.get, + 307, + url="{protocol}://{ip}:{port}/webhdfs/v1{path}?op=OPEN".format( + protocol=self.protocol, ip=self.hdfs_ip, port=self.proxy_port, path=path + ), + headers={"host": str(self.hdfs_ip)}, + allow_redirects=False, + verify=False, + auth=self.kerberos_auth, + ) # additional_params = '&'.join(response.headers['Location'].split('&')[1:2]) location = None if self.kerberized: - location = response.headers['Location'].replace("kerberizedhdfs1:1006", "{}:{}".format(self.hdfs_ip, self.data_port)) + location = response.headers["Location"].replace( + "kerberizedhdfs1:1006", "{}:{}".format(self.hdfs_ip, self.data_port) + ) else: - location = response.headers['Location'].replace("hdfs1:50075", "{}:{}".format(self.hdfs_ip, self.data_port)) + location = response.headers["Location"].replace( + "hdfs1:50075", "{}:{}".format(self.hdfs_ip, self.data_port) + ) logging.debug("redirected to {}".format(location)) - response_data = self.req_wrapper(requests.get, 200, url=location, headers={'host': self.hdfs_ip}, - verify=False, auth=self.kerberos_auth) + response_data = self.req_wrapper( + requests.get, + 200, + url=location, + headers={"host": self.hdfs_ip}, + verify=False, + auth=self.kerberos_auth, + ) if universal_newlines: return response_data.text @@ -126,23 +191,38 @@ class HDFSApi(object): return response_data.content def write_data(self, path, content): - logging.debug("write_data 
protocol:{} host:{} port:{} path: {} user:{}, principal:{}".format( - self.protocol, self.host, self.proxy_port, path, self.user, self.principal)) - named_file = NamedTemporaryFile(mode='wb+') + logging.debug( + "write_data protocol:{} host:{} port:{} path: {} user:{}, principal:{}".format( + self.protocol, + self.host, + self.proxy_port, + path, + self.user, + self.principal, + ) + ) + named_file = NamedTemporaryFile(mode="wb+") fpath = named_file.name if isinstance(content, str): content = content.encode() named_file.write(content) named_file.flush() - response = self.req_wrapper(requests.put, 307, - url="{protocol}://{ip}:{port}/webhdfs/v1{path}?op=CREATE".format(protocol=self.protocol, ip=self.hdfs_ip, - port=self.proxy_port, - path=path, user=self.user), + response = self.req_wrapper( + requests.put, + 307, + url="{protocol}://{ip}:{port}/webhdfs/v1{path}?op=CREATE".format( + protocol=self.protocol, + ip=self.hdfs_ip, + port=self.proxy_port, + path=path, + user=self.user, + ), allow_redirects=False, - headers={'host': str(self.hdfs_ip)}, - params={'overwrite' : 'true'}, - verify=False, auth=self.kerberos_auth + headers={"host": str(self.hdfs_ip)}, + params={"overwrite": "true"}, + verify=False, + auth=self.kerberos_auth, ) logging.debug("HDFS api response:{}".format(response.headers)) @@ -150,23 +230,30 @@ class HDFSApi(object): # additional_params = '&'.join( # response.headers['Location'].split('&')[1:2] + ["user.name={}".format(self.user), "overwrite=true"]) if self.kerberized: - location = response.headers['Location'].replace("kerberizedhdfs1:1006", "{}:{}".format(self.hdfs_ip, self.data_port)) + location = response.headers["Location"].replace( + "kerberizedhdfs1:1006", "{}:{}".format(self.hdfs_ip, self.data_port) + ) else: - location = response.headers['Location'].replace("hdfs1:50075", "{}:{}".format(self.hdfs_ip, self.data_port)) + location = response.headers["Location"].replace( + "hdfs1:50075", "{}:{}".format(self.hdfs_ip, self.data_port) + ) with open(fpath, mode="rb") as fh: file_data = fh.read() - protocol = "http" # self.protocol - response = self.req_wrapper(requests.put, 201, + protocol = "http" # self.protocol + response = self.req_wrapper( + requests.put, + 201, url="{location}".format(location=location), data=file_data, - headers={'content-type':'text/plain', 'host': str(self.hdfs_ip)}, - params={'file': path, 'user.name' : self.user}, - allow_redirects=False, verify=False, auth=self.kerberos_auth + headers={"content-type": "text/plain", "host": str(self.hdfs_ip)}, + params={"file": path, "user.name": self.user}, + allow_redirects=False, + verify=False, + auth=self.kerberos_auth, ) logging.debug(f"{response.content} {response.headers}") - def write_gzip_data(self, path, content): if isinstance(content, str): content = content.encode() @@ -176,4 +263,10 @@ class HDFSApi(object): self.write_data(path, out.getvalue()) def read_gzip_data(self, path): - return gzip.GzipFile(fileobj=io.BytesIO(self.read_data(path, universal_newlines=False))).read().decode() + return ( + gzip.GzipFile( + fileobj=io.BytesIO(self.read_data(path, universal_newlines=False)) + ) + .read() + .decode() + ) diff --git a/tests/integration/helpers/http_server.py b/tests/integration/helpers/http_server.py index e62096dd33f..3f32c2be775 100644 --- a/tests/integration/helpers/http_server.py +++ b/tests/integration/helpers/http_server.py @@ -9,9 +9,14 @@ from http.server import BaseHTTPRequestHandler, HTTPServer # Decorator used to see if authentication works for external dictionary who use a HTTP 
source. def check_auth(fn): def wrapper(req): - auth_header = req.headers.get('authorization', None) - api_key = req.headers.get('api-key', None) - if not auth_header or auth_header != 'Basic Zm9vOmJhcg==' or not api_key or api_key != 'secret': + auth_header = req.headers.get("authorization", None) + api_key = req.headers.get("api-key", None) + if ( + not auth_header + or auth_header != "Basic Zm9vOmJhcg==" + or not api_key + or api_key != "secret" + ): req.send_response(401) else: fn(req) @@ -35,15 +40,15 @@ def start_server(server_address, data_path, schema, cert_path, address_family): def __send_headers(self): self.send_response(200) - self.send_header('Content-type', 'text/tsv') + self.send_header("Content-type", "text/tsv") self.end_headers() def __send_data(self, only_ids=None): - with open(data_path, 'r') as fl: - reader = csv.reader(fl, delimiter='\t') + with open(data_path, "r") as fl: + reader = csv.reader(fl, delimiter="\t") for row in reader: if not only_ids or (row[0] in only_ids): - self.wfile.write(('\t'.join(row) + '\n').encode()) + self.wfile.write(("\t".join(row) + "\n").encode()) def __read_and_decode_post_ids(self): data = self.__read_and_decode_post_data() @@ -51,7 +56,7 @@ def start_server(server_address, data_path, schema, cert_path, address_family): def __read_and_decode_post_data(self): transfer_encoding = self.headers.get("Transfer-encoding") - decoded = ""; + decoded = "" if transfer_encoding == "chunked": while True: s = self.rfile.readline().decode() @@ -69,19 +74,29 @@ def start_server(server_address, data_path, schema, cert_path, address_family): HTTPServer.address_family = socket.AF_INET6 httpd = HTTPServer(server_address, TSVHTTPHandler) if schema == "https": - httpd.socket = ssl.wrap_socket(httpd.socket, certfile=cert_path, server_side=True) + httpd.socket = ssl.wrap_socket( + httpd.socket, certfile=cert_path, server_side=True + ) httpd.serve_forever() if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Simple HTTP server returns data from file") + parser = argparse.ArgumentParser( + description="Simple HTTP server returns data from file" + ) parser.add_argument("--host", default="localhost") parser.add_argument("--port", default=5555, type=int) parser.add_argument("--data-path", required=True) parser.add_argument("--schema", choices=("http", "https"), required=True) parser.add_argument("--cert-path", default="./fake_cert.pem") - parser.add_argument('--address-family', choices=("ipv4", "ipv6"), default="ipv4") + parser.add_argument("--address-family", choices=("ipv4", "ipv6"), default="ipv4") args = parser.parse_args() - start_server((args.host, args.port), args.data_path, args.schema, args.cert_path, args.address_family) + start_server( + (args.host, args.port), + args.data_path, + args.schema, + args.cert_path, + args.address_family, + ) diff --git a/tests/integration/helpers/network.py b/tests/integration/helpers/network.py index 2bf0867c847..63fb2065f9d 100644 --- a/tests/integration/helpers/network.py +++ b/tests/integration/helpers/network.py @@ -22,26 +22,38 @@ class PartitionManager: self._netem_delayed_instances = [] _NetworkManager.get() - def drop_instance_zk_connections(self, instance, action='DROP'): + def drop_instance_zk_connections(self, instance, action="DROP"): self._check_instance(instance) - self._add_rule({'source': instance.ip_address, 'destination_port': 2181, 'action': action}) - self._add_rule({'destination': instance.ip_address, 'source_port': 2181, 'action': action}) + self._add_rule( + {"source": 
instance.ip_address, "destination_port": 2181, "action": action} + ) + self._add_rule( + {"destination": instance.ip_address, "source_port": 2181, "action": action} + ) - def restore_instance_zk_connections(self, instance, action='DROP'): + def restore_instance_zk_connections(self, instance, action="DROP"): self._check_instance(instance) - self._delete_rule({'source': instance.ip_address, 'destination_port': 2181, 'action': action}) - self._delete_rule({'destination': instance.ip_address, 'source_port': 2181, 'action': action}) + self._delete_rule( + {"source": instance.ip_address, "destination_port": 2181, "action": action} + ) + self._delete_rule( + {"destination": instance.ip_address, "source_port": 2181, "action": action} + ) - def partition_instances(self, left, right, port=None, action='DROP'): + def partition_instances(self, left, right, port=None, action="DROP"): self._check_instance(left) self._check_instance(right) def create_rule(src, dst): - rule = {'source': src.ip_address, 'destination': dst.ip_address, 'action': action} + rule = { + "source": src.ip_address, + "destination": dst.ip_address, + "action": action, + } if port is not None: - rule['destination_port'] = port + rule["destination_port"] = port return rule self._add_rule(create_rule(left, right)) @@ -57,7 +69,9 @@ class PartitionManager: while self._netem_delayed_instances: instance = self._netem_delayed_instances.pop() - instance.exec_in_container(["bash", "-c", "tc qdisc del dev eth0 root netem"], user="root") + instance.exec_in_container( + ["bash", "-c", "tc qdisc del dev eth0 root netem"], user="root" + ) def pop_rules(self): res = self._iptables_rules[:] @@ -71,7 +85,7 @@ class PartitionManager: @staticmethod def _check_instance(instance): if instance.ip_address is None: - raise Exception('Instance + ' + instance.name + ' is not launched!') + raise Exception("Instance + " + instance.name + " is not launched!") def _add_rule(self, rule): _NetworkManager.get().add_iptables_rule(**rule) @@ -82,7 +96,14 @@ class PartitionManager: self._iptables_rules.remove(rule) def _add_tc_netem_delay(self, instance, delay_ms): - instance.exec_in_container(["bash", "-c", "tc qdisc add dev eth0 root netem delay {}ms".format(delay_ms)], user="root") + instance.exec_in_container( + [ + "bash", + "-c", + "tc qdisc add dev eth0 root netem delay {}ms".format(delay_ms), + ], + user="root", + ) self._netem_delayed_instances.append(instance) def __enter__(self): @@ -127,12 +148,12 @@ class _NetworkManager: return cls._instance def add_iptables_rule(self, **kwargs): - cmd = ['iptables', '--wait', '-I', 'DOCKER-USER', '1'] + cmd = ["iptables", "--wait", "-I", "DOCKER-USER", "1"] cmd.extend(self._iptables_cmd_suffix(**kwargs)) self._exec_run(cmd, privileged=True) def delete_iptables_rule(self, **kwargs): - cmd = ['iptables', '--wait', '-D', 'DOCKER-USER'] + cmd = ["iptables", "--wait", "-D", "DOCKER-USER"] cmd.extend(self._iptables_cmd_suffix(**kwargs)) self._exec_run(cmd, privileged=True) @@ -144,40 +165,66 @@ class _NetworkManager: res = subprocess.run("iptables --wait -D DOCKER-USER 1", shell=True) if res.returncode != 0: - logging.info("All iptables rules cleared, " + str(iptables_iter) + " iterations, last error: " + str(res.stderr)) + logging.info( + "All iptables rules cleared, " + + str(iptables_iter) + + " iterations, last error: " + + str(res.stderr) + ) return @staticmethod def _iptables_cmd_suffix( - source=None, destination=None, - source_port=None, destination_port=None, - action=None, probability=None, custom_args=None): + 
source=None, + destination=None, + source_port=None, + destination_port=None, + action=None, + probability=None, + custom_args=None, + ): ret = [] if probability is not None: - ret.extend(['-m', 'statistic', '--mode', 'random', '--probability', str(probability)]) - ret.extend(['-p', 'tcp']) + ret.extend( + [ + "-m", + "statistic", + "--mode", + "random", + "--probability", + str(probability), + ] + ) + ret.extend(["-p", "tcp"]) if source is not None: - ret.extend(['-s', source]) + ret.extend(["-s", source]) if destination is not None: - ret.extend(['-d', destination]) + ret.extend(["-d", destination]) if source_port is not None: - ret.extend(['--sport', str(source_port)]) + ret.extend(["--sport", str(source_port)]) if destination_port is not None: - ret.extend(['--dport', str(destination_port)]) + ret.extend(["--dport", str(destination_port)]) if action is not None: - ret.extend(['-j'] + action.split()) + ret.extend(["-j"] + action.split()) if custom_args is not None: ret.extend(custom_args) return ret def __init__( - self, - container_expire_timeout=50, container_exit_timeout=60, docker_api_version=os.environ.get("DOCKER_API_VERSION")): + self, + container_expire_timeout=50, + container_exit_timeout=60, + docker_api_version=os.environ.get("DOCKER_API_VERSION"), + ): self.container_expire_timeout = container_expire_timeout self.container_exit_timeout = container_exit_timeout - self._docker_client = docker.DockerClient(base_url='unix:///var/run/docker.sock', version=docker_api_version, timeout=600) + self._docker_client = docker.DockerClient( + base_url="unix:///var/run/docker.sock", + version=docker_api_version, + timeout=600, + ) self._container = None @@ -194,29 +241,41 @@ class _NetworkManager: except docker.errors.NotFound: break except Exception as ex: - print("Error removing network blocade container, will try again", str(ex)) + print( + "Error removing network blocade container, will try again", + str(ex), + ) time.sleep(i) - image = subprocess.check_output("docker images -q clickhouse/integration-helper 2>/dev/null", shell=True) + image = subprocess.check_output( + "docker images -q clickhouse/integration-helper 2>/dev/null", shell=True + ) if not image.strip(): print("No network image helper, will try download") # for some reason docker api may hang if image doesn't exist, so we download it # before running for i in range(5): try: - subprocess.check_call("docker pull clickhouse/integration-helper", shell=True) # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL + subprocess.check_call( # STYLE_CHECK_ALLOW_SUBPROCESS_CHECK_CALL + "docker pull clickhouse/integration-helper", shell=True + ) break except: time.sleep(i) else: raise Exception("Cannot pull clickhouse/integration-helper image") - self._container = self._docker_client.containers.run('clickhouse/integration-helper', - auto_remove=True, - command=('sleep %s' % self.container_exit_timeout), - # /run/xtables.lock passed inside for correct iptables --wait - volumes={'/run/xtables.lock': {'bind': '/run/xtables.lock', 'mode': 'ro' }}, - detach=True, network_mode='host') + self._container = self._docker_client.containers.run( + "clickhouse/integration-helper", + auto_remove=True, + command=("sleep %s" % self.container_exit_timeout), + # /run/xtables.lock passed inside for correct iptables --wait + volumes={ + "/run/xtables.lock": {"bind": "/run/xtables.lock", "mode": "ro"} + }, + detach=True, + network_mode="host", + ) container_id = self._container.id self._container_expire_time = time.time() + self.container_expire_timeout @@ -233,8 
+292,8 @@ class _NetworkManager: container = self._ensure_container() handle = self._docker_client.api.exec_create(container.id, cmd, **kwargs) - output = self._docker_client.api.exec_start(handle).decode('utf8') - exit_code = self._docker_client.api.exec_inspect(handle)['ExitCode'] + output = self._docker_client.api.exec_start(handle).decode("utf8") + exit_code = self._docker_client.api.exec_inspect(handle)["ExitCode"] if exit_code != 0: print(output) @@ -242,30 +301,56 @@ class _NetworkManager: return output + # Approximately mesure network I/O speed for interface class NetThroughput(object): def __init__(self, node): self.node = node # trying to get default interface and check it in /proc/net/dev - self.interface = self.node.exec_in_container(["bash", "-c", "awk '{print $1 \" \" $2}' /proc/net/route | grep 00000000 | awk '{print $1}'"]).strip() - check = self.node.exec_in_container(["bash", "-c", f'grep "^ *{self.interface}:" /proc/net/dev']).strip() - if not check: # if check is not successful just try eth{1-10} + self.interface = self.node.exec_in_container( + [ + "bash", + "-c", + "awk '{print $1 \" \" $2}' /proc/net/route | grep 00000000 | awk '{print $1}'", + ] + ).strip() + check = self.node.exec_in_container( + ["bash", "-c", f'grep "^ *{self.interface}:" /proc/net/dev'] + ).strip() + if not check: # if check is not successful just try eth{1-10} for i in range(10): try: - self.interface = self.node.exec_in_container(["bash", "-c", f"awk '{{print $1}}' /proc/net/route | grep 'eth{i}'"]).strip() + self.interface = self.node.exec_in_container( + [ + "bash", + "-c", + f"awk '{{print $1}}' /proc/net/route | grep 'eth{i}'", + ] + ).strip() break except Exception as ex: print(f"No interface eth{i}") else: - raise Exception("No interface eth{1-10} and default interface not specified in /proc/net/route, maybe some special network configuration") + raise Exception( + "No interface eth{1-10} and default interface not specified in /proc/net/route, maybe some special network configuration" + ) try: - check = self.node.exec_in_container(["bash", "-c", f'grep "^ *{self.interface}:" /proc/net/dev']).strip() + check = self.node.exec_in_container( + ["bash", "-c", f'grep "^ *{self.interface}:" /proc/net/dev'] + ).strip() if not check: - raise Exception(f"No such interface {self.interface} found in /proc/net/dev") + raise Exception( + f"No such interface {self.interface} found in /proc/net/dev" + ) except: - logging.error("All available interfaces %s", self.node.exec_in_container(["bash", "-c", "cat /proc/net/dev"])) - raise Exception(f"No such interface {self.interface} found in /proc/net/dev") + logging.error( + "All available interfaces %s", + self.node.exec_in_container(["bash", "-c", "cat /proc/net/dev"]), + ) + raise Exception( + f"No such interface {self.interface} found in /proc/net/dev" + ) self.current_in = self._get_in_bytes() self.current_out = self._get_out_bytes() @@ -273,27 +358,47 @@ class NetThroughput(object): def _get_in_bytes(self): try: - result = self.node.exec_in_container(['bash', '-c', f'awk "/^ *{self.interface}:/"\' {{ if ($1 ~ /.*:[0-9][0-9]*/) {{ sub(/^.*:/, "") ; print $1 }} else {{ print $2 }} }}\' /proc/net/dev']) + result = self.node.exec_in_container( + [ + "bash", + "-c", + f'awk "/^ *{self.interface}:/"\' {{ if ($1 ~ /.*:[0-9][0-9]*/) {{ sub(/^.*:/, "") ; print $1 }} else {{ print $2 }} }}\' /proc/net/dev', + ] + ) except: - raise Exception(f"Cannot receive in bytes from /proc/net/dev for interface {self.interface}") + raise Exception( + f"Cannot receive in bytes 
from /proc/net/dev for interface {self.interface}" + ) try: return int(result) except: - raise Exception(f"Got non-numeric in bytes '{result}' from /proc/net/dev for interface {self.interface}") + raise Exception( + f"Got non-numeric in bytes '{result}' from /proc/net/dev for interface {self.interface}" + ) def _get_out_bytes(self): try: - result = self.node.exec_in_container(['bash', '-c', f'awk "/^ *{self.interface}:/"\' {{ if ($1 ~ /.*:[0-9][0-9]*/) {{ print $9 }} else {{ print $10 }} }}\' /proc/net/dev']) + result = self.node.exec_in_container( + [ + "bash", + "-c", + f"awk \"/^ *{self.interface}:/\"' {{ if ($1 ~ /.*:[0-9][0-9]*/) {{ print $9 }} else {{ print $10 }} }}' /proc/net/dev", + ] + ) except: - raise Exception(f"Cannot receive out bytes from /proc/net/dev for interface {self.interface}") + raise Exception( + f"Cannot receive out bytes from /proc/net/dev for interface {self.interface}" + ) try: return int(result) except: - raise Exception(f"Got non-numeric out bytes '{result}' from /proc/net/dev for interface {self.interface}") + raise Exception( + f"Got non-numeric out bytes '{result}' from /proc/net/dev for interface {self.interface}" + ) - def measure_speed(self, measure='bytes'): + def measure_speed(self, measure="bytes"): new_in = self._get_in_bytes() new_out = self._get_out_bytes() current_time = time.time() @@ -304,11 +409,11 @@ class NetThroughput(object): self.current_in = new_in self.measure_time = current_time - if measure == 'bytes': + if measure == "bytes": return in_speed, out_speed - elif measure == 'kilobytes': - return in_speed / 1024., out_speed / 1024. - elif measure == 'megabytes': + elif measure == "kilobytes": + return in_speed / 1024.0, out_speed / 1024.0 + elif measure == "megabytes": return in_speed / (1024 * 1024), out_speed / (1024 * 1024) else: raise Exception(f"Unknown measure {measure}") diff --git a/tests/integration/helpers/postgres_utility.py b/tests/integration/helpers/postgres_utility.py index 16461ea3310..978b9a98fb4 100644 --- a/tests/integration/helpers/postgres_utility.py +++ b/tests/integration/helpers/postgres_utility.py @@ -23,11 +23,21 @@ postgres_table_template_5 = """ key Integer NOT NULL, value UUID, PRIMARY KEY(key)) """ -def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database', replication=False): + +def get_postgres_conn( + ip, + port, + database=False, + auto_commit=True, + database_name="postgres_database", + replication=False, +): if database == True: conn_string = f"host={ip} port={port} dbname='{database_name}' user='postgres' password='mysecretpassword'" else: - conn_string = f"host={ip} port={port} user='postgres' password='mysecretpassword'" + conn_string = ( + f"host={ip} port={port} user='postgres' password='mysecretpassword'" + ) if replication: conn_string += " replication='database'" @@ -38,33 +48,41 @@ def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name= conn.autocommit = True return conn -def create_replication_slot(conn, slot_name='user_slot'): + +def create_replication_slot(conn, slot_name="user_slot"): cursor = conn.cursor() - cursor.execute(f'CREATE_REPLICATION_SLOT {slot_name} LOGICAL pgoutput EXPORT_SNAPSHOT') + cursor.execute( + f"CREATE_REPLICATION_SLOT {slot_name} LOGICAL pgoutput EXPORT_SNAPSHOT" + ) result = cursor.fetchall() - print(result[0][0]) # slot name - print(result[0][1]) # start lsn - print(result[0][2]) # snapshot + print(result[0][0]) # slot name + print(result[0][1]) # start lsn + print(result[0][2]) # snapshot return 
result[0][2] -def drop_replication_slot(conn, slot_name='user_slot'): + +def drop_replication_slot(conn, slot_name="user_slot"): cursor = conn.cursor() cursor.execute(f"select pg_drop_replication_slot('{slot_name}')") def create_postgres_schema(cursor, schema_name): drop_postgres_schema(cursor, schema_name) - cursor.execute(f'CREATE SCHEMA {schema_name}') + cursor.execute(f"CREATE SCHEMA {schema_name}") + def drop_postgres_schema(cursor, schema_name): - cursor.execute(f'DROP SCHEMA IF EXISTS {schema_name} CASCADE') + cursor.execute(f"DROP SCHEMA IF EXISTS {schema_name} CASCADE") -def create_postgres_table(cursor, table_name, replica_identity_full=False, template=postgres_table_template): +def create_postgres_table( + cursor, table_name, replica_identity_full=False, template=postgres_table_template +): drop_postgres_table(cursor, table_name) cursor.execute(template.format(table_name)) if replica_identity_full: - cursor.execute(f'ALTER TABLE {table_name} REPLICA IDENTITY FULL;') + cursor.execute(f"ALTER TABLE {table_name} REPLICA IDENTITY FULL;") + def drop_postgres_table(cursor, table_name): cursor.execute(f"""DROP TABLE IF EXISTS "{table_name}" """) @@ -74,6 +92,7 @@ def create_postgres_table_with_schema(cursor, schema_name, table_name): drop_postgres_table_with_schema(cursor, schema_name, table_name) cursor.execute(postgres_table_template_4.format(schema_name, table_name)) + def drop_postgres_table_with_schema(cursor, schema_name, table_name): cursor.execute(f"""DROP TABLE IF EXISTS "{schema_name}"."{table_name}" """) @@ -102,14 +121,14 @@ class PostgresManager: def prepare(self): conn = get_postgres_conn(ip=self.ip, port=self.port) cursor = conn.cursor() - self.create_postgres_db(cursor, 'postgres_database') + self.create_postgres_db(cursor, "postgres_database") self.create_clickhouse_postgres_db(ip=self.ip, port=self.port) def clear(self): if self.conn.closed == 0: self.conn.close() for db in self.created_materialized_postgres_db_list.copy(): - self.drop_materialized_db(db); + self.drop_materialized_db(db) for db in self.created_ch_postgres_db_list.copy(): self.drop_clickhouse_postgres_db(db) if len(self.created_postgres_db_list) > 0: @@ -122,38 +141,54 @@ class PostgresManager: self.conn = get_postgres_conn(ip=self.ip, port=self.port, database=True) return self.conn.cursor() - def create_postgres_db(self, cursor, name='postgres_database'): + def create_postgres_db(self, cursor, name="postgres_database"): self.drop_postgres_db(cursor, name) self.created_postgres_db_list.add(name) cursor.execute(f"CREATE DATABASE {name}") - def drop_postgres_db(self, cursor, name='postgres_database'): + def drop_postgres_db(self, cursor, name="postgres_database"): cursor.execute(f"DROP DATABASE IF EXISTS {name}") if name in self.created_postgres_db_list: self.created_postgres_db_list.remove(name) - def create_clickhouse_postgres_db(self, ip, port, name='postgres_database', database_name='postgres_database', schema_name=''): + def create_clickhouse_postgres_db( + self, + ip, + port, + name="postgres_database", + database_name="postgres_database", + schema_name="", + ): self.drop_clickhouse_postgres_db(name) self.created_ch_postgres_db_list.add(name) if len(schema_name) == 0: - self.instance.query(f''' + self.instance.query( + f""" CREATE DATABASE {name} - ENGINE = PostgreSQL('{ip}:{port}', '{database_name}', 'postgres', 'mysecretpassword')''') + ENGINE = PostgreSQL('{ip}:{port}', '{database_name}', 'postgres', 'mysecretpassword')""" + ) else: - self.instance.query(f''' + self.instance.query( + f""" CREATE 
DATABASE {name} - ENGINE = PostgreSQL('{ip}:{port}', '{database_name}', 'postgres', 'mysecretpassword', '{schema_name}')''') + ENGINE = PostgreSQL('{ip}:{port}', '{database_name}', 'postgres', 'mysecretpassword', '{schema_name}')""" + ) - def drop_clickhouse_postgres_db(self, name='postgres_database'): - self.instance.query(f'DROP DATABASE IF EXISTS {name}') + def drop_clickhouse_postgres_db(self, name="postgres_database"): + self.instance.query(f"DROP DATABASE IF EXISTS {name}") if name in self.created_ch_postgres_db_list: self.created_ch_postgres_db_list.remove(name) - - def create_materialized_db(self, ip, port, - materialized_database='test_database', postgres_database='postgres_database', - settings=[], table_overrides=''): + def create_materialized_db( + self, + ip, + port, + materialized_database="test_database", + postgres_database="postgres_database", + settings=[], + table_overrides="", + ): self.created_materialized_postgres_db_list.add(materialized_database) self.instance.query(f"DROP DATABASE IF EXISTS {materialized_database}") @@ -162,17 +197,17 @@ class PostgresManager: create_query += " SETTINGS " for i in range(len(settings)): if i != 0: - create_query += ', ' + create_query += ", " create_query += settings[i] create_query += table_overrides self.instance.query(create_query) - assert materialized_database in self.instance.query('SHOW DATABASES') + assert materialized_database in self.instance.query("SHOW DATABASES") - def drop_materialized_db(self, materialized_database='test_database'): - self.instance.query(f'DROP DATABASE IF EXISTS {materialized_database} NO DELAY') + def drop_materialized_db(self, materialized_database="test_database"): + self.instance.query(f"DROP DATABASE IF EXISTS {materialized_database} NO DELAY") if materialized_database in self.created_materialized_postgres_db_list: self.created_materialized_postgres_db_list.remove(materialized_database) - assert materialized_database not in self.instance.query('SHOW DATABASES') + assert materialized_database not in self.instance.query("SHOW DATABASES") def create_and_fill_postgres_table(self, table_name): conn = get_postgres_conn(ip=self.ip, port=self.port, database=True) @@ -180,82 +215,109 @@ class PostgresManager: self.create_and_fill_postgres_table_from_cursor(cursor, table_name) def create_and_fill_postgres_table_from_cursor(self, cursor, table_name): - create_postgres_table(cursor, table_name); - self.instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(50)") + create_postgres_table(cursor, table_name) + self.instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(50)" + ) def create_and_fill_postgres_tables(self, tables_num, numbers=50): conn = get_postgres_conn(ip=self.ip, port=self.port, database=True) cursor = conn.cursor() - self.create_and_fill_postgres_tables_from_cursor(cursor, tables_num, numbers=numbers) + self.create_and_fill_postgres_tables_from_cursor( + cursor, tables_num, numbers=numbers + ) - def create_and_fill_postgres_tables_from_cursor(self, cursor, tables_num, numbers=50): + def create_and_fill_postgres_tables_from_cursor( + self, cursor, tables_num, numbers=50 + ): for i in range(tables_num): - table_name = f'postgresql_replica_{i}' - create_postgres_table(cursor, table_name); + table_name = f"postgresql_replica_{i}" + create_postgres_table(cursor, table_name) if numbers > 0: - self.instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers({numbers})") + 
self.instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers({numbers})" + ) queries = [ - 'INSERT INTO postgresql_replica_{} select i, i from generate_series(0, 10000) as t(i);', - 'DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', - 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;', + "INSERT INTO postgresql_replica_{} select i, i from generate_series(0, 10000) as t(i);", + "DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;", + "UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;", "UPDATE postgresql_replica_{} SET key=key+20000 WHERE key%2=0", - 'INSERT INTO postgresql_replica_{} select i, i from generate_series(40000, 50000) as t(i);', - 'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;', - 'UPDATE postgresql_replica_{} SET value = value + 101 WHERE key % 2 = 1;', + "INSERT INTO postgresql_replica_{} select i, i from generate_series(40000, 50000) as t(i);", + "DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;", + "UPDATE postgresql_replica_{} SET value = value + 101 WHERE key % 2 = 1;", "UPDATE postgresql_replica_{} SET key=key+80000 WHERE key%2=1", - 'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;', - 'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;', - 'INSERT INTO postgresql_replica_{} select i, i from generate_series(200000, 250000) as t(i);', - 'DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;', - 'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;', + "DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;", + "UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;", + "INSERT INTO postgresql_replica_{} select i, i from generate_series(200000, 250000) as t(i);", + "DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;", + "UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;", "UPDATE postgresql_replica_{} SET key=key+500000 WHERE key%2=1", - 'INSERT INTO postgresql_replica_{} select i, i from generate_series(1000000, 1050000) as t(i);', - 'DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;', + "INSERT INTO postgresql_replica_{} select i, i from generate_series(1000000, 1050000) as t(i);", + "DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;", "UPDATE postgresql_replica_{} SET key=key+10000000", - 'UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;', - 'DELETE FROM postgresql_replica_{} WHERE value%5 = 0;' - ] + "UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;", + "DELETE FROM postgresql_replica_{} WHERE value%5 = 0;", +] -def assert_nested_table_is_created(instance, table_name, materialized_database='test_database', schema_name=''): +def assert_nested_table_is_created( + instance, table_name, materialized_database="test_database", schema_name="" +): if len(schema_name) == 0: table = table_name else: table = schema_name + "." 
+ table_name - print(f'Checking table {table} exists in {materialized_database}') - database_tables = instance.query(f'SHOW TABLES FROM {materialized_database}') + print(f"Checking table {table} exists in {materialized_database}") + database_tables = instance.query(f"SHOW TABLES FROM {materialized_database}") while table not in database_tables: time.sleep(0.2) - database_tables = instance.query(f'SHOW TABLES FROM {materialized_database}') + database_tables = instance.query(f"SHOW TABLES FROM {materialized_database}") - assert(table in database_tables) + assert table in database_tables -def assert_number_of_columns(instance, expected, table_name, database_name='test_database'): - result = instance.query(f"select count() from system.columns where table = '{table_name}' and database = '{database_name}' and not startsWith(name, '_')") - while (int(result) != expected): +def assert_number_of_columns( + instance, expected, table_name, database_name="test_database" +): + result = instance.query( + f"select count() from system.columns where table = '{table_name}' and database = '{database_name}' and not startsWith(name, '_')" + ) + while int(result) != expected: time.sleep(1) - result = instance.query(f"select count() from system.columns where table = '{table_name}' and database = '{database_name}' and not startsWith(name, '_')") - print('Number of columns ok') + result = instance.query( + f"select count() from system.columns where table = '{table_name}' and database = '{database_name}' and not startsWith(name, '_')" + ) + print("Number of columns ok") -def check_tables_are_synchronized(instance, table_name, order_by='key', postgres_database='postgres_database', materialized_database='test_database', schema_name=''): - assert_nested_table_is_created(instance, table_name, materialized_database, schema_name) +def check_tables_are_synchronized( + instance, + table_name, + order_by="key", + postgres_database="postgres_database", + materialized_database="test_database", + schema_name="", +): + assert_nested_table_is_created( + instance, table_name, materialized_database, schema_name + ) - table_path = '' + table_path = "" if len(schema_name) == 0: - table_path = f'{materialized_database}.{table_name}' + table_path = f"{materialized_database}.{table_name}" else: - table_path = f'{materialized_database}.`{schema_name}.{table_name}`' + table_path = f"{materialized_database}.`{schema_name}.{table_name}`" print(f"Checking table is synchronized: {table_path}") - result_query = f'select * from {table_path} order by {order_by};' + result_query = f"select * from {table_path} order by {order_by};" - expected = instance.query(f'select * from {postgres_database}.{table_name} order by {order_by};') + expected = instance.query( + f"select * from {postgres_database}.{table_name} order by {order_by};" + ) result = instance.query(result_query) for _ in range(30): @@ -265,9 +327,16 @@ def check_tables_are_synchronized(instance, table_name, order_by='key', postgres time.sleep(0.5) result = instance.query(result_query) - assert(result == expected) + assert result == expected -def check_several_tables_are_synchronized(instance, tables_num, order_by='key', postgres_database='postgres_database', materialized_database='test_database', schema_name=''): +def check_several_tables_are_synchronized( + instance, + tables_num, + order_by="key", + postgres_database="postgres_database", + materialized_database="test_database", + schema_name="", +): for i in range(tables_num): - check_tables_are_synchronized(instance, 
f'postgresql_replica_{i}'); + check_tables_are_synchronized(instance, f"postgresql_replica_{i}") diff --git a/tests/integration/helpers/pytest_xdist_logging_to_separate_files.py b/tests/integration/helpers/pytest_xdist_logging_to_separate_files.py index ee9a52e042c..d424ad58fa4 100644 --- a/tests/integration/helpers/pytest_xdist_logging_to_separate_files.py +++ b/tests/integration/helpers/pytest_xdist_logging_to_separate_files.py @@ -5,17 +5,17 @@ import os.path # Without this function all workers will log to the same log file # and mix everything together making it much more difficult for troubleshooting. def setup(): - worker_name = os.environ.get('PYTEST_XDIST_WORKER', 'master') - if worker_name == 'master': + worker_name = os.environ.get("PYTEST_XDIST_WORKER", "master") + if worker_name == "master": return - logger = logging.getLogger('') + logger = logging.getLogger("") new_handlers = [] handlers_to_remove = [] for handler in logger.handlers: if isinstance(handler, logging.FileHandler): filename, ext = os.path.splitext(handler.baseFilename) - if not filename.endswith('-' + worker_name): - new_filename = filename + '-' + worker_name + if not filename.endswith("-" + worker_name): + new_filename = filename + "-" + worker_name new_handler = logging.FileHandler(new_filename + ext) new_handler.setFormatter(handler.formatter) new_handler.setLevel(handler.level) diff --git a/tests/integration/helpers/test_tools.py b/tests/integration/helpers/test_tools.py index ec3841f79d7..2afbae340be 100644 --- a/tests/integration/helpers/test_tools.py +++ b/tests/integration/helpers/test_tools.py @@ -13,12 +13,18 @@ class TSV: elif isinstance(contents, str) or isinstance(contents, str): raw_lines = contents.splitlines(True) elif isinstance(contents, list): - raw_lines = ['\t'.join(map(str, l)) if isinstance(l, list) else str(l) for l in contents] + raw_lines = [ + "\t".join(map(str, l)) if isinstance(l, list) else str(l) + for l in contents + ] elif isinstance(contents, TSV): self.lines = contents.lines return else: - raise TypeError("contents must be either file or string or list, actual type: " + type(contents).__name__) + raise TypeError( + "contents must be either file or string or list, actual type: " + + type(contents).__name__ + ) self.lines = [l.strip() for l in raw_lines if l.strip()] def __eq__(self, other): @@ -31,13 +37,18 @@ class TSV: return self != TSV(other) return self.lines != other.lines - def diff(self, other, n1='', n2=''): + def diff(self, other, n1="", n2=""): if not isinstance(other, TSV): return self.diff(TSV(other), n1=n1, n2=n2) - return list(line.rstrip() for line in difflib.unified_diff(self.lines, other.lines, fromfile=n1, tofile=n2))[2:] + return list( + line.rstrip() + for line in difflib.unified_diff( + self.lines, other.lines, fromfile=n1, tofile=n2 + ) + )[2:] def __str__(self): - return '\n'.join(self.lines) + return "\n".join(self.lines) def __repr__(self): return self.__str__() @@ -50,29 +61,70 @@ class TSV: return [line.split("\t") for line in contents.split("\n") if line.strip()] -def assert_eq_with_retry(instance, query, expectation, retry_count=20, sleep_time=0.5, stdin=None, timeout=None, - settings=None, user=None, ignore_error=False, get_result=lambda x: x): +def assert_eq_with_retry( + instance, + query, + expectation, + retry_count=20, + sleep_time=0.5, + stdin=None, + timeout=None, + settings=None, + user=None, + ignore_error=False, + get_result=lambda x: x, +): expectation_tsv = TSV(expectation) for i in range(retry_count): try: - if 
TSV(get_result(instance.query(query, user=user, stdin=stdin, timeout=timeout, settings=settings, - ignore_error=ignore_error))) == expectation_tsv: + if ( + TSV( + get_result( + instance.query( + query, + user=user, + stdin=stdin, + timeout=timeout, + settings=settings, + ignore_error=ignore_error, + ) + ) + ) + == expectation_tsv + ): break time.sleep(sleep_time) except Exception as ex: logging.exception(f"assert_eq_with_retry retry {i+1} exception {ex}") time.sleep(sleep_time) else: - val = TSV(get_result(instance.query(query, user=user, stdin=stdin, timeout=timeout, settings=settings, - ignore_error=ignore_error))) + val = TSV( + get_result( + instance.query( + query, + user=user, + stdin=stdin, + timeout=timeout, + settings=settings, + ignore_error=ignore_error, + ) + ) + ) if expectation_tsv != val: - raise AssertionError("'{}' != '{}'\n{}".format(expectation_tsv, val, '\n'.join( - expectation_tsv.diff(val, n1="expectation", n2="query")))) + raise AssertionError( + "'{}' != '{}'\n{}".format( + expectation_tsv, + val, + "\n".join(expectation_tsv.diff(val, n1="expectation", n2="query")), + ) + ) + def assert_logs_contain(instance, substring): if not instance.contains_in_log(substring): raise AssertionError("'{}' not found in logs".format(substring)) + def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_time=0.5): for i in range(retry_count): try: @@ -85,7 +137,10 @@ def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_ti else: raise AssertionError("'{}' not found in logs".format(substring)) -def exec_query_with_retry(instance, query, retry_count=40, sleep_time=0.5, silent=False, settings={}): + +def exec_query_with_retry( + instance, query, retry_count=40, sleep_time=0.5, silent=False, settings={} +): exception = None for cnt in range(retry_count): try: @@ -96,16 +151,21 @@ def exec_query_with_retry(instance, query, retry_count=40, sleep_time=0.5, silen except Exception as ex: exception = ex if not silent: - logging.exception(f"Failed to execute query '{query}' on {cnt} try on instance '{instance.name}' will retry") + logging.exception( + f"Failed to execute query '{query}' on {cnt} try on instance '{instance.name}' will retry" + ) time.sleep(sleep_time) else: raise exception + def csv_compare(result, expected): csv_result = TSV(result) csv_expected = TSV(expected) mismatch = [] - max_len = len(csv_result) if len(csv_result) > len(csv_expected) else len(csv_expected) + max_len = ( + len(csv_result) if len(csv_result) > len(csv_expected) else len(csv_expected) + ) for i in range(max_len): if i >= len(csv_result): mismatch.append("-[%d]=%s" % (i, csv_expected.lines[i])) diff --git a/tests/integration/helpers/uclient.py b/tests/integration/helpers/uclient.py index 538722580af..45c8b8f64e2 100644 --- a/tests/integration/helpers/uclient.py +++ b/tests/integration/helpers/uclient.py @@ -8,30 +8,30 @@ sys.path.insert(0, os.path.join(CURDIR)) from . 
import uexpect -prompt = ':\) ' -end_of_block = r'.*\r\n.*\r\n' +prompt = ":\) " +end_of_block = r".*\r\n.*\r\n" class client(object): - def __init__(self, command=None, name='', log=None): - self.client = uexpect.spawn(['/bin/bash', '--noediting']) + def __init__(self, command=None, name="", log=None): + self.client = uexpect.spawn(["/bin/bash", "--noediting"]) if command is None: - command = '/usr/bin/clickhouse-client' + command = "/usr/bin/clickhouse-client" self.client.command = command - self.client.eol('\r') + self.client.eol("\r") self.client.logger(log, prefix=name) self.client.timeout(20) - self.client.expect('[#\$] ', timeout=2) + self.client.expect("[#\$] ", timeout=2) self.client.send(command) def __enter__(self): return self.client.__enter__() def __exit__(self, type, value, traceback): - self.client.reader['kill_event'].set() + self.client.reader["kill_event"].set() # send Ctrl-C - self.client.send('\x03', eol='') + self.client.send("\x03", eol="") time.sleep(0.3) - self.client.send('quit', eol='\r') - self.client.send('\x03', eol='') + self.client.send("quit", eol="\r") + self.client.send("\x03", eol="") return self.client.__exit__(type, value, traceback) diff --git a/tests/integration/helpers/uexpect.py b/tests/integration/helpers/uexpect.py index cd26e3ddbd3..757a3a7f199 100644 --- a/tests/integration/helpers/uexpect.py +++ b/tests/integration/helpers/uexpect.py @@ -25,7 +25,7 @@ class TimeoutError(Exception): self.timeout = timeout def __str__(self): - return 'Timeout %.3fs' % float(self.timeout) + return "Timeout %.3fs" % float(self.timeout) class ExpectTimeoutError(Exception): @@ -35,12 +35,12 @@ class ExpectTimeoutError(Exception): self.buffer = buffer def __str__(self): - s = 'Timeout %.3fs ' % float(self.timeout) + s = "Timeout %.3fs " % float(self.timeout) if self.pattern: - s += 'for %s ' % repr(self.pattern.pattern) + s += "for %s " % repr(self.pattern.pattern) if self.buffer: - s += 'buffer %s ' % repr(self.buffer[:]) - s += 'or \'%s\'' % ','.join(['%x' % ord(c) for c in self.buffer[:]]) + s += "buffer %s " % repr(self.buffer[:]) + s += "or '%s'" % ",".join(["%x" % ord(c) for c in self.buffer[:]]) return s @@ -55,12 +55,12 @@ class IO(object): TIMEOUT = Timeout class Logger(object): - def __init__(self, logger, prefix=''): + def __init__(self, logger, prefix=""): self._logger = logger self._prefix = prefix def write(self, data): - self._logger.write(('\n' + data).replace('\n', '\n' + self._prefix)) + self._logger.write(("\n" + data).replace("\n", "\n" + self._prefix)) def flush(self): self._logger.flush() @@ -77,7 +77,7 @@ class IO(object): self.reader = reader self._timeout = None self._logger = None - self._eol = '' + self._eol = "" def __enter__(self): return self @@ -85,7 +85,7 @@ class IO(object): def __exit__(self, type, value, traceback): self.close() - def logger(self, logger=None, prefix=''): + def logger(self, logger=None, prefix=""): if logger: self._logger = self.Logger(logger, prefix=prefix) return self._logger @@ -101,15 +101,15 @@ class IO(object): return self._eol def close(self, force=True): - self.reader['kill_event'].set() - os.system('pkill -TERM -P %d' % self.process.pid) + self.reader["kill_event"].set() + os.system("pkill -TERM -P %d" % self.process.pid) if force: self.process.kill() else: self.process.terminate() os.close(self.master) if self._logger: - self._logger.write('\n') + self._logger.write("\n") self._logger.flush() def send(self, data, eol=None): @@ -135,9 +135,9 @@ class IO(object): if self.buffer is not None: self.match = 
pattern.search(self.buffer, 0) if self.match is not None: - self.after = self.buffer[self.match.start():self.match.end()] - self.before = self.buffer[:self.match.start()] - self.buffer = self.buffer[self.match.end():] + self.after = self.buffer[self.match.start() : self.match.end()] + self.before = self.buffer[: self.match.start()] + self.buffer = self.buffer[self.match.end() :] break if timeleft < 0: break @@ -145,16 +145,16 @@ class IO(object): data = self.read(timeout=timeleft, raise_exception=True) except TimeoutError: if self._logger: - self._logger.write((self.buffer or '') + '\n') + self._logger.write((self.buffer or "") + "\n") self._logger.flush() exception = ExpectTimeoutError(pattern, timeout, self.buffer) self.buffer = None raise exception - timeleft -= (time.time() - start_time) + timeleft -= time.time() - start_time if data: self.buffer = (self.buffer + data) if self.buffer else data if self._logger: - self._logger.write((self.before or '') + (self.after or '')) + self._logger.write((self.before or "") + (self.after or "")) self._logger.flush() if self.match is None: exception = ExpectTimeoutError(pattern, timeout, self.buffer) @@ -163,7 +163,7 @@ class IO(object): return self.match def read(self, timeout=0, raise_exception=False): - data = '' + data = "" timeleft = timeout try: while timeleft >= 0: @@ -171,7 +171,7 @@ class IO(object): data += self.queue.get(timeout=timeleft) if data: break - timeleft -= (time.time() - start_time) + timeleft -= time.time() - start_time except Empty: if data: return data @@ -186,7 +186,14 @@ class IO(object): def spawn(command): master, slave = pty.openpty() - process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1) + process = Popen( + command, + preexec_fn=os.setsid, + stdout=slave, + stdin=slave, + stderr=slave, + bufsize=1, + ) os.close(slave) queue = Queue() @@ -195,14 +202,19 @@ def spawn(command): thread.daemon = True thread.start() - return IO(process, master, queue, reader={'thread': thread, 'kill_event': reader_kill_event}) + return IO( + process, + master, + queue, + reader={"thread": thread, "kill_event": reader_kill_event}, + ) def reader(process, out, queue, kill_event): while True: try: # TODO: there are some issues with 1<<16 buffer size - data = os.read(out, 1<<17).decode(errors='replace') + data = os.read(out, 1 << 17).decode(errors="replace") queue.put(data) except: if kill_event.is_set(): diff --git a/tests/integration/helpers/utility.py b/tests/integration/helpers/utility.py index 69dfa53cd3e..0fd55569d92 100644 --- a/tests/integration/helpers/utility.py +++ b/tests/integration/helpers/utility.py @@ -11,11 +11,13 @@ class SafeThread(threading.Thread): super().__init__() self.target = target self.exception = None + def run(self): try: self.target() - except Exception as e: # pylint: disable=broad-except + except Exception as e: # pylint: disable=broad-except self.exception = e + def join(self, timeout=None): super().join(timeout) if self.exception: @@ -24,7 +26,7 @@ class SafeThread(threading.Thread): def random_string(length): letters = string.ascii_letters - return ''.join(random.choice(letters) for i in range(length)) + return "".join(random.choice(letters) for i in range(length)) def generate_values(date_str, count, sign=1): @@ -34,10 +36,10 @@ def generate_values(date_str, count, sign=1): def replace_config(config_path, old, new): - config = open(config_path, 'r') + config = open(config_path, "r") config_lines = config.readlines() config.close() config_lines = 
[line.replace(old, new) for line in config_lines] - config = open(config_path, 'w') + config = open(config_path, "w") config.writelines(config_lines) config.close() diff --git a/tests/integration/test_MemoryTracking/test.py b/tests/integration/test_MemoryTracking/test.py index 2ec5b2457af..517090988ee 100644 --- a/tests/integration/test_MemoryTracking/test.py +++ b/tests/integration/test_MemoryTracking/test.py @@ -19,14 +19,19 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=[ - 'configs/no_system_log.xml', - 'configs/asynchronous_metrics_update_period_s.xml', -], user_configs=[ - 'configs/users.d/overrides.xml', -]) +node = cluster.add_instance( + "node", + main_configs=[ + "configs/no_system_log.xml", + "configs/asynchronous_metrics_update_period_s.xml", + ], + user_configs=[ + "configs/users.d/overrides.xml", + ], +) -@pytest.fixture(scope='module', autouse=True) + +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -34,31 +39,39 @@ def start_cluster(): finally: cluster.shutdown() + query_settings = { - 'max_threads': 1, - 'log_queries': 0, + "max_threads": 1, + "log_queries": 0, } sample_query = "SELECT groupArray(repeat('a', 1000)) FROM numbers(10000) GROUP BY number%10 FORMAT JSON" + def query(*args, **kwargs): - if 'settings' not in kwargs: - kwargs['settings'] = query_settings + if "settings" not in kwargs: + kwargs["settings"] = query_settings else: - kwargs['settings'].update(query_settings) + kwargs["settings"].update(query_settings) return node.query(*args, **kwargs) + + def http_query(*args, **kwargs): - if 'params' not in kwargs: - kwargs['params'] = query_settings + if "params" not in kwargs: + kwargs["params"] = query_settings else: - kwargs['params'].update(query_settings) + kwargs["params"].update(query_settings) return node.http_query(*args, **kwargs) + def get_MemoryTracking(): - return int(http_query("SELECT value FROM system.metrics WHERE metric = 'MemoryTracking'")) + return int( + http_query("SELECT value FROM system.metrics WHERE metric = 'MemoryTracking'") + ) + def check_memory(memory): # bytes -> megabytes - memory = [*map(lambda x: int(int(x)/1024/1024), memory)] + memory = [*map(lambda x: int(int(x) / 1024 / 1024), memory)] # 3 changes to MemoryTracking is minimum, since: # - this is not that high to not detect inacuracy # - memory can go like X/X+N due to some background allocations @@ -66,14 +79,19 @@ def check_memory(memory): changes_allowed = 3 # if number of samples is large enough, use 10% from them # (actually most of the time there will be only few changes, it was made 10% to avoid flackiness) - changes_allowed_auto=int(len(memory) * 0.1) + changes_allowed_auto = int(len(memory) * 0.1) changes_allowed = max(changes_allowed_auto, changes_allowed) - changed=len(set(memory)) - logging.info('Changes: allowed=%s, actual=%s, sample=%s', - changes_allowed, changed, len(memory)) + changed = len(set(memory)) + logging.info( + "Changes: allowed=%s, actual=%s, sample=%s", + changes_allowed, + changed, + len(memory), + ) assert changed < changes_allowed + def test_http(): memory = [] memory.append(get_MemoryTracking()) @@ -82,6 +100,7 @@ def test_http(): memory.append(get_MemoryTracking()) check_memory(memory) + def test_tcp_multiple_sessions(): memory = [] memory.append(get_MemoryTracking()) @@ -90,6 +109,7 @@ def test_tcp_multiple_sessions(): memory.append(get_MemoryTracking()) check_memory(memory) + def test_tcp_single_session(): 
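# (Descriptive note, not part of the original patch.) Like the HTTP and
# multi-session variants above, this test samples the MemoryTracking metric:
# it interleaves the sample query with a SELECT from system.metrics in one
# multi-statement session, parses the metric values out of the result rows,
# and passes them to check_memory() to assert the value stays stable.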
memory = [] memory.append(get_MemoryTracking()) @@ -97,9 +117,9 @@ def test_tcp_single_session(): sample_query, "SELECT metric, value FROM system.metrics WHERE metric = 'MemoryTracking'", ] * 100 - rows = query(';'.join(sample_queries)) - memory = rows.split('\n') - memory = filter(lambda x: x.startswith('MemoryTracking'), memory) - memory = map(lambda x: x.split('\t')[1], memory) + rows = query(";".join(sample_queries)) + memory = rows.split("\n") + memory = filter(lambda x: x.startswith("MemoryTracking"), memory) + memory = map(lambda x: x.split("\t")[1], memory) memory = [*memory] check_memory(memory) diff --git a/tests/integration/test_access_control_on_cluster/test.py b/tests/integration/test_access_control_on_cluster/test.py index 6bcf67779ef..6c2331178e0 100644 --- a/tests/integration/test_access_control_on_cluster/test.py +++ b/tests/integration/test_access_control_on_cluster/test.py @@ -2,9 +2,15 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -ch1 = cluster.add_instance('ch1', main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True) -ch2 = cluster.add_instance('ch2', main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True) -ch3 = cluster.add_instance('ch3', main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True) +ch1 = cluster.add_instance( + "ch1", main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True +) +ch2 = cluster.add_instance( + "ch2", main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True +) +ch3 = cluster.add_instance( + "ch3", main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True +) @pytest.fixture(scope="module", autouse=True) @@ -18,17 +24,23 @@ def started_cluster(): def test_access_control_on_cluster(): - ch1.query_with_retry("CREATE USER IF NOT EXISTS Alex ON CLUSTER 'cluster'", retry_count=5) + ch1.query_with_retry( + "CREATE USER IF NOT EXISTS Alex ON CLUSTER 'cluster'", retry_count=5 + ) assert ch1.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n" assert ch2.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n" assert ch3.query("SHOW CREATE USER Alex") == "CREATE USER Alex\n" - ch2.query_with_retry("GRANT ON CLUSTER 'cluster' SELECT ON *.* TO Alex", retry_count=3) + ch2.query_with_retry( + "GRANT ON CLUSTER 'cluster' SELECT ON *.* TO Alex", retry_count=3 + ) assert ch1.query("SHOW GRANTS FOR Alex") == "GRANT SELECT ON *.* TO Alex\n" assert ch2.query("SHOW GRANTS FOR Alex") == "GRANT SELECT ON *.* TO Alex\n" assert ch3.query("SHOW GRANTS FOR Alex") == "GRANT SELECT ON *.* TO Alex\n" - ch3.query_with_retry("REVOKE ON CLUSTER 'cluster' SELECT ON *.* FROM Alex", retry_count=3) + ch3.query_with_retry( + "REVOKE ON CLUSTER 'cluster' SELECT ON *.* FROM Alex", retry_count=3 + ) assert ch1.query("SHOW GRANTS FOR Alex") == "" assert ch2.query("SHOW GRANTS FOR Alex") == "" assert ch3.query("SHOW GRANTS FOR Alex") == "" diff --git a/tests/integration/test_access_for_functions/test.py b/tests/integration/test_access_for_functions/test.py index 0abe74e31a3..be4d71502d2 100644 --- a/tests/integration/test_access_for_functions/test.py +++ b/tests/integration/test_access_for_functions/test.py @@ -3,7 +3,7 @@ import uuid from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', stay_alive=True) +instance = cluster.add_instance("instance", stay_alive=True) @pytest.fixture(scope="module", autouse=True) @@ -21,21 +21,32 @@ def test_access_rights_for_function(): 
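The access-control test above runs DDL once with ON CLUSTER and then checks every replica. A condensed sketch of that pattern, reusing the same helpers this diff already relies on (the user name "bob" and the single test function are illustrative):

import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
nodes = [
    cluster.add_instance(
        name, main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True
    )
    for name in ("ch1", "ch2", "ch3")
]


@pytest.fixture(scope="module", autouse=True)
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_grant_visible_on_every_replica(started_cluster):
    # DDL issued on one node with ON CLUSTER must become visible everywhere.
    nodes[0].query_with_retry(
        "CREATE USER IF NOT EXISTS bob ON CLUSTER 'cluster'", retry_count=5
    )
    nodes[1].query_with_retry(
        "GRANT ON CLUSTER 'cluster' SELECT ON *.* TO bob", retry_count=3
    )
    for node in nodes:
        assert node.query("SHOW GRANTS FOR bob") == "GRANT SELECT ON *.* TO bob\n"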
instance.query("CREATE USER A") instance.query("CREATE USER B") - assert "it's necessary to have grant CREATE FUNCTION ON *.*" in instance.query_and_get_error(create_function_query, user = 'A') + assert ( + "it's necessary to have grant CREATE FUNCTION ON *.*" + in instance.query_and_get_error(create_function_query, user="A") + ) instance.query("GRANT CREATE FUNCTION on *.* TO A") - instance.query(create_function_query, user = 'A') + instance.query(create_function_query, user="A") assert instance.query("SELECT MySum(1, 2)") == "3\n" - assert "it's necessary to have grant DROP FUNCTION ON *.*" in instance.query_and_get_error("DROP FUNCTION MySum", user = 'B') + assert ( + "it's necessary to have grant DROP FUNCTION ON *.*" + in instance.query_and_get_error("DROP FUNCTION MySum", user="B") + ) instance.query("GRANT DROP FUNCTION ON *.* TO B") - instance.query("DROP FUNCTION MySum", user = 'B') - assert "Unknown function MySum" in instance.query_and_get_error("SELECT MySum(1, 2)") + instance.query("DROP FUNCTION MySum", user="B") + assert "Unknown function MySum" in instance.query_and_get_error( + "SELECT MySum(1, 2)" + ) instance.query("REVOKE CREATE FUNCTION ON *.* FROM A") - assert "it's necessary to have grant CREATE FUNCTION ON *.*" in instance.query_and_get_error(create_function_query, user = 'A') + assert ( + "it's necessary to have grant CREATE FUNCTION ON *.*" + in instance.query_and_get_error(create_function_query, user="A") + ) instance.query("DROP USER IF EXISTS A") instance.query("DROP USER IF EXISTS B") @@ -45,13 +56,21 @@ def test_ignore_obsolete_grant_on_database(): instance.stop_clickhouse() user_id = uuid.uuid4() - instance.exec_in_container(["bash", "-c" , f""" + instance.exec_in_container( + [ + "bash", + "-c", + f""" cat > /var/lib/clickhouse/access/{user_id}.sql << EOF ATTACH USER X; ATTACH GRANT CREATE FUNCTION, SELECT ON mydb.* TO X; -EOF"""]) +EOF""", + ] + ) - instance.exec_in_container(["bash", "-c" , "touch /var/lib/clickhouse/access/need_rebuild_lists.mark"]) + instance.exec_in_container( + ["bash", "-c", "touch /var/lib/clickhouse/access/need_rebuild_lists.mark"] + ) instance.start_clickhouse() assert instance.query("SHOW GRANTS FOR X") == "GRANT SELECT ON mydb.* TO X\n" diff --git a/tests/integration/test_aggregation_memory_efficient/test.py b/tests/integration/test_aggregation_memory_efficient/test.py index db0449173ca..8131fd9c1d7 100644 --- a/tests/integration/test_aggregation_memory_efficient/test.py +++ b/tests/integration/test_aggregation_memory_efficient/test.py @@ -3,8 +3,8 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1') -node2 = cluster.add_instance('node2') +node1 = cluster.add_instance("node1") +node2 = cluster.add_instance("node2") @pytest.fixture(scope="module") @@ -14,10 +14,15 @@ def start_cluster(): for node in [node1, node2]: node.query( - "create table da_memory_efficient_shard(A Int64, B Int64) Engine=MergeTree order by A partition by B % 2;") + "create table da_memory_efficient_shard(A Int64, B Int64) Engine=MergeTree order by A partition by B % 2;" + ) - node1.query("insert into da_memory_efficient_shard select number, number from numbers(100000);") - node2.query("insert into da_memory_efficient_shard select number + 100000, number from numbers(100000);") + node1.query( + "insert into da_memory_efficient_shard select number, number from numbers(100000);" + ) + node2.query( + "insert into da_memory_efficient_shard select number + 100000, number from 
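test_access_rights_for_function() above repeats a "fails without the grant, succeeds with it" sequence. If that pattern keeps growing, it could be factored into a helper along these lines (a sketch, not part of the patch; it assumes the same query/query_and_get_error methods used above):

def assert_needs_grant(node, sql, grant, user):
    # Without the grant the query must fail with the standard error text;
    # once the grant is given, the same query must succeed.
    error = node.query_and_get_error(sql, user=user)
    assert f"it's necessary to have grant {grant}" in error
    node.query(f"GRANT {grant} TO {user}")
    node.query(sql, user=user)


# e.g. assert_needs_grant(instance, create_function_query, "CREATE FUNCTION ON *.*", "A")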
numbers(100000);" + ) yield cluster @@ -27,23 +32,29 @@ def start_cluster(): def test_remote(start_cluster): node1.query( - "set distributed_aggregation_memory_efficient = 1, group_by_two_level_threshold = 1, group_by_two_level_threshold_bytes=1") + "set distributed_aggregation_memory_efficient = 1, group_by_two_level_threshold = 1, group_by_two_level_threshold_bytes=1" + ) res = node1.query( - "select sum(a) from (SELECT B, uniqExact(A) a FROM remote('node{1,2}', default.da_memory_efficient_shard) GROUP BY B)") - assert res == '200000\n' + "select sum(a) from (SELECT B, uniqExact(A) a FROM remote('node{1,2}', default.da_memory_efficient_shard) GROUP BY B)" + ) + assert res == "200000\n" node1.query("set distributed_aggregation_memory_efficient = 0") res = node1.query( - "select sum(a) from (SELECT B, uniqExact(A) a FROM remote('node{1,2}', default.da_memory_efficient_shard) GROUP BY B)") - assert res == '200000\n' + "select sum(a) from (SELECT B, uniqExact(A) a FROM remote('node{1,2}', default.da_memory_efficient_shard) GROUP BY B)" + ) + assert res == "200000\n" node1.query( - "set distributed_aggregation_memory_efficient = 1, group_by_two_level_threshold = 1, group_by_two_level_threshold_bytes=1") + "set distributed_aggregation_memory_efficient = 1, group_by_two_level_threshold = 1, group_by_two_level_threshold_bytes=1" + ) res = node1.query( - "SELECT fullHostName() AS h, uniqExact(A) AS a FROM remote('node{1,2}', default.da_memory_efficient_shard) GROUP BY h ORDER BY h;") - assert res == 'node1\t100000\nnode2\t100000\n' + "SELECT fullHostName() AS h, uniqExact(A) AS a FROM remote('node{1,2}', default.da_memory_efficient_shard) GROUP BY h ORDER BY h;" + ) + assert res == "node1\t100000\nnode2\t100000\n" node1.query("set distributed_aggregation_memory_efficient = 0") res = node1.query( - "SELECT fullHostName() AS h, uniqExact(A) AS a FROM remote('node{1,2}', default.da_memory_efficient_shard) GROUP BY h ORDER BY h;") - assert res == 'node1\t100000\nnode2\t100000\n' + "SELECT fullHostName() AS h, uniqExact(A) AS a FROM remote('node{1,2}', default.da_memory_efficient_shard) GROUP BY h ORDER BY h;" + ) + assert res == "node1\t100000\nnode2\t100000\n" diff --git a/tests/integration/test_allowed_client_hosts/test.py b/tests/integration/test_allowed_client_hosts/test.py index 7b803fd50f3..db2ba464b38 100644 --- a/tests/integration/test_allowed_client_hosts/test.py +++ b/tests/integration/test_allowed_client_hosts/test.py @@ -2,31 +2,42 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -server = cluster.add_instance('server', user_configs=["configs/users.d/network.xml"]) +server = cluster.add_instance("server", user_configs=["configs/users.d/network.xml"]) -clientA1 = cluster.add_instance('clientA1', hostname='clientA1.com') -clientA2 = cluster.add_instance('clientA2', hostname='clientA2.com') -clientA3 = cluster.add_instance('clientA3', hostname='clientA3.com') -clientB1 = cluster.add_instance('clientB1', hostname='clientB001.ru') -clientB2 = cluster.add_instance('clientB2', hostname='clientB002.ru') -clientB3 = cluster.add_instance('clientB3', hostname='xxx.clientB003.rutracker.com') -clientC1 = cluster.add_instance('clientC1', hostname='clientC01.ru') -clientC2 = cluster.add_instance('clientC2', hostname='xxx.clientC02.ru') -clientC3 = cluster.add_instance('clientC3', hostname='xxx.clientC03.rutracker.com') -clientD1 = cluster.add_instance('clientD1', hostname='clientD0001.ru') -clientD2 = cluster.add_instance('clientD2', 
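test_remote() above toggles distributed_aggregation_memory_efficient with separate "set ..." statements. An equivalent formulation (an aside, not something this patch does) passes the settings per query through the settings= argument the helpers already support:

res = node1.query(
    "SELECT sum(a) FROM (SELECT B, uniqExact(A) a FROM "
    "remote('node{1,2}', default.da_memory_efficient_shard) GROUP BY B)",
    settings={
        "distributed_aggregation_memory_efficient": 1,
        "group_by_two_level_threshold": 1,
        "group_by_two_level_threshold_bytes": 1,
    },
)
assert res == "200000\n"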
hostname='xxx.clientD0002.ru') -clientD3 = cluster.add_instance('clientD3', hostname='clientD0003.ru') +clientA1 = cluster.add_instance("clientA1", hostname="clientA1.com") +clientA2 = cluster.add_instance("clientA2", hostname="clientA2.com") +clientA3 = cluster.add_instance("clientA3", hostname="clientA3.com") +clientB1 = cluster.add_instance("clientB1", hostname="clientB001.ru") +clientB2 = cluster.add_instance("clientB2", hostname="clientB002.ru") +clientB3 = cluster.add_instance("clientB3", hostname="xxx.clientB003.rutracker.com") +clientC1 = cluster.add_instance("clientC1", hostname="clientC01.ru") +clientC2 = cluster.add_instance("clientC2", hostname="xxx.clientC02.ru") +clientC3 = cluster.add_instance("clientC3", hostname="xxx.clientC03.rutracker.com") +clientD1 = cluster.add_instance("clientD1", hostname="clientD0001.ru") +clientD2 = cluster.add_instance("clientD2", hostname="xxx.clientD0002.ru") +clientD3 = cluster.add_instance("clientD3", hostname="clientD0003.ru") def check_clickhouse_is_ok(client_node, server_node): - assert client_node.exec_in_container( - ["bash", "-c", "/usr/bin/curl -s {}:8123 ".format(server_node.hostname)]) == "Ok.\n" + assert ( + client_node.exec_in_container( + ["bash", "-c", "/usr/bin/curl -s {}:8123 ".format(server_node.hostname)] + ) + == "Ok.\n" + ) def query_from_one_node_to_another(client_node, server_node, query): check_clickhouse_is_ok(client_node, server_node) return client_node.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --host {} --query {!r}".format(server_node.hostname, query)]) + [ + "bash", + "-c", + "/usr/bin/clickhouse client --host {} --query {!r}".format( + server_node.hostname, query + ), + ] + ) def query(node, query): @@ -38,7 +49,10 @@ def setup_nodes(): try: cluster.start() query(server, "DROP TABLE IF EXISTS test_allowed_client_hosts") - query(server, "CREATE TABLE test_allowed_client_hosts (x Int32) ENGINE = MergeTree() ORDER BY tuple()") + query( + server, + "CREATE TABLE test_allowed_client_hosts (x Int32) ENGINE = MergeTree() ORDER BY tuple()", + ) query(server, "INSERT INTO test_allowed_client_hosts VALUES (5)") yield cluster @@ -58,8 +72,15 @@ def test_allowed_host(): # expected_to_fail.extend([clientC3, clientD2]) for client_node in expected_to_pass: - assert query_from_one_node_to_another(client_node, server, "SELECT * FROM test_allowed_client_hosts") == "5\n" + assert ( + query_from_one_node_to_another( + client_node, server, "SELECT * FROM test_allowed_client_hosts" + ) + == "5\n" + ) for client_node in expected_to_fail: - with pytest.raises(Exception, match=r'default: Authentication failed'): - query_from_one_node_to_another(client_node, server, "SELECT * FROM test_allowed_client_hosts") + with pytest.raises(Exception, match=r"default: Authentication failed"): + query_from_one_node_to_another( + client_node, server, "SELECT * FROM test_allowed_client_hosts" + ) diff --git a/tests/integration/test_allowed_url_from_config/test.py b/tests/integration/test_allowed_url_from_config/test.py index 71bcea482f8..4f4f02fffdc 100644 --- a/tests/integration/test_allowed_url_from_config/test.py +++ b/tests/integration/test_allowed_url_from_config/test.py @@ -2,13 +2,23 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/config_with_hosts.xml']) -node2 = cluster.add_instance('node2', main_configs=['configs/config_with_only_primary_hosts.xml']) -node3 = cluster.add_instance('node3', 
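check_clickhouse_is_ok() above asserts on a single curl probe. A retrying variant can make slow container start-ups less flaky (a hypothetical helper, not part of the patch; it uses the same exec_in_container call shown above):

import time


def wait_clickhouse_ok(client_node, server_node, attempts=10, delay=1):
    # Poll the HTTP interface until it answers "Ok." or the attempts run out.
    for _ in range(attempts):
        out = client_node.exec_in_container(
            ["bash", "-c", "/usr/bin/curl -s {}:8123".format(server_node.hostname)]
        )
        if out == "Ok.\n":
            return
        time.sleep(delay)
    raise RuntimeError("{} did not answer on port 8123".format(server_node.hostname))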
main_configs=['configs/config_with_only_regexp_hosts.xml']) -node4 = cluster.add_instance('node4', main_configs=[]) # No `remote_url_allow_hosts` at all. -node5 = cluster.add_instance('node5', main_configs=['configs/config_without_allowed_hosts.xml']) -node6 = cluster.add_instance('node6', main_configs=['configs/config_for_remote.xml']) -node7 = cluster.add_instance('node7', main_configs=['configs/config_for_redirect.xml'], with_hdfs=True) +node1 = cluster.add_instance("node1", main_configs=["configs/config_with_hosts.xml"]) +node2 = cluster.add_instance( + "node2", main_configs=["configs/config_with_only_primary_hosts.xml"] +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/config_with_only_regexp_hosts.xml"] +) +node4 = cluster.add_instance( + "node4", main_configs=[] +) # No `remote_url_allow_hosts` at all. +node5 = cluster.add_instance( + "node5", main_configs=["configs/config_without_allowed_hosts.xml"] +) +node6 = cluster.add_instance("node6", main_configs=["configs/config_for_remote.xml"]) +node7 = cluster.add_instance( + "node7", main_configs=["configs/config_for_redirect.xml"], with_hdfs=True +) @pytest.fixture(scope="module") @@ -21,97 +31,229 @@ def start_cluster(): def test_config_with_hosts(start_cluster): - assert node1.query("CREATE TABLE table_test_1_1 (word String) Engine=URL('http://host:80', HDFS)") == "" - assert node1.query("CREATE TABLE table_test_1_2 (word String) Engine=URL('https://yandex.ru', CSV)") == "" + assert ( + node1.query( + "CREATE TABLE table_test_1_1 (word String) Engine=URL('http://host:80', HDFS)" + ) + == "" + ) + assert ( + node1.query( + "CREATE TABLE table_test_1_2 (word String) Engine=URL('https://yandex.ru', CSV)" + ) + == "" + ) assert "not allowed" in node1.query_and_get_error( - "CREATE TABLE table_test_1_4 (word String) Engine=URL('https://host:123', S3)") + "CREATE TABLE table_test_1_4 (word String) Engine=URL('https://host:123', S3)" + ) assert "not allowed" in node1.query_and_get_error( - "CREATE TABLE table_test_1_4 (word String) Engine=URL('https://yandex2.ru', CSV)") + "CREATE TABLE table_test_1_4 (word String) Engine=URL('https://yandex2.ru', CSV)" + ) def test_config_with_only_primary_hosts(start_cluster): - assert node2.query("CREATE TABLE table_test_2_1 (word String) Engine=URL('https://host:80', CSV)") == "" - assert node2.query("CREATE TABLE table_test_2_2 (word String) Engine=URL('https://host:123', S3)") == "" - assert node2.query("CREATE TABLE table_test_2_3 (word String) Engine=URL('https://yandex.ru', CSV)") == "" - assert node2.query("CREATE TABLE table_test_2_4 (word String) Engine=URL('https://yandex.ru:87', HDFS)") == "" + assert ( + node2.query( + "CREATE TABLE table_test_2_1 (word String) Engine=URL('https://host:80', CSV)" + ) + == "" + ) + assert ( + node2.query( + "CREATE TABLE table_test_2_2 (word String) Engine=URL('https://host:123', S3)" + ) + == "" + ) + assert ( + node2.query( + "CREATE TABLE table_test_2_3 (word String) Engine=URL('https://yandex.ru', CSV)" + ) + == "" + ) + assert ( + node2.query( + "CREATE TABLE table_test_2_4 (word String) Engine=URL('https://yandex.ru:87', HDFS)" + ) + == "" + ) assert "not allowed" in node2.query_and_get_error( - "CREATE TABLE table_test_2_5 (word String) Engine=URL('https://host', HDFS)") + "CREATE TABLE table_test_2_5 (word String) Engine=URL('https://host', HDFS)" + ) assert "not allowed" in node2.query_and_get_error( - "CREATE TABLE table_test_2_5 (word String) Engine=URL('https://host:234', CSV)") + "CREATE TABLE table_test_2_5 (word String) 
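The allow/deny assertions in this file all follow one shape: create a table over a URL engine and expect either an empty result or a "not allowed" error. A parametrized sketch of that shape (illustrative; the real tests above keep each case explicit, and the table names here are made up):

import pytest


@pytest.mark.parametrize(
    "ddl,allowed",
    [
        ("CREATE TABLE t_ok (word String) Engine=URL('http://host:80', HDFS)", True),
        ("CREATE TABLE t_bad (word String) Engine=URL('https://host:123', S3)", False),
    ],
)
def test_url_host_filtering(start_cluster, ddl, allowed):
    # Allowed hosts create cleanly; disallowed hosts fail with "not allowed".
    if allowed:
        assert node1.query(ddl) == ""
    else:
        assert "not allowed" in node1.query_and_get_error(ddl)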
Engine=URL('https://host:234', CSV)" + ) assert "not allowed" in node2.query_and_get_error( - "CREATE TABLE table_test_2_6 (word String) Engine=URL('https://yandex2.ru', S3)") + "CREATE TABLE table_test_2_6 (word String) Engine=URL('https://yandex2.ru', S3)" + ) def test_config_with_only_regexp_hosts(start_cluster): - assert node3.query("CREATE TABLE table_test_3_1 (word String) Engine=URL('https://host:80', HDFS)") == "" - assert node3.query("CREATE TABLE table_test_3_2 (word String) Engine=URL('https://yandex.ru', CSV)") == "" + assert ( + node3.query( + "CREATE TABLE table_test_3_1 (word String) Engine=URL('https://host:80', HDFS)" + ) + == "" + ) + assert ( + node3.query( + "CREATE TABLE table_test_3_2 (word String) Engine=URL('https://yandex.ru', CSV)" + ) + == "" + ) assert "not allowed" in node3.query_and_get_error( - "CREATE TABLE table_test_3_3 (word String) Engine=URL('https://host', CSV)") + "CREATE TABLE table_test_3_3 (word String) Engine=URL('https://host', CSV)" + ) assert "not allowed" in node3.query_and_get_error( - "CREATE TABLE table_test_3_4 (word String) Engine=URL('https://yandex2.ru', S3)") + "CREATE TABLE table_test_3_4 (word String) Engine=URL('https://yandex2.ru', S3)" + ) def test_config_without_allowed_hosts_section(start_cluster): - assert node4.query("CREATE TABLE table_test_4_1 (word String) Engine=URL('https://host:80', CSV)") == "" - assert node4.query("CREATE TABLE table_test_4_2 (word String) Engine=S3('https://host:80/bucket/key', CSV)") == "" - assert node4.query("CREATE TABLE table_test_4_3 (word String) Engine=URL('https://host', HDFS)") == "" - assert node4.query("CREATE TABLE table_test_4_4 (word String) Engine=URL('https://yandex.ru', CSV)") == "" - assert node4.query("CREATE TABLE table_test_4_5 (word String) Engine=URL('ftp://something.com', S3)") == "" + assert ( + node4.query( + "CREATE TABLE table_test_4_1 (word String) Engine=URL('https://host:80', CSV)" + ) + == "" + ) + assert ( + node4.query( + "CREATE TABLE table_test_4_2 (word String) Engine=S3('https://host:80/bucket/key', CSV)" + ) + == "" + ) + assert ( + node4.query( + "CREATE TABLE table_test_4_3 (word String) Engine=URL('https://host', HDFS)" + ) + == "" + ) + assert ( + node4.query( + "CREATE TABLE table_test_4_4 (word String) Engine=URL('https://yandex.ru', CSV)" + ) + == "" + ) + assert ( + node4.query( + "CREATE TABLE table_test_4_5 (word String) Engine=URL('ftp://something.com', S3)" + ) + == "" + ) def test_config_without_allowed_hosts(start_cluster): assert "not allowed" in node5.query_and_get_error( - "CREATE TABLE table_test_5_1 (word String) Engine=URL('https://host:80', CSV)") + "CREATE TABLE table_test_5_1 (word String) Engine=URL('https://host:80', CSV)" + ) assert "not allowed" in node5.query_and_get_error( - "CREATE TABLE table_test_5_2 (word String) Engine=S3('https://host:80/bucket/key', CSV)") + "CREATE TABLE table_test_5_2 (word String) Engine=S3('https://host:80/bucket/key', CSV)" + ) assert "not allowed" in node5.query_and_get_error( - "CREATE TABLE table_test_5_3 (word String) Engine=URL('https://host', HDFS)") + "CREATE TABLE table_test_5_3 (word String) Engine=URL('https://host', HDFS)" + ) assert "not allowed" in node5.query_and_get_error( - "CREATE TABLE table_test_5_4 (word String) Engine=URL('https://yandex.ru', CSV)") + "CREATE TABLE table_test_5_4 (word String) Engine=URL('https://yandex.ru', CSV)" + ) assert "not allowed" in node5.query_and_get_error( - "CREATE TABLE table_test_5_5 (word String) Engine=URL('ftp://something.com', S3)") + "CREATE TABLE 
table_test_5_5 (word String) Engine=URL('ftp://something.com', S3)" + ) def test_table_function_remote(start_cluster): assert "not allowed in configuration file" not in node6.query_and_get_error( "SELECT * FROM remoteSecure('example01-01-{1|2}', system, events)", - settings={"connections_with_failover_max_tries": 1, "connect_timeout_with_failover_ms": 1000, - "connect_timeout_with_failover_secure_ms": 1000, "connect_timeout": 1, "send_timeout": 1}) + settings={ + "connections_with_failover_max_tries": 1, + "connect_timeout_with_failover_ms": 1000, + "connect_timeout_with_failover_secure_ms": 1000, + "connect_timeout": 1, + "send_timeout": 1, + }, + ) assert "not allowed in configuration file" not in node6.query_and_get_error( "SELECT * FROM remoteSecure('example01-01-1,example01-02-1', system, events)", - settings={"connections_with_failover_max_tries": 1, "connect_timeout_with_failover_ms": 1000, - "connect_timeout_with_failover_secure_ms": 1000, "connect_timeout": 1, "send_timeout": 1}) + settings={ + "connections_with_failover_max_tries": 1, + "connect_timeout_with_failover_ms": 1000, + "connect_timeout_with_failover_secure_ms": 1000, + "connect_timeout": 1, + "send_timeout": 1, + }, + ) assert "not allowed in configuration file" not in node6.query_and_get_error( "SELECT * FROM remote('example01-0{1,2}-1', system, events", - settings={"connections_with_failover_max_tries": 1, "connect_timeout_with_failover_ms": 1000, - "connect_timeout_with_failover_secure_ms": 1000, "connect_timeout": 1, "send_timeout": 1}) + settings={ + "connections_with_failover_max_tries": 1, + "connect_timeout_with_failover_ms": 1000, + "connect_timeout_with_failover_secure_ms": 1000, + "connect_timeout": 1, + "send_timeout": 1, + }, + ) assert "not allowed in configuration file" not in node6.query_and_get_error( "SELECT * FROM remote('example01-0{1,2}-{1|2}', system, events)", - settings={"connections_with_failover_max_tries": 1, "connect_timeout_with_failover_ms": 1000, - "connect_timeout_with_failover_secure_ms": 1000, "connect_timeout": 1, "send_timeout": 1}) + settings={ + "connections_with_failover_max_tries": 1, + "connect_timeout_with_failover_ms": 1000, + "connect_timeout_with_failover_secure_ms": 1000, + "connect_timeout": 1, + "send_timeout": 1, + }, + ) assert "not allowed in configuration file" not in node6.query_and_get_error( "SELECT * FROM remoteSecure('example01-{01..02}-{1|2}', system, events)", - settings={"connections_with_failover_max_tries": 1, "connect_timeout_with_failover_ms": 1000, - "connect_timeout_with_failover_secure_ms": 1000, "connect_timeout": 1, "send_timeout": 1}) + settings={ + "connections_with_failover_max_tries": 1, + "connect_timeout_with_failover_ms": 1000, + "connect_timeout_with_failover_secure_ms": 1000, + "connect_timeout": 1, + "send_timeout": 1, + }, + ) assert "not allowed" in node6.query_and_get_error( "SELECT * FROM remoteSecure('example01-01-1,example01-03-1', system, events)", - settings={"connections_with_failover_max_tries": 1, "connect_timeout_with_failover_ms": 1000, - "connect_timeout_with_failover_secure_ms": 1000, "connect_timeout": 1, "send_timeout": 1}) - assert "not allowed" in node6.query_and_get_error("SELECT * FROM remote('example01-01-{1|3}', system, events)", - settings={"connections_with_failover_max_tries": 1, - "connect_timeout_with_failover_ms": 1000, - "connect_timeout_with_failover_secure_ms": 1000, - "connect_timeout": 1, "send_timeout": 1}) + settings={ + "connections_with_failover_max_tries": 1, + "connect_timeout_with_failover_ms": 1000, 
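test_table_function_remote() above passes the same five failover/timeout settings to every query. Hoisting them into one module-level constant keeps each assertion short (a sketch of that refactoring, not something this patch does):

FAILOVER_SETTINGS = {
    "connections_with_failover_max_tries": 1,
    "connect_timeout_with_failover_ms": 1000,
    "connect_timeout_with_failover_secure_ms": 1000,
    "connect_timeout": 1,
    "send_timeout": 1,
}


def remote_error(sql):
    # Thin wrapper so each assertion reads as a single line.
    return node6.query_and_get_error(sql, settings=FAILOVER_SETTINGS)


# e.g. assert "not allowed" in remote_error(
#     "SELECT * FROM remoteSecure('example01-0{1,3}-1', system, metrics)")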
+ "connect_timeout_with_failover_secure_ms": 1000, + "connect_timeout": 1, + "send_timeout": 1, + }, + ) + assert "not allowed" in node6.query_and_get_error( + "SELECT * FROM remote('example01-01-{1|3}', system, events)", + settings={ + "connections_with_failover_max_tries": 1, + "connect_timeout_with_failover_ms": 1000, + "connect_timeout_with_failover_secure_ms": 1000, + "connect_timeout": 1, + "send_timeout": 1, + }, + ) assert "not allowed" in node6.query_and_get_error( "SELECT * FROM remoteSecure('example01-0{1,3}-1', system, metrics)", - settings={"connections_with_failover_max_tries": 1, "connect_timeout_with_failover_ms": 1000, - "connect_timeout_with_failover_secure_ms": 1000, "connect_timeout": 1, "send_timeout": 1}) + settings={ + "connections_with_failover_max_tries": 1, + "connect_timeout_with_failover_ms": 1000, + "connect_timeout_with_failover_secure_ms": 1000, + "connect_timeout": 1, + "send_timeout": 1, + }, + ) assert node6.query("SELECT * FROM remote('localhost', system, events)") != "" assert node6.query("SELECT * FROM remoteSecure('localhost', system, metrics)") != "" - assert "URL \"localhost:800\" is not allowed in configuration file" in node6.query_and_get_error( - "SELECT * FROM remoteSecure('localhost:800', system, events)") - assert "URL \"localhost:800\" is not allowed in configuration file" in node6.query_and_get_error( - "SELECT * FROM remote('localhost:800', system, metrics)") + assert ( + 'URL "localhost:800" is not allowed in configuration file' + in node6.query_and_get_error( + "SELECT * FROM remoteSecure('localhost:800', system, events)" + ) + ) + assert ( + 'URL "localhost:800" is not allowed in configuration file' + in node6.query_and_get_error( + "SELECT * FROM remote('localhost:800', system, metrics)" + ) + ) def test_redirect(start_cluster): @@ -120,12 +262,17 @@ def test_redirect(start_cluster): hdfs_api.write_data("/simple_storage", "1\t\n") assert hdfs_api.read_data("/simple_storage") == "1\t\n" node7.query( - "CREATE TABLE table_test_7_1 (word String) ENGINE=URL('http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', CSV)") - assert "not allowed" in node7.query_and_get_error("SET max_http_get_redirects=1; SELECT * from table_test_7_1") + "CREATE TABLE table_test_7_1 (word String) ENGINE=URL('http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', CSV)" + ) + assert "not allowed" in node7.query_and_get_error( + "SET max_http_get_redirects=1; SELECT * from table_test_7_1" + ) def test_HDFS(start_cluster): assert "not allowed" in node7.query_and_get_error( - "CREATE TABLE table_test_7_2 (word String) ENGINE=HDFS('http://hdfs1:50075/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'CSV')") + "CREATE TABLE table_test_7_2 (word String) ENGINE=HDFS('http://hdfs1:50075/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'CSV')" + ) assert "not allowed" in node7.query_and_get_error( - "SELECT * FROM hdfs('http://hdfs1:50075/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV', 'word String')") + "SELECT * FROM hdfs('http://hdfs1:50075/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV', 'word String')" + ) diff --git a/tests/integration/test_alter_codec/test.py b/tests/integration/test_alter_codec/test.py index 2117893af5b..7c7ef4803e9 100644 --- a/tests/integration/test_alter_codec/test.py +++ b/tests/integration/test_alter_codec/test.py @@ -4,8 +4,7 @@ from helpers.cluster 
import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', - main_configs=['configs/logs_config.xml']) +node1 = cluster.add_instance("node1", main_configs=["configs/logs_config.xml"]) @pytest.fixture(scope="module") @@ -21,30 +20,60 @@ def started_cluster(): def test_alter_codec_pk(started_cluster): try: name = "test_alter_codec_pk" - node1.query(""" + node1.query( + """ CREATE TABLE {name} (id UInt64, value UInt64) Engine=MergeTree() ORDER BY id - """.format(name=name)) + """.format( + name=name + ) + ) - node1.query("INSERT INTO {name} SELECT number, number * number from numbers(100)".format(name=name)) + node1.query( + "INSERT INTO {name} SELECT number, number * number from numbers(100)".format( + name=name + ) + ) - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(NONE)".format(name=name)) - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(Delta, LZ4)".format(name=name)) + node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(NONE)".format(name=name) + ) + node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(Delta, LZ4)".format( + name=name + ) + ) assert node1.query("SELECT sum(id) FROM {name}".format(name=name)) == "4950\n" with pytest.raises(QueryRuntimeException): - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt32 CODEC(Delta, LZ4)".format(name=name)) + node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt32 CODEC(Delta, LZ4)".format( + name=name + ) + ) - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 DEFAULT 3 CODEC(Delta, LZ4)".format(name=name)) + node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt64 DEFAULT 3 CODEC(Delta, LZ4)".format( + name=name + ) + ) node1.query("INSERT INTO {name} (value) VALUES (1)".format(name=name)) assert node1.query("SELECT sum(id) FROM {name}".format(name=name)) == "4953\n" with pytest.raises(QueryRuntimeException): - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 ALIAS 3 CODEC(Delta, LZ4)".format(name=name)) + node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt64 ALIAS 3 CODEC(Delta, LZ4)".format( + name=name + ) + ) - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 MATERIALIZED 3 CODEC(Delta, LZ4)".format(name=name)) + node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt64 MATERIALIZED 3 CODEC(Delta, LZ4)".format( + name=name + ) + ) node1.query("INSERT INTO {name} (value) VALUES (1)".format(name=name)) @@ -61,28 +90,58 @@ def test_alter_codec_pk(started_cluster): def test_alter_codec_index(started_cluster): try: name = "test_alter_codec_index" - node1.query(""" + node1.query( + """ CREATE TABLE {name} (`id` UInt64, value UInt64, INDEX id_index id TYPE minmax GRANULARITY 1) Engine=MergeTree() ORDER BY tuple() - """.format(name=name)) + """.format( + name=name + ) + ) - node1.query("INSERT INTO {name} SELECT number, number * number from numbers(100)".format(name=name)) + node1.query( + "INSERT INTO {name} SELECT number, number * number from numbers(100)".format( + name=name + ) + ) - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(NONE)".format(name=name)) - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(Delta, LZ4)".format(name=name)) + node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(NONE)".format(name=name) + ) + node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(Delta, LZ4)".format( + name=name + ) + ) with pytest.raises(QueryRuntimeException): - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt32 CODEC(Delta, LZ4)".format(name=name)) + 
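test_alter_codec_pk() above builds every statement with .format(name=name), which black now spreads over several lines. An f-string variant is equivalent and shorter (a readability aside, not part of the patch):

name = "test_alter_codec_pk"
node1.query(f"ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(NONE)")
node1.query(f"ALTER TABLE {name} MODIFY COLUMN id UInt64 CODEC(Delta, LZ4)")
assert node1.query(f"SELECT sum(id) FROM {name}") == "4950\n"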
node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt32 CODEC(Delta, LZ4)".format( + name=name + ) + ) - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 DEFAULT 3 CODEC(Delta, LZ4)".format(name=name)) + node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt64 DEFAULT 3 CODEC(Delta, LZ4)".format( + name=name + ) + ) node1.query("INSERT INTO {name} (value) VALUES (1)".format(name=name)) assert node1.query("SELECT sum(id) FROM {name}".format(name=name)) == "4953\n" with pytest.raises(QueryRuntimeException): - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 ALIAS 3 CODEC(Delta, LZ4)".format(name=name)) + node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt64 ALIAS 3 CODEC(Delta, LZ4)".format( + name=name + ) + ) - node1.query("ALTER TABLE {name} MODIFY COLUMN id UInt64 MATERIALIZED 3 CODEC(Delta, LZ4)".format(name=name)) + node1.query( + "ALTER TABLE {name} MODIFY COLUMN id UInt64 MATERIALIZED 3 CODEC(Delta, LZ4)".format( + name=name + ) + ) node1.query("INSERT INTO {name} (value) VALUES (1)".format(name=name)) diff --git a/tests/integration/test_alter_on_mixed_type_cluster/test.py b/tests/integration/test_alter_on_mixed_type_cluster/test.py index c22626cb379..f21a97d40e1 100644 --- a/tests/integration/test_alter_on_mixed_type_cluster/test.py +++ b/tests/integration/test_alter_on_mixed_type_cluster/test.py @@ -4,11 +4,18 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) - +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node4 = cluster.add_instance( + "node4", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -17,19 +24,31 @@ def started_cluster(): cluster.start() for node in [node1, node2]: - node.query_with_retry(''' + node.query_with_retry( + """ CREATE TABLE IF NOT EXISTS test_table_replicated(date Date, id UInt32, value Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/sometable', '{replica}') ORDER BY id; - '''.format(replica=node.name)) - node.query_with_retry('''CREATE TABLE IF NOT EXISTS test_table(date Date, id UInt32, value Int32) ENGINE=MergeTree ORDER BY id''') + """.format( + replica=node.name + ) + ) + node.query_with_retry( + """CREATE TABLE IF NOT EXISTS test_table(date Date, id UInt32, value Int32) ENGINE=MergeTree ORDER BY id""" + ) for node in [node3, node4]: - node.query_with_retry(''' + node.query_with_retry( + """ CREATE TABLE IF NOT EXISTS test_table_replicated(date Date, id UInt32, value Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/someotable', '{replica}') ORDER BY id; - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) - node.query_with_retry('''CREATE TABLE IF NOT EXISTS test_table(date Date, id UInt32, value Int32) ENGINE=MergeTree ORDER BY id''') + node.query_with_retry( + """CREATE TABLE IF NOT EXISTS test_table(date 
Date, id UInt32, value Int32) ENGINE=MergeTree ORDER BY id""" + ) yield cluster @@ -46,17 +65,23 @@ def test_alter_on_cluter_non_replicated(started_cluster): assert node3.query("SELECT COUNT() FROM test_table") == "1\n" assert node4.query("SELECT COUNT() FROM test_table") == "1\n" - node1.query("ALTER TABLE test_table ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN date DateTime") + node1.query( + "ALTER TABLE test_table ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN date DateTime" + ) - assert node1.query("SELECT date FROM test_table") == '2019-10-01 00:00:00\n' - assert node2.query("SELECT date FROM test_table") == '2019-10-01 00:00:00\n' - assert node3.query("SELECT date FROM test_table") == '2019-10-01 00:00:00\n' - assert node4.query("SELECT date FROM test_table") == '2019-10-01 00:00:00\n' + assert node1.query("SELECT date FROM test_table") == "2019-10-01 00:00:00\n" + assert node2.query("SELECT date FROM test_table") == "2019-10-01 00:00:00\n" + assert node3.query("SELECT date FROM test_table") == "2019-10-01 00:00:00\n" + assert node4.query("SELECT date FROM test_table") == "2019-10-01 00:00:00\n" - node3.query("ALTER TABLE test_table ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN value String") + node3.query( + "ALTER TABLE test_table ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN value String" + ) for node in [node1, node2, node3, node4]: - node.query("INSERT INTO test_table VALUES(toDateTime('2019-10-02 00:00:00'), 2, 'Hello')") + node.query( + "INSERT INTO test_table VALUES(toDateTime('2019-10-02 00:00:00'), 2, 'Hello')" + ) assert node1.query("SELECT COUNT() FROM test_table") == "2\n" assert node2.query("SELECT COUNT() FROM test_table") == "2\n" @@ -66,22 +91,40 @@ def test_alter_on_cluter_non_replicated(started_cluster): def test_alter_replicated_on_cluster(started_cluster): for node in [node1, node3]: - node.query("INSERT INTO test_table_replicated VALUES(toDate('2019-10-01'), 1, 1)") + node.query( + "INSERT INTO test_table_replicated VALUES(toDate('2019-10-01'), 1, 1)" + ) for node in [node2, node4]: node.query("SYSTEM SYNC REPLICA test_table_replicated", timeout=20) - node1.query("ALTER TABLE test_table_replicated ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN date DateTime", settings={"replication_alter_partitions_sync": "2"}) + node1.query( + "ALTER TABLE test_table_replicated ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN date DateTime", + settings={"replication_alter_partitions_sync": "2"}, + ) - assert node1.query("SELECT date FROM test_table_replicated") == '2019-10-01 00:00:00\n' - assert node2.query("SELECT date FROM test_table_replicated") == '2019-10-01 00:00:00\n' - assert node3.query("SELECT date FROM test_table_replicated") == '2019-10-01 00:00:00\n' - assert node4.query("SELECT date FROM test_table_replicated") == '2019-10-01 00:00:00\n' + assert ( + node1.query("SELECT date FROM test_table_replicated") == "2019-10-01 00:00:00\n" + ) + assert ( + node2.query("SELECT date FROM test_table_replicated") == "2019-10-01 00:00:00\n" + ) + assert ( + node3.query("SELECT date FROM test_table_replicated") == "2019-10-01 00:00:00\n" + ) + assert ( + node4.query("SELECT date FROM test_table_replicated") == "2019-10-01 00:00:00\n" + ) - node3.query_with_retry("ALTER TABLE test_table_replicated ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN value String", settings={"replication_alter_partitions_sync": "2"}) + node3.query_with_retry( + "ALTER TABLE test_table_replicated ON CLUSTER 'test_cluster_mixed' MODIFY COLUMN value String", + settings={"replication_alter_partitions_sync": 
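test_alter_on_cluter_non_replicated() above checks the same value on all four nodes with four separate assertions; once the nodes sit in a list, a loop reads the same and scales better (a readability sketch, not part of the patch):

for node in [node1, node2, node3, node4]:
    # After the ON CLUSTER ALTER, every node must report the converted DateTime.
    assert node.query("SELECT date FROM test_table") == "2019-10-01 00:00:00\n"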
"2"}, + ) for node in [node2, node4]: - node.query("INSERT INTO test_table_replicated VALUES(toDateTime('2019-10-02 00:00:00'), 2, 'Hello')") + node.query( + "INSERT INTO test_table_replicated VALUES(toDateTime('2019-10-02 00:00:00'), 2, 'Hello')" + ) for node in [node1, node3]: node.query("SYSTEM SYNC REPLICA test_table_replicated", timeout=20) diff --git a/tests/integration/test_alter_update_cast_keep_nullable/test.py b/tests/integration/test_alter_update_cast_keep_nullable/test.py index 497a9e21d94..71735888d69 100644 --- a/tests/integration/test_alter_update_cast_keep_nullable/test.py +++ b/tests/integration/test_alter_update_cast_keep_nullable/test.py @@ -3,7 +3,10 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', user_configs=['configs/users.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", user_configs=["configs/users.xml"], with_zookeeper=True +) + @pytest.fixture(scope="module") def started_cluster(): @@ -13,24 +16,30 @@ def started_cluster(): finally: cluster.shutdown() -def test_cast_keep_nullable(started_cluster): - setting = node1.query("SELECT value FROM system.settings WHERE name='cast_keep_nullable'") - assert(setting.strip() == "1") - result = node1.query(""" +def test_cast_keep_nullable(started_cluster): + setting = node1.query( + "SELECT value FROM system.settings WHERE name='cast_keep_nullable'" + ) + assert setting.strip() == "1" + + result = node1.query( + """ DROP TABLE IF EXISTS t; CREATE TABLE t (x UInt64) ENGINE = MergeTree ORDER BY tuple(); INSERT INTO t SELECT number FROM numbers(10); SELECT * FROM t; - """) - assert(result.strip() == "0\n1\n2\n3\n4\n5\n6\n7\n8\n9") + """ + ) + assert result.strip() == "0\n1\n2\n3\n4\n5\n6\n7\n8\n9" - error = node1.query_and_get_error(""" + error = node1.query_and_get_error( + """ SET mutations_sync = 1; ALTER TABLE t UPDATE x = x % 3 = 0 ? 
NULL : x WHERE x % 2 = 1;  - """) - assert("DB::Exception: Cannot convert NULL value to non-Nullable type" in error) + """ + ) + assert "DB::Exception: Cannot convert NULL value to non-Nullable type" in error result = node1.query("SELECT * FROM t;") - assert(result.strip() == "0\n1\n2\n3\n4\n5\n6\n7\n8\n9") - + assert result.strip() == "0\n1\n2\n3\n4\n5\n6\n7\n8\n9" diff --git a/tests/integration/test_always_fetch_merged/test.py b/tests/integration/test_always_fetch_merged/test.py index e3b2d5ca392..ca8e775fb97 100644 --- a/tests/integration/test_always_fetch_merged/test.py +++ b/tests/integration/test_always_fetch_merged/test.py @@ -6,8 +6,8 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) +node2 = cluster.add_instance("node2", with_zookeeper=True) @pytest.fixture(scope="module") @@ -22,21 +22,25 @@ def started_cluster(): def test_replica_always_download(started_cluster): - node1.query_with_retry(""" + node1.query_with_retry( + """ CREATE TABLE IF NOT EXISTS test_table( key UInt64, value String ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_table/replicated', '1') ORDER BY tuple() - """) - node2.query_with_retry(""" + """ + ) + node2.query_with_retry( + """ CREATE TABLE IF NOT EXISTS test_table( key UInt64, value String ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_table/replicated', '2') ORDER BY tuple() SETTINGS always_fetch_merged_part=1 - """) + """ + ) # Stop merges on single node node1.query("SYSTEM STOP MERGES") @@ -50,15 +54,29 @@ def test_replica_always_download(started_cluster): time.sleep(5) # Nothing is merged - assert node1.query("SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1") == "10\n" - assert node2.query("SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1") == "10\n" + assert ( + node1.query( + "SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1" + ) + == "10\n" + ) + assert ( + node2.query( + "SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1" + ) + == "10\n" + ) node1.query("SYSTEM START MERGES") node1.query("OPTIMIZE TABLE test_table") node2.query("SYSTEM SYNC REPLICA test_table") - node1_parts = node1.query("SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1").strip() - node2_parts = node2.query("SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1").strip() + node1_parts = node1.query( + "SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1" + ).strip() + node2_parts = node2.query( + "SELECT COUNT() FROM system.parts WHERE table = 'test_table' and active=1" + ).strip() assert int(node1_parts) < 10 assert int(node2_parts) < 10 diff --git a/tests/integration/test_async_drain_connection/test.py b/tests/integration/test_async_drain_connection/test.py index 40d78ebbe7c..66786f4a8f9 100644 --- a/tests/integration/test_async_drain_connection/test.py +++ b/tests/integration/test_async_drain_connection/test.py @@ -5,17 +5,19 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/config.xml']) +node = cluster.add_instance("node", main_configs=["configs/config.xml"]) -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - 
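test_replica_always_download() above relies on SYSTEM STOP MERGES plus a fixed sleep before counting parts. A sketch of the same check written as a polling helper, which tends to be less timing-sensitive (an illustrative alternative, not part of the patch):

import time


def active_part_count(node, table="test_table"):
    return int(
        node.query(
            f"SELECT COUNT() FROM system.parts WHERE table = '{table}' AND active = 1"
        )
    )


def wait_for_merge(node, table="test_table", timeout=60):
    # Poll until the number of active parts drops below the 10 inserted ones.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if active_part_count(node, table) < 10:
            return True
        time.sleep(1)
    return False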
node.query(""" + node.query( + """ create table t (number UInt64) engine = Distributed(test_cluster_two_shards, system, numbers) - """) + """ + ) yield cluster finally: @@ -25,12 +27,15 @@ def started_cluster(): def test_filled_async_drain_connection_pool(started_cluster): def execute_queries(_): for _ in range(100): - node.query('select * from t where number = 0 limit 2', settings={ - 'sleep_in_receive_cancel_ms': int(10e6), - 'max_execution_time': 5, - # decrease drain_timeout to make test more stable - # (another way is to increase max_execution_time, but this will make test slower) - 'drain_timeout': 1, - }) + node.query( + "select * from t where number = 0 limit 2", + settings={ + "sleep_in_receive_cancel_ms": int(10e6), + "max_execution_time": 5, + # decrease drain_timeout to make test more stable + # (another way is to increase max_execution_time, but this will make test slower) + "drain_timeout": 1, + }, + ) any(map(execute_queries, range(10))) diff --git a/tests/integration/test_asynchronous_metric_log_table/test.py b/tests/integration/test_asynchronous_metric_log_table/test.py index 0091832aa7c..96de7daf9e1 100644 --- a/tests/integration/test_asynchronous_metric_log_table/test.py +++ b/tests/integration/test_asynchronous_metric_log_table/test.py @@ -4,8 +4,11 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True, - main_configs=['configs/asynchronous_metrics_update_period_s.xml']) +node1 = cluster.add_instance( + "node1", + with_zookeeper=True, + main_configs=["configs/asynchronous_metrics_update_period_s.xml"], +) @pytest.fixture(scope="module") @@ -27,20 +30,20 @@ def test_event_time_microseconds_field(started_cluster): cluster.start() node1.query("SET log_queries = 1;") node1.query("CREATE DATABASE replica;") - query_create = '''CREATE TABLE replica.test + query_create = """CREATE TABLE replica.test ( id Int64, event_time DateTime ) Engine=MergeTree() PARTITION BY toYYYYMMDD(event_time) - ORDER BY id;''' + ORDER BY id;""" time.sleep(2) node1.query(query_create) - node1.query('''INSERT INTO replica.test VALUES (1, now())''') + node1.query("""INSERT INTO replica.test VALUES (1, now())""") node1.query("SYSTEM FLUSH LOGS;") # query assumes that the event_time field is accurate - equals_query = '''WITH ( + equals_query = """WITH ( ( SELECT event_time_microseconds FROM system.asynchronous_metric_log @@ -53,7 +56,7 @@ def test_event_time_microseconds_field(started_cluster): ORDER BY event_time DESC LIMIT 1 ) AS time) - SELECT if(dateDiff('second', toDateTime(time_with_microseconds), toDateTime(time)) = 0, 'ok', 'fail')''' + SELECT if(dateDiff('second', toDateTime(time_with_microseconds), toDateTime(time)) = 0, 'ok', 'fail')""" assert "ok\n" in node1.query(equals_query) finally: cluster.shutdown() diff --git a/tests/integration/test_atomic_drop_table/test.py b/tests/integration/test_atomic_drop_table/test.py index dc1ad47aa75..1fe88dde099 100644 --- a/tests/integration/test_atomic_drop_table/test.py +++ b/tests/integration/test_atomic_drop_table/test.py @@ -5,21 +5,29 @@ from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=["configs/config.d/zookeeper_session_timeout.xml", - "configs/remote_servers.xml"], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + main_configs=[ + "configs/config.d/zookeeper_session_timeout.xml", + 
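test_filled_async_drain_connection_pool() above drives execute_queries through any(map(execute_queries, range(10))), which runs the calls sequentially and only visits all ten because the function returns None. A plain loop says the same thing more directly (a readability aside, not part of the patch):

for attempt in range(10):
    execute_queries(attempt)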
"configs/remote_servers.xml", + ], + with_zookeeper=True, +) @pytest.fixture(scope="module") def start_cluster(): try: cluster.start() - node1.query("CREATE DATABASE zktest ENGINE=Ordinary;") # Different behaviour with Atomic node1.query( - ''' + "CREATE DATABASE zktest ENGINE=Ordinary;" + ) # Different behaviour with Atomic + node1.query( + """ CREATE TABLE zktest.atomic_drop_table (n UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/zktest/tables/atomic_drop_table', 'node1') PARTITION BY n ORDER BY n - ''' + """ ) yield cluster finally: @@ -31,8 +39,10 @@ def test_atomic_delete_with_stopped_zookeeper(start_cluster): with PartitionManager() as pm: pm.drop_instance_zk_connections(node1) - error = node1.query_and_get_error("DROP TABLE zktest.atomic_drop_table") # Table won't drop + error = node1.query_and_get_error( + "DROP TABLE zktest.atomic_drop_table" + ) # Table won't drop assert error != "" time.sleep(5) - assert '8192' in node1.query("select * from zktest.atomic_drop_table") + assert "8192" in node1.query("select * from zktest.atomic_drop_table") diff --git a/tests/integration/test_attach_partition_with_large_destination/test.py b/tests/integration/test_attach_partition_with_large_destination/test.py index 50f24f7a01e..0a4ab9fada1 100644 --- a/tests/integration/test_attach_partition_with_large_destination/test.py +++ b/tests/integration/test_attach_partition_with_large_destination/test.py @@ -3,7 +3,9 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=["configs/config.xml"], with_zookeeper=True) +node = cluster.add_instance( + "node", main_configs=["configs/config.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -14,18 +16,35 @@ def started_cluster(): finally: cluster.shutdown() + def create_force_drop_flag(node): force_drop_flag_path = "/var/lib/clickhouse/flags/force_drop_table" - node.exec_in_container(["bash", "-c", "touch {} && chmod a=rw {}".format(force_drop_flag_path, force_drop_flag_path)], user="root") + node.exec_in_container( + [ + "bash", + "-c", + "touch {} && chmod a=rw {}".format( + force_drop_flag_path, force_drop_flag_path + ), + ], + user="root", + ) -@pytest.mark.parametrize("engine", ['Ordinary', 'Atomic']) + +@pytest.mark.parametrize("engine", ["Ordinary", "Atomic"]) def test_attach_partition_with_large_destination(started_cluster, engine): # Initialize node.query("CREATE DATABASE db ENGINE={}".format(engine)) - node.query("CREATE TABLE db.destination (n UInt64) ENGINE=ReplicatedMergeTree('/test/destination', 'r1') ORDER BY n PARTITION BY n % 2") - node.query("CREATE TABLE db.source_1 (n UInt64) ENGINE=ReplicatedMergeTree('/test/source_1', 'r1') ORDER BY n PARTITION BY n % 2") + node.query( + "CREATE TABLE db.destination (n UInt64) ENGINE=ReplicatedMergeTree('/test/destination', 'r1') ORDER BY n PARTITION BY n % 2" + ) + node.query( + "CREATE TABLE db.source_1 (n UInt64) ENGINE=ReplicatedMergeTree('/test/source_1', 'r1') ORDER BY n PARTITION BY n % 2" + ) node.query("INSERT INTO db.source_1 VALUES (1), (2), (3), (4)") - node.query("CREATE TABLE db.source_2 (n UInt64) ENGINE=ReplicatedMergeTree('/test/source_2', 'r1') ORDER BY n PARTITION BY n % 2") + node.query( + "CREATE TABLE db.source_2 (n UInt64) ENGINE=ReplicatedMergeTree('/test/source_2', 'r1') ORDER BY n PARTITION BY n % 2" + ) node.query("INSERT INTO db.source_2 VALUES (5), (6), (7), (8)") # Attach partition when destination partition is empty @@ -33,7 +52,9 @@ def 
test_attach_partition_with_large_destination(started_cluster, engine): assert node.query("SELECT n FROM db.destination ORDER BY n") == "2\n4\n" # REPLACE PARTITION should still respect max_partition_size_to_drop - assert node.query_and_get_error("ALTER TABLE db.destination REPLACE PARTITION 0 FROM db.source_2") + assert node.query_and_get_error( + "ALTER TABLE db.destination REPLACE PARTITION 0 FROM db.source_2" + ) assert node.query("SELECT n FROM db.destination ORDER BY n") == "2\n4\n" # Attach partition when destination partition is larger than max_partition_size_to_drop @@ -47,4 +68,4 @@ def test_attach_partition_with_large_destination(started_cluster, engine): node.query("DROP TABLE db.source_2 SYNC") create_force_drop_flag(node) node.query("DROP TABLE db.destination SYNC") - node.query("DROP DATABASE db") \ No newline at end of file + node.query("DROP DATABASE db") diff --git a/tests/integration/test_attach_without_checksums/test.py b/tests/integration/test_attach_without_checksums/test.py index ab55c5efb43..aee4b757efe 100644 --- a/tests/integration/test_attach_without_checksums/test.py +++ b/tests/integration/test_attach_without_checksums/test.py @@ -3,7 +3,8 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1') +node1 = cluster.add_instance("node1") + @pytest.fixture(scope="module") def start_cluster(): @@ -17,9 +18,12 @@ def start_cluster(): def test_attach_without_checksums(start_cluster): node1.query( - "CREATE TABLE test (date Date, key Int32, value String) Engine=MergeTree ORDER BY key PARTITION by date") + "CREATE TABLE test (date Date, key Int32, value String) Engine=MergeTree ORDER BY key PARTITION by date" + ) - node1.query("INSERT INTO test SELECT toDate('2019-10-01'), number, toString(number) FROM numbers(100)") + node1.query( + "INSERT INTO test SELECT toDate('2019-10-01'), number, toString(number) FROM numbers(100)" + ) assert node1.query("SELECT COUNT() FROM test WHERE key % 10 == 0") == "10\n" @@ -30,15 +34,27 @@ def test_attach_without_checksums(start_cluster): # to be sure output not empty node1.exec_in_container( - ['bash', '-c', 'find /var/lib/clickhouse/data/default/test/detached -name "checksums.txt" | grep -e ".*" '], - privileged=True, user='root') + [ + "bash", + "-c", + 'find /var/lib/clickhouse/data/default/test/detached -name "checksums.txt" | grep -e ".*" ', + ], + privileged=True, + user="root", + ) node1.exec_in_container( - ['bash', '-c', 'find /var/lib/clickhouse/data/default/test/detached -name "checksums.txt" -delete'], - privileged=True, user='root') + [ + "bash", + "-c", + 'find /var/lib/clickhouse/data/default/test/detached -name "checksums.txt" -delete', + ], + privileged=True, + user="root", + ) node1.query("ALTER TABLE test ATTACH PARTITION '2019-10-01'") assert node1.query("SELECT COUNT() FROM test WHERE key % 10 == 0") == "10\n" assert node1.query("SELECT COUNT() FROM test") == "100\n" - node1.query("DROP TABLE test") \ No newline at end of file + node1.query("DROP TABLE test") diff --git a/tests/integration/test_attach_without_fetching/test.py b/tests/integration/test_attach_without_fetching/test.py index 874f5b36ddc..60500380b31 100644 --- a/tests/integration/test_attach_without_fetching/test.py +++ b/tests/integration/test_attach_without_fetching/test.py @@ -7,19 +7,25 @@ from helpers.test_tools import assert_eq_with_retry from helpers.network import PartitionManager from helpers.corrupt_part_data_on_disk import corrupt_part_data_by_path + def fill_node(node): 
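create_force_drop_flag() above touches the flag file through bash with two .format() placeholders. The same helper written with an f-string (an illustrative variant, not part of the patch):

def create_force_drop_flag(node):
    flag = "/var/lib/clickhouse/flags/force_drop_table"
    # Create the flag and make it writable for the server user.
    node.exec_in_container(
        ["bash", "-c", f"touch {flag} && chmod a=rw {flag}"], user="root"
    )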
node.query_with_retry( - ''' + """ CREATE TABLE IF NOT EXISTS test(n UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test', '{replica}') ORDER BY n PARTITION BY n % 10; - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) + cluster = ClickHouseCluster(__file__) -node_1 = cluster.add_instance('replica1', with_zookeeper=True) -node_2 = cluster.add_instance('replica2', with_zookeeper=True) -node_3 = cluster.add_instance('replica3', with_zookeeper=True) +node_1 = cluster.add_instance("replica1", with_zookeeper=True) +node_2 = cluster.add_instance("replica2", with_zookeeper=True) +node_3 = cluster.add_instance("replica3", with_zookeeper=True) + @pytest.fixture(scope="module") def start_cluster(): @@ -36,27 +42,42 @@ def start_cluster(): finally: cluster.shutdown() + def check_data(nodes, detached_parts): for node in nodes: - print("> Replication queue for", node.name, "\n> table\treplica_name\tsource_replica\ttype\tposition\n", - node.query_with_retry("SELECT table, replica_name, source_replica, type, position FROM system.replication_queue")) + print( + "> Replication queue for", + node.name, + "\n> table\treplica_name\tsource_replica\ttype\tposition\n", + node.query_with_retry( + "SELECT table, replica_name, source_replica, type, position FROM system.replication_queue" + ), + ) node.query_with_retry("SYSTEM SYNC REPLICA test") print("> Checking data integrity for", node.name) for i in range(10): - assert_eq_with_retry(node, "SELECT count() FROM test WHERE n % 10 == " + str(i), - "0\n" if i in detached_parts else "10\n") + assert_eq_with_retry( + node, + "SELECT count() FROM test WHERE n % 10 == " + str(i), + "0\n" if i in detached_parts else "10\n", + ) - assert_eq_with_retry(node, "SELECT count() FROM system.parts WHERE table='test'", - str(10 - len(detached_parts)) + "\n") + assert_eq_with_retry( + node, + "SELECT count() FROM system.parts WHERE table='test'", + str(10 - len(detached_parts)) + "\n", + ) res: str = node.query("SELECT * FROM test ORDER BY n") for other in nodes: if other != node: - logging.debug(f"> Checking data consistency, {other.name} vs {node.name}") + logging.debug( + f"> Checking data consistency, {other.name} vs {node.name}" + ) assert_eq_with_retry(other, "SELECT * FROM test ORDER BY n", res) @@ -83,7 +104,6 @@ def test_attach_without_fetching(start_cluster): # files missing. node_1.query("ALTER TABLE test DETACH PARTITION 2") - check_data([node_1, node_2], detached_parts=[0, 1, 2]) # 2. Create the third replica @@ -94,14 +114,28 @@ def test_attach_without_fetching(start_cluster): # Replica 2 should also download the data from 1 as the checksums won't match. 
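check_data() above verifies each of the ten partitions with assert_eq_with_retry. The core of that loop, shown standalone (a sketch using the same helpers.test_tools function the file imports):

from helpers.test_tools import assert_eq_with_retry


def check_partitions(node, detached_parts):
    # Detached partitions must be empty; every other partition keeps its 10 rows.
    for i in range(10):
        expected = "0\n" if i in detached_parts else "10\n"
        assert_eq_with_retry(
            node, f"SELECT count() FROM test WHERE n % 10 == {i}", expected
        )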
logging.debug("Checking attach with corrupted part data with files missing") - to_delete = node_2.exec_in_container(['bash', '-c', - 'cd {p} && ls *.bin'.format( - p="/var/lib/clickhouse/data/default/test/detached/2_0_0_0")], privileged=True) + to_delete = node_2.exec_in_container( + [ + "bash", + "-c", + "cd {p} && ls *.bin".format( + p="/var/lib/clickhouse/data/default/test/detached/2_0_0_0" + ), + ], + privileged=True, + ) logging.debug(f"Before deleting: {to_delete}") - node_2.exec_in_container(['bash', '-c', - 'cd {p} && rm -fr *.bin'.format( - p="/var/lib/clickhouse/data/default/test/detached/2_0_0_0")], privileged=True) + node_2.exec_in_container( + [ + "bash", + "-c", + "cd {p} && rm -fr *.bin".format( + p="/var/lib/clickhouse/data/default/test/detached/2_0_0_0" + ), + ], + privileged=True, + ) node_1.query("ALTER TABLE test ATTACH PARTITION 2") check_data([node_1, node_2, node_3], detached_parts=[0, 1]) @@ -111,7 +145,9 @@ def test_attach_without_fetching(start_cluster): # Replica 2 should also download the data from 1 as the checksums won't match. print("Checking attach with corrupted part data with all of the files present") - corrupt_part_data_by_path(node_2, "/var/lib/clickhouse/data/default/test/detached/1_0_0_0") + corrupt_part_data_by_path( + node_2, "/var/lib/clickhouse/data/default/test/detached/1_0_0_0" + ) node_1.query("ALTER TABLE test ATTACH PARTITION 1") check_data([node_1, node_2, node_3], detached_parts=[0]) @@ -123,8 +159,8 @@ def test_attach_without_fetching(start_cluster): with PartitionManager() as pm: # If something goes wrong and replica 2 wants to fetch data, the test will fail. - pm.partition_instances(node_2, node_1, action='REJECT --reject-with tcp-reset') - pm.partition_instances(node_1, node_3, action='REJECT --reject-with tcp-reset') + pm.partition_instances(node_2, node_1, action="REJECT --reject-with tcp-reset") + pm.partition_instances(node_1, node_3, action="REJECT --reject-with tcp-reset") node_1.query("ALTER TABLE test ATTACH PART '0_0_0_0'") diff --git a/tests/integration/test_authentication/test.py b/tests/integration/test_authentication/test.py index 0651efa11b4..38be07eca49 100644 --- a/tests/integration/test_authentication/test.py +++ b/tests/integration/test_authentication/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance("instance") @pytest.fixture(scope="module", autouse=True) @@ -20,18 +20,30 @@ def setup_nodes(): def test_authentication_pass(): - assert instance.query("SELECT currentUser()", user='sasha') == 'sasha\n' - assert instance.query("SELECT currentUser()", user='masha', password='qwerty') == 'masha\n' + assert instance.query("SELECT currentUser()", user="sasha") == "sasha\n" + assert ( + instance.query("SELECT currentUser()", user="masha", password="qwerty") + == "masha\n" + ) # 'no_password' authentication type allows to login with any password. - assert instance.query("SELECT currentUser()", user='sasha', password='something') == 'sasha\n' - assert instance.query("SELECT currentUser()", user='sasha', password='something2') == 'sasha\n' + assert ( + instance.query("SELECT currentUser()", user="sasha", password="something") + == "sasha\n" + ) + assert ( + instance.query("SELECT currentUser()", user="sasha", password="something2") + == "sasha\n" + ) def test_authentication_fail(): # User doesn't exist. 
- assert "vasya: Authentication failed" in instance.query_and_get_error("SELECT currentUser()", user='vasya') + assert "vasya: Authentication failed" in instance.query_and_get_error( + "SELECT currentUser()", user="vasya" + ) # Wrong password. - assert "masha: Authentication failed" in instance.query_and_get_error("SELECT currentUser()", user='masha', - password='123') + assert "masha: Authentication failed" in instance.query_and_get_error( + "SELECT currentUser()", user="masha", password="123" + ) diff --git a/tests/integration/test_azure_blob_storage_zero_copy_replication/test.py b/tests/integration/test_azure_blob_storage_zero_copy_replication/test.py index 08fb6e53e7b..c1d5cdc7ce5 100644 --- a/tests/integration/test_azure_blob_storage_zero_copy_replication/test.py +++ b/tests/integration/test_azure_blob_storage_zero_copy_replication/test.py @@ -17,12 +17,20 @@ CLUSTER_NAME = "test_cluster" def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance(NODE1, main_configs=["configs/config.d/storage_conf.xml"], macros={'replica': '1'}, - with_azurite=True, - with_zookeeper=True) - cluster.add_instance(NODE2, main_configs=["configs/config.d/storage_conf.xml"], macros={'replica': '2'}, - with_azurite=True, - with_zookeeper=True) + cluster.add_instance( + NODE1, + main_configs=["configs/config.d/storage_conf.xml"], + macros={"replica": "1"}, + with_azurite=True, + with_zookeeper=True, + ) + cluster.add_instance( + NODE2, + main_configs=["configs/config.d/storage_conf.xml"], + macros={"replica": "2"}, + with_azurite=True, + with_zookeeper=True, + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -53,7 +61,10 @@ def create_table(node, table_name, replica, **additional_settings): def get_large_objects_count(blob_container_client, large_size_threshold=100): - return sum(blob['size'] > large_size_threshold for blob in blob_container_client.list_blobs()) + return sum( + blob["size"] > large_size_threshold + for blob in blob_container_client.list_blobs() + ) def test_zero_copy_replication(cluster): @@ -61,15 +72,21 @@ def test_zero_copy_replication(cluster): node2 = cluster.instances[NODE2] create_table(node1, TABLE_NAME, 1) - blob_container_client = cluster.blob_service_client.get_container_client(CONTAINER_NAME) + blob_container_client = cluster.blob_service_client.get_container_client( + CONTAINER_NAME + ) values1 = "(0,'data'),(1,'data')" values2 = "(2,'data'),(3,'data')" node1.query(f"INSERT INTO {TABLE_NAME} VALUES {values1}") node2.query(f"SYSTEM SYNC REPLICA {TABLE_NAME}") - assert node1.query(f"SELECT * FROM {TABLE_NAME} order by id FORMAT Values") == values1 - assert node2.query(f"SELECT * FROM {TABLE_NAME} order by id FORMAT Values") == values1 + assert ( + node1.query(f"SELECT * FROM {TABLE_NAME} order by id FORMAT Values") == values1 + ) + assert ( + node2.query(f"SELECT * FROM {TABLE_NAME} order by id FORMAT Values") == values1 + ) # Based on version 21.x - should be only one file with size 100+ (checksums.txt), used by both nodes assert get_large_objects_count(blob_container_client) == 1 @@ -77,7 +94,13 @@ def test_zero_copy_replication(cluster): node2.query(f"INSERT INTO {TABLE_NAME} VALUES {values2}") node1.query(f"SYSTEM SYNC REPLICA {TABLE_NAME}") - assert node2.query(f"SELECT * FROM {TABLE_NAME} order by id FORMAT Values") == values1 + "," + values2 - assert node1.query(f"SELECT * FROM {TABLE_NAME} order by id FORMAT Values") == values1 + "," + values2 + assert ( + node2.query(f"SELECT * FROM {TABLE_NAME} order by id 
FORMAT Values") + == values1 + "," + values2 + ) + assert ( + node1.query(f"SELECT * FROM {TABLE_NAME} order by id FORMAT Values") + == values1 + "," + values2 + ) assert get_large_objects_count(blob_container_client) == 2 diff --git a/tests/integration/test_backup_restore/test.py b/tests/integration/test_backup_restore/test.py index b990cec2364..905abef05b0 100644 --- a/tests/integration/test_backup_restore/test.py +++ b/tests/integration/test_backup_restore/test.py @@ -6,25 +6,35 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('node') -path_to_data = '/var/lib/clickhouse/' +instance = cluster.add_instance("node") +path_to_data = "/var/lib/clickhouse/" @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - instance.query('CREATE DATABASE test ENGINE = Ordinary') # Different path in shadow/ with Atomic + instance.query( + "CREATE DATABASE test ENGINE = Ordinary" + ) # Different path in shadow/ with Atomic instance.query("DROP TABLE IF EXISTS test.tbl") - instance.query("CREATE TABLE test.tbl (p Date, k Int8) ENGINE = MergeTree PARTITION BY toYYYYMM(p) ORDER BY p") + instance.query( + "CREATE TABLE test.tbl (p Date, k Int8) ENGINE = MergeTree PARTITION BY toYYYYMM(p) ORDER BY p" + ) for i in range(1, 4): - instance.query('INSERT INTO test.tbl (p, k) VALUES(toDate({}), {})'.format(i, i)) + instance.query( + "INSERT INTO test.tbl (p, k) VALUES(toDate({}), {})".format(i, i) + ) for i in range(31, 34): - instance.query('INSERT INTO test.tbl (p, k) VALUES(toDate({}), {})'.format(i, i)) + instance.query( + "INSERT INTO test.tbl (p, k) VALUES(toDate({}), {})".format(i, i) + ) - expected = TSV('1970-01-02\t1\n1970-01-03\t2\n1970-01-04\t3\n1970-02-01\t31\n1970-02-02\t32\n1970-02-03\t33') + expected = TSV( + "1970-01-02\t1\n1970-01-03\t2\n1970-01-04\t3\n1970-02-01\t31\n1970-02-02\t32\n1970-02-03\t33" + ) res = instance.query("SELECT * FROM test.tbl ORDER BY p") - assert (TSV(res) == expected) + assert TSV(res) == expected instance.query("ALTER TABLE test.tbl FREEZE") @@ -33,21 +43,24 @@ def started_cluster(): finally: cluster.shutdown() + def get_last_backup_path(instance, database, table): - fp_increment = os.path.join(path_to_data, 'shadow/increment.txt') - increment = instance.exec_in_container(['cat', fp_increment]).strip() - return os.path.join(path_to_data, 'shadow', increment, 'data', database, table) + fp_increment = os.path.join(path_to_data, "shadow/increment.txt") + increment = instance.exec_in_container(["cat", fp_increment]).strip() + return os.path.join(path_to_data, "shadow", increment, "data", database, table) + def copy_backup_to_detached(instance, database, src_table, dst_table): - fp_backup = os.path.join(path_to_data, 'shadow', '*', 'data', database, src_table) - fp_detached = os.path.join(path_to_data, 'data', database, dst_table, 'detached') - logging.debug(f'copy from {fp_backup} to {fp_detached}') - instance.exec_in_container(['bash', '-c', f'cp -r {fp_backup} -T {fp_detached}']) + fp_backup = os.path.join(path_to_data, "shadow", "*", "data", database, src_table) + fp_detached = os.path.join(path_to_data, "data", database, dst_table, "detached") + logging.debug(f"copy from {fp_backup} to {fp_detached}") + instance.exec_in_container(["bash", "-c", f"cp -r {fp_backup} -T {fp_detached}"]) + def test_restore(started_cluster): instance.query("CREATE TABLE test.tbl1 AS test.tbl") - copy_backup_to_detached(started_cluster.instances['node'], 'test', 'tbl', 
'tbl1') + copy_backup_to_detached(started_cluster.instances["node"], "test", "tbl", "tbl1") # The data_version of parts to be attached are larger than the newly created table's data_version. instance.query("ALTER TABLE test.tbl1 ATTACH PARTITION 197001") @@ -55,17 +68,21 @@ def test_restore(started_cluster): instance.query("SELECT sleep(2)") # Validate the attached parts are identical to the backup. - expected = TSV('1970-01-02\t1\n1970-01-03\t2\n1970-01-04\t3\n1970-02-01\t31\n1970-02-02\t32\n1970-02-03\t33') + expected = TSV( + "1970-01-02\t1\n1970-01-03\t2\n1970-01-04\t3\n1970-02-01\t31\n1970-02-02\t32\n1970-02-03\t33" + ) res = instance.query("SELECT * FROM test.tbl1 ORDER BY p") - assert (TSV(res) == expected) + assert TSV(res) == expected instance.query("ALTER TABLE test.tbl1 UPDATE k=10 WHERE 1") instance.query("SELECT sleep(2)") # Validate mutation has been applied to all attached parts. - expected = TSV('1970-01-02\t10\n1970-01-03\t10\n1970-01-04\t10\n1970-02-01\t10\n1970-02-02\t10\n1970-02-03\t10') + expected = TSV( + "1970-01-02\t10\n1970-01-03\t10\n1970-01-04\t10\n1970-02-01\t10\n1970-02-02\t10\n1970-02-03\t10" + ) res = instance.query("SELECT * FROM test.tbl1 ORDER BY p") - assert (TSV(res) == expected) + assert TSV(res) == expected instance.query("DROP TABLE IF EXISTS test.tbl1") @@ -73,15 +90,19 @@ def test_restore(started_cluster): def test_attach_partition(started_cluster): instance.query("CREATE TABLE test.tbl2 AS test.tbl") for i in range(3, 5): - instance.query('INSERT INTO test.tbl2(p, k) VALUES(toDate({}), {})'.format(i, i)) + instance.query( + "INSERT INTO test.tbl2(p, k) VALUES(toDate({}), {})".format(i, i) + ) for i in range(33, 35): - instance.query('INSERT INTO test.tbl2(p, k) VALUES(toDate({}), {})'.format(i, i)) + instance.query( + "INSERT INTO test.tbl2(p, k) VALUES(toDate({}), {})".format(i, i) + ) - expected = TSV('1970-01-04\t3\n1970-01-05\t4\n1970-02-03\t33\n1970-02-04\t34') + expected = TSV("1970-01-04\t3\n1970-01-05\t4\n1970-02-03\t33\n1970-02-04\t34") res = instance.query("SELECT * FROM test.tbl2 ORDER BY p") - assert (TSV(res) == expected) + assert TSV(res) == expected - copy_backup_to_detached(started_cluster.instances['node'], 'test', 'tbl', 'tbl2') + copy_backup_to_detached(started_cluster.instances["node"], "test", "tbl", "tbl2") # The data_version of parts to be attached # - may be less than, equal to or larger than the current table's data_version. @@ -91,18 +112,20 @@ def test_attach_partition(started_cluster): instance.query("SELECT sleep(2)") expected = TSV( - '1970-01-02\t1\n1970-01-03\t2\n1970-01-04\t3\n1970-01-04\t3\n1970-01-05\t4\n1970-02-01\t31\n1970-02-02\t32\n1970-02-03\t33\n1970-02-03\t33\n1970-02-04\t34') + "1970-01-02\t1\n1970-01-03\t2\n1970-01-04\t3\n1970-01-04\t3\n1970-01-05\t4\n1970-02-01\t31\n1970-02-02\t32\n1970-02-03\t33\n1970-02-03\t33\n1970-02-04\t34" + ) res = instance.query("SELECT * FROM test.tbl2 ORDER BY p") - assert (TSV(res) == expected) + assert TSV(res) == expected instance.query("ALTER TABLE test.tbl2 UPDATE k=10 WHERE 1") instance.query("SELECT sleep(2)") # Validate mutation has been applied to all attached parts. 
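    # The mutation above sets k=10 unconditionally (WHERE 1), so every row, including the rows
    # attached from the backup, is expected to come back with k equal to 10.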
expected = TSV( - '1970-01-02\t10\n1970-01-03\t10\n1970-01-04\t10\n1970-01-04\t10\n1970-01-05\t10\n1970-02-01\t10\n1970-02-02\t10\n1970-02-03\t10\n1970-02-03\t10\n1970-02-04\t10') + "1970-01-02\t10\n1970-01-03\t10\n1970-01-04\t10\n1970-01-04\t10\n1970-01-05\t10\n1970-02-01\t10\n1970-02-02\t10\n1970-02-03\t10\n1970-02-03\t10\n1970-02-04\t10" + ) res = instance.query("SELECT * FROM test.tbl2 ORDER BY p") - assert (TSV(res) == expected) + assert TSV(res) == expected instance.query("DROP TABLE IF EXISTS test.tbl2") @@ -110,15 +133,19 @@ def test_attach_partition(started_cluster): def test_replace_partition(started_cluster): instance.query("CREATE TABLE test.tbl3 AS test.tbl") for i in range(3, 5): - instance.query('INSERT INTO test.tbl3(p, k) VALUES(toDate({}), {})'.format(i, i)) + instance.query( + "INSERT INTO test.tbl3(p, k) VALUES(toDate({}), {})".format(i, i) + ) for i in range(33, 35): - instance.query('INSERT INTO test.tbl3(p, k) VALUES(toDate({}), {})'.format(i, i)) + instance.query( + "INSERT INTO test.tbl3(p, k) VALUES(toDate({}), {})".format(i, i) + ) - expected = TSV('1970-01-04\t3\n1970-01-05\t4\n1970-02-03\t33\n1970-02-04\t34') + expected = TSV("1970-01-04\t3\n1970-01-05\t4\n1970-02-03\t33\n1970-02-04\t34") res = instance.query("SELECT * FROM test.tbl3 ORDER BY p") - assert (TSV(res) == expected) + assert TSV(res) == expected - copy_backup_to_detached(started_cluster.instances['node'], 'test', 'tbl', 'tbl3') + copy_backup_to_detached(started_cluster.instances["node"], "test", "tbl", "tbl3") # The data_version of parts to be copied # - may be less than, equal to or larger than the current table data_version. @@ -126,35 +153,56 @@ def test_replace_partition(started_cluster): instance.query("ALTER TABLE test.tbl3 REPLACE PARTITION 197002 FROM test.tbl") instance.query("SELECT sleep(2)") - expected = TSV('1970-01-04\t3\n1970-01-05\t4\n1970-02-01\t31\n1970-02-02\t32\n1970-02-03\t33') + expected = TSV( + "1970-01-04\t3\n1970-01-05\t4\n1970-02-01\t31\n1970-02-02\t32\n1970-02-03\t33" + ) res = instance.query("SELECT * FROM test.tbl3 ORDER BY p") - assert (TSV(res) == expected) + assert TSV(res) == expected instance.query("ALTER TABLE test.tbl3 UPDATE k=10 WHERE 1") instance.query("SELECT sleep(2)") # Validate mutation has been applied to all copied parts. 
- expected = TSV('1970-01-04\t10\n1970-01-05\t10\n1970-02-01\t10\n1970-02-02\t10\n1970-02-03\t10') + expected = TSV( + "1970-01-04\t10\n1970-01-05\t10\n1970-02-01\t10\n1970-02-02\t10\n1970-02-03\t10" + ) res = instance.query("SELECT * FROM test.tbl3 ORDER BY p") - assert (TSV(res) == expected) + assert TSV(res) == expected instance.query("DROP TABLE IF EXISTS test.tbl3") + def test_freeze_in_memory(started_cluster): - instance.query("CREATE TABLE test.t_in_memory(a UInt32, s String) ENGINE = MergeTree ORDER BY a SETTINGS min_rows_for_compact_part = 1000") + instance.query( + "CREATE TABLE test.t_in_memory(a UInt32, s String) ENGINE = MergeTree ORDER BY a SETTINGS min_rows_for_compact_part = 1000" + ) instance.query("INSERT INTO test.t_in_memory VALUES (1, 'a')") instance.query("ALTER TABLE test.t_in_memory FREEZE") - fp_backup = get_last_backup_path(started_cluster.instances['node'], 'test', 't_in_memory') - part_path = fp_backup + '/all_1_1_0/' + fp_backup = get_last_backup_path( + started_cluster.instances["node"], "test", "t_in_memory" + ) + part_path = fp_backup + "/all_1_1_0/" - assert TSV(instance.query("SELECT part_type, is_frozen FROM system.parts WHERE database = 'test' AND table = 't_in_memory'")) == TSV("InMemory\t1\n") - instance.exec_in_container(['test', '-f', part_path + '/data.bin']) - assert instance.exec_in_container(['cat', part_path + '/count.txt']).strip() == '1' + assert TSV( + instance.query( + "SELECT part_type, is_frozen FROM system.parts WHERE database = 'test' AND table = 't_in_memory'" + ) + ) == TSV("InMemory\t1\n") + instance.exec_in_container(["test", "-f", part_path + "/data.bin"]) + assert instance.exec_in_container(["cat", part_path + "/count.txt"]).strip() == "1" - instance.query("CREATE TABLE test.t_in_memory_2(a UInt32, s String) ENGINE = MergeTree ORDER BY a") - copy_backup_to_detached(started_cluster.instances['node'], 'test', 't_in_memory', 't_in_memory_2') + instance.query( + "CREATE TABLE test.t_in_memory_2(a UInt32, s String) ENGINE = MergeTree ORDER BY a" + ) + copy_backup_to_detached( + started_cluster.instances["node"], "test", "t_in_memory", "t_in_memory_2" + ) instance.query("ALTER TABLE test.t_in_memory_2 ATTACH PARTITION ID 'all'") - assert TSV(instance.query("SELECT part_type FROM system.parts WHERE database = 'test' AND table = 't_in_memory_2'")) == TSV("Compact\n") + assert TSV( + instance.query( + "SELECT part_type FROM system.parts WHERE database = 'test' AND table = 't_in_memory_2'" + ) + ) == TSV("Compact\n") assert TSV(instance.query("SELECT a, s FROM test.t_in_memory_2")) == TSV("1\ta\n") diff --git a/tests/integration/test_backup_restore_new/test.py b/tests/integration/test_backup_restore_new/test.py index f9bfababadc..32ad0fbebbc 100644 --- a/tests/integration/test_backup_restore_new/test.py +++ b/tests/integration/test_backup_restore_new/test.py @@ -1,16 +1,22 @@ import pytest import re +import os.path from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', main_configs=["configs/backups_disk.xml"], external_dirs=["/backups/"]) +instance = cluster.add_instance( + "instance", main_configs=["configs/backups_disk.xml"], external_dirs=["/backups/"] +) + def create_and_fill_table(engine="MergeTree"): if engine == "MergeTree": engine = "MergeTree ORDER BY y PARTITION BY x%10" instance.query("CREATE DATABASE test") instance.query(f"CREATE TABLE test.table(x UInt32, y String) ENGINE={engine}") - instance.query("INSERT INTO test.table SELECT number, toString(number) 
FROM numbers(100)") + instance.query( + "INSERT INTO test.table SELECT number, toString(number) FROM numbers(100)" + ) @pytest.fixture(scope="module", autouse=True) @@ -31,13 +37,22 @@ def cleanup_after_test(): backup_id_counter = 0 + + def new_backup_name(): global backup_id_counter backup_id_counter += 1 return f"Disk('backups', '{backup_id_counter}/')" -@pytest.mark.parametrize("engine", ["MergeTree", "Log", "TinyLog", "StripeLog"]) +def get_backup_dir(backup_name): + counter = int(backup_name.split(",")[1].strip("')/ ")) + return os.path.join(instance.path, f"backups/{counter}") + + +@pytest.mark.parametrize( + "engine", ["MergeTree", "Log", "TinyLog", "StripeLog", "Memory"] +) def test_restore_table(engine): backup_name = new_backup_name() create_and_fill_table(engine=engine) @@ -52,7 +67,9 @@ def test_restore_table(engine): assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" -@pytest.mark.parametrize("engine", ["MergeTree", "Log", "TinyLog", "StripeLog"]) +@pytest.mark.parametrize( + "engine", ["MergeTree", "Log", "TinyLog", "StripeLog", "Memory"] +) def test_restore_table_into_existing_table(engine): backup_name = new_backup_name() create_and_fill_table(engine=engine) @@ -60,10 +77,14 @@ def test_restore_table_into_existing_table(engine): assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" instance.query(f"BACKUP TABLE test.table TO {backup_name}") - instance.query(f"RESTORE TABLE test.table INTO test.table FROM {backup_name}") + instance.query( + f"RESTORE TABLE test.table INTO test.table FROM {backup_name} SETTINGS throw_if_table_exists=0" + ) assert instance.query("SELECT count(), sum(x) FROM test.table") == "200\t9900\n" - instance.query(f"RESTORE TABLE test.table INTO test.table FROM {backup_name}") + instance.query( + f"RESTORE TABLE test.table INTO test.table FROM {backup_name} SETTINGS throw_if_table_exists=0" + ) assert instance.query("SELECT count(), sum(x) FROM test.table") == "300\t14850\n" @@ -93,6 +114,20 @@ def test_backup_table_under_another_name(): assert instance.query("SELECT count(), sum(x) FROM test.table2") == "100\t4950\n" +def test_materialized_view(): + backup_name = new_backup_name() + instance.query( + "CREATE MATERIALIZED VIEW mv_1(x UInt8) ENGINE=MergeTree ORDER BY tuple() POPULATE AS SELECT 1 AS x" + ) + + instance.query(f"BACKUP TABLE mv_1 TO {backup_name}") + instance.query("DROP TABLE mv_1") + instance.query(f"RESTORE TABLE mv_1 FROM {backup_name}") + + assert instance.query("SELECT * FROM mv_1") == "1\n" + instance.query("DROP TABLE mv_1") + + def test_incremental_backup(): backup_name = new_backup_name() incremental_backup_name = new_backup_name() @@ -104,23 +139,64 @@ def test_incremental_backup(): instance.query("INSERT INTO test.table VALUES (65, 'a'), (66, 'b')") assert instance.query("SELECT count(), sum(x) FROM test.table") == "102\t5081\n" - instance.query(f"BACKUP TABLE test.table TO {incremental_backup_name} SETTINGS base_backup = {backup_name}") + instance.query( + f"BACKUP TABLE test.table TO {incremental_backup_name} SETTINGS base_backup = {backup_name}" + ) - instance.query(f"RESTORE TABLE test.table AS test.table2 FROM {incremental_backup_name}") + instance.query( + f"RESTORE TABLE test.table AS test.table2 FROM {incremental_backup_name}" + ) assert instance.query("SELECT count(), sum(x) FROM test.table2") == "102\t5081\n" +def test_incremental_backup_after_renaming_table(): + backup_name = new_backup_name() + incremental_backup_name = new_backup_name() + 
create_and_fill_table() + + instance.query(f"BACKUP TABLE test.table TO {backup_name}") + instance.query("RENAME TABLE test.table TO test.table2") + instance.query( + f"BACKUP TABLE test.table2 TO {incremental_backup_name} SETTINGS base_backup = {backup_name}" + ) + + # Files in a base backup can be searched by checksum, so an incremental backup with a renamed table actually + # contains only its changed metadata. + assert os.path.isdir(os.path.join(get_backup_dir(backup_name), "metadata")) == True + assert os.path.isdir(os.path.join(get_backup_dir(backup_name), "data")) == True + assert ( + os.path.isdir(os.path.join(get_backup_dir(incremental_backup_name), "metadata")) + == True + ) + assert ( + os.path.isdir(os.path.join(get_backup_dir(incremental_backup_name), "data")) + == False + ) + + instance.query("DROP TABLE test.table2") + instance.query(f"RESTORE TABLE test.table2 FROM {incremental_backup_name}") + assert instance.query("SELECT count(), sum(x) FROM test.table2") == "100\t4950\n" + + def test_backup_not_found_or_already_exists(): backup_name = new_backup_name() expected_error = "Backup .* not found" - assert re.search(expected_error, instance.query_and_get_error(f"RESTORE TABLE test.table AS test.table2 FROM {backup_name}")) + assert re.search( + expected_error, + instance.query_and_get_error( + f"RESTORE TABLE test.table AS test.table2 FROM {backup_name}" + ), + ) create_and_fill_table() instance.query(f"BACKUP TABLE test.table TO {backup_name}") expected_error = "Backup .* already exists" - assert re.search(expected_error, instance.query_and_get_error(f"BACKUP TABLE test.table TO {backup_name}")) + assert re.search( + expected_error, + instance.query_and_get_error(f"BACKUP TABLE test.table TO {backup_name}"), + ) def test_file_engine(): @@ -147,3 +223,38 @@ def test_database(): instance.query(f"RESTORE DATABASE test FROM {backup_name}") assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + + +def test_zip_archive(): + backup_name = f"File('/backups/archive.zip')" + create_and_fill_table() + + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + instance.query(f"BACKUP TABLE test.table TO {backup_name}") + assert os.path.isfile( + os.path.join(os.path.join(instance.path, "backups/archive.zip")) + ) + + instance.query("DROP TABLE test.table") + assert instance.query("EXISTS test.table") == "0\n" + + instance.query(f"RESTORE TABLE test.table FROM {backup_name}") + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + + +def test_zip_archive_with_settings(): + backup_name = f"File('/backups/archive_with_settings.zip')" + create_and_fill_table() + + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + instance.query( + f"BACKUP TABLE test.table TO {backup_name} SETTINGS compression_method='lzma', compression_level=3, password='qwerty'" + ) + + instance.query("DROP TABLE test.table") + assert instance.query("EXISTS test.table") == "0\n" + + instance.query( + f"RESTORE TABLE test.table FROM {backup_name} SETTINGS password='qwerty'" + ) + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" diff --git a/tests/integration/test_backup_with_other_granularity/test.py b/tests/integration/test_backup_with_other_granularity/test.py index 0f35c0f849e..2fd5e65b123 100644 --- a/tests/integration/test_backup_with_other_granularity/test.py +++ b/tests/integration/test_backup_with_other_granularity/test.py @@ -4,13 +4,31 @@ from helpers.cluster import 
ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='19.4.5.35', - stay_alive=True, with_installed_binary=True) -node2 = cluster.add_instance('node2', with_zookeeper=True, image='yandex/clickhouse-server', tag='19.4.5.35', - stay_alive=True, with_installed_binary=True) -node3 = cluster.add_instance('node3', with_zookeeper=True, image='yandex/clickhouse-server', tag='19.4.5.35', - stay_alive=True, with_installed_binary=True) -node4 = cluster.add_instance('node4') +node1 = cluster.add_instance( + "node1", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="19.4.5.35", + stay_alive=True, + with_installed_binary=True, +) +node2 = cluster.add_instance( + "node2", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="19.4.5.35", + stay_alive=True, + with_installed_binary=True, +) +node3 = cluster.add_instance( + "node3", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="19.4.5.35", + stay_alive=True, + with_installed_binary=True, +) +node4 = cluster.add_instance("node4") @pytest.fixture(scope="module") @@ -24,7 +42,9 @@ def started_cluster(): def test_backup_from_old_version(started_cluster): - node1.query("CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()") + node1.query( + "CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()" + ) node1.query("INSERT INTO source_table VALUES(1, '1')") @@ -37,14 +57,24 @@ def test_backup_from_old_version(started_cluster): node1.restart_with_latest_version() node1.query( - "CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table1', '1') ORDER BY tuple()") + "CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table1', '1') ORDER BY tuple()" + ) node1.query("INSERT INTO dest_table VALUES(2, '2', 'Hello')") assert node1.query("SELECT COUNT() FROM dest_table") == "1\n" - node1.exec_in_container(['find', '/var/lib/clickhouse/shadow/1/data/default/source_table']) - node1.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/', '/var/lib/clickhouse/data/default/dest_table/detached']) + node1.exec_in_container( + ["find", "/var/lib/clickhouse/shadow/1/data/default/source_table"] + ) + node1.exec_in_container( + [ + "cp", + "-r", + "/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/", + "/var/lib/clickhouse/data/default/dest_table/detached", + ] + ) assert node1.query("SELECT COUNT() FROM dest_table") == "1\n" @@ -62,7 +92,9 @@ def test_backup_from_old_version(started_cluster): def test_backup_from_old_version_setting(started_cluster): - node2.query("CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()") + node2.query( + "CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()" + ) node2.query("INSERT INTO source_table VALUES(1, '1')") @@ -75,13 +107,21 @@ def test_backup_from_old_version_setting(started_cluster): node2.restart_with_latest_version() node2.query( - "CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table2', '1') ORDER BY tuple() SETTINGS enable_mixed_granularity_parts = 1") + "CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table2', '1') ORDER BY tuple() SETTINGS enable_mixed_granularity_parts = 1" + ) node2.query("INSERT INTO dest_table VALUES(2, '2', 'Hello')") assert 
node2.query("SELECT COUNT() FROM dest_table") == "1\n" - node2.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/', '/var/lib/clickhouse/data/default/dest_table/detached']) + node2.exec_in_container( + [ + "cp", + "-r", + "/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/", + "/var/lib/clickhouse/data/default/dest_table/detached", + ] + ) assert node2.query("SELECT COUNT() FROM dest_table") == "1\n" @@ -99,7 +139,9 @@ def test_backup_from_old_version_setting(started_cluster): def test_backup_from_old_version_config(started_cluster): - node3.query("CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()") + node3.query( + "CREATE TABLE source_table(A Int64, B String) Engine = MergeTree order by tuple()" + ) node3.query("INSERT INTO source_table VALUES(1, '1')") @@ -110,19 +152,29 @@ def test_backup_from_old_version_config(started_cluster): node3.query("ALTER TABLE source_table FREEZE PARTITION tuple();") def callback(n): - n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", - "1") + n.replace_config( + "/etc/clickhouse-server/merge_tree_settings.xml", + "1", + ) node3.restart_with_latest_version(callback_onstop=callback) node3.query( - "CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table3', '1') ORDER BY tuple() SETTINGS enable_mixed_granularity_parts = 1") + "CREATE TABLE dest_table (A Int64, B String, Y String) ENGINE = ReplicatedMergeTree('/test/dest_table3', '1') ORDER BY tuple() SETTINGS enable_mixed_granularity_parts = 1" + ) node3.query("INSERT INTO dest_table VALUES(2, '2', 'Hello')") assert node3.query("SELECT COUNT() FROM dest_table") == "1\n" - node3.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/', '/var/lib/clickhouse/data/default/dest_table/detached']) + node3.exec_in_container( + [ + "cp", + "-r", + "/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/", + "/var/lib/clickhouse/data/default/dest_table/detached", + ] + ) assert node3.query("SELECT COUNT() FROM dest_table") == "1\n" @@ -140,9 +192,13 @@ def test_backup_from_old_version_config(started_cluster): def test_backup_and_alter(started_cluster): - node4.query("CREATE DATABASE test ENGINE=Ordinary") # Different path in shadow/ with Atomic + node4.query( + "CREATE DATABASE test ENGINE=Ordinary" + ) # Different path in shadow/ with Atomic - node4.query("CREATE TABLE test.backup_table(A Int64, B String, C Date) Engine = MergeTree order by tuple()") + node4.query( + "CREATE TABLE test.backup_table(A Int64, B String, C Date) Engine = MergeTree order by tuple()" + ) node4.query("INSERT INTO test.backup_table VALUES(2, '2', toDate('2019-10-01'))") @@ -154,7 +210,14 @@ def test_backup_and_alter(started_cluster): node4.query("ALTER TABLE test.backup_table DROP PARTITION tuple()") - node4.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/test/backup_table/all_1_1_0/', '/var/lib/clickhouse/data/test/backup_table/detached']) + node4.exec_in_container( + [ + "cp", + "-r", + "/var/lib/clickhouse/shadow/1/data/test/backup_table/all_1_1_0/", + "/var/lib/clickhouse/data/test/backup_table/detached", + ] + ) node4.query("ALTER TABLE test.backup_table ATTACH PARTITION tuple()") diff --git a/tests/integration/test_backward_compatibility/test.py b/tests/integration/test_backward_compatibility/test.py index a8f4968956c..01ed02720f8 100644 --- a/tests/integration/test_backward_compatibility/test.py +++ 
b/tests/integration/test_backward_compatibility/test.py @@ -3,20 +3,29 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='19.17.8.54', stay_alive=True, with_installed_binary=True) -node2 = cluster.add_instance('node2', main_configs=['configs/wide_parts_only.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="19.17.8.54", + stay_alive=True, + with_installed_binary=True, +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/wide_parts_only.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") def start_cluster(): try: cluster.start() - create_query = '''CREATE TABLE t(date Date, id UInt32) + create_query = """CREATE TABLE t(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/t', '{}') PARTITION BY toYYYYMM(date) - ORDER BY id''' + ORDER BY id""" node1.query(create_query.format(1)) - node1.query("DETACH TABLE t") # stop being leader + node1.query("DETACH TABLE t") # stop being leader node2.query(create_query.format(2)) node1.query("ATTACH TABLE t") yield cluster diff --git a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py index fc8d27cfa16..35cdaeef9ac 100644 --- a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py +++ b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py @@ -3,9 +3,15 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__, name="aggregate_fixed_key") -node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='21.3', with_installed_binary=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) -node3 = cluster.add_instance('node3', with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="21.3", + with_installed_binary=True, +) +node2 = cluster.add_instance("node2", with_zookeeper=True) +node3 = cluster.add_instance("node3", with_zookeeper=True) @pytest.fixture(scope="module") @@ -38,8 +44,9 @@ def test_two_level_merge(start_cluster): # covers only the keys64 method for node in start_cluster.instances.values(): - print(node.query( - """ + print( + node.query( + """ SELECT throwIf(uniqExact(date) != count(), 'group by is borked') FROM ( @@ -58,4 +65,5 @@ def test_two_level_merge(start_cluster): max_threads = 2, prefer_localhost_replica = 0 """ - )) + ) + ) diff --git a/tests/integration/test_backward_compatibility/test_aggregate_function_state_avg.py b/tests/integration/test_backward_compatibility/test_aggregate_function_state_avg.py index feaf96c439d..b3ad9011239 100644 --- a/tests/integration/test_backward_compatibility/test_aggregate_function_state_avg.py +++ b/tests/integration/test_backward_compatibility/test_aggregate_function_state_avg.py @@ -3,14 +3,24 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__, name="aggregate_state") -node1 = cluster.add_instance('node1', - with_zookeeper=False, image='yandex/clickhouse-server', tag='19.16.9.37', stay_alive=True, - with_installed_binary=True) -node2 = cluster.add_instance('node2', - with_zookeeper=False, image='yandex/clickhouse-server', tag='19.16.9.37', stay_alive=True, - 
with_installed_binary=True) -node3 = cluster.add_instance('node3', with_zookeeper=False) -node4 = cluster.add_instance('node4', with_zookeeper=False) +node1 = cluster.add_instance( + "node1", + with_zookeeper=False, + image="yandex/clickhouse-server", + tag="19.16.9.37", + stay_alive=True, + with_installed_binary=True, +) +node2 = cluster.add_instance( + "node2", + with_zookeeper=False, + image="yandex/clickhouse-server", + tag="19.16.9.37", + stay_alive=True, + with_installed_binary=True, +) +node3 = cluster.add_instance("node3", with_zookeeper=False) +node4 = cluster.add_instance("node4", with_zookeeper=False) @pytest.fixture(scope="module") @@ -27,6 +37,7 @@ def start_cluster(): # TODO Implement versioning of serialization format for aggregate function states. # NOTE This test is too ad-hoc. + def test_backward_compatability(start_cluster): node1.query("create table tab (x UInt64) engine = Memory") node2.query("create table tab (x UInt64) engine = Memory") @@ -38,24 +49,34 @@ def test_backward_compatability(start_cluster): node3.query("INSERT INTO tab VALUES (3)") node4.query("INSERT INTO tab VALUES (4)") - assert (node1.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == '2.5\n') - assert (node2.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == '2.5\n') - assert (node3.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == '2.5\n') - assert (node4.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == '2.5\n') + assert ( + node1.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == "2.5\n" + ) + assert ( + node2.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == "2.5\n" + ) + assert ( + node3.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == "2.5\n" + ) + assert ( + node4.query("SELECT avg(x) FROM remote('node{1..4}', default, tab)") == "2.5\n" + ) # Also check with persisted aggregate function state node1.query("create table state (x AggregateFunction(avg, UInt64)) engine = Log") - node1.query("INSERT INTO state SELECT avgState(arrayJoin(CAST([1, 2, 3, 4] AS Array(UInt64))))") + node1.query( + "INSERT INTO state SELECT avgState(arrayJoin(CAST([1, 2, 3, 4] AS Array(UInt64))))" + ) - assert (node1.query("SELECT avgMerge(x) FROM state") == '2.5\n') + assert node1.query("SELECT avgMerge(x) FROM state") == "2.5\n" node1.restart_with_latest_version() - assert (node1.query("SELECT avgMerge(x) FROM state") == '2.5\n') + assert node1.query("SELECT avgMerge(x) FROM state") == "2.5\n" node1.query("drop table tab") node1.query("drop table state") node2.query("drop table tab") node3.query("drop table tab") - node4.query("drop table tab") \ No newline at end of file + node4.query("drop table tab") diff --git a/tests/integration/test_backward_compatibility/test_cte_distributed.py b/tests/integration/test_backward_compatibility/test_cte_distributed.py index 3aec527524b..89a565b4b37 100644 --- a/tests/integration/test_backward_compatibility/test_cte_distributed.py +++ b/tests/integration/test_backward_compatibility/test_cte_distributed.py @@ -3,10 +3,15 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__, name="cte_distributed") -node1 = cluster.add_instance('node1', with_zookeeper=False) -node2 = cluster.add_instance('node2', - with_zookeeper=False, image='yandex/clickhouse-server', tag='21.7.3.14', stay_alive=True, - with_installed_binary=True) +node1 = cluster.add_instance("node1", with_zookeeper=False) +node2 = cluster.add_instance( + "node2", + 
with_zookeeper=False, + image="yandex/clickhouse-server", + tag="21.7.3.14", + stay_alive=True, + with_installed_binary=True, +) @pytest.fixture(scope="module") @@ -19,9 +24,9 @@ def start_cluster(): cluster.shutdown() - def test_cte_distributed(start_cluster): - node2.query(""" + node2.query( + """ WITH quantile(0.05)(cnt) as p05, quantile(0.95)(cnt) as p95, @@ -35,9 +40,11 @@ FROM ( count() as cnt FROM remote('node{1,2}', numbers(10)) GROUP BY number -)""") +)""" + ) - node1.query(""" + node1.query( + """ WITH quantile(0.05)(cnt) as p05, quantile(0.95)(cnt) as p95, @@ -51,4 +58,5 @@ FROM ( count() as cnt FROM remote('node{1,2}', numbers(10)) GROUP BY number -)""") +)""" + ) diff --git a/tests/integration/test_backward_compatibility/test_data_skipping_indices.py b/tests/integration/test_backward_compatibility/test_data_skipping_indices.py index db6a3eb7a08..60d709c257f 100644 --- a/tests/integration/test_backward_compatibility/test_data_skipping_indices.py +++ b/tests/integration/test_backward_compatibility/test_data_skipping_indices.py @@ -6,7 +6,13 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__, name="skipping_indices") -node = cluster.add_instance('node', image='yandex/clickhouse-server', tag='21.6', stay_alive=True, with_installed_binary=True) +node = cluster.add_instance( + "node", + image="yandex/clickhouse-server", + tag="21.6", + stay_alive=True, + with_installed_binary=True, +) @pytest.fixture(scope="module") @@ -23,7 +29,8 @@ def start_cluster(): # restart_with_tagged_version(), since right now it is not possible to # switch to old tagged clickhouse version. def test_index(start_cluster): - node.query(""" + node.query( + """ CREATE TABLE data ( key Int, @@ -36,9 +43,12 @@ def test_index(start_cluster): INSERT INTO data SELECT number, number FROM numbers(10000); SELECT * FROM data WHERE value = 20000 SETTINGS force_data_skipping_indices = 'value_index' SETTINGS force_data_skipping_indices = 'value_index', max_rows_to_read=1; - """) + """ + ) node.restart_with_latest_version() - node.query(""" + node.query( + """ SELECT * FROM data WHERE value = 20000 SETTINGS force_data_skipping_indices = 'value_index' SETTINGS force_data_skipping_indices = 'value_index', max_rows_to_read=1; DROP TABLE data; - """) \ No newline at end of file + """ + ) diff --git a/tests/integration/test_backward_compatibility/test_detach_part_wrong_partition_id.py b/tests/integration/test_backward_compatibility/test_detach_part_wrong_partition_id.py index abebaaea8b8..cb9929db48b 100644 --- a/tests/integration/test_backward_compatibility/test_detach_part_wrong_partition_id.py +++ b/tests/integration/test_backward_compatibility/test_detach_part_wrong_partition_id.py @@ -4,7 +4,13 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__, name="detach") # Version 21.6.3.14 has incompatible partition id for tables with UUID in partition key. -node_21_6 = cluster.add_instance('node_21_6', image='yandex/clickhouse-server', tag='21.6.3.14', stay_alive=True, with_installed_binary=True) +node_21_6 = cluster.add_instance( + "node_21_6", + image="yandex/clickhouse-server", + tag="21.6.3.14", + stay_alive=True, + with_installed_binary=True, +) @pytest.fixture(scope="module") @@ -16,11 +22,16 @@ def start_cluster(): finally: cluster.shutdown() + def test_detach_part_wrong_partition_id(start_cluster): # Here we create table with partition by UUID. 
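    # The point of the test: 21.6.3.14 computes partition ids for UUID partition keys differently
    # from newer versions, so after the upgrade below the single level-0 part no longer matches
    # its partition id and ends up counted in system.detached_parts as broken.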
- node_21_6.query("create table tab (id UUID, value UInt32) engine = MergeTree PARTITION BY (id) order by tuple()") - node_21_6.query("insert into tab values ('61f0c404-5cb3-11e7-907b-a6006ad3dba0', 2)") + node_21_6.query( + "create table tab (id UUID, value UInt32) engine = MergeTree PARTITION BY (id) order by tuple()" + ) + node_21_6.query( + "insert into tab values ('61f0c404-5cb3-11e7-907b-a6006ad3dba0', 2)" + ) # After restart, partition id will be different. # There is a single 0-level part, which will become broken. @@ -29,7 +40,7 @@ def test_detach_part_wrong_partition_id(start_cluster): node_21_6.restart_with_latest_version() num_detached = node_21_6.query("select count() from system.detached_parts") - assert num_detached == '1\n' + assert num_detached == "1\n" node_21_6.restart_with_original_version() diff --git a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py index 9a7c7f73eb5..e98894d887a 100644 --- a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py +++ b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py @@ -3,10 +3,15 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__, name="aggregate_alias_column") -node1 = cluster.add_instance('node1', with_zookeeper=False) -node2 = cluster.add_instance('node2', - with_zookeeper=False, image='yandex/clickhouse-server', tag='21.7.2.7', stay_alive=True, - with_installed_binary=True) +node1 = cluster.add_instance("node1", with_zookeeper=False) +node2 = cluster.add_instance( + "node2", + with_zookeeper=False, + image="yandex/clickhouse-server", + tag="21.7.2.7", + stay_alive=True, + with_installed_binary=True, +) @pytest.fixture(scope="module") @@ -22,11 +27,11 @@ def start_cluster(): def test_select_aggregate_alias_column(start_cluster): node1.query("create table tab (x UInt64, x_alias UInt64 ALIAS x) engine = Memory") node2.query("create table tab (x UInt64, x_alias UInt64 ALIAS x) engine = Memory") - node1.query('insert into tab values (1)') - node2.query('insert into tab values (1)') + node1.query("insert into tab values (1)") + node2.query("insert into tab values (1)") node1.query("select sum(x_alias) from remote('node{1,2}', default, tab)") node2.query("select sum(x_alias) from remote('node{1,2}', default, tab)") node1.query("drop table tab") - node2.query("drop table tab") \ No newline at end of file + node2.query("drop table tab") diff --git a/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py b/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py index 54dd53c344e..8053ad417ec 100644 --- a/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py +++ b/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py @@ -3,11 +3,23 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__, name="short_strings") -node1 = cluster.add_instance('node1', with_zookeeper=False, image='yandex/clickhouse-server', tag='19.16.9.37', - stay_alive=True, with_installed_binary=True) -node2 = cluster.add_instance('node2', with_zookeeper=False, image='yandex/clickhouse-server', tag='19.16.9.37', - stay_alive=True, with_installed_binary=True) -node3 = cluster.add_instance('node3', with_zookeeper=False) +node1 = cluster.add_instance( + "node1", + with_zookeeper=False, + 
image="yandex/clickhouse-server", + tag="19.16.9.37", + stay_alive=True, + with_installed_binary=True, +) +node2 = cluster.add_instance( + "node2", + with_zookeeper=False, + image="yandex/clickhouse-server", + tag="19.16.9.37", + stay_alive=True, + with_installed_binary=True, +) +node3 = cluster.add_instance("node3", with_zookeeper=False) @pytest.fixture(scope="module") @@ -26,8 +38,9 @@ def test_backward_compatability(start_cluster): node1.query("insert into tab select number from numbers(50)") node2.query("insert into tab select number from numbers(1000000)") res = node3.query( - "select s, count() from remote('node{1,2}', default, tab) group by s order by toUInt64(s) limit 50") + "select s, count() from remote('node{1,2}', default, tab) group by s order by toUInt64(s) limit 50" + ) print(res) - assert res == ''.join('{}\t2\n'.format(i) for i in range(50)) + assert res == "".join("{}\t2\n".format(i) for i in range(50)) node1.query("drop table tab") node2.query("drop table tab") diff --git a/tests/integration/test_block_structure_mismatch/test.py b/tests/integration/test_block_structure_mismatch/test.py index 12f9bd090a3..b04607fc9d6 100644 --- a/tests/integration/test_block_structure_mismatch/test.py +++ b/tests/integration/test_block_structure_mismatch/test.py @@ -4,8 +4,8 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml']) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml']) +node1 = cluster.add_instance("node1", main_configs=["configs/remote_servers.xml"]) +node2 = cluster.add_instance("node2", main_configs=["configs/remote_servers.xml"]) # test reproducing issue https://github.com/ClickHouse/ClickHouse/issues/3162 @@ -15,7 +15,8 @@ def started_cluster(): cluster.start() for node in (node1, node2): - node.query(''' + node.query( + """ CREATE TABLE local_test ( t UInt64, date Date DEFAULT toDate(t/1000), @@ -26,9 +27,11 @@ CREATE TABLE local_test ( PARTITION BY toRelativeDayNum(date) ORDER BY (t) SETTINGS index_granularity=8192 - ''') + """ + ) - node.query(''' + node.query( + """ CREATE TABLE dist_test ( t UInt64, shard UInt64, @@ -36,7 +39,8 @@ CREATE TABLE dist_test ( col1 String, col2 String ) Engine = Distributed(testcluster, default, local_test, shard) - ''') + """ + ) yield cluster @@ -45,7 +49,15 @@ CREATE TABLE dist_test ( def test(started_cluster): - node1.query("INSERT INTO local_test (t, shard, col1, col2) VALUES (1000, 0, 'x', 'y')") - node2.query("INSERT INTO local_test (t, shard, col1, col2) VALUES (1000, 1, 'foo', 'bar')") - assert node1.query( - "SELECT col1, col2 FROM dist_test WHERE (t < 3600000) AND (col1 = 'foo') ORDER BY t ASC") == "foo\tbar\n" + node1.query( + "INSERT INTO local_test (t, shard, col1, col2) VALUES (1000, 0, 'x', 'y')" + ) + node2.query( + "INSERT INTO local_test (t, shard, col1, col2) VALUES (1000, 1, 'foo', 'bar')" + ) + assert ( + node1.query( + "SELECT col1, col2 FROM dist_test WHERE (t < 3600000) AND (col1 = 'foo') ORDER BY t ASC" + ) + == "foo\tbar\n" + ) diff --git a/tests/integration/test_broken_part_during_merge/test.py b/tests/integration/test_broken_part_during_merge/test.py index 1c03add49db..d7492be686b 100644 --- a/tests/integration/test_broken_part_during_merge/test.py +++ b/tests/integration/test_broken_part_during_merge/test.py @@ -8,7 +8,7 @@ import time cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) +node1 = 
cluster.add_instance("node1", with_zookeeper=True) @pytest.fixture(scope="module") @@ -22,36 +22,55 @@ def started_cluster(): def test_merge_and_part_corruption(started_cluster): - node1.query(''' + node1.query( + """ CREATE TABLE replicated_mt(date Date, id UInt32, value Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') ORDER BY id; - '''.format(replica=node1.name)) - + """.format( + replica=node1.name + ) + ) node1.query("SYSTEM STOP REPLICATION QUEUES replicated_mt") for i in range(4): - node1.query("INSERT INTO replicated_mt SELECT toDate('2019-10-01'), number, number * number FROM numbers ({f}, 100000)".format(f=i*100000)) + node1.query( + "INSERT INTO replicated_mt SELECT toDate('2019-10-01'), number, number * number FROM numbers ({f}, 100000)".format( + f=i * 100000 + ) + ) - assert node1.query("SELECT COUNT() FROM system.parts WHERE table='replicated_mt' AND active=1") == "4\n" + assert ( + node1.query( + "SELECT COUNT() FROM system.parts WHERE table='replicated_mt' AND active=1" + ) + == "4\n" + ) # Need to corrupt "border part" (left or right). If we will corrupt something in the middle # clickhouse will not consider merge as broken, because we have parts with the same min and max # block numbers. - corrupt_part_data_on_disk(node1, 'replicated_mt', 'all_3_3_0') + corrupt_part_data_on_disk(node1, "replicated_mt", "all_3_3_0") with Pool(1) as p: + def optimize_with_delay(x): node1.query("OPTIMIZE TABLE replicated_mt FINAL", timeout=30) # corrupt part after merge already assigned, but not started res_opt = p.apply_async(optimize_with_delay, (1,)) - node1.query("CHECK TABLE replicated_mt", settings={"check_query_single_value_result": 0}) + node1.query( + "CHECK TABLE replicated_mt", settings={"check_query_single_value_result": 0} + ) # start merge node1.query("SYSTEM START REPLICATION QUEUES replicated_mt") res_opt.get() # will hung if checked bug not fixed - node1.query("ALTER TABLE replicated_mt UPDATE value = 7 WHERE 1", settings={"mutations_sync": 2}, timeout=30) + node1.query( + "ALTER TABLE replicated_mt UPDATE value = 7 WHERE 1", + settings={"mutations_sync": 2}, + timeout=30, + ) assert node1.query("SELECT sum(value) FROM replicated_mt") == "2100000\n" - node1.query('DROP TABLE replicated_mt SYNC') + node1.query("DROP TABLE replicated_mt SYNC") diff --git a/tests/integration/test_buffer_profile/test.py b/tests/integration/test_buffer_profile/test.py index ae9220898ab..b1185493c47 100644 --- a/tests/integration/test_buffer_profile/test.py +++ b/tests/integration/test_buffer_profile/test.py @@ -9,12 +9,15 @@ from helpers.client import QueryRuntimeException cluster = ClickHouseCluster(__file__) -node_default = cluster.add_instance('node_default') -node_buffer_profile = cluster.add_instance('node_buffer_profile', - main_configs=['configs/buffer_profile.xml'], - user_configs=['configs/users.d/buffer_profile.xml']) +node_default = cluster.add_instance("node_default") +node_buffer_profile = cluster.add_instance( + "node_buffer_profile", + main_configs=["configs/buffer_profile.xml"], + user_configs=["configs/users.d/buffer_profile.xml"], +) -@pytest.fixture(scope='module', autouse=True) + +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -22,8 +25,10 @@ def start_cluster(): finally: cluster.shutdown() + def bootstrap(node): - node.query(""" + node.query( + """ CREATE TABLE data (key Int) Engine=MergeTree() ORDER BY key PARTITION BY key % 2; @@ -40,15 +45,20 @@ def bootstrap(node): ); INSERT INTO buffer 
SELECT * FROM numbers(100); - """) + """ + ) + def test_default_profile(): bootstrap(node_default) # flush the buffer - node_default.query('OPTIMIZE TABLE buffer') + node_default.query("OPTIMIZE TABLE buffer") + def test_buffer_profile(): bootstrap(node_buffer_profile) - with pytest.raises(QueryRuntimeException, match='Too many partitions for single INSERT block'): + with pytest.raises( + QueryRuntimeException, match="Too many partitions for single INSERT block" + ): # flush the buffer - node_buffer_profile.query('OPTIMIZE TABLE buffer') + node_buffer_profile.query("OPTIMIZE TABLE buffer") diff --git a/tests/integration/test_catboost_model_config_reload/test.py b/tests/integration/test_catboost_model_config_reload/test.py index 4059e739dc9..c12c28e2338 100644 --- a/tests/integration/test_catboost_model_config_reload/test.py +++ b/tests/integration/test_catboost_model_config_reload/test.py @@ -10,16 +10,24 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True, main_configs=['config/models_config.xml', 'config/catboost_lib.xml']) +node = cluster.add_instance( + "node", + stay_alive=True, + main_configs=["config/models_config.xml", "config/catboost_lib.xml"], +) def copy_file_to_container(local_path, dist_path, container_id): - os.system("docker cp {local} {cont_id}:{dist}".format(local=local_path, cont_id=container_id, dist=dist_path)) + os.system( + "docker cp {local} {cont_id}:{dist}".format( + local=local_path, cont_id=container_id, dist=dist_path + ) + ) -config = ''' +config = """ /etc/clickhouse-server/model/{model_config} -''' +""" @pytest.fixture(scope="module") @@ -27,7 +35,11 @@ def started_cluster(): try: cluster.start() - copy_file_to_container(os.path.join(SCRIPT_DIR, 'model/.'), '/etc/clickhouse-server/model', node.docker_id) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "model/."), + "/etc/clickhouse-server/model", + node.docker_id, + ) node.restart_clickhouse() yield cluster @@ -37,7 +49,10 @@ def started_cluster(): def change_config(model_config): - node.replace_config("/etc/clickhouse-server/config.d/models_config.xml", config.format(model_config=model_config)) + node.replace_config( + "/etc/clickhouse-server/config.d/models_config.xml", + config.format(model_config=model_config), + ) node.query("SYSTEM RELOAD CONFIG;") @@ -57,5 +72,6 @@ def test(started_cluster): node.query("SELECT modelEvaluate('model2', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);") # Check that the old model was unloaded. 
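    # model2 evaluates successfully after the reload, while model1 is expected to be gone, which
    # is why the next call goes through query_and_get_error() rather than query().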
- node.query_and_get_error("SELECT modelEvaluate('model1', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);") - + node.query_and_get_error( + "SELECT modelEvaluate('model1', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);" + ) diff --git a/tests/integration/test_catboost_model_first_evaluate/test.py b/tests/integration/test_catboost_model_first_evaluate/test.py index 7e498ccfe21..b15f481c0e9 100644 --- a/tests/integration/test_catboost_model_first_evaluate/test.py +++ b/tests/integration/test_catboost_model_first_evaluate/test.py @@ -10,11 +10,17 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True, main_configs=['config/models_config.xml']) +node = cluster.add_instance( + "node", stay_alive=True, main_configs=["config/models_config.xml"] +) def copy_file_to_container(local_path, dist_path, container_id): - os.system("docker cp {local} {cont_id}:{dist}".format(local=local_path, cont_id=container_id, dist=dist_path)) + os.system( + "docker cp {local} {cont_id}:{dist}".format( + local=local_path, cont_id=container_id, dist=dist_path + ) + ) @pytest.fixture(scope="module") @@ -22,7 +28,11 @@ def started_cluster(): try: cluster.start() - copy_file_to_container(os.path.join(SCRIPT_DIR, 'model/.'), '/etc/clickhouse-server/model', node.docker_id) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "model/."), + "/etc/clickhouse-server/model", + node.docker_id, + ) node.restart_clickhouse() yield cluster @@ -36,4 +46,3 @@ def test(started_cluster): pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") node.query("select modelEvaluate('titanic', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);") - diff --git a/tests/integration/test_catboost_model_reload/test.py b/tests/integration/test_catboost_model_reload/test.py index 3d88c19cd2c..3bf7ca18cdd 100644 --- a/tests/integration/test_catboost_model_reload/test.py +++ b/tests/integration/test_catboost_model_reload/test.py @@ -10,17 +10,31 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True, main_configs=['config/models_config.xml', 'config/catboost_lib.xml']) +node = cluster.add_instance( + "node", + stay_alive=True, + main_configs=["config/models_config.xml", "config/catboost_lib.xml"], +) + def copy_file_to_container(local_path, dist_path, container_id): - os.system("docker cp {local} {cont_id}:{dist}".format(local=local_path, cont_id=container_id, dist=dist_path)) + os.system( + "docker cp {local} {cont_id}:{dist}".format( + local=local_path, cont_id=container_id, dist=dist_path + ) + ) + @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - copy_file_to_container(os.path.join(SCRIPT_DIR, 'model/.'), '/etc/clickhouse-server/model', node.docker_id) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "model/."), + "/etc/clickhouse-server/model", + node.docker_id, + ) node.query("CREATE TABLE binary (x UInt64, y UInt64) ENGINE = TinyLog()") node.query("INSERT INTO binary VALUES (1, 1), (1, 0), (0, 1), (0, 0)") @@ -31,50 +45,88 @@ def started_cluster(): finally: cluster.shutdown() + def test_model_reload(started_cluster): if node.is_built_with_memory_sanitizer(): pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") - node.exec_in_container(["bash", "-c", "rm -f /etc/clickhouse-server/model/model.cbm"]) - 
node.exec_in_container(["bash", "-c", "ln /etc/clickhouse-server/model/conjunction.cbm /etc/clickhouse-server/model/model.cbm"]) + node.exec_in_container( + ["bash", "-c", "rm -f /etc/clickhouse-server/model/model.cbm"] + ) + node.exec_in_container( + [ + "bash", + "-c", + "ln /etc/clickhouse-server/model/conjunction.cbm /etc/clickhouse-server/model/model.cbm", + ] + ) node.query("SYSTEM RELOAD MODEL model") - result = node.query(""" + result = node.query( + """ WITH modelEvaluate('model', toFloat64(x), toFloat64(y)) as prediction, exp(prediction) / (1 + exp(prediction)) as probability SELECT if(probability > 0.5, 1, 0) FROM binary; - """) - assert result == '1\n0\n0\n0\n' + """ + ) + assert result == "1\n0\n0\n0\n" node.exec_in_container(["bash", "-c", "rm /etc/clickhouse-server/model/model.cbm"]) - node.exec_in_container(["bash", "-c", "ln /etc/clickhouse-server/model/disjunction.cbm /etc/clickhouse-server/model/model.cbm"]) + node.exec_in_container( + [ + "bash", + "-c", + "ln /etc/clickhouse-server/model/disjunction.cbm /etc/clickhouse-server/model/model.cbm", + ] + ) node.query("SYSTEM RELOAD MODEL model") - result = node.query(""" + result = node.query( + """ WITH modelEvaluate('model', toFloat64(x), toFloat64(y)) as prediction, exp(prediction) / (1 + exp(prediction)) as probability SELECT if(probability > 0.5, 1, 0) FROM binary; - """) - assert result == '1\n1\n1\n0\n' + """ + ) + assert result == "1\n1\n1\n0\n" + def test_models_reload(started_cluster): if node.is_built_with_memory_sanitizer(): pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") - node.exec_in_container(["bash", "-c", "rm -f /etc/clickhouse-server/model/model.cbm"]) - node.exec_in_container(["bash", "-c", "ln /etc/clickhouse-server/model/conjunction.cbm /etc/clickhouse-server/model/model.cbm"]) + node.exec_in_container( + ["bash", "-c", "rm -f /etc/clickhouse-server/model/model.cbm"] + ) + node.exec_in_container( + [ + "bash", + "-c", + "ln /etc/clickhouse-server/model/conjunction.cbm /etc/clickhouse-server/model/model.cbm", + ] + ) node.query("SYSTEM RELOAD MODELS") - result = node.query(""" + result = node.query( + """ WITH modelEvaluate('model', toFloat64(x), toFloat64(y)) as prediction, exp(prediction) / (1 + exp(prediction)) as probability SELECT if(probability > 0.5, 1, 0) FROM binary; - """) - assert result == '1\n0\n0\n0\n' + """ + ) + assert result == "1\n0\n0\n0\n" node.exec_in_container(["bash", "-c", "rm /etc/clickhouse-server/model/model.cbm"]) - node.exec_in_container(["bash", "-c", "ln /etc/clickhouse-server/model/disjunction.cbm /etc/clickhouse-server/model/model.cbm"]) + node.exec_in_container( + [ + "bash", + "-c", + "ln /etc/clickhouse-server/model/disjunction.cbm /etc/clickhouse-server/model/model.cbm", + ] + ) node.query("SYSTEM RELOAD MODELS") - result = node.query(""" + result = node.query( + """ WITH modelEvaluate('model', toFloat64(x), toFloat64(y)) as prediction, exp(prediction) / (1 + exp(prediction)) as probability SELECT if(probability > 0.5, 1, 0) FROM binary; - """) - assert result == '1\n1\n1\n0\n' + """ + ) + assert result == "1\n1\n1\n0\n" diff --git a/tests/integration/test_cgroup_limit/test.py b/tests/integration/test_cgroup_limit/test.py index c3a92bee032..f6392eca4d7 100644 --- a/tests/integration/test_cgroup_limit/test.py +++ b/tests/integration/test_cgroup_limit/test.py @@ -6,32 +6,50 @@ import subprocess from tempfile import NamedTemporaryFile import pytest + def run_command_in_container(cmd, *args): # /clickhouse is mounted by integration tests 
runner - alternative_binary = os.getenv('CLICKHOUSE_BINARY', '/clickhouse') + alternative_binary = os.getenv("CLICKHOUSE_BINARY", "/clickhouse") if alternative_binary: args += ( - '--volume', f'{alternative_binary}:/usr/bin/clickhouse', + "--volume", + f"{alternative_binary}:/usr/bin/clickhouse", ) - return subprocess.check_output(['docker', 'run', '--rm', - *args, - 'ubuntu:20.04', - 'sh', '-c', cmd, - ]) + return subprocess.check_output( + [ + "docker", + "run", + "--rm", + *args, + "ubuntu:20.04", + "sh", + "-c", + cmd, + ] + ) + def run_with_cpu_limit(cmd, num_cpus, *args): args += ( - '--cpus', f'{num_cpus}', + "--cpus", + f"{num_cpus}", ) return run_command_in_container(cmd, *args) + def test_cgroup_cpu_limit(): for num_cpus in (1, 2, 4, 2.8): - result = run_with_cpu_limit('clickhouse local -q "select value from system.settings where name=\'max_threads\'"', num_cpus) + result = run_with_cpu_limit( + "clickhouse local -q \"select value from system.settings where name='max_threads'\"", + num_cpus, + ) expect_output = (r"\'auto({})\'".format(math.ceil(num_cpus))).encode() - assert result.strip() == expect_output, f"fail for cpu limit={num_cpus}, result={result.strip()}, expect={expect_output}" + assert ( + result.strip() == expect_output + ), f"fail for cpu limit={num_cpus}, result={result.strip()}, expect={expect_output}" + # For manual run -if __name__ == '__main__': +if __name__ == "__main__": test_cgroup_cpu_limit() diff --git a/tests/integration/test_check_table/test.py b/tests/integration/test_check_table/test.py index b184813d24f..613ac3fb35f 100644 --- a/tests/integration/test_check_table/test.py +++ b/tests/integration/test_check_table/test.py @@ -4,8 +4,8 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) +node2 = cluster.add_instance("node2", with_zookeeper=True) @pytest.fixture(scope="module") @@ -21,140 +21,246 @@ def started_cluster(): def corrupt_data_part_on_disk(node, table, part_name): part_path = node.query( - "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format(table, part_name)).strip() - node.exec_in_container(['bash', '-c', - 'cd {p} && ls *.bin | head -n 1 | xargs -I{{}} sh -c \'echo "1" >> $1\' -- {{}}'.format( - p=part_path)], privileged=True) + "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format( + table, part_name + ) + ).strip() + node.exec_in_container( + [ + "bash", + "-c", + "cd {p} && ls *.bin | head -n 1 | xargs -I{{}} sh -c 'echo \"1\" >> $1' -- {{}}".format( + p=part_path + ), + ], + privileged=True, + ) def remove_checksums_on_disk(node, table, part_name): part_path = node.query( - "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format(table, part_name)).strip() - node.exec_in_container(['bash', '-c', 'rm -r {p}/checksums.txt'.format(p=part_path)], privileged=True) + "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format( + table, part_name + ) + ).strip() + node.exec_in_container( + ["bash", "-c", "rm -r {p}/checksums.txt".format(p=part_path)], privileged=True + ) def remove_part_from_disk(node, table, part_name): part_path = node.query( - "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format(table, part_name)).strip() + "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format( + table, part_name + ) + ).strip() if not 
part_path: raise Exception("Part " + part_name + "doesn't exist") - node.exec_in_container(['bash', '-c', 'rm -r {p}/*'.format(p=part_path)], privileged=True) + node.exec_in_container( + ["bash", "-c", "rm -r {p}/*".format(p=part_path)], privileged=True + ) def test_check_normal_table_corruption(started_cluster): node1.query("DROP TABLE IF EXISTS non_replicated_mt") - node1.query(''' + node1.query( + """ CREATE TABLE non_replicated_mt(date Date, id UInt32, value Int32) ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id SETTINGS min_bytes_for_wide_part=0; - ''') + """ + ) - node1.query("INSERT INTO non_replicated_mt VALUES (toDate('2019-02-01'), 1, 10), (toDate('2019-02-01'), 2, 12)") - assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201902", - settings={"check_query_single_value_result": 0}) == "201902_1_1_0\t1\t\n" + node1.query( + "INSERT INTO non_replicated_mt VALUES (toDate('2019-02-01'), 1, 10), (toDate('2019-02-01'), 2, 12)" + ) + assert ( + node1.query( + "CHECK TABLE non_replicated_mt PARTITION 201902", + settings={"check_query_single_value_result": 0}, + ) + == "201902_1_1_0\t1\t\n" + ) remove_checksums_on_disk(node1, "non_replicated_mt", "201902_1_1_0") - assert node1.query("CHECK TABLE non_replicated_mt", settings={ - "check_query_single_value_result": 0}).strip() == "201902_1_1_0\t1\tChecksums recounted and written to disk." + assert ( + node1.query( + "CHECK TABLE non_replicated_mt", + settings={"check_query_single_value_result": 0}, + ).strip() + == "201902_1_1_0\t1\tChecksums recounted and written to disk." + ) assert node1.query("SELECT COUNT() FROM non_replicated_mt") == "2\n" remove_checksums_on_disk(node1, "non_replicated_mt", "201902_1_1_0") - assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201902", settings={ - "check_query_single_value_result": 0}).strip() == "201902_1_1_0\t1\tChecksums recounted and written to disk." + assert ( + node1.query( + "CHECK TABLE non_replicated_mt PARTITION 201902", + settings={"check_query_single_value_result": 0}, + ).strip() + == "201902_1_1_0\t1\tChecksums recounted and written to disk." + ) assert node1.query("SELECT COUNT() FROM non_replicated_mt") == "2\n" corrupt_data_part_on_disk(node1, "non_replicated_mt", "201902_1_1_0") - assert node1.query("CHECK TABLE non_replicated_mt", settings={ - "check_query_single_value_result": 0}).strip() == "201902_1_1_0\t0\tCannot read all data. Bytes read: 2. Bytes expected: 25." + assert ( + node1.query( + "CHECK TABLE non_replicated_mt", + settings={"check_query_single_value_result": 0}, + ).strip() + == "201902_1_1_0\t0\tCannot read all data. Bytes read: 2. Bytes expected: 25." + ) - assert node1.query("CHECK TABLE non_replicated_mt", settings={ - "check_query_single_value_result": 0}).strip() == "201902_1_1_0\t0\tCannot read all data. Bytes read: 2. Bytes expected: 25." + assert ( + node1.query( + "CHECK TABLE non_replicated_mt", + settings={"check_query_single_value_result": 0}, + ).strip() + == "201902_1_1_0\t0\tCannot read all data. Bytes read: 2. Bytes expected: 25." 
+ ) - node1.query("INSERT INTO non_replicated_mt VALUES (toDate('2019-01-01'), 1, 10), (toDate('2019-01-01'), 2, 12)") + node1.query( + "INSERT INTO non_replicated_mt VALUES (toDate('2019-01-01'), 1, 10), (toDate('2019-01-01'), 2, 12)" + ) - assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201901", - settings={"check_query_single_value_result": 0}) == "201901_2_2_0\t1\t\n" + assert ( + node1.query( + "CHECK TABLE non_replicated_mt PARTITION 201901", + settings={"check_query_single_value_result": 0}, + ) + == "201901_2_2_0\t1\t\n" + ) corrupt_data_part_on_disk(node1, "non_replicated_mt", "201901_2_2_0") remove_checksums_on_disk(node1, "non_replicated_mt", "201901_2_2_0") - assert node1.query("CHECK TABLE non_replicated_mt PARTITION 201901", settings={ - "check_query_single_value_result": 0}) == "201901_2_2_0\t0\tCheck of part finished with error: \\'Cannot read all data. Bytes read: 2. Bytes expected: 25.\\'\n" + assert ( + node1.query( + "CHECK TABLE non_replicated_mt PARTITION 201901", + settings={"check_query_single_value_result": 0}, + ) + == "201901_2_2_0\t0\tCheck of part finished with error: \\'Cannot read all data. Bytes read: 2. Bytes expected: 25.\\'\n" + ) def test_check_replicated_table_simple(started_cluster): for node in [node1, node2]: node.query("DROP TABLE IF EXISTS replicated_mt") - node.query(''' + node.query( + """ CREATE TABLE replicated_mt(date Date, id UInt32, value Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') PARTITION BY toYYYYMM(date) ORDER BY id; - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) - node1.query("INSERT INTO replicated_mt VALUES (toDate('2019-02-01'), 1, 10), (toDate('2019-02-01'), 2, 12)") + node1.query( + "INSERT INTO replicated_mt VALUES (toDate('2019-02-01'), 1, 10), (toDate('2019-02-01'), 2, 12)" + ) node2.query("SYSTEM SYNC REPLICA replicated_mt") assert node1.query("SELECT count() from replicated_mt") == "2\n" assert node2.query("SELECT count() from replicated_mt") == "2\n" - assert node1.query("CHECK TABLE replicated_mt", - settings={"check_query_single_value_result": 0}) == "201902_0_0_0\t1\t\n" - assert node2.query("CHECK TABLE replicated_mt", - settings={"check_query_single_value_result": 0}) == "201902_0_0_0\t1\t\n" + assert ( + node1.query( + "CHECK TABLE replicated_mt", settings={"check_query_single_value_result": 0} + ) + == "201902_0_0_0\t1\t\n" + ) + assert ( + node2.query( + "CHECK TABLE replicated_mt", settings={"check_query_single_value_result": 0} + ) + == "201902_0_0_0\t1\t\n" + ) - node2.query("INSERT INTO replicated_mt VALUES (toDate('2019-01-02'), 3, 10), (toDate('2019-01-02'), 4, 12)") + node2.query( + "INSERT INTO replicated_mt VALUES (toDate('2019-01-02'), 3, 10), (toDate('2019-01-02'), 4, 12)" + ) node1.query("SYSTEM SYNC REPLICA replicated_mt") assert node1.query("SELECT count() from replicated_mt") == "4\n" assert node2.query("SELECT count() from replicated_mt") == "4\n" - assert node1.query("CHECK TABLE replicated_mt PARTITION 201901", - settings={"check_query_single_value_result": 0}) == "201901_0_0_0\t1\t\n" - assert node2.query("CHECK TABLE replicated_mt PARTITION 201901", - settings={"check_query_single_value_result": 0}) == "201901_0_0_0\t1\t\n" + assert ( + node1.query( + "CHECK TABLE replicated_mt PARTITION 201901", + settings={"check_query_single_value_result": 0}, + ) + == "201901_0_0_0\t1\t\n" + ) + assert ( + node2.query( + "CHECK TABLE replicated_mt PARTITION 201901", + settings={"check_query_single_value_result": 0}, + ) + == 
"201901_0_0_0\t1\t\n" + ) def test_check_replicated_table_corruption(started_cluster): for node in [node1, node2]: node.query_with_retry("DROP TABLE IF EXISTS replicated_mt_1") - node.query_with_retry(''' + node.query_with_retry( + """ CREATE TABLE replicated_mt_1(date Date, id UInt32, value Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt_1', '{replica}') PARTITION BY toYYYYMM(date) ORDER BY id; - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) - node1.query("INSERT INTO replicated_mt_1 VALUES (toDate('2019-02-01'), 1, 10), (toDate('2019-02-01'), 2, 12)") - node1.query("INSERT INTO replicated_mt_1 VALUES (toDate('2019-01-02'), 3, 10), (toDate('2019-01-02'), 4, 12)") + node1.query( + "INSERT INTO replicated_mt_1 VALUES (toDate('2019-02-01'), 1, 10), (toDate('2019-02-01'), 2, 12)" + ) + node1.query( + "INSERT INTO replicated_mt_1 VALUES (toDate('2019-01-02'), 3, 10), (toDate('2019-01-02'), 4, 12)" + ) node2.query("SYSTEM SYNC REPLICA replicated_mt_1") assert node1.query("SELECT count() from replicated_mt_1") == "4\n" assert node2.query("SELECT count() from replicated_mt_1") == "4\n" part_name = node1.query_with_retry( - "SELECT name from system.parts where table = 'replicated_mt_1' and partition_id = '201901' and active = 1").strip() + "SELECT name from system.parts where table = 'replicated_mt_1' and partition_id = '201901' and active = 1" + ).strip() corrupt_data_part_on_disk(node1, "replicated_mt_1", part_name) - assert node1.query("CHECK TABLE replicated_mt_1 PARTITION 201901", settings={ - "check_query_single_value_result": 0}) == "{p}\t0\tPart {p} looks broken. Removing it and will try to fetch.\n".format( - p=part_name) + assert node1.query( + "CHECK TABLE replicated_mt_1 PARTITION 201901", + settings={"check_query_single_value_result": 0}, + ) == "{p}\t0\tPart {p} looks broken. Removing it and will try to fetch.\n".format( + p=part_name + ) node1.query_with_retry("SYSTEM SYNC REPLICA replicated_mt_1") - assert node1.query("CHECK TABLE replicated_mt_1 PARTITION 201901", - settings={"check_query_single_value_result": 0}) == "{}\t1\t\n".format(part_name) + assert node1.query( + "CHECK TABLE replicated_mt_1 PARTITION 201901", + settings={"check_query_single_value_result": 0}, + ) == "{}\t1\t\n".format(part_name) assert node1.query("SELECT count() from replicated_mt_1") == "4\n" remove_part_from_disk(node2, "replicated_mt_1", part_name) - assert node2.query("CHECK TABLE replicated_mt_1 PARTITION 201901", settings={ - "check_query_single_value_result": 0}) == "{p}\t0\tPart {p} looks broken. Removing it and will try to fetch.\n".format( - p=part_name) + assert node2.query( + "CHECK TABLE replicated_mt_1 PARTITION 201901", + settings={"check_query_single_value_result": 0}, + ) == "{p}\t0\tPart {p} looks broken. 
Removing it and will try to fetch.\n".format( + p=part_name + ) node1.query("SYSTEM SYNC REPLICA replicated_mt_1") - assert node1.query("CHECK TABLE replicated_mt_1 PARTITION 201901", - settings={"check_query_single_value_result": 0}) == "{}\t1\t\n".format(part_name) + assert node1.query( + "CHECK TABLE replicated_mt_1 PARTITION 201901", + settings={"check_query_single_value_result": 0}, + ) == "{}\t1\t\n".format(part_name) assert node1.query("SELECT count() from replicated_mt_1") == "4\n" diff --git a/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py b/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py index a79015835db..3c59d99b7fc 100644 --- a/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py +++ b/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py @@ -5,7 +5,7 @@ from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) @pytest.fixture(scope="module") @@ -29,14 +29,14 @@ def start_cluster(): # the table creation works. def test_cleanup_dir_after_bad_zk_conn(start_cluster): node1.query("CREATE DATABASE replica;") - query_create = '''CREATE TABLE replica.test + query_create = """CREATE TABLE replica.test ( id Int64, event_time DateTime ) Engine=ReplicatedMergeTree('/clickhouse/tables/replica/test', 'node1') PARTITION BY toYYYYMMDD(event_time) - ORDER BY id;''' + ORDER BY id;""" with PartitionManager() as pm: pm.drop_instance_zk_connections(node1) time.sleep(3) @@ -45,38 +45,54 @@ def test_cleanup_dir_after_bad_zk_conn(start_cluster): error = node1.query_and_get_error(query_create) assert "Directory for table data data/replica/test/ already exists" not in error node1.query_with_retry(query_create) - node1.query_with_retry('''INSERT INTO replica.test VALUES (1, now())''') - assert "1\n" in node1.query('''SELECT count() from replica.test FORMAT TSV''') + node1.query_with_retry("""INSERT INTO replica.test VALUES (1, now())""") + assert "1\n" in node1.query("""SELECT count() from replica.test FORMAT TSV""") node1.query("DROP TABLE replica.test SYNC") node1.query("DROP DATABASE replica") + def test_cleanup_dir_after_wrong_replica_name(start_cluster): node1.query_with_retry( - "CREATE TABLE IF NOT EXISTS test2_r1 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test2/', 'r1') ORDER BY n") + "CREATE TABLE IF NOT EXISTS test2_r1 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test2/', 'r1') ORDER BY n" + ) error = node1.query_and_get_error( - "CREATE TABLE test2_r2 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test2/', 'r1') ORDER BY n") + "CREATE TABLE test2_r2 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test2/', 'r1') ORDER BY n" + ) assert "already exists" in error node1.query_with_retry( - "CREATE TABLE IF NOT EXISTS test_r2 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test2/', 'r2') ORDER BY n") + "CREATE TABLE IF NOT EXISTS test_r2 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test2/', 'r2') ORDER BY n" + ) def test_cleanup_dir_after_wrong_zk_path(start_cluster): node1.query( - "CREATE TABLE test3_r1 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test3/', 'r1') ORDER BY n") + "CREATE TABLE test3_r1 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test3/', 'r1') ORDER BY n" + ) error = node1.query_and_get_error( - "CREATE TABLE test3_r2 (n UInt64) 
ENGINE=ReplicatedMergeTree('/clickhouse/tables/', 'r2') ORDER BY n") + "CREATE TABLE test3_r2 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/', 'r2') ORDER BY n" + ) assert "Cannot create" in error node1.query( - "CREATE TABLE test3_r2 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test3/', 'r2') ORDER BY n") + "CREATE TABLE test3_r2 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test3/', 'r2') ORDER BY n" + ) node1.query("DROP TABLE test3_r1 SYNC") node1.query("DROP TABLE test3_r2 SYNC") + def test_attach_without_zk(start_cluster): node1.query_with_retry( - "CREATE TABLE test4_r1 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test4/', 'r1') ORDER BY n") + "CREATE TABLE test4_r1 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test4/', 'r1') ORDER BY n" + ) node1.query("DETACH TABLE test4_r1") with PartitionManager() as pm: - pm._add_rule({'probability': 0.5, 'source': node1.ip_address, 'destination_port': 2181, 'action': 'DROP'}) + pm._add_rule( + { + "probability": 0.5, + "source": node1.ip_address, + "destination_port": 2181, + "action": "DROP", + } + ) try: node1.query("ATTACH TABLE test4_r1") except: diff --git a/tests/integration/test_cluster_all_replicas/test.py b/tests/integration/test_cluster_all_replicas/test.py index 7cb170ce52a..445eef64fcb 100644 --- a/tests/integration/test_cluster_all_replicas/test.py +++ b/tests/integration/test_cluster_all_replicas/test.py @@ -4,8 +4,12 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -18,5 +22,13 @@ def start_cluster(): def test_remote(start_cluster): - assert node1.query('''SELECT hostName() FROM clusterAllReplicas("two_shards", system.one)''') == 'node1\nnode2\n' - assert node1.query('''SELECT hostName() FROM cluster("two_shards", system.one)''') == 'node1\n' + assert ( + node1.query( + """SELECT hostName() FROM clusterAllReplicas("two_shards", system.one)""" + ) + == "node1\nnode2\n" + ) + assert ( + node1.query("""SELECT hostName() FROM cluster("two_shards", system.one)""") + == "node1\n" + ) diff --git a/tests/integration/test_cluster_copier/test.py b/tests/integration/test_cluster_copier/test.py index 3d28295d40e..14417f151ee 100644 --- a/tests/integration/test_cluster_copier/test.py +++ b/tests/integration/test_cluster_copier/test.py @@ -18,10 +18,13 @@ sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) COPYING_FAIL_PROBABILITY = 0.2 MOVING_FAIL_PROBABILITY = 0.2 -cluster = ClickHouseCluster(__file__, name='copier_test') +cluster = ClickHouseCluster(__file__, name="copier_test") + def generateRandomString(count): - return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(count)) + return "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(count) + ) def check_all_hosts_sucesfully_executed(tsv_content, num_hosts): @@ -45,26 +48,29 @@ def started_cluster(): global cluster try: clusters_schema = { - "0": { - "0": ["0", "1"], - "1": ["0"] - }, - "1": { - "0": ["0", "1"], - "1": ["0"] - } + "0": {"0": ["0", "1"], "1": ["0"]}, + "1": 
{"0": ["0", "1"], "1": ["0"]}, } for cluster_name, shards in clusters_schema.items(): for shard_name, replicas in shards.items(): for replica_name in replicas: name = "s{}_{}_{}".format(cluster_name, shard_name, replica_name) - cluster.add_instance(name, - main_configs=["configs/conf.d/query_log.xml", "configs/conf.d/ddl.xml", - "configs/conf.d/clusters.xml"], - user_configs=["configs/users.xml"], - macros={"cluster": cluster_name, "shard": shard_name, "replica": replica_name}, - with_zookeeper=True) + cluster.add_instance( + name, + main_configs=[ + "configs/conf.d/query_log.xml", + "configs/conf.d/ddl.xml", + "configs/conf.d/clusters.xml", + ], + user_configs=["configs/users.xml"], + macros={ + "cluster": cluster_name, + "shard": shard_name, + "replica": replica_name, + }, + with_zookeeper=True, + ) cluster.start() yield cluster @@ -74,7 +80,6 @@ def started_cluster(): class Task1: - def __init__(self, cluster): self.cluster = cluster self.zk_task_path = "/clickhouse-copier/task_simple_" + generateRandomString(10) @@ -82,36 +87,78 @@ class Task1: for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task0_description.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task0_description.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): - instance = cluster.instances['s0_0_0'] + instance = cluster.instances["s0_0_0"] for cluster_num in ["0", "1"]: - ddl_check_query(instance, "DROP DATABASE IF EXISTS default ON CLUSTER cluster{} SYNC".format(cluster_num)) - ddl_check_query(instance, - "CREATE DATABASE default ON CLUSTER cluster{} ".format( - cluster_num)) + ddl_check_query( + instance, + "DROP DATABASE IF EXISTS default ON CLUSTER cluster{} SYNC".format( + cluster_num + ), + ) + ddl_check_query( + instance, + "CREATE DATABASE default ON CLUSTER cluster{} ".format(cluster_num), + ) - ddl_check_query(instance, "CREATE TABLE hits ON CLUSTER cluster0 (d UInt64, d1 UInt64 MATERIALIZED d+1) " + - "ENGINE=ReplicatedMergeTree " + - "PARTITION BY d % 3 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16") - ddl_check_query(instance, - "CREATE TABLE hits_all ON CLUSTER cluster0 (d UInt64) ENGINE=Distributed(cluster0, default, hits, d)") - ddl_check_query(instance, - "CREATE TABLE hits_all ON CLUSTER cluster1 (d UInt64) ENGINE=Distributed(cluster1, default, hits, d + 1)") - instance.query("INSERT INTO hits_all SELECT * FROM system.numbers LIMIT 1002", - settings={"insert_distributed_sync": 1}) + ddl_check_query( + instance, + "CREATE TABLE hits ON CLUSTER cluster0 (d UInt64, d1 UInt64 MATERIALIZED d+1) " + + "ENGINE=ReplicatedMergeTree " + + "PARTITION BY d % 3 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16", + ) + ddl_check_query( + instance, + "CREATE TABLE hits_all ON CLUSTER cluster0 (d UInt64) ENGINE=Distributed(cluster0, default, hits, d)", + ) + ddl_check_query( + instance, + "CREATE TABLE hits_all ON CLUSTER cluster1 (d UInt64) ENGINE=Distributed(cluster1, default, hits, d + 1)", + ) + instance.query( + "INSERT INTO hits_all SELECT * FROM system.numbers LIMIT 1002", + settings={"insert_distributed_sync": 1}, + ) def check(self): 
- assert self.cluster.instances['s0_0_0'].query("SELECT count() FROM hits_all").strip() == "1002" - assert self.cluster.instances['s1_0_0'].query("SELECT count() FROM hits_all").strip() == "1002" + assert ( + self.cluster.instances["s0_0_0"] + .query("SELECT count() FROM hits_all") + .strip() + == "1002" + ) + assert ( + self.cluster.instances["s1_0_0"] + .query("SELECT count() FROM hits_all") + .strip() + == "1002" + ) - assert self.cluster.instances['s1_0_0'].query("SELECT DISTINCT d % 2 FROM hits").strip() == "1" - assert self.cluster.instances['s1_1_0'].query("SELECT DISTINCT d % 2 FROM hits").strip() == "0" + assert ( + self.cluster.instances["s1_0_0"] + .query("SELECT DISTINCT d % 2 FROM hits") + .strip() + == "1" + ) + assert ( + self.cluster.instances["s1_1_0"] + .query("SELECT DISTINCT d % 2 FROM hits") + .strip() + == "0" + ) - instance = self.cluster.instances['s0_0_0'] + instance = self.cluster.instances["s0_0_0"] ddl_check_query(instance, "DROP TABLE hits_all ON CLUSTER cluster0") ddl_check_query(instance, "DROP TABLE hits_all ON CLUSTER cluster1") ddl_check_query(instance, "DROP TABLE hits ON CLUSTER cluster0") @@ -119,124 +166,193 @@ class Task1: class Task2: - def __init__(self, cluster, unique_zk_path): self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_month_to_week_partition_" + generateRandomString(5) + self.zk_task_path = ( + "/clickhouse-copier/task_month_to_week_partition_" + generateRandomString(5) + ) self.unique_zk_path = generateRandomString(10) self.container_task_file = "/task_month_to_week_description.xml" for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_month_to_week_description.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_month_to_week_description.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. 
Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): - instance = cluster.instances['s0_0_0'] + instance = cluster.instances["s0_0_0"] for cluster_num in ["0", "1"]: - ddl_check_query(instance, "DROP DATABASE IF EXISTS default ON CLUSTER cluster{}".format(cluster_num)) - ddl_check_query(instance, - "CREATE DATABASE IF NOT EXISTS default ON CLUSTER cluster{}".format( - cluster_num)) + ddl_check_query( + instance, + "DROP DATABASE IF EXISTS default ON CLUSTER cluster{}".format( + cluster_num + ), + ) + ddl_check_query( + instance, + "CREATE DATABASE IF NOT EXISTS default ON CLUSTER cluster{}".format( + cluster_num + ), + ) - ddl_check_query(instance, - "CREATE TABLE a ON CLUSTER cluster0 (date Date, d UInt64, d1 UInt64 ALIAS d+1) " - "ENGINE=ReplicatedMergeTree('/clickhouse/tables/cluster_{cluster}/{shard}/" + self.unique_zk_path + "', " - "'{replica}', date, intHash64(d), (date, intHash64(d)), 8192)") - ddl_check_query(instance, - "CREATE TABLE a_all ON CLUSTER cluster0 (date Date, d UInt64) ENGINE=Distributed(cluster0, default, a, d)") + ddl_check_query( + instance, + "CREATE TABLE a ON CLUSTER cluster0 (date Date, d UInt64, d1 UInt64 ALIAS d+1) " + "ENGINE=ReplicatedMergeTree('/clickhouse/tables/cluster_{cluster}/{shard}/" + + self.unique_zk_path + + "', " + "'{replica}', date, intHash64(d), (date, intHash64(d)), 8192)", + ) + ddl_check_query( + instance, + "CREATE TABLE a_all ON CLUSTER cluster0 (date Date, d UInt64) ENGINE=Distributed(cluster0, default, a, d)", + ) instance.query( "INSERT INTO a_all SELECT toDate(17581 + number) AS date, number AS d FROM system.numbers LIMIT 85", - settings={"insert_distributed_sync": 1}) + settings={"insert_distributed_sync": 1}, + ) def check(self): - assert TSV(self.cluster.instances['s0_0_0'].query("SELECT count() FROM cluster(cluster0, default, a)")) == TSV( - "85\n") - assert TSV(self.cluster.instances['s1_0_0'].query( - "SELECT count(), uniqExact(date) FROM cluster(cluster1, default, b)")) == TSV("85\t85\n") + assert TSV( + self.cluster.instances["s0_0_0"].query( + "SELECT count() FROM cluster(cluster0, default, a)" + ) + ) == TSV("85\n") + assert TSV( + self.cluster.instances["s1_0_0"].query( + "SELECT count(), uniqExact(date) FROM cluster(cluster1, default, b)" + ) + ) == TSV("85\t85\n") - assert TSV(self.cluster.instances['s1_0_0'].query( - "SELECT DISTINCT jumpConsistentHash(intHash64(d), 2) FROM b")) == TSV("0\n") - assert TSV(self.cluster.instances['s1_1_0'].query( - "SELECT DISTINCT jumpConsistentHash(intHash64(d), 2) FROM b")) == TSV("1\n") + assert TSV( + self.cluster.instances["s1_0_0"].query( + "SELECT DISTINCT jumpConsistentHash(intHash64(d), 2) FROM b" + ) + ) == TSV("0\n") + assert TSV( + self.cluster.instances["s1_1_0"].query( + "SELECT DISTINCT jumpConsistentHash(intHash64(d), 2) FROM b" + ) + ) == TSV("1\n") - assert TSV(self.cluster.instances['s1_0_0'].query( - "SELECT uniqExact(partition) IN (12, 13) FROM system.parts WHERE active AND database='default' AND table='b'")) == TSV( - "1\n") - assert TSV(self.cluster.instances['s1_1_0'].query( - "SELECT uniqExact(partition) IN (12, 13) FROM system.parts WHERE active AND database='default' AND table='b'")) == TSV( - "1\n") + assert TSV( + self.cluster.instances["s1_0_0"].query( + "SELECT uniqExact(partition) IN (12, 13) FROM system.parts WHERE active AND database='default' AND table='b'" + ) + ) == TSV("1\n") + assert TSV( + self.cluster.instances["s1_1_0"].query( + "SELECT uniqExact(partition) IN (12, 13) FROM system.parts WHERE active AND 
database='default' AND table='b'" + ) + ) == TSV("1\n") - instance = cluster.instances['s0_0_0'] + instance = cluster.instances["s0_0_0"] ddl_check_query(instance, "DROP TABLE a ON CLUSTER cluster0") ddl_check_query(instance, "DROP TABLE b ON CLUSTER cluster1") class Task_test_block_size: - def __init__(self, cluster): self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_test_block_size_" + generateRandomString(5) + self.zk_task_path = ( + "/clickhouse-copier/task_test_block_size_" + generateRandomString(5) + ) self.rows = 1000000 self.container_task_file = "/task_test_block_size.xml" for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_test_block_size.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_test_block_size.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): - instance = cluster.instances['s0_0_0'] + instance = cluster.instances["s0_0_0"] - ddl_check_query(instance, """ + ddl_check_query( + instance, + """ CREATE TABLE test_block_size ON CLUSTER shard_0_0 (partition Date, d UInt64) ENGINE=ReplicatedMergeTree - ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d)""", 2) + ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d)""", + 2, + ) instance.query( "INSERT INTO test_block_size SELECT toDate(0) AS partition, number as d FROM system.numbers LIMIT {}".format( - self.rows)) + self.rows + ) + ) def check(self): - assert TSV(self.cluster.instances['s1_0_0'].query( - "SELECT count() FROM cluster(cluster1, default, test_block_size)")) == TSV("{}\n".format(self.rows)) + assert TSV( + self.cluster.instances["s1_0_0"].query( + "SELECT count() FROM cluster(cluster1, default, test_block_size)" + ) + ) == TSV("{}\n".format(self.rows)) - instance = cluster.instances['s0_0_0'] + instance = cluster.instances["s0_0_0"] ddl_check_query(instance, "DROP TABLE test_block_size ON CLUSTER shard_0_0", 2) ddl_check_query(instance, "DROP TABLE test_block_size ON CLUSTER cluster1") class Task_no_index: - def __init__(self, cluster): self.cluster = cluster - self.zk_task_path = "/clickhouse-copier/task_no_index_" + generateRandomString(5) + self.zk_task_path = "/clickhouse-copier/task_no_index_" + generateRandomString( + 5 + ) self.rows = 1000000 self.container_task_file = "/task_no_index.xml" for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_no_index.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_no_index.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. 
Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): - instance = cluster.instances['s0_0_0'] + instance = cluster.instances["s0_0_0"] instance.query("DROP TABLE IF EXISTS ontime SYNC") - instance.query("create table IF NOT EXISTS ontime (Year UInt16, FlightDate String) ENGINE = Memory") - instance.query("insert into ontime values (2016, 'test6'), (2017, 'test7'), (2018, 'test8')") + instance.query( + "create table IF NOT EXISTS ontime (Year UInt16, FlightDate String) ENGINE = Memory" + ) + instance.query( + "insert into ontime values (2016, 'test6'), (2017, 'test7'), (2018, 'test8')" + ) def check(self): - assert TSV(self.cluster.instances['s1_1_0'].query("SELECT Year FROM ontime22")) == TSV("2017\n") - instance = cluster.instances['s0_0_0'] + assert TSV( + self.cluster.instances["s1_1_0"].query("SELECT Year FROM ontime22") + ) == TSV("2017\n") + instance = cluster.instances["s0_0_0"] instance.query("DROP TABLE ontime") - instance = cluster.instances['s1_1_0'] + instance = cluster.instances["s1_1_0"] instance.query("DROP TABLE ontime22") class Task_no_arg: - def __init__(self, cluster): self.cluster = cluster self.zk_task_path = "/clickhouse-copier/task_no_arg" @@ -245,25 +361,35 @@ class Task_no_arg: for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_no_arg.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_no_arg.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): - instance = cluster.instances['s0_0_0'] + instance = cluster.instances["s0_0_0"] instance.query("DROP TABLE IF EXISTS copier_test1 SYNC") instance.query( - "create table if not exists copier_test1 (date Date, id UInt32) engine = MergeTree PARTITION BY date ORDER BY date SETTINGS index_granularity = 8192") + "create table if not exists copier_test1 (date Date, id UInt32) engine = MergeTree PARTITION BY date ORDER BY date SETTINGS index_granularity = 8192" + ) instance.query("insert into copier_test1 values ('2016-01-01', 10);") def check(self): - assert TSV(self.cluster.instances['s1_1_0'].query("SELECT date FROM copier_test1_1")) == TSV("2016-01-01\n") - instance = cluster.instances['s0_0_0'] + assert TSV( + self.cluster.instances["s1_1_0"].query("SELECT date FROM copier_test1_1") + ) == TSV("2016-01-01\n") + instance = cluster.instances["s0_0_0"] instance.query("DROP TABLE copier_test1 SYNC") - instance = cluster.instances['s1_1_0'] + instance = cluster.instances["s1_1_0"] instance.query("DROP TABLE copier_test1_1 SYNC") -class Task_non_partitioned_table: +class Task_non_partitioned_table: def __init__(self, cluster): self.cluster = cluster self.zk_task_path = "/clickhouse-copier/task_non_partitoned_table" @@ -272,25 +398,35 @@ class Task_non_partitioned_table: for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_non_partitioned_table.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. 
Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_non_partitioned_table.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): - instance = cluster.instances['s0_0_0'] + instance = cluster.instances["s0_0_0"] instance.query("DROP TABLE IF EXISTS copier_test1 SYNC") instance.query( - "create table copier_test1 (date Date, id UInt32) engine = MergeTree ORDER BY date SETTINGS index_granularity = 8192") + "create table copier_test1 (date Date, id UInt32) engine = MergeTree ORDER BY date SETTINGS index_granularity = 8192" + ) instance.query("insert into copier_test1 values ('2016-01-01', 10);") def check(self): - assert TSV(self.cluster.instances['s1_1_0'].query("SELECT date FROM copier_test1_1")) == TSV("2016-01-01\n") - instance = cluster.instances['s0_0_0'] + assert TSV( + self.cluster.instances["s1_1_0"].query("SELECT date FROM copier_test1_1") + ) == TSV("2016-01-01\n") + instance = cluster.instances["s0_0_0"] instance.query("DROP TABLE copier_test1") - instance = cluster.instances['s1_1_0'] + instance = cluster.instances["s1_1_0"] instance.query("DROP TABLE copier_test1_1") -class Task_self_copy: +class Task_self_copy: def __init__(self, cluster): self.cluster = cluster self.zk_task_path = "/clickhouse-copier/task_self_copy" @@ -298,26 +434,37 @@ class Task_self_copy: for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_self_copy.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_self_copy.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. 
Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): - instance = cluster.instances['s0_0_0'] + instance = cluster.instances["s0_0_0"] instance.query("DROP DATABASE IF EXISTS db1 SYNC") instance.query("DROP DATABASE IF EXISTS db2 SYNC") instance.query("CREATE DATABASE IF NOT EXISTS db1;") instance.query( - "CREATE TABLE IF NOT EXISTS db1.source_table (`a` Int8, `b` String, `c` Int8) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity = 8192") + "CREATE TABLE IF NOT EXISTS db1.source_table (`a` Int8, `b` String, `c` Int8) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity = 8192" + ) instance.query("CREATE DATABASE IF NOT EXISTS db2;") instance.query( - "CREATE TABLE IF NOT EXISTS db2.destination_table (`a` Int8, `b` String, `c` Int8) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity = 8192") + "CREATE TABLE IF NOT EXISTS db2.destination_table (`a` Int8, `b` String, `c` Int8) ENGINE = MergeTree PARTITION BY a ORDER BY a SETTINGS index_granularity = 8192" + ) instance.query("INSERT INTO db1.source_table VALUES (1, 'ClickHouse', 1);") instance.query("INSERT INTO db1.source_table VALUES (2, 'Copier', 2);") def check(self): - instance = cluster.instances['s0_0_0'] - assert TSV(instance.query("SELECT * FROM db2.destination_table ORDER BY a")) == TSV(instance.query("SELECT * FROM db1.source_table ORDER BY a")) - instance = cluster.instances['s0_0_0'] + instance = cluster.instances["s0_0_0"] + assert TSV( + instance.query("SELECT * FROM db2.destination_table ORDER BY a") + ) == TSV(instance.query("SELECT * FROM db1.source_table ORDER BY a")) + instance = cluster.instances["s0_0_0"] instance.query("DROP DATABASE IF EXISTS db1 SYNC") instance.query("DROP DATABASE IF EXISTS db2 SYNC") @@ -325,10 +472,9 @@ class Task_self_copy: def execute_task(started_cluster, task, cmd_options): task.start() - zk = started_cluster.get_kazoo_client('zoo1') + zk = started_cluster.get_kazoo_client("zoo1") print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])) - try: zk.delete("/clickhouse-copier", recursive=True) except kazoo.exceptions.NoNodeError: @@ -338,12 +484,20 @@ def execute_task(started_cluster, task, cmd_options): docker_api = started_cluster.docker_client.api copiers_exec_ids = [] - cmd = ['/usr/bin/clickhouse', 'copier', - '--config', '/etc/clickhouse-server/config-copier.xml', - '--task-path', task.zk_task_path, - '--task-file', task.container_task_file, - '--task-upload-force', 'true', - '--base-dir', '/var/log/clickhouse-server/copier'] + cmd = [ + "/usr/bin/clickhouse", + "copier", + "--config", + "/etc/clickhouse-server/config-copier.xml", + "--task-path", + task.zk_task_path, + "--task-file", + task.container_task_file, + "--task-upload-force", + "true", + "--base-dir", + "/var/log/clickhouse-server/copier", + ] cmd += cmd_options print(cmd) @@ -353,25 +507,31 @@ def execute_task(started_cluster, task, cmd_options): for instance_name in copiers: instance = started_cluster.instances[instance_name] container = instance.get_docker_handle() - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, "configs/config-copier.xml"), - "/etc/clickhouse-server/config-copier.xml") + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "configs/config-copier.xml"), + "/etc/clickhouse-server/config-copier.xml", + ) print("Copied copier config to {}".format(instance.name)) exec_id = docker_api.exec_create(container.id, cmd, stderr=True) - output = 
docker_api.exec_start(exec_id).decode('utf8') + output = docker_api.exec_start(exec_id).decode("utf8") print(output) copiers_exec_ids.append(exec_id) - print("Copier for {} ({}) has started".format(instance.name, instance.ip_address)) + print( + "Copier for {} ({}) has started".format(instance.name, instance.ip_address) + ) # Wait for copiers stopping and check their return codes for exec_id, instance_name in zip(copiers_exec_ids, copiers): instance = started_cluster.instances[instance_name] while True: res = docker_api.exec_inspect(exec_id) - if not res['Running']: + if not res["Running"]: break time.sleep(0.5) - assert res['ExitCode'] == 0, "Instance: {} ({}). Info: {}".format(instance.name, instance.ip_address, repr(res)) + assert res["ExitCode"] == 0, "Instance: {} ({}). Info: {}".format( + instance.name, instance.ip_address, repr(res) + ) try: task.check() @@ -381,30 +541,59 @@ def execute_task(started_cluster, task, cmd_options): # Tests -@pytest.mark.parametrize(('use_sample_offset'), [False, True]) + +@pytest.mark.parametrize(("use_sample_offset"), [False, True]) def test_copy_simple(started_cluster, use_sample_offset): if use_sample_offset: - execute_task(started_cluster, Task1(started_cluster), ['--experimental-use-sample-offset', '1']) + execute_task( + started_cluster, + Task1(started_cluster), + ["--experimental-use-sample-offset", "1"], + ) else: execute_task(started_cluster, Task1(started_cluster), []) -@pytest.mark.parametrize(('use_sample_offset'),[False, True]) +@pytest.mark.parametrize(("use_sample_offset"), [False, True]) def test_copy_with_recovering(started_cluster, use_sample_offset): if use_sample_offset: - execute_task(started_cluster, Task1(started_cluster), ['--copy-fault-probability', str(COPYING_FAIL_PROBABILITY), - '--experimental-use-sample-offset', '1']) + execute_task( + started_cluster, + Task1(started_cluster), + [ + "--copy-fault-probability", + str(COPYING_FAIL_PROBABILITY), + "--experimental-use-sample-offset", + "1", + ], + ) else: - execute_task(started_cluster, Task1(started_cluster), ['--copy-fault-probability', str(COPYING_FAIL_PROBABILITY)]) + execute_task( + started_cluster, + Task1(started_cluster), + ["--copy-fault-probability", str(COPYING_FAIL_PROBABILITY)], + ) -@pytest.mark.parametrize(('use_sample_offset'),[False, True]) +@pytest.mark.parametrize(("use_sample_offset"), [False, True]) def test_copy_with_recovering_after_move_faults(started_cluster, use_sample_offset): if use_sample_offset: - execute_task(started_cluster, Task1(started_cluster), ['--move-fault-probability', str(MOVING_FAIL_PROBABILITY), - '--experimental-use-sample-offset', '1']) + execute_task( + started_cluster, + Task1(started_cluster), + [ + "--move-fault-probability", + str(MOVING_FAIL_PROBABILITY), + "--experimental-use-sample-offset", + "1", + ], + ) else: - execute_task(started_cluster, Task1(started_cluster), ['--move-fault-probability', str(MOVING_FAIL_PROBABILITY)]) + execute_task( + started_cluster, + Task1(started_cluster), + ["--move-fault-probability", str(MOVING_FAIL_PROBABILITY)], + ) @pytest.mark.timeout(600) @@ -414,12 +603,22 @@ def test_copy_month_to_week_partition(started_cluster): @pytest.mark.timeout(600) def test_copy_month_to_week_partition_with_recovering(started_cluster): - execute_task(started_cluster, Task2(started_cluster, "test2"), ['--copy-fault-probability', str(COPYING_FAIL_PROBABILITY)]) + execute_task( + started_cluster, + Task2(started_cluster, "test2"), + ["--copy-fault-probability", str(COPYING_FAIL_PROBABILITY)], + ) 
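The copier tests in this file all funnel through execute_task and differ only in the cmd_options they append: a copy or move fault-injection probability and the experimental sample-offset switch. As a reading aid, the following minimal Python sketch shows how that command line is assembled; build_copier_cmd is a hypothetical helper written for illustration only, the flag names and default paths are taken from the execute_task shown above, and the example arguments (task path suffix, probability values) are illustrative rather than exact.

    from typing import List, Optional

    def build_copier_cmd(
        task_path: str,
        task_file: str,
        copy_fault_probability: Optional[float] = None,
        move_fault_probability: Optional[float] = None,
        use_sample_offset: bool = False,
    ) -> List[str]:
        # Base invocation, mirroring the cmd list built in execute_task.
        cmd = [
            "/usr/bin/clickhouse", "copier",
            "--config", "/etc/clickhouse-server/config-copier.xml",
            "--task-path", task_path,
            "--task-file", task_file,
            "--task-upload-force", "true",
            "--base-dir", "/var/log/clickhouse-server/copier",
        ]
        # Optional fault-injection flags, matching the cmd_options the tests pass in.
        if copy_fault_probability is not None:
            cmd += ["--copy-fault-probability", str(copy_fault_probability)]
        if move_fault_probability is not None:
            cmd += ["--move-fault-probability", str(move_fault_probability)]
        if use_sample_offset:
            cmd += ["--experimental-use-sample-offset", "1"]
        return cmd

    # Example: roughly the options test_copy_with_recovering uses with sample offset enabled
    # (the task path suffix is a placeholder for the random string the tests generate).
    print(build_copier_cmd(
        "/clickhouse-copier/task_simple_EXAMPLE",
        "/task0_description.xml",
        copy_fault_probability=0.2,
        use_sample_offset=True,
    ))

The sketch only illustrates command assembly; in the tests themselves the resulting cmd is executed inside each instance's container via docker exec, and the exit codes are checked afterwards.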
@pytest.mark.timeout(600) -def test_copy_month_to_week_partition_with_recovering_after_move_faults(started_cluster): - execute_task(started_cluster, Task2(started_cluster, "test3"), ['--move-fault-probability', str(MOVING_FAIL_PROBABILITY)]) +def test_copy_month_to_week_partition_with_recovering_after_move_faults( + started_cluster, +): + execute_task( + started_cluster, + Task2(started_cluster, "test3"), + ["--move-fault-probability", str(MOVING_FAIL_PROBABILITY)], + ) def test_block_size(started_cluster): diff --git a/tests/integration/test_cluster_copier/test_three_nodes.py b/tests/integration/test_cluster_copier/test_three_nodes.py index 63b0bcc6679..c8039792fe8 100644 --- a/tests/integration/test_cluster_copier/test_three_nodes.py +++ b/tests/integration/test_cluster_copier/test_three_nodes.py @@ -12,7 +12,8 @@ import docker CURRENT_TEST_DIR = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) -cluster = ClickHouseCluster(__file__, name='copier_test_three_nodes') +cluster = ClickHouseCluster(__file__, name="copier_test_three_nodes") + @pytest.fixture(scope="module") def started_cluster(): @@ -20,9 +21,15 @@ def started_cluster(): try: for name in ["first", "second", "third"]: - cluster.add_instance(name, - main_configs=["configs_three_nodes/conf.d/clusters.xml", "configs_three_nodes/conf.d/ddl.xml"], user_configs=["configs_three_nodes/users.xml"], - with_zookeeper=True) + cluster.add_instance( + name, + main_configs=[ + "configs_three_nodes/conf.d/clusters.xml", + "configs_three_nodes/conf.d/ddl.xml", + ], + user_configs=["configs_three_nodes/users.xml"], + with_zookeeper=True, + ) cluster.start() yield cluster @@ -30,17 +37,22 @@ def started_cluster(): finally: cluster.shutdown() + class Task: def __init__(self, cluster): self.cluster = cluster - self.zk_task_path = '/clickhouse-copier/task' + self.zk_task_path = "/clickhouse-copier/task" self.container_task_file = "/task_taxi_data.xml" for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_taxi_data.xml'), self.container_task_file) - logging.debug(f"Copied task file to container of '{instance_name}' instance. Path {self.container_task_file}") - + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_taxi_data.xml"), + self.container_task_file, + ) + logging.debug( + f"Copied task file to container of '{instance_name}' instance. 
Path {self.container_task_file}" + ) def start(self): for name in ["first", "second", "third"]: @@ -48,11 +60,12 @@ class Task: node.query("DROP DATABASE IF EXISTS dailyhistory SYNC;") node.query("DROP DATABASE IF EXISTS monthlyhistory SYNC;") - first = cluster.instances['first'] + first = cluster.instances["first"] # daily partition database first.query("CREATE DATABASE IF NOT EXISTS dailyhistory on cluster events;") - first.query("""CREATE TABLE dailyhistory.yellow_tripdata_staging ON CLUSTER events + first.query( + """CREATE TABLE dailyhistory.yellow_tripdata_staging ON CLUSTER events ( id UUID DEFAULT generateUUIDv4(), vendor_id String, @@ -82,14 +95,18 @@ class Task: Engine = ReplacingMergeTree() PRIMARY KEY (tpep_pickup_datetime, id) ORDER BY (tpep_pickup_datetime, id) - PARTITION BY (toYYYYMMDD(tpep_pickup_datetime))""") + PARTITION BY (toYYYYMMDD(tpep_pickup_datetime))""" + ) - first.query("""CREATE TABLE dailyhistory.yellow_tripdata + first.query( + """CREATE TABLE dailyhistory.yellow_tripdata ON CLUSTER events AS dailyhistory.yellow_tripdata_staging - ENGINE = Distributed('events', 'dailyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""") + ENGINE = Distributed('events', 'dailyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""" + ) - first.query("""INSERT INTO dailyhistory.yellow_tripdata + first.query( + """INSERT INTO dailyhistory.yellow_tripdata SELECT * FROM generateRandom( 'id UUID DEFAULT generateUUIDv4(), vendor_id String, @@ -116,11 +133,13 @@ class Task: congestion_surcharge String, junk1 String, junk2 String', - 1, 10, 2) LIMIT 50;""") + 1, 10, 2) LIMIT 50;""" + ) # monthly partition database first.query("create database IF NOT EXISTS monthlyhistory on cluster events;") - first.query("""CREATE TABLE monthlyhistory.yellow_tripdata_staging ON CLUSTER events + first.query( + """CREATE TABLE monthlyhistory.yellow_tripdata_staging ON CLUSTER events ( id UUID DEFAULT generateUUIDv4(), vendor_id String, @@ -151,13 +170,15 @@ class Task: Engine = ReplacingMergeTree() PRIMARY KEY (tpep_pickup_datetime, id) ORDER BY (tpep_pickup_datetime, id) - PARTITION BY (pickup_location_id, toYYYYMM(tpep_pickup_datetime))""") + PARTITION BY (pickup_location_id, toYYYYMM(tpep_pickup_datetime))""" + ) - first.query("""CREATE TABLE monthlyhistory.yellow_tripdata + first.query( + """CREATE TABLE monthlyhistory.yellow_tripdata ON CLUSTER events AS monthlyhistory.yellow_tripdata_staging - ENGINE = Distributed('events', 'monthlyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""") - + ENGINE = Distributed('events', 'monthlyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""" + ) def check(self): first = cluster.instances["first"] @@ -167,12 +188,24 @@ class Task: for instance_name, instance in cluster.instances.items(): instance = cluster.instances[instance_name] - a = instance.query("SELECT count() from dailyhistory.yellow_tripdata_staging") - b = instance.query("SELECT count() from monthlyhistory.yellow_tripdata_staging") + a = instance.query( + "SELECT count() from dailyhistory.yellow_tripdata_staging" + ) + b = instance.query( + "SELECT count() from monthlyhistory.yellow_tripdata_staging" + ) assert a == b, "MergeTree tables on each shard" - a = TSV(instance.query("SELECT sipHash64(*) from dailyhistory.yellow_tripdata_staging ORDER BY id")) - b = TSV(instance.query("SELECT sipHash64(*) from monthlyhistory.yellow_tripdata_staging ORDER BY id")) + a = TSV( + instance.query( + "SELECT sipHash64(*) from dailyhistory.yellow_tripdata_staging ORDER BY id" + ) + ) + b = TSV( + 
instance.query( + "SELECT sipHash64(*) from monthlyhistory.yellow_tripdata_staging ORDER BY id" + ) + ) assert a == b, "Data on each shard" @@ -182,23 +215,30 @@ class Task: node.query("DROP DATABASE IF EXISTS monthlyhistory SYNC;") - def execute_task(started_cluster, task, cmd_options): task.start() - zk = started_cluster.get_kazoo_client('zoo1') + zk = started_cluster.get_kazoo_client("zoo1") logging.debug("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])) # Run cluster-copier processes on each node docker_api = started_cluster.docker_client.api copiers_exec_ids = [] - cmd = ['/usr/bin/clickhouse', 'copier', - '--config', '/etc/clickhouse-server/config-copier.xml', - '--task-path', task.zk_task_path, - '--task-file', task.container_task_file, - '--task-upload-force', 'true', - '--base-dir', '/var/log/clickhouse-server/copier'] + cmd = [ + "/usr/bin/clickhouse", + "copier", + "--config", + "/etc/clickhouse-server/config-copier.xml", + "--task-path", + task.zk_task_path, + "--task-file", + task.container_task_file, + "--task-upload-force", + "true", + "--base-dir", + "/var/log/clickhouse-server/copier", + ] cmd += cmd_options logging.debug(f"execute_task cmd: {cmd}") @@ -206,25 +246,34 @@ def execute_task(started_cluster, task, cmd_options): for instance_name in started_cluster.instances.keys(): instance = started_cluster.instances[instance_name] container = instance.get_docker_handle() - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, "configs_three_nodes/config-copier.xml"), "/etc/clickhouse-server/config-copier.xml") + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "configs_three_nodes/config-copier.xml"), + "/etc/clickhouse-server/config-copier.xml", + ) logging.info("Copied copier config to {}".format(instance.name)) exec_id = docker_api.exec_create(container.id, cmd, stderr=True) - output = docker_api.exec_start(exec_id).decode('utf8') + output = docker_api.exec_start(exec_id).decode("utf8") logging.info(output) copiers_exec_ids.append(exec_id) - logging.info("Copier for {} ({}) has started".format(instance.name, instance.ip_address)) + logging.info( + "Copier for {} ({}) has started".format(instance.name, instance.ip_address) + ) # time.sleep(1000) # Wait for copiers stopping and check their return codes - for exec_id, instance in zip(copiers_exec_ids, iter(started_cluster.instances.values())): + for exec_id, instance in zip( + copiers_exec_ids, iter(started_cluster.instances.values()) + ): while True: res = docker_api.exec_inspect(exec_id) - if not res['Running']: + if not res["Running"]: break time.sleep(1) - assert res['ExitCode'] == 0, "Instance: {} ({}). Info: {}".format(instance.name, instance.ip_address, repr(res)) + assert res["ExitCode"] == 0, "Instance: {} ({}). 
Info: {}".format( + instance.name, instance.ip_address, repr(res) + ) try: task.check() diff --git a/tests/integration/test_cluster_copier/test_trivial.py b/tests/integration/test_cluster_copier/test_trivial.py index e58c6edcb4d..84bf39f0d76 100644 --- a/tests/integration/test_cluster_copier/test_trivial.py +++ b/tests/integration/test_cluster_copier/test_trivial.py @@ -19,11 +19,13 @@ sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) COPYING_FAIL_PROBABILITY = 0.1 MOVING_FAIL_PROBABILITY = 0.1 -cluster = ClickHouseCluster(__file__, name='copier_test_trivial') +cluster = ClickHouseCluster(__file__, name="copier_test_trivial") def generateRandomString(count): - return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(count)) + return "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(count) + ) @pytest.fixture(scope="module") @@ -31,11 +33,17 @@ def started_cluster(): global cluster try: for name in ["first_trivial", "second_trivial"]: - instance = cluster.add_instance(name, + instance = cluster.add_instance( + name, main_configs=["configs/conf.d/clusters_trivial.xml"], user_configs=["configs_two_nodes/users.xml"], - macros={"cluster" : name, "shard" : "the_only_shard", "replica" : "the_only_replica"}, - with_zookeeper=True) + macros={ + "cluster": name, + "shard": "the_only_shard", + "replica": "the_only_replica", + }, + with_zookeeper=True, + ) cluster.start() yield cluster @@ -48,30 +56,41 @@ class TaskTrivial: def __init__(self, cluster): self.cluster = cluster self.zk_task_path = "/clickhouse-copier/task_trivial" - self.copier_task_config = open(os.path.join(CURRENT_TEST_DIR, 'task_trivial.xml'), 'r').read() + self.copier_task_config = open( + os.path.join(CURRENT_TEST_DIR, "task_trivial.xml"), "r" + ).read() def start(self): - source = cluster.instances['first_trivial'] - destination = cluster.instances['second_trivial'] + source = cluster.instances["first_trivial"] + destination = cluster.instances["second_trivial"] for node in [source, destination]: node.query("DROP DATABASE IF EXISTS default") node.query("CREATE DATABASE IF NOT EXISTS default") - source.query("CREATE TABLE trivial (d UInt64, d1 UInt64 MATERIALIZED d+1)" - "ENGINE=ReplicatedMergeTree('/clickhouse/tables/source_trivial_cluster/1/trivial/{}', '1') " - "PARTITION BY d % 5 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16".format(generateRandomString(10))) + source.query( + "CREATE TABLE trivial (d UInt64, d1 UInt64 MATERIALIZED d+1)" + "ENGINE=ReplicatedMergeTree('/clickhouse/tables/source_trivial_cluster/1/trivial/{}', '1') " + "PARTITION BY d % 5 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16".format( + generateRandomString(10) + ) + ) - source.query("INSERT INTO trivial SELECT * FROM system.numbers LIMIT 1002", - settings={"insert_distributed_sync": 1}) + source.query( + "INSERT INTO trivial SELECT * FROM system.numbers LIMIT 1002", + settings={"insert_distributed_sync": 1}, + ) def check(self): - zk = cluster.get_kazoo_client('zoo1') + zk = cluster.get_kazoo_client("zoo1") status_data, _ = zk.get(self.zk_task_path + "/status") - assert status_data == b'{"hits":{"all_partitions_count":5,"processed_partitions_count":5}}' + assert ( + status_data + == b'{"hits":{"all_partitions_count":5,"processed_partitions_count":5}}' + ) - source = cluster.instances['first_trivial'] - destination = cluster.instances['second_trivial'] + source = cluster.instances["first_trivial"] + destination = 
cluster.instances["second_trivial"] assert TSV(source.query("SELECT count() FROM trivial")) == TSV("1002\n") assert TSV(destination.query("SELECT count() FROM trivial")) == TSV("1002\n") @@ -84,33 +103,46 @@ class TaskReplicatedWithoutArguments: def __init__(self, cluster): self.cluster = cluster self.zk_task_path = "/clickhouse-copier/task_trivial_without_arguments" - self.copier_task_config = open(os.path.join(CURRENT_TEST_DIR, 'task_trivial_without_arguments.xml'), 'r').read() + self.copier_task_config = open( + os.path.join(CURRENT_TEST_DIR, "task_trivial_without_arguments.xml"), "r" + ).read() def start(self): - source = cluster.instances['first_trivial'] - destination = cluster.instances['second_trivial'] + source = cluster.instances["first_trivial"] + destination = cluster.instances["second_trivial"] for node in [source, destination]: node.query("DROP DATABASE IF EXISTS default") node.query("CREATE DATABASE IF NOT EXISTS default") - source.query("CREATE TABLE trivial_without_arguments ON CLUSTER source_trivial_cluster (d UInt64, d1 UInt64 MATERIALIZED d+1) " - "ENGINE=ReplicatedMergeTree() " - "PARTITION BY d % 5 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16") + source.query( + "CREATE TABLE trivial_without_arguments ON CLUSTER source_trivial_cluster (d UInt64, d1 UInt64 MATERIALIZED d+1) " + "ENGINE=ReplicatedMergeTree() " + "PARTITION BY d % 5 ORDER BY (d, sipHash64(d)) SAMPLE BY sipHash64(d) SETTINGS index_granularity = 16" + ) - source.query("INSERT INTO trivial_without_arguments SELECT * FROM system.numbers LIMIT 1002", - settings={"insert_distributed_sync": 1}) + source.query( + "INSERT INTO trivial_without_arguments SELECT * FROM system.numbers LIMIT 1002", + settings={"insert_distributed_sync": 1}, + ) def check(self): - zk = cluster.get_kazoo_client('zoo1') + zk = cluster.get_kazoo_client("zoo1") status_data, _ = zk.get(self.zk_task_path + "/status") - assert status_data == b'{"hits":{"all_partitions_count":5,"processed_partitions_count":5}}' + assert ( + status_data + == b'{"hits":{"all_partitions_count":5,"processed_partitions_count":5}}' + ) - source = cluster.instances['first_trivial'] - destination = cluster.instances['second_trivial'] + source = cluster.instances["first_trivial"] + destination = cluster.instances["second_trivial"] - assert TSV(source.query("SELECT count() FROM trivial_without_arguments")) == TSV("1002\n") - assert TSV(destination.query("SELECT count() FROM trivial_without_arguments")) == TSV("1002\n") + assert TSV( + source.query("SELECT count() FROM trivial_without_arguments") + ) == TSV("1002\n") + assert TSV( + destination.query("SELECT count() FROM trivial_without_arguments") + ) == TSV("1002\n") for node in [source, destination]: node.query("DROP TABLE trivial_without_arguments") @@ -119,7 +151,7 @@ class TaskReplicatedWithoutArguments: def execute_task(started_cluster, task, cmd_options): task.start() - zk = started_cluster.get_kazoo_client('zoo1') + zk = started_cluster.get_kazoo_client("zoo1") print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])) try: @@ -135,10 +167,16 @@ def execute_task(started_cluster, task, cmd_options): docker_api = started_cluster.docker_client.api copiers_exec_ids = [] - cmd = ['/usr/bin/clickhouse', 'copier', - '--config', '/etc/clickhouse-server/config-copier.xml', - '--task-path', zk_task_path, - '--base-dir', '/var/log/clickhouse-server/copier'] + cmd = [ + "/usr/bin/clickhouse", + "copier", + "--config", + "/etc/clickhouse-server/config-copier.xml", + 
"--task-path", + zk_task_path, + "--base-dir", + "/var/log/clickhouse-server/copier", + ] cmd += cmd_options copiers = list(started_cluster.instances.keys()) @@ -146,25 +184,31 @@ def execute_task(started_cluster, task, cmd_options): for instance_name in copiers: instance = started_cluster.instances[instance_name] container = instance.get_docker_handle() - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, "configs/config-copier.xml"), - "/etc/clickhouse-server/config-copier.xml") + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "configs/config-copier.xml"), + "/etc/clickhouse-server/config-copier.xml", + ) print("Copied copier config to {}".format(instance.name)) exec_id = docker_api.exec_create(container.id, cmd, stderr=True) - output = docker_api.exec_start(exec_id).decode('utf8') + output = docker_api.exec_start(exec_id).decode("utf8") print(output) copiers_exec_ids.append(exec_id) - print("Copier for {} ({}) has started".format(instance.name, instance.ip_address)) + print( + "Copier for {} ({}) has started".format(instance.name, instance.ip_address) + ) # Wait for copiers stopping and check their return codes for exec_id, instance_name in zip(copiers_exec_ids, copiers): instance = started_cluster.instances[instance_name] while True: res = docker_api.exec_inspect(exec_id) - if not res['Running']: + if not res["Running"]: break time.sleep(0.5) - assert res['ExitCode'] == 0, "Instance: {} ({}). Info: {}".format(instance.name, instance.ip_address, repr(res)) + assert res["ExitCode"] == 0, "Instance: {} ({}). Info: {}".format( + instance.name, instance.ip_address, repr(res) + ) try: task.check() @@ -174,6 +218,7 @@ def execute_task(started_cluster, task, cmd_options): # Tests + def test_trivial_copy(started_cluster): execute_task(started_cluster, TaskTrivial(started_cluster), []) diff --git a/tests/integration/test_cluster_copier/test_two_nodes.py b/tests/integration/test_cluster_copier/test_two_nodes.py index 255af13213a..6fdaaeea720 100644 --- a/tests/integration/test_cluster_copier/test_two_nodes.py +++ b/tests/integration/test_cluster_copier/test_two_nodes.py @@ -12,7 +12,7 @@ import docker CURRENT_TEST_DIR = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) -cluster = ClickHouseCluster(__file__, name='copier_test_two_nodes') +cluster = ClickHouseCluster(__file__, name="copier_test_two_nodes") @pytest.fixture(scope="module") @@ -21,38 +21,49 @@ def started_cluster(): try: for name in ["first_of_two", "second_of_two"]: - instance = cluster.add_instance(name, + instance = cluster.add_instance( + name, main_configs=[ "configs_two_nodes/conf.d/clusters.xml", "configs_two_nodes/conf.d/ddl.xml", - "configs_two_nodes/conf.d/storage_configuration.xml"], + "configs_two_nodes/conf.d/storage_configuration.xml", + ], user_configs=["configs_two_nodes/users.xml"], - with_zookeeper=True) + with_zookeeper=True, + ) cluster.start() for name in ["first_of_two", "second_of_two"]: instance = cluster.instances[name] - instance.exec_in_container(['bash', '-c', 'mkdir /jbod1']) - instance.exec_in_container(['bash', '-c', 'mkdir /jbod2']) - instance.exec_in_container(['bash', '-c', 'mkdir /external']) + instance.exec_in_container(["bash", "-c", "mkdir /jbod1"]) + instance.exec_in_container(["bash", "-c", "mkdir /jbod2"]) + instance.exec_in_container(["bash", "-c", "mkdir /external"]) yield cluster finally: cluster.shutdown() + # Will copy table from `first` node to `second` class TaskWithDifferentSchema: def __init__(self, cluster): 
self.cluster = cluster - self.zk_task_path = '/clickhouse-copier/task_with_different_schema' + self.zk_task_path = "/clickhouse-copier/task_with_different_schema" self.container_task_file = "/task_with_different_schema.xml" for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_with_different_schema.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_with_different_schema.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): first = cluster.instances["first_of_two"] @@ -62,7 +73,8 @@ class TaskWithDifferentSchema: second.query("DROP DATABASE IF EXISTS db_different_schema SYNC") first.query("CREATE DATABASE IF NOT EXISTS db_different_schema;") - first.query("""CREATE TABLE db_different_schema.source + first.query( + """CREATE TABLE db_different_schema.source ( Column1 String, Column2 UInt32, @@ -83,16 +95,19 @@ class TaskWithDifferentSchema: PARTITION BY (toYYYYMMDD(Column3), Column3) PRIMARY KEY (Column1, Column2, Column3, Column4, Column6, Column7, Column8, Column9) ORDER BY (Column1, Column2, Column3, Column4, Column6, Column7, Column8, Column9) - SETTINGS index_granularity = 8192""") + SETTINGS index_granularity = 8192""" + ) - first.query("""INSERT INTO db_different_schema.source SELECT * FROM generateRandom( + first.query( + """INSERT INTO db_different_schema.source SELECT * FROM generateRandom( 'Column1 String, Column2 UInt32, Column3 Date, Column4 DateTime, Column5 UInt16, Column6 String, Column7 String, Column8 String, Column9 String, Column10 String, - Column11 String, Column12 Decimal(3, 1), Column13 DateTime, Column14 UInt16', 1, 10, 2) LIMIT 50;""") - + Column11 String, Column12 Decimal(3, 1), Column13 DateTime, Column14 UInt16', 1, 10, 2) LIMIT 50;""" + ) second.query("CREATE DATABASE IF NOT EXISTS db_different_schema;") - second.query("""CREATE TABLE db_different_schema.destination + second.query( + """CREATE TABLE db_different_schema.destination ( Column1 LowCardinality(String) CODEC(LZ4), Column2 UInt32 CODEC(LZ4), @@ -110,7 +125,8 @@ class TaskWithDifferentSchema: Column14 UInt16 CODEC(LZ4) ) ENGINE = MergeTree() PARTITION BY toYYYYMMDD(Column3) - ORDER BY (Column9, Column1, Column2, Column3, Column4);""") + ORDER BY (Column9, Column1, Column2, Column3, Column4);""" + ) print("Preparation completed") @@ -122,10 +138,18 @@ class TaskWithDifferentSchema: b = second.query("SELECT count() from db_different_schema.destination") assert a == b, "Count" - a = TSV(first.query("""SELECT sipHash64(*) from db_different_schema.source - ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8, Column9, Column10, Column11, Column12, Column13, Column14)""")) - b = TSV(second.query("""SELECT sipHash64(*) from db_different_schema.destination - ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8, Column9, Column10, Column11, Column12, Column13, Column14)""")) + a = TSV( + first.query( + """SELECT sipHash64(*) from db_different_schema.source + ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8, Column9, Column10, Column11, Column12, Column13, Column14)""" + ) + ) + b = TSV( + second.query( + """SELECT 
sipHash64(*) from db_different_schema.destination + ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8, Column9, Column10, Column11, Column12, Column13, Column14)""" + ) + ) assert a == b, "Data" first.query("DROP DATABASE IF EXISTS db_different_schema SYNC") @@ -137,13 +161,20 @@ class TaskWithDifferentSchema: class TaskTTL: def __init__(self, cluster): self.cluster = cluster - self.zk_task_path = '/clickhouse-copier/task_ttl_columns' + self.zk_task_path = "/clickhouse-copier/task_ttl_columns" self.container_task_file = "/task_ttl_columns.xml" for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_ttl_columns.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_ttl_columns.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): first = cluster.instances["first_of_two"] @@ -153,7 +184,8 @@ class TaskTTL: second.query("DROP DATABASE IF EXISTS db_ttl_columns SYNC") first.query("CREATE DATABASE IF NOT EXISTS db_ttl_columns;") - first.query("""CREATE TABLE db_ttl_columns.source + first.query( + """CREATE TABLE db_ttl_columns.source ( Column1 String, Column2 UInt32, @@ -168,14 +200,18 @@ class TaskTTL: PARTITION BY (toYYYYMMDD(Column3), Column3) PRIMARY KEY (Column1, Column2, Column3) ORDER BY (Column1, Column2, Column3) - SETTINGS index_granularity = 8192""") + SETTINGS index_granularity = 8192""" + ) - first.query("""INSERT INTO db_ttl_columns.source SELECT * FROM generateRandom( + first.query( + """INSERT INTO db_ttl_columns.source SELECT * FROM generateRandom( 'Column1 String, Column2 UInt32, Column3 Date, Column4 DateTime, Column5 UInt16, - Column6 String, Column7 Decimal(3, 1), Column8 Tuple(Float64, Float64)', 1, 10, 2) LIMIT 50;""") + Column6 String, Column7 Decimal(3, 1), Column8 Tuple(Float64, Float64)', 1, 10, 2) LIMIT 50;""" + ) second.query("CREATE DATABASE IF NOT EXISTS db_ttl_columns;") - second.query("""CREATE TABLE db_ttl_columns.destination + second.query( + """CREATE TABLE db_ttl_columns.destination ( Column1 String, Column2 UInt32, @@ -187,7 +223,8 @@ class TaskTTL: Column8 Tuple(Float64, Float64) ) ENGINE = MergeTree() PARTITION BY toYYYYMMDD(Column3) - ORDER BY (Column3, Column2, Column1);""") + ORDER BY (Column3, Column2, Column1);""" + ) print("Preparation completed") @@ -199,10 +236,18 @@ class TaskTTL: b = second.query("SELECT count() from db_ttl_columns.destination") assert a == b, "Count" - a = TSV(first.query("""SELECT sipHash64(*) from db_ttl_columns.source - ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8)""")) - b = TSV(second.query("""SELECT sipHash64(*) from db_ttl_columns.destination - ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8)""")) + a = TSV( + first.query( + """SELECT sipHash64(*) from db_ttl_columns.source + ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8)""" + ) + ) + b = TSV( + second.query( + """SELECT sipHash64(*) from db_ttl_columns.destination + ORDER BY (Column1, Column2, Column3, Column4, Column5, Column6, Column7, Column8)""" + ) + ) assert a == b, "Data" first.query("DROP DATABASE IF EXISTS db_ttl_columns SYNC") 
@@ -212,13 +257,20 @@ class TaskTTL: class TaskSkipIndex: def __init__(self, cluster): self.cluster = cluster - self.zk_task_path = '/clickhouse-copier/task_skip_index' + self.zk_task_path = "/clickhouse-copier/task_skip_index" self.container_task_file = "/task_skip_index.xml" for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_skip_index.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_skip_index.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): first = cluster.instances["first_of_two"] @@ -228,7 +280,8 @@ class TaskSkipIndex: second.query("DROP DATABASE IF EXISTS db_skip_index SYNC") first.query("CREATE DATABASE IF NOT EXISTS db_skip_index;") - first.query("""CREATE TABLE db_skip_index.source + first.query( + """CREATE TABLE db_skip_index.source ( Column1 UInt64, Column2 Int32, @@ -242,13 +295,17 @@ class TaskSkipIndex: PARTITION BY (toYYYYMMDD(Column3), Column3) PRIMARY KEY (Column1, Column2, Column3) ORDER BY (Column1, Column2, Column3) - SETTINGS index_granularity = 8192""") + SETTINGS index_granularity = 8192""" + ) - first.query("""INSERT INTO db_skip_index.source SELECT * FROM generateRandom( - 'Column1 UInt64, Column2 Int32, Column3 Date, Column4 DateTime, Column5 String', 1, 10, 2) LIMIT 100;""") + first.query( + """INSERT INTO db_skip_index.source SELECT * FROM generateRandom( + 'Column1 UInt64, Column2 Int32, Column3 Date, Column4 DateTime, Column5 String', 1, 10, 2) LIMIT 100;""" + ) second.query("CREATE DATABASE IF NOT EXISTS db_skip_index;") - second.query("""CREATE TABLE db_skip_index.destination + second.query( + """CREATE TABLE db_skip_index.destination ( Column1 UInt64, Column2 Int32, @@ -259,7 +316,8 @@ class TaskSkipIndex: INDEX b (Column1 * length(Column5)) TYPE set(1000) GRANULARITY 4 ) ENGINE = MergeTree() PARTITION BY toYYYYMMDD(Column3) - ORDER BY (Column3, Column2, Column1);""") + ORDER BY (Column3, Column2, Column1);""" + ) print("Preparation completed") @@ -271,10 +329,18 @@ class TaskSkipIndex: b = second.query("SELECT count() from db_skip_index.destination") assert a == b, "Count" - a = TSV(first.query("""SELECT sipHash64(*) from db_skip_index.source - ORDER BY (Column1, Column2, Column3, Column4, Column5)""")) - b = TSV(second.query("""SELECT sipHash64(*) from db_skip_index.destination - ORDER BY (Column1, Column2, Column3, Column4, Column5)""")) + a = TSV( + first.query( + """SELECT sipHash64(*) from db_skip_index.source + ORDER BY (Column1, Column2, Column3, Column4, Column5)""" + ) + ) + b = TSV( + second.query( + """SELECT sipHash64(*) from db_skip_index.destination + ORDER BY (Column1, Column2, Column3, Column4, Column5)""" + ) + ) assert a == b, "Data" first.query("DROP DATABASE IF EXISTS db_skip_index SYNC") @@ -284,13 +350,20 @@ class TaskSkipIndex: class TaskTTLMoveToVolume: def __init__(self, cluster): self.cluster = cluster - self.zk_task_path = '/clickhouse-copier/task_ttl_move_to_volume' + self.zk_task_path = "/clickhouse-copier/task_ttl_move_to_volume" self.container_task_file = "/task_ttl_move_to_volume.xml" for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - 
instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_ttl_move_to_volume.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_ttl_move_to_volume.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): first = cluster.instances["first_of_two"] @@ -300,7 +373,8 @@ class TaskTTLMoveToVolume: second.query("DROP DATABASE IF EXISTS db_move_to_volume SYNC") first.query("CREATE DATABASE IF NOT EXISTS db_move_to_volume;") - first.query("""CREATE TABLE db_move_to_volume.source + first.query( + """CREATE TABLE db_move_to_volume.source ( Column1 UInt64, Column2 Int32, @@ -313,13 +387,17 @@ class TaskTTLMoveToVolume: PRIMARY KEY (Column1, Column2, Column3) ORDER BY (Column1, Column2, Column3) TTL Column3 + INTERVAL 1 MONTH TO VOLUME 'external' - SETTINGS storage_policy = 'external_with_jbods';""") + SETTINGS storage_policy = 'external_with_jbods';""" + ) - first.query("""INSERT INTO db_move_to_volume.source SELECT * FROM generateRandom( - 'Column1 UInt64, Column2 Int32, Column3 Date, Column4 DateTime, Column5 String', 1, 10, 2) LIMIT 100;""") + first.query( + """INSERT INTO db_move_to_volume.source SELECT * FROM generateRandom( + 'Column1 UInt64, Column2 Int32, Column3 Date, Column4 DateTime, Column5 String', 1, 10, 2) LIMIT 100;""" + ) second.query("CREATE DATABASE IF NOT EXISTS db_move_to_volume;") - second.query("""CREATE TABLE db_move_to_volume.destination + second.query( + """CREATE TABLE db_move_to_volume.destination ( Column1 UInt64, Column2 Int32, @@ -330,7 +408,8 @@ class TaskTTLMoveToVolume: PARTITION BY toYYYYMMDD(Column3) ORDER BY (Column3, Column2, Column1) TTL Column3 + INTERVAL 1 MONTH TO VOLUME 'external' - SETTINGS storage_policy = 'external_with_jbods';""") + SETTINGS storage_policy = 'external_with_jbods';""" + ) print("Preparation completed") @@ -342,10 +421,18 @@ class TaskTTLMoveToVolume: b = second.query("SELECT count() from db_move_to_volume.destination") assert a == b, "Count" - a = TSV(first.query("""SELECT sipHash64(*) from db_move_to_volume.source - ORDER BY (Column1, Column2, Column3, Column4, Column5)""")) - b = TSV(second.query("""SELECT sipHash64(*) from db_move_to_volume.destination - ORDER BY (Column1, Column2, Column3, Column4, Column5)""")) + a = TSV( + first.query( + """SELECT sipHash64(*) from db_move_to_volume.source + ORDER BY (Column1, Column2, Column3, Column4, Column5)""" + ) + ) + b = TSV( + second.query( + """SELECT sipHash64(*) from db_move_to_volume.destination + ORDER BY (Column1, Column2, Column3, Column4, Column5)""" + ) + ) assert a == b, "Data" first.query("DROP DATABASE IF EXISTS db_move_to_volume SYNC") @@ -355,13 +442,20 @@ class TaskTTLMoveToVolume: class TaskDropTargetPartition: def __init__(self, cluster): self.cluster = cluster - self.zk_task_path = '/clickhouse-copier/task_drop_target_partition' + self.zk_task_path = "/clickhouse-copier/task_drop_target_partition" self.container_task_file = "/task_drop_target_partition.xml" for instance_name, _ in cluster.instances.items(): instance = cluster.instances[instance_name] - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_drop_target_partition.xml'), self.container_task_file) - print("Copied task file to container of '{}' instance. 
Path {}".format(instance_name, self.container_task_file)) + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "./task_drop_target_partition.xml"), + self.container_task_file, + ) + print( + "Copied task file to container of '{}' instance. Path {}".format( + instance_name, self.container_task_file + ) + ) def start(self): first = cluster.instances["first_of_two"] @@ -371,7 +465,8 @@ class TaskDropTargetPartition: second.query("DROP DATABASE IF EXISTS db_drop_target_partition SYNC") first.query("CREATE DATABASE IF NOT EXISTS db_drop_target_partition;") - first.query("""CREATE TABLE db_drop_target_partition.source + first.query( + """CREATE TABLE db_drop_target_partition.source ( Column1 UInt64, Column2 Int32, @@ -382,14 +477,17 @@ class TaskDropTargetPartition: ENGINE = MergeTree() PARTITION BY (toYYYYMMDD(Column3), Column3) PRIMARY KEY (Column1, Column2, Column3) - ORDER BY (Column1, Column2, Column3);""") - - first.query("""INSERT INTO db_drop_target_partition.source SELECT * FROM generateRandom( - 'Column1 UInt64, Column2 Int32, Column3 Date, Column4 DateTime, Column5 String', 1, 10, 2) LIMIT 100;""") + ORDER BY (Column1, Column2, Column3);""" + ) + first.query( + """INSERT INTO db_drop_target_partition.source SELECT * FROM generateRandom( + 'Column1 UInt64, Column2 Int32, Column3 Date, Column4 DateTime, Column5 String', 1, 10, 2) LIMIT 100;""" + ) second.query("CREATE DATABASE IF NOT EXISTS db_drop_target_partition;") - second.query("""CREATE TABLE db_drop_target_partition.destination + second.query( + """CREATE TABLE db_drop_target_partition.destination ( Column1 UInt64, Column2 Int32, @@ -398,10 +496,13 @@ class TaskDropTargetPartition: Column5 String ) ENGINE = MergeTree() PARTITION BY toYYYYMMDD(Column3) - ORDER BY (Column3, Column2, Column1);""") + ORDER BY (Column3, Column2, Column1);""" + ) # Insert data in target too. It has to be dropped. 
- first.query("""INSERT INTO db_drop_target_partition.destination SELECT * FROM db_drop_target_partition.source;""") + first.query( + """INSERT INTO db_drop_target_partition.destination SELECT * FROM db_drop_target_partition.source;""" + ) print("Preparation completed") @@ -413,10 +514,18 @@ class TaskDropTargetPartition: b = second.query("SELECT count() from db_drop_target_partition.destination") assert a == b, "Count" - a = TSV(first.query("""SELECT sipHash64(*) from db_drop_target_partition.source - ORDER BY (Column1, Column2, Column3, Column4, Column5)""")) - b = TSV(second.query("""SELECT sipHash64(*) from db_drop_target_partition.destination - ORDER BY (Column1, Column2, Column3, Column4, Column5)""")) + a = TSV( + first.query( + """SELECT sipHash64(*) from db_drop_target_partition.source + ORDER BY (Column1, Column2, Column3, Column4, Column5)""" + ) + ) + b = TSV( + second.query( + """SELECT sipHash64(*) from db_drop_target_partition.destination + ORDER BY (Column1, Column2, Column3, Column4, Column5)""" + ) + ) assert a == b, "Data" first.query("DROP DATABASE IF EXISTS db_drop_target_partition SYNC") @@ -426,19 +535,27 @@ class TaskDropTargetPartition: def execute_task(started_cluster, task, cmd_options): task.start() - zk = started_cluster.get_kazoo_client('zoo1') + zk = started_cluster.get_kazoo_client("zoo1") print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1])) # Run cluster-copier processes on each node docker_api = started_cluster.docker_client.api copiers_exec_ids = [] - cmd = ['/usr/bin/clickhouse', 'copier', - '--config', '/etc/clickhouse-server/config-copier.xml', - '--task-path', task.zk_task_path, - '--task-file', task.container_task_file, - '--task-upload-force', 'true', - '--base-dir', '/var/log/clickhouse-server/copier'] + cmd = [ + "/usr/bin/clickhouse", + "copier", + "--config", + "/etc/clickhouse-server/config-copier.xml", + "--task-path", + task.zk_task_path, + "--task-file", + task.container_task_file, + "--task-upload-force", + "true", + "--base-dir", + "/var/log/clickhouse-server/copier", + ] cmd += cmd_options print(cmd) @@ -446,25 +563,34 @@ def execute_task(started_cluster, task, cmd_options): for instance_name in started_cluster.instances.keys(): instance = started_cluster.instances[instance_name] container = instance.get_docker_handle() - instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, "configs_two_nodes/config-copier.xml"), "/etc/clickhouse-server/config-copier.xml") + instance.copy_file_to_container( + os.path.join(CURRENT_TEST_DIR, "configs_two_nodes/config-copier.xml"), + "/etc/clickhouse-server/config-copier.xml", + ) logging.info("Copied copier config to {}".format(instance.name)) exec_id = docker_api.exec_create(container.id, cmd, stderr=True) - output = docker_api.exec_start(exec_id).decode('utf8') + output = docker_api.exec_start(exec_id).decode("utf8") logging.info(output) copiers_exec_ids.append(exec_id) - logging.info("Copier for {} ({}) has started".format(instance.name, instance.ip_address)) + logging.info( + "Copier for {} ({}) has started".format(instance.name, instance.ip_address) + ) # time.sleep(1000) # Wait for copiers stopping and check their return codes - for exec_id, instance in zip(copiers_exec_ids, iter(started_cluster.instances.values())): + for exec_id, instance in zip( + copiers_exec_ids, iter(started_cluster.instances.values()) + ): while True: res = docker_api.exec_inspect(exec_id) - if not res['Running']: + if not res["Running"]: break time.sleep(1) - assert res['ExitCode'] == 0, 
"Instance: {} ({}). Info: {}".format(instance.name, instance.ip_address, repr(res)) + assert res["ExitCode"] == 0, "Instance: {} ({}). Info: {}".format( + instance.name, instance.ip_address, repr(res) + ) try: task.check() diff --git a/tests/integration/test_cluster_discovery/test.py b/tests/integration/test_cluster_discovery/test.py index acddd855040..311c955d1e3 100644 --- a/tests/integration/test_cluster_discovery/test.py +++ b/tests/integration/test_cluster_discovery/test.py @@ -7,18 +7,16 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -shard_configs = { - i: f'config/config_shard{i}.xml' - for i in [1, 3] -} +shard_configs = {i: f"config/config_shard{i}.xml" for i in [1, 3]} nodes = [ cluster.add_instance( - f'node{i}', - main_configs=[shard_configs.get(i, 'config/config.xml')], + f"node{i}", + main_configs=[shard_configs.get(i, "config/config.xml")], stay_alive=True, - with_zookeeper=True - ) for i in range(5) + with_zookeeper=True, + ) + for i in range(5) ] @@ -31,7 +29,9 @@ def start_cluster(): cluster.shutdown() -def check_on_cluster(nodes, expected, *, what, cluster_name='test_auto_cluster', msg=None, retries=5): +def check_on_cluster( + nodes, expected, *, what, cluster_name="test_auto_cluster", msg=None, retries=5 +): """ Select data from `system.clusters` on specified nodes and check the result """ @@ -39,17 +39,23 @@ def check_on_cluster(nodes, expected, *, what, cluster_name='test_auto_cluster', for retry in range(1, retries + 1): nodes_res = { - node.name: int(node.query(f"SELECT {what} FROM system.clusters WHERE cluster = '{cluster_name}'")) + node.name: int( + node.query( + f"SELECT {what} FROM system.clusters WHERE cluster = '{cluster_name}'" + ) + ) for node in nodes } if all(actual == expected for actual in nodes_res.values()): break if retry != retries: - time.sleep(2 ** retry) + time.sleep(2**retry) else: msg = msg or f"Wrong '{what}' result" - raise Exception(f'{msg}: {nodes_res}, expected: {expected} (after {retries} retries)') + raise Exception( + f"{msg}: {nodes_res}, expected: {expected} (after {retries} retries)" + ) def test_cluster_discovery_startup_and_stop(start_cluster): @@ -58,8 +64,14 @@ def test_cluster_discovery_startup_and_stop(start_cluster): then stop/start some nodes and check that it (dis)appeared in cluster. 
""" - check_nodes_count = functools.partial(check_on_cluster, what='count()', msg='Wrong nodes count in cluster') - check_shard_num = functools.partial(check_on_cluster, what='count(DISTINCT shard_num)', msg='Wrong shard_num count in cluster') + check_nodes_count = functools.partial( + check_on_cluster, what="count()", msg="Wrong nodes count in cluster" + ) + check_shard_num = functools.partial( + check_on_cluster, + what="count(DISTINCT shard_num)", + msg="Wrong shard_num count in cluster", + ) total_shards = len(shard_configs) + 1 check_nodes_count([nodes[0], nodes[2]], len(nodes)) @@ -78,4 +90,4 @@ def test_cluster_discovery_startup_and_stop(start_cluster): nodes[3].start_clickhouse() check_nodes_count([nodes[0], nodes[2]], len(nodes)) - check_nodes_count([nodes[1], nodes[2]], 2, cluster_name='two_shards', retries=1) + check_nodes_count([nodes[1], nodes[2]], 2, cluster_name="two_shards", retries=1) diff --git a/tests/integration/test_codec_encrypted/test.py b/tests/integration/test_codec_encrypted/test.py index 439fcdd8ef8..ebe837c9e3a 100644 --- a/tests/integration/test_codec_encrypted/test.py +++ b/tests/integration/test_codec_encrypted/test.py @@ -5,7 +5,8 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node') +node = cluster.add_instance("node") + @pytest.fixture(scope="module") def start_cluster(): @@ -16,8 +17,13 @@ def start_cluster(): finally: cluster.shutdown() + def make_storage_with_key(id): - node.exec_in_container(["bash", "-c" , """cat > /etc/clickhouse-server/config.d/storage_keys_config.xml << EOF + node.exec_in_container( + [ + "bash", + "-c", + """cat > /etc/clickhouse-server/config.d/storage_keys_config.xml << EOF @@ -33,27 +39,36 @@ def make_storage_with_key(id): -EOF""".format(cur_id=id)]) - node.query("SYSTEM RELOAD CONFIG") +EOF""".format( + cur_id=id + ), + ] + ) + node.query("SYSTEM RELOAD CONFIG") + def test_different_keys(start_cluster): make_storage_with_key(0) - node.query(""" + node.query( + """ CREATE TABLE encrypted_test_128 ( id Int64, data String Codec(AES_128_GCM_SIV) ) ENGINE=MergeTree() ORDER BY id - """) + """ + ) - node.query(""" + node.query( + """ CREATE TABLE encrypted_test_256 ( id Int64, data String Codec(AES_256_GCM_SIV) ) ENGINE=MergeTree() ORDER BY id - """) - + """ + ) + node.query("INSERT INTO encrypted_test_128 VALUES (0,'data'),(1,'data')") select_query = "SELECT * FROM encrypted_test_128 ORDER BY id FORMAT Values" assert node.query(select_query) == "(0,'data'),(1,'data')" diff --git a/tests/integration/test_compression_codec_read/test.py b/tests/integration/test_compression_codec_read/test.py index 35ae60f05ea..38cd61e241d 100644 --- a/tests/integration/test_compression_codec_read/test.py +++ b/tests/integration/test_compression_codec_read/test.py @@ -5,7 +5,14 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', image='yandex/clickhouse-server', tag='20.8.11.17', with_installed_binary=True, stay_alive=True) +node1 = cluster.add_instance( + "node1", + image="yandex/clickhouse-server", + tag="20.8.11.17", + with_installed_binary=True, + stay_alive=True, +) + @pytest.fixture(scope="module") def start_cluster(): @@ -16,10 +23,12 @@ def start_cluster(): finally: cluster.shutdown() + def test_default_codec_read(start_cluster): node1.query("DROP TABLE IF EXISTS test_18340") - node1.query(""" + node1.query( + """ CREATE TABLE test_18340 ( `lns` LowCardinality(Nullable(String)), @@ -36,10 
+45,12 @@ def test_default_codec_read(start_cluster): PARTITION BY i32 ORDER BY (s, farmHash64(s)) SAMPLE BY farmHash64(s) - """) - - node1.query("insert into test_18340 values ('test', 'test', 'test', 0, 0, ['a'], ['a'], now(), 0)") + """ + ) + node1.query( + "insert into test_18340 values ('test', 'test', 'test', 0, 0, ['a'], ['a'], now(), 0)" + ) assert node1.query("SELECT COUNT() FROM test_18340") == "1\n" diff --git a/tests/integration/test_compression_nested_columns/test.py b/tests/integration/test_compression_nested_columns/test.py index f73adadd770..55d88174287 100644 --- a/tests/integration/test_compression_nested_columns/test.py +++ b/tests/integration/test_compression_nested_columns/test.py @@ -6,8 +6,8 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) +node2 = cluster.add_instance("node2", with_zookeeper=True) @pytest.fixture(scope="module") @@ -19,24 +19,29 @@ def start_cluster(): finally: cluster.shutdown() + def get_compression_codec_byte(node, table_name, part_name, filename): cmd = "tail -c +17 /var/lib/clickhouse/data/default/{}/{}/{}.bin | od -x -N 1 | head -n 1 | awk '{{print $2}}'".format( - table_name, part_name, filename) + table_name, part_name, filename + ) return node.exec_in_container(["bash", "-c", cmd]).strip() + CODECS_MAPPING = { - 'NONE' : '0002', - 'LZ4': '0082', - 'LZ4HC': '0082', # not an error, same byte - 'ZSTD': '0090', - 'Multiple': '0091', - 'Delta': '0092', - 'T64': '0093', + "NONE": "0002", + "LZ4": "0082", + "LZ4HC": "0082", # not an error, same byte + "ZSTD": "0090", + "Multiple": "0091", + "Delta": "0092", + "T64": "0093", } + def test_nested_compression_codec(start_cluster): for i, node in enumerate([node1, node2]): - node.query(""" + node.query( + """ CREATE TABLE compression_table ( key UInt64, column_ok Nullable(UInt64) CODEC(Delta, LZ4), @@ -44,7 +49,14 @@ def test_nested_compression_codec(start_cluster): column_bad LowCardinality(Int64) CODEC(Delta) ) ENGINE = ReplicatedMergeTree('/t', '{}') ORDER BY tuple() PARTITION BY key SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0; - """.format(i), settings={"allow_suspicious_codecs" : "1", "allow_suspicious_low_cardinality_types" : "1"}) + """.format( + i + ), + settings={ + "allow_suspicious_codecs": "1", + "allow_suspicious_low_cardinality_types": "1", + }, + ) node1.query("INSERT INTO compression_table VALUES (1, 1, [[77]], 32)") @@ -57,12 +69,47 @@ def test_nested_compression_codec(start_cluster): node2.query("ATTACH TABLE compression_table") for node in [node1, node2]: - assert get_compression_codec_byte(node, "compression_table", "1_0_0_0", "column_ok") == CODECS_MAPPING['Multiple'] - assert get_compression_codec_byte(node, "compression_table", "1_0_0_0", "column_ok.null") == CODECS_MAPPING['LZ4'] + assert ( + get_compression_codec_byte( + node, "compression_table", "1_0_0_0", "column_ok" + ) + == CODECS_MAPPING["Multiple"] + ) + assert ( + get_compression_codec_byte( + node, "compression_table", "1_0_0_0", "column_ok.null" + ) + == CODECS_MAPPING["LZ4"] + ) - assert get_compression_codec_byte(node1, "compression_table", "1_0_0_0", "column_array") == CODECS_MAPPING['Multiple'] - assert get_compression_codec_byte(node2, "compression_table", "1_0_0_0", "column_array.size0") == CODECS_MAPPING['LZ4'] - assert get_compression_codec_byte(node2, "compression_table", 
"1_0_0_0", "column_array.size1") == CODECS_MAPPING['LZ4'] + assert ( + get_compression_codec_byte( + node1, "compression_table", "1_0_0_0", "column_array" + ) + == CODECS_MAPPING["Multiple"] + ) + assert ( + get_compression_codec_byte( + node2, "compression_table", "1_0_0_0", "column_array.size0" + ) + == CODECS_MAPPING["LZ4"] + ) + assert ( + get_compression_codec_byte( + node2, "compression_table", "1_0_0_0", "column_array.size1" + ) + == CODECS_MAPPING["LZ4"] + ) - assert get_compression_codec_byte(node2, "compression_table", "1_0_0_0", "column_bad.dict") == CODECS_MAPPING['Delta'] - assert get_compression_codec_byte(node1, "compression_table", "1_0_0_0", "column_bad") == CODECS_MAPPING['NONE'] + assert ( + get_compression_codec_byte( + node2, "compression_table", "1_0_0_0", "column_bad.dict" + ) + == CODECS_MAPPING["Delta"] + ) + assert ( + get_compression_codec_byte( + node1, "compression_table", "1_0_0_0", "column_bad" + ) + == CODECS_MAPPING["NONE"] + ) diff --git a/tests/integration/test_concurrent_queries_for_all_users_restriction/test.py b/tests/integration/test_concurrent_queries_for_all_users_restriction/test.py index ac6e87cdee5..166724a7f8c 100644 --- a/tests/integration/test_concurrent_queries_for_all_users_restriction/test.py +++ b/tests/integration/test_concurrent_queries_for_all_users_restriction/test.py @@ -6,14 +6,16 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', user_configs=['configs/user_restrictions.xml']) +node1 = cluster.add_instance("node1", user_configs=["configs/user_restrictions.xml"]) @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - node1.query("create table nums (number UInt64) ENGINE = MergeTree() order by tuple()") + node1.query( + "create table nums (number UInt64) ENGINE = MergeTree() order by tuple()" + ) node1.query("insert into nums values (0), (1)") yield cluster finally: @@ -25,7 +27,7 @@ def test_exception_message(started_cluster): def node_busy(_): for i in range(10): - node1.query("select sleep(2)", user='someuser', ignore_error=True) + node1.query("select sleep(2)", user="someuser", ignore_error=True) busy_pool = Pool(3) busy_pool.map_async(node_busy, range(3)) @@ -33,9 +35,21 @@ def test_exception_message(started_cluster): with pytest.raises(Exception) as exc_info: for i in range(3): - assert node1.query("select number from remote('node1', 'default', 'nums')", user='default') == "0\n1\n" + assert ( + node1.query( + "select number from remote('node1', 'default', 'nums')", + user="default", + ) + == "0\n1\n" + ) exc_info.match("Too many simultaneous queries for all users") for i in range(3): - assert node1.query("select number from remote('node1', 'default', 'nums')", user='default', - settings={'max_concurrent_queries_for_all_users': 0}) == "0\n1\n" + assert ( + node1.query( + "select number from remote('node1', 'default', 'nums')", + user="default", + settings={"max_concurrent_queries_for_all_users": 0}, + ) + == "0\n1\n" + ) diff --git a/tests/integration/test_concurrent_queries_for_user_restriction/test.py b/tests/integration/test_concurrent_queries_for_user_restriction/test.py index 279e0dfe439..c4afdf99685 100644 --- a/tests/integration/test_concurrent_queries_for_user_restriction/test.py +++ b/tests/integration/test_concurrent_queries_for_user_restriction/test.py @@ -6,15 +6,17 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', 
user_configs=['configs/user_restrictions.xml']) -node2 = cluster.add_instance('node2', user_configs=['configs/user_restrictions.xml']) +node1 = cluster.add_instance("node1", user_configs=["configs/user_restrictions.xml"]) +node2 = cluster.add_instance("node2", user_configs=["configs/user_restrictions.xml"]) @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - node1.query("create table nums (number UInt64) ENGINE = MergeTree() order by tuple()") + node1.query( + "create table nums (number UInt64) ENGINE = MergeTree() order by tuple()" + ) node1.query("insert into nums values(0),(1)") yield cluster @@ -27,13 +29,20 @@ def test_exception_message(started_cluster): def node_busy(_): for i in range(10): - node1.query("select sleep(2)", user='default') + node1.query("select sleep(2)", user="default") busy_pool = Pool(3) busy_pool.map_async(node_busy, range(3)) time.sleep(1) # wait a little until polling starts try: - assert node2.query("select number from remote('node1', 'default', 'nums')", user='good') == "0\n1\n" + assert ( + node2.query( + "select number from remote('node1', 'default', 'nums')", user="good" + ) + == "0\n1\n" + ) except Exception as ex: print(ex.message) - assert False, "Exception thrown while max_concurrent_queries_for_user is not exceeded" + assert ( + False + ), "Exception thrown while max_concurrent_queries_for_user is not exceeded" diff --git a/tests/integration/test_concurrent_queries_restriction_by_query_kind/test.py b/tests/integration/test_concurrent_queries_restriction_by_query_kind/test.py index 2d16d9157f6..6bda1df147c 100644 --- a/tests/integration/test_concurrent_queries_restriction_by_query_kind/test.py +++ b/tests/integration/test_concurrent_queries_restriction_by_query_kind/test.py @@ -6,16 +6,24 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node_insert = cluster.add_instance('node_insert', main_configs=['configs/concurrent_insert_restriction.xml']) -node_select = cluster.add_instance('node_select', main_configs=['configs/concurrent_select_restriction.xml']) +node_insert = cluster.add_instance( + "node_insert", main_configs=["configs/concurrent_insert_restriction.xml"] +) +node_select = cluster.add_instance( + "node_select", main_configs=["configs/concurrent_select_restriction.xml"] +) @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - node_select.query("create table test_concurrent_insert (x UInt64) ENGINE = MergeTree() order by tuple()") - node_insert.query("create table test_concurrent_insert (x UInt64) ENGINE = MergeTree() order by tuple()") + node_select.query( + "create table test_concurrent_insert (x UInt64) ENGINE = MergeTree() order by tuple()" + ) + node_insert.query( + "create table test_concurrent_insert (x UInt64) ENGINE = MergeTree() order by tuple()" + ) yield cluster finally: cluster.shutdown() @@ -24,54 +32,58 @@ def started_cluster(): def execute_with_background(node, sql, background_sql, background_times, wait_times=3): r = None for _ in range(wait_times): - r = node.query('show processlist', stdin='') + r = node.query("show processlist", stdin="") if not r.strip(): break time.sleep(1) else: assert False, "there are unknown background queries: {}".format(r) for _ in range(background_times): - node.get_query_request(background_sql, stdin='') - time.sleep(0.5) # wait background to start. - return node.query(sql, stdin='') + node.get_query_request(background_sql, stdin="") + time.sleep(0.5) # wait background to start. 
+ return node.query(sql, stdin="") def common_pattern(node, query_kind, restricted_sql, normal_sql, limit, wait_times): # restriction is working - with pytest.raises(Exception, match=r".*Too many simultaneous {} queries.*".format(query_kind)): + with pytest.raises( + Exception, match=r".*Too many simultaneous {} queries.*".format(query_kind) + ): execute_with_background(node, restricted_sql, restricted_sql, limit, wait_times) # different query kind is independent execute_with_background(node, normal_sql, restricted_sql, limit, wait_times) # normal - execute_with_background(node, restricted_sql, '', 0, wait_times) + execute_with_background(node, restricted_sql, "", 0, wait_times) def test_select(started_cluster): common_pattern( - node_select, 'select', - 'select sleep(3)', - 'insert into test_concurrent_insert values (0)', + node_select, + "select", + "select sleep(3)", + "insert into test_concurrent_insert values (0)", 2, - 10 + 10, ) # subquery is not counted execute_with_background( node_select, - 'select sleep(3)', - 'insert into test_concurrent_insert select sleep(3)', + "select sleep(3)", + "insert into test_concurrent_insert select sleep(3)", 2, - 10 + 10, ) def test_insert(started_cluster): common_pattern( - node_insert, 'insert', - 'insert into test_concurrent_insert select sleep(3)', - 'select 1', + node_insert, + "insert", + "insert into test_concurrent_insert select sleep(3)", + "select 1", 2, - 10 + 10, ) diff --git a/tests/integration/test_concurrent_ttl_merges/test.py b/tests/integration/test_concurrent_ttl_merges/test.py index 8c3c490d055..07e91dcbc9f 100644 --- a/tests/integration/test_concurrent_ttl_merges/test.py +++ b/tests/integration/test_concurrent_ttl_merges/test.py @@ -6,8 +6,12 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry, TSV cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/fast_background_pool.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/fast_background_pool.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/fast_background_pool.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/fast_background_pool.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -22,30 +26,40 @@ def started_cluster(): def count_ttl_merges_in_queue(node, table): result = node.query( - f"SELECT count() FROM system.replication_queue WHERE merge_type = 'TTL_DELETE' and table = '{table}'") + f"SELECT count() FROM system.replication_queue WHERE merge_type = 'TTL_DELETE' and table = '{table}'" + ) if not result: return 0 return int(result.strip()) def count_ttl_merges_in_background_pool(node, table, level): - result = TSV(node.query( - f"SELECT * FROM system.merges WHERE merge_type = 'TTL_DELETE' and table = '{table}'")) + result = TSV( + node.query( + f"SELECT * FROM system.merges WHERE merge_type = 'TTL_DELETE' and table = '{table}'" + ) + ) count = len(result) if count >= level: - logging.debug(f"count_ttl_merges_in_background_pool: merges more than warn level:\n{result}") + logging.debug( + f"count_ttl_merges_in_background_pool: merges more than warn level:\n{result}" + ) return count def count_regular_merges_in_background_pool(node, table): - result = node.query(f"SELECT count() FROM system.merges WHERE merge_type = 'REGULAR' and table = '{table}'") + result = node.query( + f"SELECT count() FROM system.merges WHERE merge_type = 'REGULAR' and 
table = '{table}'" + ) if not result: return 0 return int(result.strip()) def count_running_mutations(node, table): - result = node.query(f"SELECT count() FROM system.merges WHERE table = '{table}' and is_mutation=1") + result = node.query( + f"SELECT count() FROM system.merges WHERE table = '{table}' and is_mutation=1" + ) if not result: return 0 return int(result.strip()) @@ -56,13 +70,15 @@ def count_running_mutations(node, table): # on the borders of partitions. def test_no_ttl_merges_in_busy_pool(started_cluster): node1.query( - "CREATE TABLE test_ttl (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0, number_of_free_entries_in_pool_to_execute_mutation = 0") + "CREATE TABLE test_ttl (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0, number_of_free_entries_in_pool_to_execute_mutation = 0" + ) node1.query("SYSTEM STOP TTL MERGES") for i in range(1, 7): node1.query( - f"INSERT INTO test_ttl SELECT now() - INTERVAL 1 MONTH + number - 1, {i}, number FROM numbers(5)") + f"INSERT INTO test_ttl SELECT now() - INTERVAL 1 MONTH + number - 1, {i}, number FROM numbers(5)" + ) node1.query("ALTER TABLE test_ttl UPDATE data = data + 1 WHERE sleepEachRow(1) = 0") @@ -75,7 +91,9 @@ def test_no_ttl_merges_in_busy_pool(started_cluster): rows_count = [] while count_running_mutations(node1, "test_ttl") == 6: - logging.debug(f"Mutations count after start TTL{count_running_mutations(node1, 'test_ttl')}") + logging.debug( + f"Mutations count after start TTL{count_running_mutations(node1, 'test_ttl')}" + ) rows_count.append(int(node1.query("SELECT count() FROM test_ttl").strip())) time.sleep(0.5) @@ -89,12 +107,15 @@ def test_no_ttl_merges_in_busy_pool(started_cluster): def test_limited_ttl_merges_in_empty_pool(started_cluster): node1.query( - "CREATE TABLE test_ttl_v2 (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") + "CREATE TABLE test_ttl_v2 (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0" + ) node1.query("SYSTEM STOP TTL MERGES") for i in range(100): - node1.query(f"INSERT INTO test_ttl_v2 SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(1)") + node1.query( + f"INSERT INTO test_ttl_v2 SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(1)" + ) assert node1.query("SELECT COUNT() FROM test_ttl_v2") == "100\n" @@ -102,7 +123,9 @@ def test_limited_ttl_merges_in_empty_pool(started_cluster): merges_with_ttl_count = set({}) while True: - merges_with_ttl_count.add(count_ttl_merges_in_background_pool(node1, "test_ttl_v2", 3)) + merges_with_ttl_count.add( + count_ttl_merges_in_background_pool(node1, "test_ttl_v2", 3) + ) time.sleep(0.01) if node1.query("SELECT COUNT() FROM test_ttl_v2") == "0\n": break @@ -113,12 +136,15 @@ def test_limited_ttl_merges_in_empty_pool(started_cluster): def test_limited_ttl_merges_in_empty_pool_replicated(started_cluster): node1.query( - "CREATE TABLE replicated_ttl (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t', '1') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") + "CREATE TABLE replicated_ttl (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t', '1') 
ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0" + ) node1.query("SYSTEM STOP TTL MERGES") for i in range(100): - node1.query_with_retry(f"INSERT INTO replicated_ttl SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(1)") + node1.query_with_retry( + f"INSERT INTO replicated_ttl SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(1)" + ) assert node1.query("SELECT COUNT() FROM replicated_ttl") == "100\n" @@ -127,7 +153,9 @@ def test_limited_ttl_merges_in_empty_pool_replicated(started_cluster): merges_with_ttl_count = set({}) entries_with_ttl_count = set({}) while True: - merges_with_ttl_count.add(count_ttl_merges_in_background_pool(node1, "replicated_ttl", 3)) + merges_with_ttl_count.add( + count_ttl_merges_in_background_pool(node1, "replicated_ttl", 3) + ) entries_with_ttl_count.add(count_ttl_merges_in_queue(node1, "replicated_ttl")) time.sleep(0.01) if node1.query("SELECT COUNT() FROM replicated_ttl") == "0\n": @@ -142,16 +170,19 @@ def test_limited_ttl_merges_in_empty_pool_replicated(started_cluster): def test_limited_ttl_merges_two_replicas(started_cluster): # Actually this test quite fast and often we cannot catch any merges. node1.query( - "CREATE TABLE replicated_ttl_2 (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t2', '1') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") + "CREATE TABLE replicated_ttl_2 (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t2', '1') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0" + ) node2.query( - "CREATE TABLE replicated_ttl_2 (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t2', '2') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") + "CREATE TABLE replicated_ttl_2 (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t2', '2') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0" + ) node1.query("SYSTEM STOP TTL MERGES") node2.query("SYSTEM STOP TTL MERGES") for i in range(100): node1.query_with_retry( - f"INSERT INTO replicated_ttl_2 SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(10000)") + f"INSERT INTO replicated_ttl_2 SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(10000)" + ) node2.query("SYSTEM SYNC REPLICA replicated_ttl_2", timeout=10) assert node1.query("SELECT COUNT() FROM replicated_ttl_2") == "1000000\n" @@ -163,10 +194,16 @@ def test_limited_ttl_merges_two_replicas(started_cluster): merges_with_ttl_count_node1 = set({}) merges_with_ttl_count_node2 = set({}) while True: - merges_with_ttl_count_node1.add(count_ttl_merges_in_background_pool(node1, "replicated_ttl_2", 3)) - merges_with_ttl_count_node2.add(count_ttl_merges_in_background_pool(node2, "replicated_ttl_2", 3)) - if node1.query("SELECT COUNT() FROM replicated_ttl_2") == "0\n" and node2.query( - "SELECT COUNT() FROM replicated_ttl_2") == "0\n": + merges_with_ttl_count_node1.add( + count_ttl_merges_in_background_pool(node1, "replicated_ttl_2", 3) + ) + merges_with_ttl_count_node2.add( + count_ttl_merges_in_background_pool(node2, "replicated_ttl_2", 3) + ) + if ( + node1.query("SELECT COUNT() FROM replicated_ttl_2") == "0\n" + and node2.query("SELECT COUNT() FROM replicated_ttl_2") == "0\n" + ): break # Both replicas can assign merges with TTL. 
If one will perform better than diff --git a/tests/integration/test_config_corresponding_root/test.py b/tests/integration/test_config_corresponding_root/test.py index da6af7d11ef..f4ec1f1e658 100644 --- a/tests/integration/test_config_corresponding_root/test.py +++ b/tests/integration/test_config_corresponding_root/test.py @@ -6,7 +6,7 @@ from helpers.cluster import ClickHouseCluster SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=["configs/config.d/bad.xml"]) +node = cluster.add_instance("node", main_configs=["configs/config.d/bad.xml"]) caught_exception = "" @@ -21,4 +21,9 @@ def start_cluster(): def test_work(start_cluster): print(caught_exception) - assert caught_exception.find("Root element doesn't have the corresponding root element as the config file.") != -1 + assert ( + caught_exception.find( + "Root element doesn't have the corresponding root element as the config file." + ) + != -1 + ) diff --git a/tests/integration/test_config_substitutions/test.py b/tests/integration/test_config_substitutions/test.py index aec3f1d3635..692b36f1fae 100644 --- a/tests/integration/test_config_substitutions/test.py +++ b/tests/integration/test_config_substitutions/test.py @@ -3,25 +3,51 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', user_configs=['configs/config_no_substs.xml']) # hardcoded value 33333 -node2 = cluster.add_instance('node2', user_configs=['configs/config_env.xml'], - env_variables={"MAX_QUERY_SIZE": "55555"}) -node3 = cluster.add_instance('node3', user_configs=['configs/config_zk.xml'], with_zookeeper=True) -node4 = cluster.add_instance('node4', user_configs=['configs/config_incl.xml'], - main_configs=['configs/include_from_source.xml']) # include value 77777 -node5 = cluster.add_instance('node5', user_configs=['configs/config_allow_databases.xml']) -node6 = cluster.add_instance('node6', user_configs=['configs/config_include_from_env.xml'], - env_variables={"INCLUDE_FROM_ENV": "/etc/clickhouse-server/config.d/include_from_source.xml"}, - main_configs=['configs/include_from_source.xml']) +node1 = cluster.add_instance( + "node1", user_configs=["configs/config_no_substs.xml"] +) # hardcoded value 33333 +node2 = cluster.add_instance( + "node2", + user_configs=["configs/config_env.xml"], + env_variables={"MAX_QUERY_SIZE": "55555"}, +) +node3 = cluster.add_instance( + "node3", user_configs=["configs/config_zk.xml"], with_zookeeper=True +) +node4 = cluster.add_instance( + "node4", + user_configs=["configs/config_incl.xml"], + main_configs=["configs/include_from_source.xml"], +) # include value 77777 +node5 = cluster.add_instance( + "node5", user_configs=["configs/config_allow_databases.xml"] +) +node6 = cluster.add_instance( + "node6", + user_configs=["configs/config_include_from_env.xml"], + env_variables={ + "INCLUDE_FROM_ENV": "/etc/clickhouse-server/config.d/include_from_source.xml" + }, + main_configs=["configs/include_from_source.xml"], +) @pytest.fixture(scope="module") def start_cluster(): try: + def create_zk_roots(zk): zk.create(path="/setting/max_query_size", value=b"77777", makepath=True) - zk.create(path="/users_from_zk_1", value=b"default", makepath=True) - zk.create(path="/users_from_zk_2", value=b"default", makepath=True) + zk.create( + path="/users_from_zk_1", + value=b"default", + makepath=True, + ) + zk.create( + path="/users_from_zk_2", + value=b"default", + makepath=True, + ) 
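# Editorial aside (illustration, not part of the patch): create_zk_roots above
# pre-seeds ZooKeeper with the znodes that the substitution configs read back,
# e.g. config_zk.xml resolves max_query_size from /setting/max_query_size.
# A standalone sketch of that seeding step with kazoo is shown here; the
# ZooKeeper address and function name are assumptions for illustration only.
from kazoo.client import KazooClient

def seed_substitution_znodes(hosts="127.0.0.1:2181"):
    zk = KazooClient(hosts=hosts)
    zk.start()
    try:
        # makepath=True creates missing parent nodes, mirroring the fixture above.
        if not zk.exists("/setting/max_query_size"):
            zk.create("/setting/max_query_size", b"77777", makepath=True)
        if not zk.exists("/users_from_zk_1"):
            zk.create("/users_from_zk_1", b"default", makepath=True)
    finally:
        zk.stop()
# The fixture continues below by registering create_zk_roots as a ZooKeeper
# startup command, so the znodes exist before the instances boot.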
cluster.add_zookeeper_startup_command(create_zk_roots) @@ -32,11 +58,26 @@ def start_cluster(): def test_config(start_cluster): - assert node1.query("select value from system.settings where name = 'max_query_size'") == "33333\n" - assert node2.query("select value from system.settings where name = 'max_query_size'") == "55555\n" - assert node3.query("select value from system.settings where name = 'max_query_size'") == "77777\n" - assert node4.query("select value from system.settings where name = 'max_query_size'") == "99999\n" - assert node6.query("select value from system.settings where name = 'max_query_size'") == "99999\n" + assert ( + node1.query("select value from system.settings where name = 'max_query_size'") + == "33333\n" + ) + assert ( + node2.query("select value from system.settings where name = 'max_query_size'") + == "55555\n" + ) + assert ( + node3.query("select value from system.settings where name = 'max_query_size'") + == "77777\n" + ) + assert ( + node4.query("select value from system.settings where name = 'max_query_size'") + == "99999\n" + ) + assert ( + node6.query("select value from system.settings where name = 'max_query_size'") + == "99999\n" + ) def test_include_config(start_cluster): @@ -54,24 +95,68 @@ def test_include_config(start_cluster): def test_allow_databases(start_cluster): node5.query("CREATE DATABASE db1") node5.query( - "CREATE TABLE db1.test_table(date Date, k1 String, v1 Int32) ENGINE = MergeTree(date, (k1, date), 8192)") + "CREATE TABLE db1.test_table(date Date, k1 String, v1 Int32) ENGINE = MergeTree(date, (k1, date), 8192)" + ) node5.query("INSERT INTO db1.test_table VALUES('2000-01-01', 'test_key', 1)") - assert node5.query("SELECT name FROM system.databases WHERE name = 'db1'") == "db1\n" - assert node5.query( - "SELECT name FROM system.tables WHERE database = 'db1' AND name = 'test_table' ") == "test_table\n" - assert node5.query( - "SELECT name FROM system.columns WHERE database = 'db1' AND table = 'test_table'") == "date\nk1\nv1\n" - assert node5.query( - "SELECT name FROM system.parts WHERE database = 'db1' AND table = 'test_table'") == "20000101_20000101_1_1_0\n" - assert node5.query( - "SELECT name FROM system.parts_columns WHERE database = 'db1' AND table = 'test_table'") == "20000101_20000101_1_1_0\n20000101_20000101_1_1_0\n20000101_20000101_1_1_0\n" + assert ( + node5.query("SELECT name FROM system.databases WHERE name = 'db1'") == "db1\n" + ) + assert ( + node5.query( + "SELECT name FROM system.tables WHERE database = 'db1' AND name = 'test_table' " + ) + == "test_table\n" + ) + assert ( + node5.query( + "SELECT name FROM system.columns WHERE database = 'db1' AND table = 'test_table'" + ) + == "date\nk1\nv1\n" + ) + assert ( + node5.query( + "SELECT name FROM system.parts WHERE database = 'db1' AND table = 'test_table'" + ) + == "20000101_20000101_1_1_0\n" + ) + assert ( + node5.query( + "SELECT name FROM system.parts_columns WHERE database = 'db1' AND table = 'test_table'" + ) + == "20000101_20000101_1_1_0\n20000101_20000101_1_1_0\n20000101_20000101_1_1_0\n" + ) - assert node5.query("SELECT name FROM system.databases WHERE name = 'db1'", user="test_allow").strip() == "" - assert node5.query("SELECT name FROM system.tables WHERE database = 'db1' AND name = 'test_table'", - user="test_allow").strip() == "" - assert node5.query("SELECT name FROM system.columns WHERE database = 'db1' AND table = 'test_table'", - user="test_allow").strip() == "" - assert node5.query("SELECT name FROM system.parts WHERE database = 'db1' AND table = 
'test_table'", - user="test_allow").strip() == "" - assert node5.query("SELECT name FROM system.parts_columns WHERE database = 'db1' AND table = 'test_table'", - user="test_allow").strip() == "" + assert ( + node5.query( + "SELECT name FROM system.databases WHERE name = 'db1'", user="test_allow" + ).strip() + == "" + ) + assert ( + node5.query( + "SELECT name FROM system.tables WHERE database = 'db1' AND name = 'test_table'", + user="test_allow", + ).strip() + == "" + ) + assert ( + node5.query( + "SELECT name FROM system.columns WHERE database = 'db1' AND table = 'test_table'", + user="test_allow", + ).strip() + == "" + ) + assert ( + node5.query( + "SELECT name FROM system.parts WHERE database = 'db1' AND table = 'test_table'", + user="test_allow", + ).strip() + == "" + ) + assert ( + node5.query( + "SELECT name FROM system.parts_columns WHERE database = 'db1' AND table = 'test_table'", + user="test_allow", + ).strip() + == "" + ) diff --git a/tests/integration/test_config_xml_full/test.py b/tests/integration/test_config_xml_full/test.py index a8650a0dc55..ada3dc3f027 100644 --- a/tests/integration/test_config_xml_full/test.py +++ b/tests/integration/test_config_xml_full/test.py @@ -10,31 +10,53 @@ from helpers.cluster import ClickHouseCluster def test_xml_full_conf(): # all configs are in XML - cluster = ClickHouseCluster(__file__, zookeeper_config_path='configs/config.d/zookeeper.xml') + cluster = ClickHouseCluster( + __file__, zookeeper_config_path="configs/config.d/zookeeper.xml" + ) - all_confd = ['configs/config.d/access_control.xml', - 'configs/config.d/keeper_port.xml', - 'configs/config.d/logging_no_rotate.xml', - 'configs/config.d/log_to_console.xml', - 'configs/config.d/macros.xml', - 'configs/config.d/metric_log.xml', - 'configs/config.d/more_clusters.xml', - 'configs/config.d/part_log.xml', - 'configs/config.d/path.xml', - 'configs/config.d/query_masking_rules.xml', - 'configs/config.d/tcp_with_proxy.xml', - 'configs/config.d/text_log.xml', - 'configs/config.d/zookeeper.xml'] + all_confd = [ + "configs/config.d/access_control.xml", + "configs/config.d/keeper_port.xml", + "configs/config.d/logging_no_rotate.xml", + "configs/config.d/log_to_console.xml", + "configs/config.d/macros.xml", + "configs/config.d/metric_log.xml", + "configs/config.d/more_clusters.xml", + "configs/config.d/part_log.xml", + "configs/config.d/path.xml", + "configs/config.d/query_masking_rules.xml", + "configs/config.d/tcp_with_proxy.xml", + "configs/config.d/text_log.xml", + "configs/config.d/zookeeper.xml", + ] - all_userd = ['configs/users.d/allow_introspection_functions.xml', - 'configs/users.d/log_queries.xml'] + all_userd = [ + "configs/users.d/allow_introspection_functions.xml", + "configs/users.d/log_queries.xml", + ] - node = cluster.add_instance('node', base_config_dir='configs', main_configs=all_confd, user_configs=all_userd, with_zookeeper=False) + node = cluster.add_instance( + "node", + base_config_dir="configs", + main_configs=all_confd, + user_configs=all_userd, + with_zookeeper=False, + ) try: cluster.start() - assert(node.query("select value from system.settings where name = 'max_memory_usage'") == "10000000000\n") - assert(node.query("select value from system.settings where name = 'max_block_size'") == "64999\n") + assert ( + node.query( + "select value from system.settings where name = 'max_memory_usage'" + ) + == "10000000000\n" + ) + assert ( + node.query( + "select value from system.settings where name = 'max_block_size'" + ) + == "64999\n" + ) finally: cluster.shutdown() diff 
--git a/tests/integration/test_config_xml_main/test.py b/tests/integration/test_config_xml_main/test.py index 11efb5e283c..e6c2cf2973e 100644 --- a/tests/integration/test_config_xml_main/test.py +++ b/tests/integration/test_config_xml_main/test.py @@ -1,5 +1,3 @@ - - import time import threading from os import path as p, unlink @@ -12,32 +10,55 @@ from helpers.cluster import ClickHouseCluster def test_xml_main_conf(): # main configs are in XML; config.d and users.d are in YAML - cluster = ClickHouseCluster(__file__, zookeeper_config_path='configs/config.d/zookeeper.yaml') + cluster = ClickHouseCluster( + __file__, zookeeper_config_path="configs/config.d/zookeeper.yaml" + ) - all_confd = ['configs/config.d/access_control.yaml', - 'configs/config.d/keeper_port.yaml', - 'configs/config.d/logging_no_rotate.yaml', - 'configs/config.d/log_to_console.yaml', - 'configs/config.d/macros.yaml', - 'configs/config.d/metric_log.yaml', - 'configs/config.d/more_clusters.yaml', - 'configs/config.d/part_log.yaml', - 'configs/config.d/path.yaml', - 'configs/config.d/query_masking_rules.yaml', - 'configs/config.d/tcp_with_proxy.yaml', - 'configs/config.d/test_cluster_with_incorrect_pw.yaml', - 'configs/config.d/text_log.yaml', - 'configs/config.d/zookeeper.yaml'] + all_confd = [ + "configs/config.d/access_control.yaml", + "configs/config.d/keeper_port.yaml", + "configs/config.d/logging_no_rotate.yaml", + "configs/config.d/log_to_console.yaml", + "configs/config.d/macros.yaml", + "configs/config.d/metric_log.yaml", + "configs/config.d/more_clusters.yaml", + "configs/config.d/part_log.yaml", + "configs/config.d/path.yaml", + "configs/config.d/query_masking_rules.yaml", + "configs/config.d/tcp_with_proxy.yaml", + "configs/config.d/test_cluster_with_incorrect_pw.yaml", + "configs/config.d/text_log.yaml", + "configs/config.d/zookeeper.yaml", + ] - all_userd = ['configs/users.d/allow_introspection_functions.yaml', - 'configs/users.d/log_queries.yaml'] + all_userd = [ + "configs/users.d/allow_introspection_functions.yaml", + "configs/users.d/log_queries.yaml", + ] - node = cluster.add_instance('node', base_config_dir='configs', main_configs=all_confd, user_configs=all_userd, with_zookeeper=False, config_root_name='clickhouse') + node = cluster.add_instance( + "node", + base_config_dir="configs", + main_configs=all_confd, + user_configs=all_userd, + with_zookeeper=False, + config_root_name="clickhouse", + ) try: cluster.start() - assert(node.query("select value from system.settings where name = 'max_memory_usage'") == "10000000000\n") - assert(node.query("select value from system.settings where name = 'max_block_size'") == "64999\n") + assert ( + node.query( + "select value from system.settings where name = 'max_memory_usage'" + ) + == "10000000000\n" + ) + assert ( + node.query( + "select value from system.settings where name = 'max_block_size'" + ) + == "64999\n" + ) finally: cluster.shutdown() diff --git a/tests/integration/test_config_xml_yaml_mix/test.py b/tests/integration/test_config_xml_yaml_mix/test.py index 86cd68b3378..4138441b881 100644 --- a/tests/integration/test_config_xml_yaml_mix/test.py +++ b/tests/integration/test_config_xml_yaml_mix/test.py @@ -10,34 +10,58 @@ from helpers.cluster import ClickHouseCluster def test_extra_yaml_mix(): # some configs are written in XML, others are written in YAML - cluster = ClickHouseCluster(__file__, zookeeper_config_path='configs/config.d/zookeeper.xml') + cluster = ClickHouseCluster( + __file__, zookeeper_config_path="configs/config.d/zookeeper.xml" + ) - 
all_confd = ['configs/config.d/0_common_instance_config.yaml', - 'configs/config.d/access_control.yaml', - 'configs/config.d/keeper_port.xml', - 'configs/config.d/logging_no_rotate.xml', - 'configs/config.d/log_to_console.yaml', - 'configs/config.d/macros.yaml', - 'configs/config.d/metric_log.xml', - 'configs/config.d/more_clusters.yaml', - 'configs/config.d/part_log.xml', - 'configs/config.d/path.yaml', - 'configs/config.d/query_masking_rules.xml', - 'configs/config.d/tcp_with_proxy.yaml', - 'configs/config.d/test_cluster_with_incorrect_pw.xml', - 'configs/config.d/text_log.yaml', - 'configs/config.d/zookeeper.xml'] + all_confd = [ + "configs/config.d/0_common_instance_config.yaml", + "configs/config.d/access_control.yaml", + "configs/config.d/keeper_port.xml", + "configs/config.d/logging_no_rotate.xml", + "configs/config.d/log_to_console.yaml", + "configs/config.d/macros.yaml", + "configs/config.d/metric_log.xml", + "configs/config.d/more_clusters.yaml", + "configs/config.d/part_log.xml", + "configs/config.d/path.yaml", + "configs/config.d/query_masking_rules.xml", + "configs/config.d/tcp_with_proxy.yaml", + "configs/config.d/test_cluster_with_incorrect_pw.xml", + "configs/config.d/text_log.yaml", + "configs/config.d/zookeeper.xml", + ] - all_userd = ['configs/users.d/allow_introspection_functions.xml', - 'configs/users.d/log_queries.yaml'] + all_userd = [ + "configs/users.d/allow_introspection_functions.xml", + "configs/users.d/log_queries.yaml", + ] - node = cluster.add_instance('node', base_config_dir='configs', main_configs=all_confd, user_configs=all_userd, with_zookeeper=False, - users_config_name="users.yaml", copy_common_configs=False, config_root_name="clickhouse") + node = cluster.add_instance( + "node", + base_config_dir="configs", + main_configs=all_confd, + user_configs=all_userd, + with_zookeeper=False, + users_config_name="users.yaml", + copy_common_configs=False, + config_root_name="clickhouse", + ) try: cluster.start() - assert(node.query("select value from system.settings where name = 'max_memory_usage'") == "10000000000\n") - assert(node.query("select value from system.settings where name = 'max_block_size'") == "64999\n") + assert ( + node.query( + "select value from system.settings where name = 'max_memory_usage'" + ) + == "10000000000\n" + ) + assert ( + node.query( + "select value from system.settings where name = 'max_block_size'" + ) + == "64999\n" + ) finally: cluster.shutdown() diff --git a/tests/integration/test_config_yaml_full/test.py b/tests/integration/test_config_yaml_full/test.py index e8bf21754e0..ea0fd8c130c 100644 --- a/tests/integration/test_config_yaml_full/test.py +++ b/tests/integration/test_config_yaml_full/test.py @@ -7,36 +7,62 @@ import helpers import pytest from helpers.cluster import ClickHouseCluster + def test_yaml_full_conf(): # all configs are in YAML - cluster = ClickHouseCluster(__file__, zookeeper_config_path='configs/config.d/zookeeper.yaml') + cluster = ClickHouseCluster( + __file__, zookeeper_config_path="configs/config.d/zookeeper.yaml" + ) - all_confd = ['configs/config.d/0_common_instance_config.yaml', - 'configs/config.d/access_control.yaml', - 'configs/config.d/keeper_port.yaml', - 'configs/config.d/logging_no_rotate.yaml', - 'configs/config.d/log_to_console.yaml', - 'configs/config.d/macros.yaml', - 'configs/config.d/metric_log.yaml', - 'configs/config.d/more_clusters.yaml', - 'configs/config.d/part_log.yaml', - 'configs/config.d/path.yaml', - 'configs/config.d/query_masking_rules.yaml', - 
'configs/config.d/tcp_with_proxy.yaml', - 'configs/config.d/test_cluster_with_incorrect_pw.yaml', - 'configs/config.d/text_log.yaml', - 'configs/config.d/zookeeper.yaml'] + all_confd = [ + "configs/config.d/0_common_instance_config.yaml", + "configs/config.d/access_control.yaml", + "configs/config.d/keeper_port.yaml", + "configs/config.d/logging_no_rotate.yaml", + "configs/config.d/log_to_console.yaml", + "configs/config.d/macros.yaml", + "configs/config.d/metric_log.yaml", + "configs/config.d/more_clusters.yaml", + "configs/config.d/part_log.yaml", + "configs/config.d/path.yaml", + "configs/config.d/query_masking_rules.yaml", + "configs/config.d/tcp_with_proxy.yaml", + "configs/config.d/test_cluster_with_incorrect_pw.yaml", + "configs/config.d/text_log.yaml", + "configs/config.d/zookeeper.yaml", + ] - all_userd = ['configs/users.d/allow_introspection_functions.yaml', - 'configs/users.d/log_queries.yaml'] + all_userd = [ + "configs/users.d/allow_introspection_functions.yaml", + "configs/users.d/log_queries.yaml", + ] - node = cluster.add_instance('node', base_config_dir='configs', main_configs=all_confd, user_configs=all_userd, - with_zookeeper=False, main_config_name="config.yaml", users_config_name="users.yaml", copy_common_configs=False, config_root_name="clickhouse") + node = cluster.add_instance( + "node", + base_config_dir="configs", + main_configs=all_confd, + user_configs=all_userd, + with_zookeeper=False, + main_config_name="config.yaml", + users_config_name="users.yaml", + copy_common_configs=False, + config_root_name="clickhouse", + ) try: cluster.start() - assert(node.query("select value from system.settings where name = 'max_memory_usage'") == "10000000000\n") - assert(node.query("select value from system.settings where name = 'max_block_size'") == "64999\n") + assert ( + node.query( + "select value from system.settings where name = 'max_memory_usage'" + ) + == "10000000000\n" + ) + assert ( + node.query( + "select value from system.settings where name = 'max_block_size'" + ) + == "64999\n" + ) finally: cluster.shutdown() diff --git a/tests/integration/test_config_yaml_main/test.py b/tests/integration/test_config_yaml_main/test.py index bb4c8eb8f9f..468a63359e3 100644 --- a/tests/integration/test_config_yaml_main/test.py +++ b/tests/integration/test_config_yaml_main/test.py @@ -10,35 +10,59 @@ from helpers.cluster import ClickHouseCluster def test_yaml_main_conf(): # main configs are in YAML; config.d and users.d are in XML - cluster = ClickHouseCluster(__file__, zookeeper_config_path='configs/config.d/zookeeper.xml') + cluster = ClickHouseCluster( + __file__, zookeeper_config_path="configs/config.d/zookeeper.xml" + ) - all_confd = ['configs/config.d/0_common_instance_config.yaml', - 'configs/config.d/access_control.xml', - 'configs/config.d/keeper_port.xml', - 'configs/config.d/logging_no_rotate.xml', - 'configs/config.d/log_to_console.xml', - 'configs/config.d/macros.xml', - 'configs/config.d/metric_log.xml', - 'configs/config.d/more_clusters.xml', - 'configs/config.d/part_log.xml', - 'configs/config.d/path.xml', - 'configs/config.d/query_masking_rules.xml', - 'configs/config.d/tcp_with_proxy.xml', - 'configs/config.d/test_cluster_with_incorrect_pw.xml', - 'configs/config.d/text_log.xml', - 'configs/config.d/zookeeper.xml'] + all_confd = [ + "configs/config.d/0_common_instance_config.yaml", + "configs/config.d/access_control.xml", + "configs/config.d/keeper_port.xml", + "configs/config.d/logging_no_rotate.xml", + "configs/config.d/log_to_console.xml", + 
"configs/config.d/macros.xml", + "configs/config.d/metric_log.xml", + "configs/config.d/more_clusters.xml", + "configs/config.d/part_log.xml", + "configs/config.d/path.xml", + "configs/config.d/query_masking_rules.xml", + "configs/config.d/tcp_with_proxy.xml", + "configs/config.d/test_cluster_with_incorrect_pw.xml", + "configs/config.d/text_log.xml", + "configs/config.d/zookeeper.xml", + ] - all_userd = ['configs/users.d/allow_introspection_functions.xml', - 'configs/users.d/log_queries.xml'] + all_userd = [ + "configs/users.d/allow_introspection_functions.xml", + "configs/users.d/log_queries.xml", + ] - node = cluster.add_instance('node', base_config_dir='configs', main_configs=all_confd, user_configs=all_userd, - with_zookeeper=False, main_config_name="config.yaml", users_config_name="users.yaml", - copy_common_configs=False, config_root_name="clickhouse") + node = cluster.add_instance( + "node", + base_config_dir="configs", + main_configs=all_confd, + user_configs=all_userd, + with_zookeeper=False, + main_config_name="config.yaml", + users_config_name="users.yaml", + copy_common_configs=False, + config_root_name="clickhouse", + ) try: cluster.start() - assert(node.query("select value from system.settings where name = 'max_memory_usage'") == "10000000000\n") - assert(node.query("select value from system.settings where name = 'max_block_size'") == "64999\n") + assert ( + node.query( + "select value from system.settings where name = 'max_memory_usage'" + ) + == "10000000000\n" + ) + assert ( + node.query( + "select value from system.settings where name = 'max_block_size'" + ) + == "64999\n" + ) finally: cluster.shutdown() diff --git a/tests/integration/test_consistant_parts_after_move_partition/test.py b/tests/integration/test_consistant_parts_after_move_partition/test.py index 2070c8cb3f8..63a51472773 100644 --- a/tests/integration/test_consistant_parts_after_move_partition/test.py +++ b/tests/integration/test_consistant_parts_after_move_partition/test.py @@ -3,12 +3,13 @@ import pytest from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry -CLICKHOUSE_DATABASE = 'test' +CLICKHOUSE_DATABASE = "test" def initialize_database(nodes, shard): for node in nodes: - node.query(''' + node.query( + """ CREATE DATABASE {database}; CREATE TABLE `{database}`.src (p UInt64, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard1{shard}/replicated', '{replica}') @@ -18,12 +19,19 @@ def initialize_database(nodes, shard): ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard2{shard}/replicated', '{replica}') ORDER BY d PARTITION BY p SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; - '''.format(shard=shard, replica=node.name, database=CLICKHOUSE_DATABASE)) + """.format( + shard=shard, replica=node.name, database=CLICKHOUSE_DATABASE + ) + ) cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -41,15 +49,25 @@ def start_cluster(): def 
test_consistent_part_after_move_partition(start_cluster): # insert into all replicas for i in range(100): - node1.query('INSERT INTO `{database}`.src VALUES ({value} % 2, {value})'.format(database=CLICKHOUSE_DATABASE, - value=i)) - query_source = 'SELECT COUNT(*) FROM `{database}`.src'.format(database=CLICKHOUSE_DATABASE) - query_dest = 'SELECT COUNT(*) FROM `{database}`.dest'.format(database=CLICKHOUSE_DATABASE) + node1.query( + "INSERT INTO `{database}`.src VALUES ({value} % 2, {value})".format( + database=CLICKHOUSE_DATABASE, value=i + ) + ) + query_source = "SELECT COUNT(*) FROM `{database}`.src".format( + database=CLICKHOUSE_DATABASE + ) + query_dest = "SELECT COUNT(*) FROM `{database}`.dest".format( + database=CLICKHOUSE_DATABASE + ) assert_eq_with_retry(node2, query_source, node1.query(query_source)) assert_eq_with_retry(node2, query_dest, node1.query(query_dest)) node1.query( - 'ALTER TABLE `{database}`.src MOVE PARTITION 1 TO TABLE `{database}`.dest'.format(database=CLICKHOUSE_DATABASE)) + "ALTER TABLE `{database}`.src MOVE PARTITION 1 TO TABLE `{database}`.dest".format( + database=CLICKHOUSE_DATABASE + ) + ) assert_eq_with_retry(node2, query_source, node1.query(query_source)) assert_eq_with_retry(node2, query_dest, node1.query(query_dest)) diff --git a/tests/integration/test_consistent_parts_after_clone_replica/test.py b/tests/integration/test_consistent_parts_after_clone_replica/test.py index b0b69da0902..d3f8b22ef57 100644 --- a/tests/integration/test_consistent_parts_after_clone_replica/test.py +++ b/tests/integration/test_consistent_parts_after_clone_replica/test.py @@ -4,21 +4,29 @@ from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager from helpers.test_tools import assert_eq_with_retry + def fill_nodes(nodes, shard): for node in nodes: node.query( - ''' + """ CREATE DATABASE test; CREATE TABLE test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; - '''.format(shard=shard, replica=node.name)) + """.format( + shard=shard, replica=node.name + ) + ) cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -38,14 +46,20 @@ def test_inconsistent_parts_if_drop_while_replica_not_active(start_cluster): # insert into all replicas for i in range(10): node1.query("INSERT INTO test_table VALUES ('2019-08-16', {})".format(i)) - assert_eq_with_retry(node2, "SELECT count(*) FROM test_table", node1.query("SELECT count(*) FROM test_table")) + assert_eq_with_retry( + node2, + "SELECT count(*) FROM test_table", + node1.query("SELECT count(*) FROM test_table"), + ) # partition the first replica from the second one and (later) from zk pm.partition_instances(node1, node2) # insert some parts on the second replica only, we will drop these parts for i in range(10): - node2.query("INSERT INTO test_table VALUES ('2019-08-16', {})".format(10 + i)) + node2.query( + "INSERT INTO test_table VALUES 
('2019-08-16', {})".format(10 + i) + ) pm.drop_instance_zk_connections(node1) @@ -56,16 +70,26 @@ def test_inconsistent_parts_if_drop_while_replica_not_active(start_cluster): # insert into the second replica # DROP_RANGE will be removed from the replication log and the first replica will be lost for i in range(20): - node2.query("INSERT INTO test_table VALUES ('2019-08-16', {})".format(20 + i)) + node2.query( + "INSERT INTO test_table VALUES ('2019-08-16', {})".format(20 + i) + ) - assert_eq_with_retry(node2, "SELECT value FROM system.zookeeper WHERE path='/clickhouse/tables/test1/replicated/replicas/node1' AND name='is_lost'", "1") + assert_eq_with_retry( + node2, + "SELECT value FROM system.zookeeper WHERE path='/clickhouse/tables/test1/replicated/replicas/node1' AND name='is_lost'", + "1", + ) node2.wait_for_log_line("Will mark replica node1 as lost") # the first replica will be cloned from the second pm.heal_all() node2.wait_for_log_line("Sending part") - assert_eq_with_retry(node1, "SELECT count(*) FROM test_table", node2.query("SELECT count(*) FROM test_table")) + assert_eq_with_retry( + node1, + "SELECT count(*) FROM test_table", + node2.query("SELECT count(*) FROM test_table"), + ) # ensure replica was cloned assert node1.contains_in_log("Will mimic node2") @@ -77,5 +101,13 @@ def test_inconsistent_parts_if_drop_while_replica_not_active(start_cluster): # `Skipping action for part 201908_40_40_0 because part 201908_21_40_4 already exists.` # # In any case after a short while the replication queue should be empty - assert_eq_with_retry(node1, "SELECT count() FROM system.replication_queue WHERE type != 'MERGE_PARTS'", "0") - assert_eq_with_retry(node2, "SELECT count() FROM system.replication_queue WHERE type != 'MERGE_PARTS'", "0") + assert_eq_with_retry( + node1, + "SELECT count() FROM system.replication_queue WHERE type != 'MERGE_PARTS'", + "0", + ) + assert_eq_with_retry( + node2, + "SELECT count() FROM system.replication_queue WHERE type != 'MERGE_PARTS'", + "0", + ) diff --git a/tests/integration/test_create_user_and_login/test.py b/tests/integration/test_create_user_and_login/test.py index 2ce134fea1a..fd052ba9716 100644 --- a/tests/integration/test_create_user_and_login/test.py +++ b/tests/integration/test_create_user_and_login/test.py @@ -5,7 +5,7 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance("instance") @pytest.fixture(scope="module", autouse=True) @@ -28,34 +28,38 @@ def cleanup_after_test(): def test_login(): instance.query("CREATE USER A") instance.query("CREATE USER B") - assert instance.query("SELECT 1", user='A') == "1\n" - assert instance.query("SELECT 1", user='B') == "1\n" + assert instance.query("SELECT 1", user="A") == "1\n" + assert instance.query("SELECT 1", user="B") == "1\n" def test_grant_create_user(): instance.query("CREATE USER A") expected_error = "Not enough privileges" - assert expected_error in instance.query_and_get_error("CREATE USER B", user='A') + assert expected_error in instance.query_and_get_error("CREATE USER B", user="A") instance.query("GRANT CREATE USER ON *.* TO A") - instance.query("CREATE USER B", user='A') - assert instance.query("SELECT 1", user='B') == "1\n" + instance.query("CREATE USER B", user="A") + assert instance.query("SELECT 1", user="B") == "1\n" def test_login_as_dropped_user(): for _ in range(0, 2): instance.query("CREATE USER A") - assert 
instance.query("SELECT 1", user='A') == "1\n" + assert instance.query("SELECT 1", user="A") == "1\n" instance.query("DROP USER A") expected_error = "no user with such name" - assert expected_error in instance.query_and_get_error("SELECT 1", user='A') + assert expected_error in instance.query_and_get_error("SELECT 1", user="A") def test_login_as_dropped_user_xml(): for _ in range(0, 2): - instance.exec_in_container(["bash", "-c" , """ + instance.exec_in_container( + [ + "bash", + "-c", + """ cat > /etc/clickhouse-server/users.d/user_c.xml << EOF @@ -65,15 +69,21 @@ def test_login_as_dropped_user_xml(): -EOF"""]) +EOF""", + ] + ) - assert_eq_with_retry(instance, "SELECT name FROM system.users WHERE name='C'", "C") + assert_eq_with_retry( + instance, "SELECT name FROM system.users WHERE name='C'", "C" + ) - instance.exec_in_container(["bash", "-c" , "rm /etc/clickhouse-server/users.d/user_c.xml"]) + instance.exec_in_container( + ["bash", "-c", "rm /etc/clickhouse-server/users.d/user_c.xml"] + ) expected_error = "no user with such name" while True: - out, err = instance.query_and_get_answer_with_error("SELECT 1", user='C') + out, err = instance.query_and_get_answer_with_error("SELECT 1", user="C") if expected_error in err: logging.debug(f"Got error '{expected_error}' just as expected") break @@ -81,6 +91,8 @@ EOF"""]) logging.debug(f"Got output '1', retrying...") time.sleep(0.5) continue - raise Exception(f"Expected either output '1' or error '{expected_error}', got output={out} and error={err}") - + raise Exception( + f"Expected either output '1' or error '{expected_error}', got output={out} and error={err}" + ) + assert instance.query("SELECT name FROM system.users WHERE name='C'") == "" diff --git a/tests/integration/test_cross_replication/test.py b/tests/integration/test_cross_replication/test.py index cc5618e04e6..143b8823bf2 100644 --- a/tests/integration/test_cross_replication/test.py +++ b/tests/integration/test_cross_replication/test.py @@ -8,9 +8,15 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -26,24 +32,30 @@ def started_cluster(): for node, shards in node_to_shards: for shard in shards: - node.query(''' + node.query( + """ CREATE DATABASE shard_{shard}; CREATE TABLE shard_{shard}.replicated(date Date, id UInt32, shard_id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{replica}', date, id, 8192); - '''.format(shard=shard, replica=node.name)) + """.format( + shard=shard, replica=node.name + ) + ) - node.query(''' + node.query( + """ CREATE TABLE distributed(date Date, id UInt32, shard_id UInt32) ENGINE = Distributed(test_cluster, '', replicated, shard_id); -''') +""" + ) # Insert some data onto different shards using the Distributed table - to_insert = '''\ + to_insert = """\ 2017-06-16 111 0 2017-06-16 222 1 2017-06-16 333 2 -''' +""" 
node1.query("INSERT INTO distributed FORMAT TSV", stdin=to_insert) time.sleep(5) @@ -55,38 +67,52 @@ CREATE TABLE distributed(date Date, id UInt32, shard_id UInt32) def test(started_cluster): # Check that the data has been inserted into correct tables. - assert_eq_with_retry(node1, "SELECT id FROM shard_0.replicated", '111') - assert_eq_with_retry(node1, "SELECT id FROM shard_2.replicated", '333') + assert_eq_with_retry(node1, "SELECT id FROM shard_0.replicated", "111") + assert_eq_with_retry(node1, "SELECT id FROM shard_2.replicated", "333") - assert_eq_with_retry(node2, "SELECT id FROM shard_0.replicated", '111') - assert_eq_with_retry(node2, "SELECT id FROM shard_1.replicated", '222') + assert_eq_with_retry(node2, "SELECT id FROM shard_0.replicated", "111") + assert_eq_with_retry(node2, "SELECT id FROM shard_1.replicated", "222") - assert_eq_with_retry(node3, "SELECT id FROM shard_1.replicated", '222') - assert_eq_with_retry(node3, "SELECT id FROM shard_2.replicated", '333') + assert_eq_with_retry(node3, "SELECT id FROM shard_1.replicated", "222") + assert_eq_with_retry(node3, "SELECT id FROM shard_2.replicated", "333") # Check that SELECT from the Distributed table works. - expected_from_distributed = '''\ + expected_from_distributed = """\ 2017-06-16 111 0 2017-06-16 222 1 2017-06-16 333 2 -''' - assert_eq_with_retry(node1, "SELECT * FROM distributed ORDER BY id", expected_from_distributed) - assert_eq_with_retry(node2, "SELECT * FROM distributed ORDER BY id", expected_from_distributed) - assert_eq_with_retry(node3, "SELECT * FROM distributed ORDER BY id", expected_from_distributed) +""" + assert_eq_with_retry( + node1, "SELECT * FROM distributed ORDER BY id", expected_from_distributed + ) + assert_eq_with_retry( + node2, "SELECT * FROM distributed ORDER BY id", expected_from_distributed + ) + assert_eq_with_retry( + node3, "SELECT * FROM distributed ORDER BY id", expected_from_distributed + ) # Now isolate node3 from other nodes and check that SELECTs on other nodes still work. 
with PartitionManager() as pm: - pm.partition_instances(node3, node1, action='REJECT --reject-with tcp-reset') - pm.partition_instances(node3, node2, action='REJECT --reject-with tcp-reset') + pm.partition_instances(node3, node1, action="REJECT --reject-with tcp-reset") + pm.partition_instances(node3, node2, action="REJECT --reject-with tcp-reset") - assert_eq_with_retry(node1, "SELECT * FROM distributed ORDER BY id", expected_from_distributed) - assert_eq_with_retry(node2, "SELECT * FROM distributed ORDER BY id", expected_from_distributed) + assert_eq_with_retry( + node1, "SELECT * FROM distributed ORDER BY id", expected_from_distributed + ) + assert_eq_with_retry( + node2, "SELECT * FROM distributed ORDER BY id", expected_from_distributed + ) with pytest.raises(Exception): - print(node3.query_with_retry("SELECT * FROM distributed ORDER BY id", retry_count=5)) + print( + node3.query_with_retry( + "SELECT * FROM distributed ORDER BY id", retry_count=5 + ) + ) -if __name__ == '__main__': +if __name__ == "__main__": with contextmanager(started_cluster)() as cluster: for name, instance in list(cluster.instances.items()): print(name, instance.ip_address) diff --git a/tests/integration/test_custom_settings/test.py b/tests/integration/test_custom_settings/test.py index 7e147f999a9..2dd4a7dafef 100644 --- a/tests/integration/test_custom_settings/test.py +++ b/tests/integration/test_custom_settings/test.py @@ -4,7 +4,7 @@ from helpers.cluster import ClickHouseCluster SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node') +node = cluster.add_instance("node") @pytest.fixture(scope="module", autouse=True) @@ -18,7 +18,10 @@ def started_cluster(): def test_custom_settings(): - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/custom_settings.xml"), '/etc/clickhouse-server/users.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/custom_settings.xml"), + "/etc/clickhouse-server/users.d/z.xml", + ) node.query("SYSTEM RELOAD CONFIG") assert node.query("SELECT getSetting('custom_a')") == "-5\n" @@ -28,6 +31,9 @@ def test_custom_settings(): def test_illformed_setting(): - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/illformed_setting.xml"), '/etc/clickhouse-server/users.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/illformed_setting.xml"), + "/etc/clickhouse-server/users.d/z.xml", + ) error_message = "Couldn't restore Field from dump: 1" assert error_message in node.query_and_get_error("SYSTEM RELOAD CONFIG") diff --git a/tests/integration/test_ddl_alter_query/test.py b/tests/integration/test_ddl_alter_query/test.py index d65e40084f6..f87d943622c 100644 --- a/tests/integration/test_ddl_alter_query/test.py +++ b/tests/integration/test_ddl_alter_query/test.py @@ -4,10 +4,18 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", 
main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node4 = cluster.add_instance( + "node4", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -18,13 +26,17 @@ def started_cluster(): for i, node in enumerate([node1, node2]): node.query("CREATE DATABASE testdb") node.query( - '''CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table1', '{}') ORDER BY id;'''.format( - i)) + """CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table1', '{}') ORDER BY id;""".format( + i + ) + ) for i, node in enumerate([node3, node4]): node.query("CREATE DATABASE testdb") node.query( - '''CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table2', '{}') ORDER BY id;'''.format( - i)) + """CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table2', '{}') ORDER BY id;""".format( + i + ) + ) yield cluster finally: @@ -32,13 +44,19 @@ def started_cluster(): def test_alter(started_cluster): - node1.query("INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)") - node3.query("INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)") + node1.query( + "INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)" + ) + node3.query( + "INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)" + ) node2.query("SYSTEM SYNC REPLICA testdb.test_table") node4.query("SYSTEM SYNC REPLICA testdb.test_table") - node1.query("ALTER TABLE testdb.test_table ON CLUSTER test_cluster ADD COLUMN somecolumn UInt8 AFTER val", - settings={"replication_alter_partitions_sync": "2"}) + node1.query( + "ALTER TABLE testdb.test_table ON CLUSTER test_cluster ADD COLUMN somecolumn UInt8 AFTER val", + settings={"replication_alter_partitions_sync": "2"}, + ) node1.query("SYSTEM SYNC REPLICA testdb.test_table") node2.query("SYSTEM SYNC REPLICA testdb.test_table") diff --git a/tests/integration/test_ddl_worker_non_leader/test.py b/tests/integration/test_ddl_worker_non_leader/test.py index 172fc03c005..e7b0efa54f1 100644 --- a/tests/integration/test_ddl_worker_non_leader/test.py +++ b/tests/integration/test_ddl_worker_non_leader/test.py @@ -5,8 +5,13 @@ from helpers.network import PartitionManager from helpers.client import QueryRuntimeException cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) + @pytest.fixture(scope="module") def started_cluster(): @@ -20,40 +25,58 @@ def started_cluster(): def test_non_leader_replica(started_cluster): - node1.query_with_retry('''CREATE TABLE IF NOT EXISTS sometable(id UInt32, value String) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/sometable', '1') ORDER BY tuple()''') + node1.query_with_retry( + """CREATE TABLE IF NOT EXISTS sometable(id UInt32, value String) + ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/0/sometable', '1') ORDER BY tuple()""" + ) - node2.query_with_retry('''CREATE TABLE IF NOT EXISTS sometable(id UInt32, value String) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/sometable', '2') ORDER BY tuple() SETTINGS replicated_can_become_leader = 0''') + node2.query_with_retry( + """CREATE TABLE IF NOT EXISTS sometable(id UInt32, value String) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/sometable', '2') ORDER BY tuple() SETTINGS replicated_can_become_leader = 0""" + ) - node1.query("INSERT INTO sometable SELECT number, toString(number) FROM numbers(100)") + node1.query( + "INSERT INTO sometable SELECT number, toString(number) FROM numbers(100)" + ) node2.query_with_retry("SYSTEM SYNC REPLICA sometable", timeout=10) assert node1.query("SELECT COUNT() FROM sometable") == "100\n" assert node2.query("SELECT COUNT() FROM sometable") == "100\n" - with PartitionManager() as pm: pm.drop_instance_zk_connections(node1) # this query should be executed by leader, but leader partitioned from zookeeper with pytest.raises(Exception): - node2.query("ALTER TABLE sometable ON CLUSTER 'test_cluster' MODIFY COLUMN value UInt64 SETTINGS distributed_ddl_task_timeout=5") + node2.query( + "ALTER TABLE sometable ON CLUSTER 'test_cluster' MODIFY COLUMN value UInt64 SETTINGS distributed_ddl_task_timeout=5" + ) for _ in range(100): - if 'UInt64' in node1.query("SELECT type FROM system.columns WHERE name='value' and table = 'sometable'"): + if "UInt64" in node1.query( + "SELECT type FROM system.columns WHERE name='value' and table = 'sometable'" + ): break time.sleep(0.1) for _ in range(100): - if 'UInt64' in node2.query("SELECT type FROM system.columns WHERE name='value' and table = 'sometable'"): + if "UInt64" in node2.query( + "SELECT type FROM system.columns WHERE name='value' and table = 'sometable'" + ): break time.sleep(0.1) - assert 'UInt64' in node1.query("SELECT type FROM system.columns WHERE name='value' and table = 'sometable'") - assert 'UInt64' in node2.query("SELECT type FROM system.columns WHERE name='value' and table = 'sometable'") + assert "UInt64" in node1.query( + "SELECT type FROM system.columns WHERE name='value' and table = 'sometable'" + ) + assert "UInt64" in node2.query( + "SELECT type FROM system.columns WHERE name='value' and table = 'sometable'" + ) # Checking that DDLWorker doesn't hung and still able to execute DDL queries - node1.query("CREATE TABLE new_table_with_ddl ON CLUSTER 'test_cluster' (key UInt32) ENGINE=MergeTree() ORDER BY tuple()", settings={"distributed_ddl_task_timeout": "10"}) + node1.query( + "CREATE TABLE new_table_with_ddl ON CLUSTER 'test_cluster' (key UInt32) ENGINE=MergeTree() ORDER BY tuple()", + settings={"distributed_ddl_task_timeout": "10"}, + ) assert node1.query("EXISTS new_table_with_ddl") == "1\n" assert node2.query("EXISTS new_table_with_ddl") == "1\n" diff --git a/tests/integration/test_default_compression_codec/test.py b/tests/integration/test_default_compression_codec/test.py index d114954d739..4af276b9728 100644 --- a/tests/integration/test_default_compression_codec/test.py +++ b/tests/integration/test_default_compression_codec/test.py @@ -7,10 +7,26 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], 
with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], image='yandex/clickhouse-server', tag='20.3.16', stay_alive=True, with_installed_binary=True) -node4 = cluster.add_instance('node4') +node1 = cluster.add_instance( + "node1", + main_configs=["configs/default_compression.xml", "configs/wide_parts_only.xml"], + with_zookeeper=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/default_compression.xml", "configs/wide_parts_only.xml"], + with_zookeeper=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/default_compression.xml", "configs/wide_parts_only.xml"], + image="yandex/clickhouse-server", + tag="20.3.16", + stay_alive=True, + with_installed_binary=True, +) +node4 = cluster.add_instance("node4") + @pytest.fixture(scope="module") def start_cluster(): @@ -24,45 +40,59 @@ def start_cluster(): def get_compression_codec_byte(node, table_name, part_name): cmd = "tail -c +17 /var/lib/clickhouse/data/default/{}/{}/data1.bin | od -x -N 1 | head -n 1 | awk '{{print $2}}'".format( - table_name, part_name) + table_name, part_name + ) return node.exec_in_container(["bash", "-c", cmd]).strip() def get_second_multiple_codec_byte(node, table_name, part_name): cmd = "tail -c +17 /var/lib/clickhouse/data/default/{}/{}/data1.bin | od -x -j 11 -N 1 | head -n 1 | awk '{{print $2}}'".format( - table_name, part_name) + table_name, part_name + ) return node.exec_in_container(["bash", "-c", cmd]).strip() def get_random_string(length): - return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length)) + return "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(length) + ) CODECS_MAPPING = { - 'LZ4': '0082', - 'LZ4HC': '0082', # not an error, same byte - 'ZSTD': '0090', - 'Multiple': '0091', + "LZ4": "0082", + "LZ4HC": "0082", # not an error, same byte + "ZSTD": "0090", + "Multiple": "0091", } def test_default_codec_single(start_cluster): for i, node in enumerate([node1, node2]): - node.query(""" + node.query( + """ CREATE TABLE compression_table ( key UInt64, data1 String CODEC(Default) ) ENGINE = ReplicatedMergeTree('/t', '{}') ORDER BY tuple() PARTITION BY key; - """.format(i)) + """.format( + i + ) + ) # ZSTD(10) and ZSTD(10) after merge node1.query("INSERT INTO compression_table VALUES (1, 'x')") # ZSTD(10) and LZ4HC(10) after merge - node1.query("INSERT INTO compression_table VALUES (2, '{}')".format(get_random_string(2048))) + node1.query( + "INSERT INTO compression_table VALUES (2, '{}')".format(get_random_string(2048)) + ) # ZSTD(10) and LZ4 after merge - node1.query("INSERT INTO compression_table VALUES (3, '{}')".format(get_random_string(22048))) + node1.query( + "INSERT INTO compression_table VALUES (3, '{}')".format( + get_random_string(22048) + ) + ) node2.query("SYSTEM SYNC REPLICA compression_table", timeout=15) @@ -77,23 +107,56 @@ def test_default_codec_single(start_cluster): node2.query("SYSTEM FLUSH LOGS") # Same codec for all - assert get_compression_codec_byte(node1, "compression_table", "1_0_0_0") == CODECS_MAPPING['ZSTD'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_0_0_0'") == "ZSTD(10)\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_0_0_0'") == "ZSTD(10)\n" + assert ( + get_compression_codec_byte(node1, "compression_table", "1_0_0_0") + == 
CODECS_MAPPING["ZSTD"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_0_0_0'" + ) + == "ZSTD(10)\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_0_0_0'" + ) + == "ZSTD(10)\n" + ) - assert get_compression_codec_byte(node1, "compression_table", "2_0_0_0") == CODECS_MAPPING['ZSTD'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_0_0_0'") == "ZSTD(10)\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_0_0_0'") == "ZSTD(10)\n" + assert ( + get_compression_codec_byte(node1, "compression_table", "2_0_0_0") + == CODECS_MAPPING["ZSTD"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_0_0_0'" + ) + == "ZSTD(10)\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_0_0_0'" + ) + == "ZSTD(10)\n" + ) - assert get_compression_codec_byte(node1, "compression_table", "3_0_0_0") == CODECS_MAPPING['ZSTD'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_0_0_0'") == "ZSTD(10)\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_0_0_0'") == "ZSTD(10)\n" + assert ( + get_compression_codec_byte(node1, "compression_table", "3_0_0_0") + == CODECS_MAPPING["ZSTD"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_0_0_0'" + ) + == "ZSTD(10)\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_0_0_0'" + ) + == "ZSTD(10)\n" + ) # just to be sure that replication works node1.query("OPTIMIZE TABLE compression_table FINAL") @@ -110,23 +173,56 @@ def test_default_codec_single(start_cluster): node1.query("SYSTEM FLUSH LOGS") node2.query("SYSTEM FLUSH LOGS") - assert get_compression_codec_byte(node1, "compression_table", "1_0_0_1") == CODECS_MAPPING['ZSTD'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_0_0_1'") == "ZSTD(10)\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_0_0_1'") == "ZSTD(10)\n" + assert ( + get_compression_codec_byte(node1, "compression_table", "1_0_0_1") + == CODECS_MAPPING["ZSTD"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_0_0_1'" + ) + == "ZSTD(10)\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_0_0_1'" + ) + == "ZSTD(10)\n" + ) - assert get_compression_codec_byte(node1, "compression_table", "2_0_0_1") == CODECS_MAPPING['LZ4HC'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_0_0_1'") == "LZ4HC(5)\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_0_0_1'") == "LZ4HC(5)\n" + assert ( + get_compression_codec_byte(node1, "compression_table", 
"2_0_0_1") + == CODECS_MAPPING["LZ4HC"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_0_0_1'" + ) + == "LZ4HC(5)\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_0_0_1'" + ) + == "LZ4HC(5)\n" + ) - assert get_compression_codec_byte(node1, "compression_table", "3_0_0_1") == CODECS_MAPPING['LZ4'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_0_0_1'") == "LZ4\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_0_0_1'") == "LZ4\n" + assert ( + get_compression_codec_byte(node1, "compression_table", "3_0_0_1") + == CODECS_MAPPING["LZ4"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_0_0_1'" + ) + == "LZ4\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_0_0_1'" + ) + == "LZ4\n" + ) assert node1.query("SELECT COUNT() FROM compression_table") == "3\n" assert node2.query("SELECT COUNT() FROM compression_table") == "3\n" @@ -137,68 +233,165 @@ def test_default_codec_single(start_cluster): def test_default_codec_multiple(start_cluster): for i, node in enumerate([node1, node2]): - node.query(""" + node.query( + """ CREATE TABLE compression_table_multiple ( key UInt64, data1 String CODEC(NONE, Default) ) ENGINE = ReplicatedMergeTree('/d', '{}') ORDER BY tuple() PARTITION BY key; - """.format(i), settings={"allow_suspicious_codecs": 1}) + """.format( + i + ), + settings={"allow_suspicious_codecs": 1}, + ) # ZSTD(10) and ZSTD(10) after merge node1.query("INSERT INTO compression_table_multiple VALUES (1, 'x')") # ZSTD(10) and LZ4HC(10) after merge - node1.query("INSERT INTO compression_table_multiple VALUES (2, '{}')".format(get_random_string(2048))) + node1.query( + "INSERT INTO compression_table_multiple VALUES (2, '{}')".format( + get_random_string(2048) + ) + ) # ZSTD(10) and LZ4 after merge - node1.query("INSERT INTO compression_table_multiple VALUES (3, '{}')".format(get_random_string(22048))) + node1.query( + "INSERT INTO compression_table_multiple VALUES (3, '{}')".format( + get_random_string(22048) + ) + ) # Same codec for all - assert get_compression_codec_byte(node1, "compression_table_multiple", "1_0_0_0") == CODECS_MAPPING['Multiple'] - assert get_second_multiple_codec_byte(node1, "compression_table_multiple", "1_0_0_0") == CODECS_MAPPING['ZSTD'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '1_0_0_0'") == "ZSTD(10)\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '1_0_0_0'") == "ZSTD(10)\n" + assert ( + get_compression_codec_byte(node1, "compression_table_multiple", "1_0_0_0") + == CODECS_MAPPING["Multiple"] + ) + assert ( + get_second_multiple_codec_byte(node1, "compression_table_multiple", "1_0_0_0") + == CODECS_MAPPING["ZSTD"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '1_0_0_0'" + ) + == "ZSTD(10)\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = 
'1_0_0_0'" + ) + == "ZSTD(10)\n" + ) - assert get_compression_codec_byte(node1, "compression_table_multiple", "2_0_0_0") == CODECS_MAPPING['Multiple'] - assert get_second_multiple_codec_byte(node1, "compression_table_multiple", "2_0_0_0") == CODECS_MAPPING['ZSTD'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '2_0_0_0'") == "ZSTD(10)\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '2_0_0_0'") == "ZSTD(10)\n" + assert ( + get_compression_codec_byte(node1, "compression_table_multiple", "2_0_0_0") + == CODECS_MAPPING["Multiple"] + ) + assert ( + get_second_multiple_codec_byte(node1, "compression_table_multiple", "2_0_0_0") + == CODECS_MAPPING["ZSTD"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '2_0_0_0'" + ) + == "ZSTD(10)\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '2_0_0_0'" + ) + == "ZSTD(10)\n" + ) - assert get_compression_codec_byte(node1, "compression_table_multiple", "3_0_0_0") == CODECS_MAPPING['Multiple'] - assert get_second_multiple_codec_byte(node1, "compression_table_multiple", "3_0_0_0") == CODECS_MAPPING['ZSTD'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '3_0_0_0'") == "ZSTD(10)\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '3_0_0_0'") == "ZSTD(10)\n" + assert ( + get_compression_codec_byte(node1, "compression_table_multiple", "3_0_0_0") + == CODECS_MAPPING["Multiple"] + ) + assert ( + get_second_multiple_codec_byte(node1, "compression_table_multiple", "3_0_0_0") + == CODECS_MAPPING["ZSTD"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '3_0_0_0'" + ) + == "ZSTD(10)\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '3_0_0_0'" + ) + == "ZSTD(10)\n" + ) node2.query("SYSTEM SYNC REPLICA compression_table_multiple", timeout=15) node1.query("OPTIMIZE TABLE compression_table_multiple FINAL") - assert get_compression_codec_byte(node1, "compression_table_multiple", "1_0_0_1") == CODECS_MAPPING['Multiple'] - assert get_second_multiple_codec_byte(node1, "compression_table_multiple", "1_0_0_1") == CODECS_MAPPING['ZSTD'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '1_0_0_1'") == "ZSTD(10)\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '1_0_0_1'") == "ZSTD(10)\n" + assert ( + get_compression_codec_byte(node1, "compression_table_multiple", "1_0_0_1") + == CODECS_MAPPING["Multiple"] + ) + assert ( + get_second_multiple_codec_byte(node1, "compression_table_multiple", "1_0_0_1") + == CODECS_MAPPING["ZSTD"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '1_0_0_1'" + ) + == "ZSTD(10)\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '1_0_0_1'" + ) 
+ == "ZSTD(10)\n" + ) - assert get_compression_codec_byte(node1, "compression_table_multiple", "2_0_0_1") == CODECS_MAPPING['Multiple'] - assert get_second_multiple_codec_byte(node1, "compression_table_multiple", "2_0_0_1") == CODECS_MAPPING['LZ4HC'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '2_0_0_1'") == "LZ4HC(5)\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '2_0_0_1'") == "LZ4HC(5)\n" + assert ( + get_compression_codec_byte(node1, "compression_table_multiple", "2_0_0_1") + == CODECS_MAPPING["Multiple"] + ) + assert ( + get_second_multiple_codec_byte(node1, "compression_table_multiple", "2_0_0_1") + == CODECS_MAPPING["LZ4HC"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '2_0_0_1'" + ) + == "LZ4HC(5)\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '2_0_0_1'" + ) + == "LZ4HC(5)\n" + ) - assert get_compression_codec_byte(node1, "compression_table_multiple", "3_0_0_1") == CODECS_MAPPING['Multiple'] - assert get_second_multiple_codec_byte(node1, "compression_table_multiple", "3_0_0_1") == CODECS_MAPPING['LZ4'] - assert node1.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '3_0_0_1'") == "LZ4\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '3_0_0_1'") == "LZ4\n" + assert ( + get_compression_codec_byte(node1, "compression_table_multiple", "3_0_0_1") + == CODECS_MAPPING["Multiple"] + ) + assert ( + get_second_multiple_codec_byte(node1, "compression_table_multiple", "3_0_0_1") + == CODECS_MAPPING["LZ4"] + ) + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '3_0_0_1'" + ) + == "LZ4\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table_multiple' and name = '3_0_0_1'" + ) + == "LZ4\n" + ) assert node1.query("SELECT COUNT() FROM compression_table_multiple") == "3\n" assert node2.query("SELECT COUNT() FROM compression_table_multiple") == "3\n" @@ -208,41 +401,81 @@ def test_default_codec_multiple(start_cluster): def test_default_codec_version_update(start_cluster): - node3.query(""" + node3.query( + """ CREATE TABLE compression_table ( key UInt64 CODEC(LZ4HC(7)), data1 String ) ENGINE = MergeTree ORDER BY tuple() PARTITION BY key; - """) + """ + ) node3.query("INSERT INTO compression_table VALUES (1, 'x')") - node3.query("INSERT INTO compression_table VALUES (2, '{}')".format(get_random_string(2048))) - node3.query("INSERT INTO compression_table VALUES (3, '{}')".format(get_random_string(22048))) + node3.query( + "INSERT INTO compression_table VALUES (2, '{}')".format(get_random_string(2048)) + ) + node3.query( + "INSERT INTO compression_table VALUES (3, '{}')".format( + get_random_string(22048) + ) + ) old_version = node3.query("SELECT version()") node3.restart_with_latest_version() new_version = node3.query("SELECT version()") logging.debug(f"Updated from {old_version} to {new_version}") - assert node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_1_1_0'") == "ZSTD(1)\n" - assert 
node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_2_2_0'") == "ZSTD(1)\n" - assert node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_3_3_0'") == "ZSTD(1)\n" + assert ( + node3.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_1_1_0'" + ) + == "ZSTD(1)\n" + ) + assert ( + node3.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_2_2_0'" + ) + == "ZSTD(1)\n" + ) + assert ( + node3.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_3_3_0'" + ) + == "ZSTD(1)\n" + ) node3.query("OPTIMIZE TABLE compression_table FINAL") - assert node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_1_1_1'") == "ZSTD(10)\n" - assert node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_2_2_1'") == "LZ4HC(5)\n" - assert node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_3_3_1'") == "LZ4\n" + assert ( + node3.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_1_1_1'" + ) + == "ZSTD(10)\n" + ) + assert ( + node3.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_2_2_1'" + ) + == "LZ4HC(5)\n" + ) + assert ( + node3.query( + "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_3_3_1'" + ) + == "LZ4\n" + ) node3.query("DROP TABLE compression_table SYNC") def callback(n): - n.exec_in_container(['bash', '-c', 'rm -rf /var/lib/clickhouse/metadata/system /var/lib/clickhouse/data/system '], user='root') + n.exec_in_container( + [ + "bash", + "-c", + "rm -rf /var/lib/clickhouse/metadata/system /var/lib/clickhouse/data/system ", + ], + user="root", + ) + node3.restart_with_original_version(callback_onstop=callback) cur_version = node3.query("SELECT version()") @@ -250,20 +483,28 @@ def test_default_codec_version_update(start_cluster): def test_default_codec_for_compact_parts(start_cluster): - node4.query(""" + node4.query( + """ CREATE TABLE compact_parts_table ( key UInt64, data String ) ENGINE MergeTree ORDER BY tuple() - """) + """ + ) node4.query("INSERT INTO compact_parts_table VALUES (1, 'Hello world')") assert node4.query("SELECT COUNT() FROM compact_parts_table") == "1\n" node4.query("ALTER TABLE compact_parts_table DETACH PART 'all_1_1_0'") - node4.exec_in_container(["bash", "-c", "rm /var/lib/clickhouse/data/default/compact_parts_table/detached/all_1_1_0/default_compression_codec.txt"]) + node4.exec_in_container( + [ + "bash", + "-c", + "rm /var/lib/clickhouse/data/default/compact_parts_table/detached/all_1_1_0/default_compression_codec.txt", + ] + ) node4.query("ALTER TABLE compact_parts_table ATTACH PART 'all_1_1_0'") diff --git a/tests/integration/test_default_database_on_cluster/test.py b/tests/integration/test_default_database_on_cluster/test.py index 28a3cfad1d1..f0f1360434f 100644 --- a/tests/integration/test_default_database_on_cluster/test.py +++ b/tests/integration/test_default_database_on_cluster/test.py @@ -2,18 +2,38 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -ch1 = cluster.add_instance('ch1', - 
main_configs=["configs/config.d/clusters.xml", "configs/config.d/distributed_ddl.xml"], - with_zookeeper=True) -ch2 = cluster.add_instance('ch2', - main_configs=["configs/config.d/clusters.xml", "configs/config.d/distributed_ddl.xml"], - with_zookeeper=True) -ch3 = cluster.add_instance('ch3', - main_configs=["configs/config.d/clusters.xml", "configs/config.d/distributed_ddl.xml"], - with_zookeeper=True) -ch4 = cluster.add_instance('ch4', - main_configs=["configs/config.d/clusters.xml", "configs/config.d/distributed_ddl.xml"], - with_zookeeper=True) +ch1 = cluster.add_instance( + "ch1", + main_configs=[ + "configs/config.d/clusters.xml", + "configs/config.d/distributed_ddl.xml", + ], + with_zookeeper=True, +) +ch2 = cluster.add_instance( + "ch2", + main_configs=[ + "configs/config.d/clusters.xml", + "configs/config.d/distributed_ddl.xml", + ], + with_zookeeper=True, +) +ch3 = cluster.add_instance( + "ch3", + main_configs=[ + "configs/config.d/clusters.xml", + "configs/config.d/distributed_ddl.xml", + ], + with_zookeeper=True, +) +ch4 = cluster.add_instance( + "ch4", + main_configs=[ + "configs/config.d/clusters.xml", + "configs/config.d/distributed_ddl.xml", + ], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -28,17 +48,30 @@ def started_cluster(): def test_default_database_on_cluster(started_cluster): - ch1.query(database='test_default_database', - sql="CREATE TABLE test_local_table ON CLUSTER 'cluster' (column UInt8) ENGINE = Memory;") + ch1.query( + database="test_default_database", + sql="CREATE TABLE test_local_table ON CLUSTER 'cluster' (column UInt8) ENGINE = Memory;", + ) for node in [ch1, ch2, ch3, ch4]: - assert node.query("SHOW TABLES FROM test_default_database FORMAT TSV") == "test_local_table\n" + assert ( + node.query("SHOW TABLES FROM test_default_database FORMAT TSV") + == "test_local_table\n" + ) - ch1.query(database='test_default_database', - sql="CREATE TABLE test_distributed_table ON CLUSTER 'cluster' (column UInt8) ENGINE = Distributed(cluster, currentDatabase(), 'test_local_table');") + ch1.query( + database="test_default_database", + sql="CREATE TABLE test_distributed_table ON CLUSTER 'cluster' (column UInt8) ENGINE = Distributed(cluster, currentDatabase(), 'test_local_table');", + ) for node in [ch1, ch2, ch3, ch4]: - assert node.query( - "SHOW TABLES FROM test_default_database FORMAT TSV") == "test_distributed_table\ntest_local_table\n" - assert node.query( - "SHOW CREATE TABLE test_default_database.test_distributed_table FORMAT TSV") == "CREATE TABLE test_default_database.test_distributed_table\\n(\\n `column` UInt8\\n)\\nENGINE = Distributed(\\'cluster\\', \\'test_default_database\\', \\'test_local_table\\')\n" + assert ( + node.query("SHOW TABLES FROM test_default_database FORMAT TSV") + == "test_distributed_table\ntest_local_table\n" + ) + assert ( + node.query( + "SHOW CREATE TABLE test_default_database.test_distributed_table FORMAT TSV" + ) + == "CREATE TABLE test_default_database.test_distributed_table\\n(\\n `column` UInt8\\n)\\nENGINE = Distributed(\\'cluster\\', \\'test_default_database\\', \\'test_local_table\\')\n" + ) diff --git a/tests/integration/test_default_role/test.py b/tests/integration/test_default_role/test.py index 2f00fb603a8..1a321a8269a 100644 --- a/tests/integration/test_default_role/test.py +++ b/tests/integration/test_default_role/test.py @@ -3,7 +3,7 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') 
+instance = cluster.add_instance("instance") @pytest.fixture(scope="module", autouse=True) @@ -31,46 +31,64 @@ def test_set_default_roles(): assert instance.query("SHOW CURRENT ROLES", user="john") == "" instance.query("GRANT rx, ry TO john") - assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([['rx', 0, 1], ['ry', 0, 1]]) + assert instance.query("SHOW CURRENT ROLES", user="john") == TSV( + [["rx", 0, 1], ["ry", 0, 1]] + ) instance.query("SET DEFAULT ROLE NONE TO john") assert instance.query("SHOW CURRENT ROLES", user="john") == "" instance.query("SET DEFAULT ROLE rx TO john") - assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([['rx', 0, 1]]) + assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([["rx", 0, 1]]) instance.query("SET DEFAULT ROLE ry TO john") - assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([['ry', 0, 1]]) + assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([["ry", 0, 1]]) instance.query("SET DEFAULT ROLE ALL TO john") - assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([['rx', 0, 1], ['ry', 0, 1]]) + assert instance.query("SHOW CURRENT ROLES", user="john") == TSV( + [["rx", 0, 1], ["ry", 0, 1]] + ) instance.query("SET DEFAULT ROLE ALL EXCEPT rx TO john") - assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([['ry', 0, 1]]) + assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([["ry", 0, 1]]) def test_alter_user(): assert instance.query("SHOW CURRENT ROLES", user="john") == "" instance.query("GRANT rx, ry TO john") - assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([['rx', 0, 1], ['ry', 0, 1]]) + assert instance.query("SHOW CURRENT ROLES", user="john") == TSV( + [["rx", 0, 1], ["ry", 0, 1]] + ) instance.query("ALTER USER john DEFAULT ROLE NONE") assert instance.query("SHOW CURRENT ROLES", user="john") == "" instance.query("ALTER USER john DEFAULT ROLE rx") - assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([['rx', 0, 1]]) + assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([["rx", 0, 1]]) instance.query("ALTER USER john DEFAULT ROLE ALL") - assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([['rx', 0, 1], ['ry', 0, 1]]) + assert instance.query("SHOW CURRENT ROLES", user="john") == TSV( + [["rx", 0, 1], ["ry", 0, 1]] + ) instance.query("ALTER USER john DEFAULT ROLE ALL EXCEPT rx") - assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([['ry', 0, 1]]) + assert instance.query("SHOW CURRENT ROLES", user="john") == TSV([["ry", 0, 1]]) def test_wrong_set_default_role(): - assert "There is no user `rx`" in instance.query_and_get_error("SET DEFAULT ROLE NONE TO rx") - assert "There is no user `ry`" in instance.query_and_get_error("SET DEFAULT ROLE rx TO ry") - assert "There is no role `john`" in instance.query_and_get_error("SET DEFAULT ROLE john TO john") - assert "There is no role `john`" in instance.query_and_get_error("ALTER USER john DEFAULT ROLE john") - assert "There is no role `john`" in instance.query_and_get_error("ALTER USER john DEFAULT ROLE ALL EXCEPT john") + assert "There is no user `rx`" in instance.query_and_get_error( + "SET DEFAULT ROLE NONE TO rx" + ) + assert "There is no user `ry`" in instance.query_and_get_error( + "SET DEFAULT ROLE rx TO ry" + ) + assert "There is no role `john`" in instance.query_and_get_error( + "SET DEFAULT ROLE john TO john" + ) + assert "There is no role `john`" in instance.query_and_get_error( + "ALTER USER john DEFAULT ROLE john" + ) + assert "There is no role 
`john`" in instance.query_and_get_error( + "ALTER USER john DEFAULT ROLE ALL EXCEPT john" + ) diff --git a/tests/integration/test_delayed_replica_failover/test.py b/tests/integration/test_delayed_replica_failover/test.py index 404848e155b..387d6a12f48 100644 --- a/tests/integration/test_delayed_replica_failover/test.py +++ b/tests/integration/test_delayed_replica_failover/test.py @@ -14,10 +14,11 @@ cluster = ClickHouseCluster(__file__) # Cluster with 2 shards of 2 replicas each. node_1_1 is the instance with Distributed table. # Thus we have a shard with a local replica and a shard with remote replicas. node_1_1 = instance_with_dist_table = cluster.add_instance( - 'node_1_1', with_zookeeper=True, main_configs=['configs/remote_servers.xml']) -node_1_2 = cluster.add_instance('node_1_2', with_zookeeper=True) -node_2_1 = cluster.add_instance('node_2_1', with_zookeeper=True) -node_2_2 = cluster.add_instance('node_2_2', with_zookeeper=True) + "node_1_1", with_zookeeper=True, main_configs=["configs/remote_servers.xml"] +) +node_1_2 = cluster.add_instance("node_1_2", with_zookeeper=True) +node_2_1 = cluster.add_instance("node_2_1", with_zookeeper=True) +node_2_2 = cluster.add_instance("node_2_2", with_zookeeper=True) @pytest.fixture(scope="module") @@ -27,15 +28,19 @@ def started_cluster(): for shard in (1, 2): for replica in (1, 2): - node = cluster.instances['node_{}_{}'.format(shard, replica)] - node.query(''' + node = cluster.instances["node_{}_{}".format(shard, replica)] + node.query( + """ CREATE TABLE replicated (d Date, x UInt32) ENGINE = - ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{instance}', d, d, 8192)''' - .format(shard=shard, instance=node.name)) + ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{instance}', d, d, 8192)""".format( + shard=shard, instance=node.name + ) + ) node_1_1.query( "CREATE TABLE distributed (d Date, x UInt32) ENGINE = " - "Distributed('test_cluster', 'default', 'replicated')") + "Distributed('test_cluster', 'default', 'replicated')" + ) yield cluster @@ -54,27 +59,41 @@ def test(started_cluster): time.sleep(1) # accrue replica delay - assert node_1_1.query("SELECT sum(x) FROM replicated").strip() == '0' - assert node_1_2.query("SELECT sum(x) FROM replicated").strip() == '1' - assert node_2_1.query("SELECT sum(x) FROM replicated").strip() == '0' - assert node_2_2.query("SELECT sum(x) FROM replicated").strip() == '2' + assert node_1_1.query("SELECT sum(x) FROM replicated").strip() == "0" + assert node_1_2.query("SELECT sum(x) FROM replicated").strip() == "1" + assert node_2_1.query("SELECT sum(x) FROM replicated").strip() == "0" + assert node_2_2.query("SELECT sum(x) FROM replicated").strip() == "2" # With in_order balancing first replicas are chosen. - assert instance_with_dist_table.query( - "SELECT count() FROM distributed SETTINGS load_balancing='in_order'").strip() == '0' + assert ( + instance_with_dist_table.query( + "SELECT count() FROM distributed SETTINGS load_balancing='in_order'" + ).strip() + == "0" + ) # When we set max_replica_delay, first replicas must be excluded. 
- assert instance_with_dist_table.query(''' + assert ( + instance_with_dist_table.query( + """ SELECT sum(x) FROM distributed SETTINGS load_balancing='in_order', max_replica_delay_for_distributed_queries=1 -''').strip() == '3' +""" + ).strip() + == "3" + ) - assert instance_with_dist_table.query(''' + assert ( + instance_with_dist_table.query( + """ SELECT sum(x) FROM distributed WITH TOTALS SETTINGS load_balancing='in_order', max_replica_delay_for_distributed_queries=1 -''').strip() == '3\n\n3' +""" + ).strip() + == "3\n\n3" + ) pm.drop_instance_zk_connections(node_1_2) pm.drop_instance_zk_connections(node_2_2) @@ -90,29 +109,41 @@ SELECT sum(x) FROM distributed WITH TOTALS SETTINGS raise Exception("Connection with zookeeper was not lost") # At this point all replicas are stale, but the query must still go to second replicas which are the least stale ones. - assert instance_with_dist_table.query(''' + assert ( + instance_with_dist_table.query( + """ SELECT sum(x) FROM distributed SETTINGS load_balancing='in_order', max_replica_delay_for_distributed_queries=1 -''').strip() == '3' +""" + ).strip() + == "3" + ) # Regression for skip_unavailable_shards in conjunction with skip_unavailable_shards - assert instance_with_dist_table.query(''' + assert ( + instance_with_dist_table.query( + """ SELECT sum(x) FROM distributed SETTINGS load_balancing='in_order', skip_unavailable_shards=1, max_replica_delay_for_distributed_queries=1 -''').strip() == '3' +""" + ).strip() + == "3" + ) # If we forbid stale replicas, the query must fail. But sometimes we must have bigger timeouts. for _ in range(20): try: - instance_with_dist_table.query(''' + instance_with_dist_table.query( + """ SELECT count() FROM distributed SETTINGS load_balancing='in_order', max_replica_delay_for_distributed_queries=1, fallback_to_stale_replicas_for_distributed_queries=0 -''') +""" + ) time.sleep(0.5) except: break @@ -122,8 +153,13 @@ SELECT count() FROM distributed SETTINGS # Now partition off the remote replica of the local shard and test that failover still works. 
pm.partition_instances(node_1_1, node_1_2, port=9000) - assert instance_with_dist_table.query(''' + assert ( + instance_with_dist_table.query( + """ SELECT sum(x) FROM distributed SETTINGS load_balancing='in_order', max_replica_delay_for_distributed_queries=1 -''').strip() == '2' +""" + ).strip() + == "2" + ) diff --git a/tests/integration/test_dictionaries_access/test.py b/tests/integration/test_dictionaries_access/test.py index 1b64b0de1fb..993c8259f32 100644 --- a/tests/integration/test_dictionaries_access/test.py +++ b/tests/integration/test_dictionaries_access/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance("instance") @pytest.fixture(scope="module", autouse=True) @@ -41,7 +41,9 @@ drop_query = "DROP DICTIONARY test_dict" def test_create(): assert instance.query("SHOW GRANTS FOR mira") == "" - assert "Not enough privileges" in instance.query_and_get_error(create_query, user="mira") + assert "Not enough privileges" in instance.query_and_get_error( + create_query, user="mira" + ) instance.query("GRANT CREATE DICTIONARY ON *.* TO mira") instance.query(create_query, user="mira") @@ -49,7 +51,9 @@ def test_create(): instance.query("REVOKE CREATE DICTIONARY ON *.* FROM mira") assert instance.query("SHOW GRANTS FOR mira") == "" - assert "Not enough privileges" in instance.query_and_get_error(create_query, user="mira") + assert "Not enough privileges" in instance.query_and_get_error( + create_query, user="mira" + ) instance.query("GRANT CREATE DICTIONARY ON default.* TO mira") instance.query(create_query, user="mira") @@ -57,7 +61,9 @@ def test_create(): instance.query("REVOKE CREATE DICTIONARY ON default.* FROM mira") assert instance.query("SHOW GRANTS FOR mira") == "" - assert "Not enough privileges" in instance.query_and_get_error(create_query, user="mira") + assert "Not enough privileges" in instance.query_and_get_error( + create_query, user="mira" + ) instance.query("GRANT CREATE DICTIONARY ON default.test_dict TO mira") instance.query(create_query, user="mira") @@ -67,7 +73,9 @@ def test_drop(): instance.query(create_query) assert instance.query("SHOW GRANTS FOR mira") == "" - assert "Not enough privileges" in instance.query_and_get_error(drop_query, user="mira") + assert "Not enough privileges" in instance.query_and_get_error( + drop_query, user="mira" + ) instance.query("GRANT DROP DICTIONARY ON *.* TO mira") instance.query(drop_query, user="mira") @@ -79,14 +87,18 @@ def test_dictget(): dictget_query = "SELECT dictGet('default.test_dict', 'y', toUInt64(5))" instance.query(dictget_query) == "6\n" - assert "Not enough privileges" in instance.query_and_get_error(dictget_query, user='mira') + assert "Not enough privileges" in instance.query_and_get_error( + dictget_query, user="mira" + ) instance.query("GRANT dictGet ON default.test_dict TO mira") - instance.query(dictget_query, user='mira') == "6\n" + instance.query(dictget_query, user="mira") == "6\n" dictget_query = "SELECT dictGet('default.test_dict', 'y', toUInt64(1))" instance.query(dictget_query) == "0\n" - instance.query(dictget_query, user='mira') == "0\n" + instance.query(dictget_query, user="mira") == "0\n" instance.query("REVOKE dictGet ON *.* FROM mira") - assert "Not enough privileges" in instance.query_and_get_error(dictget_query, user='mira') + assert "Not enough privileges" in instance.query_and_get_error( + dictget_query, user="mira" + ) diff --git 
a/tests/integration/test_dictionaries_all_layouts_separate_sources/common.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/common.py index 20d086afe8c..b38e81b0227 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/common.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/common.py @@ -4,51 +4,51 @@ import shutil from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout KEY_FIELDS = { - "simple": [ - Field("KeyField", 'UInt64', is_key=True, default_value_for_get=9999999) - ], + "simple": [Field("KeyField", "UInt64", is_key=True, default_value_for_get=9999999)], "complex": [ - Field("KeyField1", 'UInt64', is_key=True, default_value_for_get=9999999), - Field("KeyField2", 'String', is_key=True, default_value_for_get='xxxxxxxxx') + Field("KeyField1", "UInt64", is_key=True, default_value_for_get=9999999), + Field("KeyField2", "String", is_key=True, default_value_for_get="xxxxxxxxx"), ], "ranged": [ - Field("KeyField1", 'UInt64', is_key=True), - Field("KeyField2", 'Date', is_range_key=True) - ] + Field("KeyField1", "UInt64", is_key=True), + Field("KeyField2", "Date", is_range_key=True), + ], } START_FIELDS = { "simple": [], "complex": [], - "ranged" : [ - Field("StartDate", 'Date', range_hash_type='min'), - Field("EndDate", 'Date', range_hash_type='max') - ] + "ranged": [ + Field("StartDate", "Date", range_hash_type="min"), + Field("EndDate", "Date", range_hash_type="max"), + ], } MIDDLE_FIELDS = [ - Field("UInt8_", 'UInt8', default_value_for_get=55), - Field("UInt16_", 'UInt16', default_value_for_get=66), - Field("UInt32_", 'UInt32', default_value_for_get=77), - Field("UInt64_", 'UInt64', default_value_for_get=88), - Field("Int8_", 'Int8', default_value_for_get=-55), - Field("Int16_", 'Int16', default_value_for_get=-66), - Field("Int32_", 'Int32', default_value_for_get=-77), - Field("Int64_", 'Int64', default_value_for_get=-88), - Field("UUID_", 'UUID', default_value_for_get='550e8400-0000-0000-0000-000000000000'), - Field("Date_", 'Date', default_value_for_get='2018-12-30'), - Field("DateTime_", 'DateTime', default_value_for_get='2018-12-30 00:00:00'), - Field("String_", 'String', default_value_for_get='hi'), - Field("Float32_", 'Float32', default_value_for_get=555.11), - Field("Float64_", 'Float64', default_value_for_get=777.11), + Field("UInt8_", "UInt8", default_value_for_get=55), + Field("UInt16_", "UInt16", default_value_for_get=66), + Field("UInt32_", "UInt32", default_value_for_get=77), + Field("UInt64_", "UInt64", default_value_for_get=88), + Field("Int8_", "Int8", default_value_for_get=-55), + Field("Int16_", "Int16", default_value_for_get=-66), + Field("Int32_", "Int32", default_value_for_get=-77), + Field("Int64_", "Int64", default_value_for_get=-88), + Field( + "UUID_", "UUID", default_value_for_get="550e8400-0000-0000-0000-000000000000" + ), + Field("Date_", "Date", default_value_for_get="2018-12-30"), + Field("DateTime_", "DateTime", default_value_for_get="2018-12-30 00:00:00"), + Field("String_", "String", default_value_for_get="hi"), + Field("Float32_", "Float32", default_value_for_get=555.11), + Field("Float64_", "Float64", default_value_for_get=777.11), ] END_FIELDS = { - "simple" : [ + "simple": [ Field("ParentKeyField", "UInt64", default_value_for_get=444, hierarchical=True) ], - "complex" : [], - "ranged" : [] + "complex": [], + "ranged": [], } LAYOUTS_SIMPLE = ["flat", "hashed", "cache", "direct"] @@ -57,40 +57,129 @@ LAYOUTS_RANGED = ["range_hashed"] VALUES = { "simple": [ - 
[1, 22, 333, 4444, 55555, -6, -77, - -888, -999, '550e8400-e29b-41d4-a716-446655440003', - '1973-06-28', '1985-02-28 23:43:25', 'hello', 22.543, 3332154213.4, 0], - [2, 3, 4, 5, 6, -7, -8, - -9, -10, '550e8400-e29b-41d4-a716-446655440002', - '1978-06-28', '1986-02-28 23:42:25', 'hello', 21.543, 3222154213.4, 1] + [ + 1, + 22, + 333, + 4444, + 55555, + -6, + -77, + -888, + -999, + "550e8400-e29b-41d4-a716-446655440003", + "1973-06-28", + "1985-02-28 23:43:25", + "hello", + 22.543, + 3332154213.4, + 0, + ], + [ + 2, + 3, + 4, + 5, + 6, + -7, + -8, + -9, + -10, + "550e8400-e29b-41d4-a716-446655440002", + "1978-06-28", + "1986-02-28 23:42:25", + "hello", + 21.543, + 3222154213.4, + 1, + ], ], "complex": [ - [1, 'world', 22, 333, 4444, 55555, -6, - -77, -888, -999, '550e8400-e29b-41d4-a716-446655440003', - '1973-06-28', '1985-02-28 23:43:25', - 'hello', 22.543, 3332154213.4], - [2, 'qwerty2', 52, 2345, 6544, 9191991, -2, - -717, -81818, -92929, '550e8400-e29b-41d4-a716-446655440007', - '1975-09-28', '2000-02-28 23:33:24', - 'my', 255.543, 3332221.44] + [ + 1, + "world", + 22, + 333, + 4444, + 55555, + -6, + -77, + -888, + -999, + "550e8400-e29b-41d4-a716-446655440003", + "1973-06-28", + "1985-02-28 23:43:25", + "hello", + 22.543, + 3332154213.4, + ], + [ + 2, + "qwerty2", + 52, + 2345, + 6544, + 9191991, + -2, + -717, + -81818, + -92929, + "550e8400-e29b-41d4-a716-446655440007", + "1975-09-28", + "2000-02-28 23:33:24", + "my", + 255.543, + 3332221.44, + ], ], "ranged": [ - [1, '2019-02-10', '2019-02-01', '2019-02-28', - 22, 333, 4444, 55555, -6, -77, -888, -999, - '550e8400-e29b-41d4-a716-446655440003', - '1973-06-28', '1985-02-28 23:43:25', 'hello', - 22.543, 3332154213.4], - [2, '2019-04-10', '2019-04-01', '2019-04-28', - 11, 3223, 41444, 52515, -65, -747, -8388, -9099, - '550e8400-e29b-41d4-a716-446655440004', - '1973-06-29', '2002-02-28 23:23:25', '!!!!', - 32.543, 3332543.4] - ] + [ + 1, + "2019-02-10", + "2019-02-01", + "2019-02-28", + 22, + 333, + 4444, + 55555, + -6, + -77, + -888, + -999, + "550e8400-e29b-41d4-a716-446655440003", + "1973-06-28", + "1985-02-28 23:43:25", + "hello", + 22.543, + 3332154213.4, + ], + [ + 2, + "2019-04-10", + "2019-04-01", + "2019-04-28", + 11, + 3223, + 41444, + 52515, + -65, + -747, + -8388, + -9099, + "550e8400-e29b-41d4-a716-446655440004", + "1973-06-29", + "2002-02-28 23:23:25", + "!!!!", + 32.543, + 3332543.4, + ], + ], } SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -DICT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'configs', 'dictionaries') +DICT_CONFIG_PATH = os.path.join(SCRIPT_DIR, "configs", "dictionaries") + class BaseLayoutTester: def __init__(self, test_name): @@ -109,29 +198,39 @@ class BaseLayoutTester: directory = self.get_dict_directory() for fname in os.listdir(directory): dictionaries.append(os.path.join(directory, fname)) - return dictionaries + return dictionaries def create_dictionaries(self, source_): for layout in self.layouts: if source_.compatible_with_layout(Layout(layout)): - self.layout_to_dictionary[layout] = self.get_dict(source_, Layout(layout), self.fields) + self.layout_to_dictionary[layout] = self.get_dict( + source_, Layout(layout), self.fields + ) def prepare(self, cluster_): for _, dictionary in list(self.layout_to_dictionary.items()): dictionary.prepare_source(cluster_) dictionary.load_data(self.data) - def get_dict(self, source, layout, fields, suffix_name=''): + def get_dict(self, source, layout, fields, suffix_name=""): structure = DictionaryStructure(layout, fields) - dict_name = source.name + "_" + 
layout.name + '_' + suffix_name - dict_path = os.path.join(self.get_dict_directory(), dict_name + '.xml') - dictionary = Dictionary(dict_name, structure, source, dict_path, "table_" + dict_name, fields) + dict_name = source.name + "_" + layout.name + "_" + suffix_name + dict_path = os.path.join(self.get_dict_directory(), dict_name + ".xml") + dictionary = Dictionary( + dict_name, structure, source, dict_path, "table_" + dict_name, fields + ) dictionary.generate_config() return dictionary + class SimpleLayoutTester(BaseLayoutTester): def __init__(self, test_name): - self.fields = KEY_FIELDS["simple"] + START_FIELDS["simple"] + MIDDLE_FIELDS + END_FIELDS["simple"] + self.fields = ( + KEY_FIELDS["simple"] + + START_FIELDS["simple"] + + MIDDLE_FIELDS + + END_FIELDS["simple"] + ) self.values = VALUES["simple"] self.data = [Row(self.fields, vals) for vals in self.values] self.layout_to_dictionary = dict() @@ -151,13 +250,17 @@ class SimpleLayoutTester(BaseLayoutTester): for field in self.fields: if not field.is_key: for query in dct.get_select_get_queries(field, row): - queries_with_answers.append((query, row.get_value_by_name(field.name))) + queries_with_answers.append( + (query, row.get_value_by_name(field.name)) + ) for query in dct.get_select_has_queries(field, row): queries_with_answers.append((query, 1)) for query in dct.get_select_get_or_default_queries(field, row): - queries_with_answers.append((query, field.default_value_for_get)) + queries_with_answers.append( + (query, field.default_value_for_get) + ) for query in dct.get_hierarchical_queries(self.data[0]): queries_with_answers.append((query, [1])) @@ -174,15 +277,22 @@ class SimpleLayoutTester(BaseLayoutTester): for query, answer in queries_with_answers: # print query if isinstance(answer, list): - answer = str(answer).replace(' ', '') - answer = str(answer) + '\n' + answer = str(answer).replace(" ", "") + answer = str(answer) + "\n" node_answer = node.query(query) - assert str(node_answer).strip() == answer.strip(), f"Expected '{answer.strip()}', got '{node_answer.strip()}' in query '{query}'" + assert ( + str(node_answer).strip() == answer.strip() + ), f"Expected '{answer.strip()}', got '{node_answer.strip()}' in query '{query}'" class ComplexLayoutTester(BaseLayoutTester): def __init__(self, test_name): - self.fields = KEY_FIELDS["complex"] + START_FIELDS["complex"] + MIDDLE_FIELDS + END_FIELDS["complex"] + self.fields = ( + KEY_FIELDS["complex"] + + START_FIELDS["complex"] + + MIDDLE_FIELDS + + END_FIELDS["complex"] + ) self.values = VALUES["complex"] self.data = [Row(self.fields, vals) for vals in self.values] self.layout_to_dictionary = dict() @@ -202,24 +312,35 @@ class ComplexLayoutTester(BaseLayoutTester): for field in self.fields: if not field.is_key: for query in dct.get_select_get_queries(field, row): - queries_with_answers.append((query, row.get_value_by_name(field.name))) + queries_with_answers.append( + (query, row.get_value_by_name(field.name)) + ) for query in dct.get_select_has_queries(field, row): queries_with_answers.append((query, 1)) for query in dct.get_select_get_or_default_queries(field, row): - queries_with_answers.append((query, field.default_value_for_get)) + queries_with_answers.append( + (query, field.default_value_for_get) + ) for query, answer in queries_with_answers: # print query node_answer = node.query(query) - answer = str(answer) + '\n' - assert node_answer == answer, f"Expected '{answer.strip()}', got '{node_answer.strip()}' in query '{query}'" + answer = str(answer) + "\n" + assert ( + 
node_answer == answer + ), f"Expected '{answer.strip()}', got '{node_answer.strip()}' in query '{query}'" class RangedLayoutTester(BaseLayoutTester): def __init__(self, test_name): - self.fields = KEY_FIELDS["ranged"] + START_FIELDS["ranged"] + MIDDLE_FIELDS + END_FIELDS["ranged"] + self.fields = ( + KEY_FIELDS["ranged"] + + START_FIELDS["ranged"] + + MIDDLE_FIELDS + + END_FIELDS["ranged"] + ) self.values = VALUES["ranged"] self.data = [Row(self.fields, vals) for vals in self.values] self.layout_to_dictionary = dict() @@ -240,10 +361,14 @@ class RangedLayoutTester(BaseLayoutTester): for field in self.fields: if not field.is_key and not field.is_range: for query in dct.get_select_get_queries(field, row): - queries_with_answers.append((query, row.get_value_by_name(field.name))) + queries_with_answers.append( + (query, row.get_value_by_name(field.name)) + ) for query, answer in queries_with_answers: # print query node_answer = node.query(query) - answer = str(answer) + '\n' - assert node_answer == answer, f"Expected '{answer.strip()}', got '{node_answer.strip()}' in query '{query}'" + answer = str(answer) + "\n" + assert ( + node_answer == answer + ), f"Expected '{answer.strip()}', got '{node_answer.strip()}' in query '{query}'" diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_cassandra.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_cassandra.py index 78715bd17cf..aa1eb614dd5 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_cassandra.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_cassandra.py @@ -16,6 +16,7 @@ complex_tester = None ranged_tester = None test_name = "cassandra" + def setup_module(module): global cluster global node @@ -25,7 +26,15 @@ def setup_module(module): cluster = ClickHouseCluster(__file__, name=test_name) - SOURCE = SourceCassandra("Cassandra", None, cluster.cassandra_port, cluster.cassandra_host, cluster.cassandra_port, "", "") + SOURCE = SourceCassandra( + "Cassandra", + None, + cluster.cassandra_port, + cluster.cassandra_host, + cluster.cassandra_port, + "", + "", + ) simple_tester = SimpleLayoutTester(test_name) simple_tester.cleanup() @@ -39,16 +48,22 @@ def setup_module(module): # Since that all .xml configs were created main_configs = [] - main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml')) + main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) dictionaries = simple_tester.list_dictionaries() - node = cluster.add_instance('cass_node', main_configs=main_configs, dictionaries=dictionaries, with_cassandra=True) + node = cluster.add_instance( + "cass_node", + main_configs=main_configs, + dictionaries=dictionaries, + with_cassandra=True, + ) def teardown_module(module): simple_tester.cleanup() + @pytest.fixture(scope="module") def started_cluster(): try: @@ -63,14 +78,17 @@ def started_cluster(): finally: cluster.shutdown() + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_SIMPLE)) def test_simple(started_cluster, layout_name): simple_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_COMPLEX)) def test_complex(started_cluster, layout_name): complex_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED)) def test_ranged(started_cluster, layout_name): ranged_tester.execute(layout_name, node) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_local.py 
b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_local.py index 051b4ff3086..b7f8226960f 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_local.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_local.py @@ -1,4 +1,4 @@ -import os +import os import math import pytest @@ -8,7 +8,9 @@ from helpers.cluster import ClickHouseCluster from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout from helpers.external_sources import SourceClickHouse -SOURCE = SourceClickHouse("LocalClickHouse", "localhost", "9000", "local_node", "9000", "default", "") +SOURCE = SourceClickHouse( + "LocalClickHouse", "localhost", "9000", "local_node", "9000", "default", "" +) cluster = None node = None @@ -17,6 +19,7 @@ complex_tester = None ranged_tester = None test_name = "local" + def setup_module(module): global cluster global node @@ -38,13 +41,15 @@ def setup_module(module): cluster = ClickHouseCluster(__file__, name=test_name) main_configs = [] - main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml')) + main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) dictionaries = simple_tester.list_dictionaries() - - node = cluster.add_instance('local_node', main_configs=main_configs, dictionaries=dictionaries) - + node = cluster.add_instance( + "local_node", main_configs=main_configs, dictionaries=dictionaries + ) + + def teardown_module(module): simple_tester.cleanup() @@ -63,14 +68,17 @@ def started_cluster(): finally: cluster.shutdown() + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_SIMPLE)) def test_simple(started_cluster, layout_name): simple_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_COMPLEX)) def test_complex(started_cluster, layout_name): complex_tester.execute(layout_name, node) - + + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED)) def test_ranged(started_cluster, layout_name): ranged_tester.execute(layout_name, node) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_remote.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_remote.py index 3ed335a1987..6790d11ed1a 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_remote.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_remote.py @@ -8,7 +8,9 @@ from helpers.cluster import ClickHouseCluster from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout from helpers.external_sources import SourceClickHouse -SOURCE = SourceClickHouse("RemoteClickHouse", "localhost", "9000", "clickhouse_remote", "9000", "default", "") +SOURCE = SourceClickHouse( + "RemoteClickHouse", "localhost", "9000", "clickhouse_remote", "9000", "default", "" +) cluster = None node = None @@ -17,6 +19,7 @@ complex_tester = None ranged_tester = None test_name = "remote" + def setup_module(module): global cluster global node @@ -38,13 +41,15 @@ def setup_module(module): cluster = ClickHouseCluster(__file__, name=test_name) main_configs = [] - main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml')) + main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) dictionaries = simple_tester.list_dictionaries() - cluster.add_instance('clickhouse_remote', main_configs=main_configs) + cluster.add_instance("clickhouse_remote", 
main_configs=main_configs) - node = cluster.add_instance('remote_node', main_configs=main_configs, dictionaries=dictionaries) + node = cluster.add_instance( + "remote_node", main_configs=main_configs, dictionaries=dictionaries + ) def teardown_module(module): @@ -65,14 +70,22 @@ def started_cluster(): finally: cluster.shutdown() -@pytest.mark.parametrize("layout_name", sorted(list(set(LAYOUTS_SIMPLE).difference(set("cache"))) )) + +@pytest.mark.parametrize( + "layout_name", sorted(list(set(LAYOUTS_SIMPLE).difference(set("cache")))) +) def test_simple(started_cluster, layout_name): simple_tester.execute(layout_name, node) -@pytest.mark.parametrize("layout_name", sorted(list(set(LAYOUTS_COMPLEX).difference(set("complex_key_cache"))))) + +@pytest.mark.parametrize( + "layout_name", + sorted(list(set(LAYOUTS_COMPLEX).difference(set("complex_key_cache")))), +) def test_complex(started_cluster, layout_name): complex_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED)) def test_ranged(started_cluster, layout_name): ranged_tester.execute(layout_name, node) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_cache.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_cache.py index 5d694bc09a2..5186139ddf6 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_cache.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_cache.py @@ -1,4 +1,4 @@ -import os +import os import math import pytest @@ -8,7 +8,9 @@ from helpers.cluster import ClickHouseCluster from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout from helpers.external_sources import SourceExecutableCache -SOURCE = SourceExecutableCache("ExecutableCache", "localhost", "9000", "cache_node", "9000", "", "") +SOURCE = SourceExecutableCache( + "ExecutableCache", "localhost", "9000", "cache_node", "9000", "", "" +) cluster = None node = None @@ -17,6 +19,7 @@ complex_tester = None ranged_tester = None test_name = "cache" + def setup_module(module): global cluster global node @@ -38,16 +41,19 @@ def setup_module(module): cluster = ClickHouseCluster(__file__, name=test_name) main_configs = [] - main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml')) + main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) dictionaries = simple_tester.list_dictionaries() - - node = cluster.add_instance('cache_node', main_configs=main_configs, dictionaries=dictionaries) - + node = cluster.add_instance( + "cache_node", main_configs=main_configs, dictionaries=dictionaries + ) + + def teardown_module(module): simple_tester.cleanup() + @pytest.fixture(scope="module") def started_cluster(): try: @@ -62,10 +68,12 @@ def started_cluster(): finally: cluster.shutdown() -@pytest.mark.parametrize("layout_name", ['cache']) + +@pytest.mark.parametrize("layout_name", ["cache"]) def test_simple(started_cluster, layout_name): simple_tester.execute(layout_name, node) -@pytest.mark.parametrize("layout_name", ['complex_key_cache']) + +@pytest.mark.parametrize("layout_name", ["complex_key_cache"]) def test_complex(started_cluster, layout_name): complex_tester.execute(layout_name, node) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_hashed.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_hashed.py index 8c0e6f8b878..63f4ff87cce 100644 --- 
a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_hashed.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_hashed.py @@ -1,4 +1,4 @@ -import os +import os import math import pytest @@ -8,7 +8,9 @@ from helpers.cluster import ClickHouseCluster from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout from helpers.external_sources import SourceExecutableHashed -SOURCE = SourceExecutableHashed("ExecutableHashed", "localhost", "9000", "hashed_node", "9000", "", "") +SOURCE = SourceExecutableHashed( + "ExecutableHashed", "localhost", "9000", "hashed_node", "9000", "", "" +) cluster = None node = None @@ -39,16 +41,19 @@ def setup_module(module): cluster = ClickHouseCluster(__file__, name=test_name) main_configs = [] - main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml')) + main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) dictionaries = simple_tester.list_dictionaries() - - node = cluster.add_instance('hashed_node', main_configs=main_configs, dictionaries=dictionaries) - + node = cluster.add_instance( + "hashed_node", main_configs=main_configs, dictionaries=dictionaries + ) + + def teardown_module(module): simple_tester.cleanup() + @pytest.fixture(scope="module") def started_cluster(): try: @@ -63,14 +68,17 @@ def started_cluster(): finally: cluster.shutdown() -@pytest.mark.parametrize("layout_name", ['hashed']) + +@pytest.mark.parametrize("layout_name", ["hashed"]) def test_simple(started_cluster, layout_name): simple_tester.execute(layout_name, node) -@pytest.mark.parametrize("layout_name", ['complex_key_hashed']) + +@pytest.mark.parametrize("layout_name", ["complex_key_hashed"]) def test_complex(started_cluster, layout_name): complex_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED)) def test_ranged(started_cluster, layout_name): ranged_tester.execute(layout_name, node) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_file.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_file.py index 97a06fadc5e..0147b95c786 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_file.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_file.py @@ -17,6 +17,7 @@ complex_tester = None ranged_tester = None test_name = "file" + def setup_module(module): global cluster global node @@ -38,16 +39,19 @@ def setup_module(module): cluster = ClickHouseCluster(__file__, name=test_name) main_configs = [] - main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml')) + main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) dictionaries = simple_tester.list_dictionaries() - node = cluster.add_instance('file_node', main_configs=main_configs, dictionaries=dictionaries) + node = cluster.add_instance( + "file_node", main_configs=main_configs, dictionaries=dictionaries + ) def teardown_module(module): simple_tester.cleanup() + @pytest.fixture(scope="module") def started_cluster(): try: @@ -62,14 +66,26 @@ def started_cluster(): finally: cluster.shutdown() -@pytest.mark.parametrize("layout_name", sorted(set(LAYOUTS_SIMPLE).difference({'cache', 'direct'})) ) + +@pytest.mark.parametrize( + "layout_name", sorted(set(LAYOUTS_SIMPLE).difference({"cache", "direct"})) +) def test_simple(started_cluster, layout_name): simple_tester.execute(layout_name, node) 
-@pytest.mark.parametrize("layout_name", sorted(list(set(LAYOUTS_COMPLEX).difference({'complex_key_cache', 'complex_key_direct'})))) + +@pytest.mark.parametrize( + "layout_name", + sorted( + list( + set(LAYOUTS_COMPLEX).difference({"complex_key_cache", "complex_key_direct"}) + ) + ), +) def test_complex(started_cluster, layout_name): complex_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED)) def test_ranged(started_cluster, layout_name): ranged_tester.execute(layout_name, node) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_http.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_http.py index c8c73011f61..96d17508880 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_http.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_http.py @@ -17,6 +17,7 @@ complex_tester = None ranged_tester = None test_name = "http" + def setup_module(module): global cluster global node @@ -38,18 +39,21 @@ def setup_module(module): cluster = ClickHouseCluster(__file__, name=test_name) main_configs = [] - main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml')) + main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) dictionaries = simple_tester.list_dictionaries() - cluster.add_instance('clickhouse_h', main_configs=main_configs) + cluster.add_instance("clickhouse_h", main_configs=main_configs) - node = cluster.add_instance('http_node', main_configs=main_configs, dictionaries=dictionaries) + node = cluster.add_instance( + "http_node", main_configs=main_configs, dictionaries=dictionaries + ) def teardown_module(module): simple_tester.cleanup() + @pytest.fixture(scope="module") def started_cluster(): try: @@ -64,14 +68,17 @@ def started_cluster(): finally: cluster.shutdown() + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_SIMPLE)) def test_simple(started_cluster, layout_name): simple_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_COMPLEX)) def test_complex(started_cluster, layout_name): complex_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED)) def test_ranged(started_cluster, layout_name): ranged_tester.execute(layout_name, node) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_https.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_https.py index 42f33e3da3c..007e318e037 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_https.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_https.py @@ -8,7 +8,9 @@ from helpers.cluster import ClickHouseCluster from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout from helpers.external_sources import SourceHTTPS -SOURCE = SourceHTTPS("SourceHTTPS", "localhost", "9000", "clickhouse_hs", "9000", "", "") +SOURCE = SourceHTTPS( + "SourceHTTPS", "localhost", "9000", "clickhouse_hs", "9000", "", "" +) cluster = None node = None @@ -17,6 +19,7 @@ complex_tester = None ranged_tester = None test_name = "https" + def setup_module(module): global cluster global node @@ -38,18 +41,21 @@ def setup_module(module): cluster = ClickHouseCluster(__file__, name=test_name) main_configs = [] - main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml')) + main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) 
dictionaries = simple_tester.list_dictionaries() - cluster.add_instance('clickhouse_hs', main_configs=main_configs) + cluster.add_instance("clickhouse_hs", main_configs=main_configs) - node = cluster.add_instance('https_node', main_configs=main_configs, dictionaries=dictionaries) + node = cluster.add_instance( + "https_node", main_configs=main_configs, dictionaries=dictionaries + ) def teardown_module(module): simple_tester.cleanup() + @pytest.fixture(scope="module") def started_cluster(): try: @@ -64,14 +70,17 @@ def started_cluster(): finally: cluster.shutdown() + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_SIMPLE)) def test_simple(started_cluster, layout_name): simple_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_COMPLEX)) def test_complex(started_cluster, layout_name): complex_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED)) def test_ranged(started_cluster, layout_name): ranged_tester.execute(layout_name, node) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py index deaaf044bce..4a9d054b08f 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py @@ -16,6 +16,7 @@ complex_tester = None ranged_tester = None test_name = "mongo" + def setup_module(module): global cluster global node @@ -24,7 +25,15 @@ def setup_module(module): global ranged_tester cluster = ClickHouseCluster(__file__, name=test_name) - SOURCE = SourceMongo("MongoDB", "localhost", cluster.mongo_port, cluster.mongo_host, "27017", "root", "clickhouse") + SOURCE = SourceMongo( + "MongoDB", + "localhost", + cluster.mongo_port, + cluster.mongo_host, + "27017", + "root", + "clickhouse", + ) simple_tester = SimpleLayoutTester(test_name) simple_tester.cleanup() @@ -38,16 +47,19 @@ def setup_module(module): # Since that all .xml configs were created main_configs = [] - main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml')) + main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) dictionaries = simple_tester.list_dictionaries() - node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_mongo=True) + node = cluster.add_instance( + "node", main_configs=main_configs, dictionaries=dictionaries, with_mongo=True + ) def teardown_module(module): simple_tester.cleanup() + @pytest.fixture(scope="module") def started_cluster(): try: @@ -62,14 +74,17 @@ def started_cluster(): finally: cluster.shutdown() + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_SIMPLE)) def test_simple(started_cluster, layout_name): simple_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_COMPLEX)) def test_complex(started_cluster, layout_name): complex_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED)) def test_ranged(started_cluster, layout_name): ranged_tester.execute(layout_name, node) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py index fdf4826cb63..c6551e0eb70 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py +++ 
b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py @@ -1,4 +1,4 @@ -import os +import os import math import pytest @@ -26,7 +26,15 @@ def setup_module(module): cluster = ClickHouseCluster(__file__, name=test_name) - SOURCE = SourceMongoURI("MongoDB", "localhost", cluster.mongo_port, cluster.mongo_host, "27017", "root", "clickhouse") + SOURCE = SourceMongoURI( + "MongoDB", + "localhost", + cluster.mongo_port, + cluster.mongo_host, + "27017", + "root", + "clickhouse", + ) simple_tester = SimpleLayoutTester(test_name) simple_tester.cleanup() @@ -40,16 +48,22 @@ def setup_module(module): # Since that all .xml configs were created main_configs = [] - main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml')) - + main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) + dictionaries = simple_tester.list_dictionaries() - node = cluster.add_instance('uri_node', main_configs=main_configs, dictionaries=dictionaries, with_mongo=True) + node = cluster.add_instance( + "uri_node", + main_configs=main_configs, + dictionaries=dictionaries, + with_mongo=True, + ) + - def teardown_module(module): simple_tester.cleanup() + @pytest.fixture(scope="module") def started_cluster(): try: @@ -64,6 +78,7 @@ def started_cluster(): finally: cluster.shutdown() + # See comment in SourceMongoURI @pytest.mark.parametrize("layout_name", ["flat"]) def test_simple(started_cluster, layout_name): diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mysql.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mysql.py index 7cd7460b8cb..96757c58e0c 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mysql.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mysql.py @@ -26,7 +26,15 @@ def setup_module(module): cluster = ClickHouseCluster(__file__, name=test_name) - SOURCE = SourceMySQL("MySQL", None, cluster.mysql_port, cluster.mysql_host, cluster.mysql_port, "root", "clickhouse") + SOURCE = SourceMySQL( + "MySQL", + None, + cluster.mysql_port, + cluster.mysql_host, + cluster.mysql_port, + "root", + "clickhouse", + ) simple_tester = SimpleLayoutTester(test_name) simple_tester.cleanup() @@ -40,21 +48,24 @@ def setup_module(module): # Since that all .xml configs were created main_configs = [] - main_configs.append(os.path.join('configs', 'disable_ssl_verification.xml')) + main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) dictionaries = simple_tester.list_dictionaries() - - node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_mysql=True) + + node = cluster.add_instance( + "node", main_configs=main_configs, dictionaries=dictionaries, with_mysql=True + ) def teardown_module(module): simple_tester.cleanup() + @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - + simple_tester.prepare(cluster) complex_tester.prepare(cluster) ranged_tester.prepare(cluster) @@ -64,14 +75,17 @@ def started_cluster(): finally: cluster.shutdown() + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_SIMPLE)) def test_simple(started_cluster, layout_name): simple_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_COMPLEX)) def test_complex(started_cluster, layout_name): complex_tester.execute(layout_name, node) + @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED)) def test_ranged(started_cluster, layout_name): 
ranged_tester.execute(layout_name, node) diff --git a/tests/integration/test_dictionaries_complex_key_cache_string/test.py b/tests/integration/test_dictionaries_complex_key_cache_string/test.py index f5a1be7daf4..ae9cd4e7c91 100644 --- a/tests/integration/test_dictionaries_complex_key_cache_string/test.py +++ b/tests/integration/test_dictionaries_complex_key_cache_string/test.py @@ -5,54 +5,92 @@ from helpers.cluster import ClickHouseCluster SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) cluster = ClickHouseCluster(__file__) -node_memory = cluster.add_instance('node_memory', dictionaries=['configs/dictionaries/complex_key_cache_string.xml']) -node_ssd = cluster.add_instance('node_ssd', dictionaries=['configs/dictionaries/ssd_complex_key_cache_string.xml']) +node_memory = cluster.add_instance( + "node_memory", dictionaries=["configs/dictionaries/complex_key_cache_string.xml"] +) +node_ssd = cluster.add_instance( + "node_ssd", dictionaries=["configs/dictionaries/ssd_complex_key_cache_string.xml"] +) + @pytest.fixture() def started_cluster(): try: cluster.start() node_memory.query( - "create table radars_table (radar_id String, radar_ip String, client_id String) engine=MergeTree() order by radar_id") + "create table radars_table (radar_id String, radar_ip String, client_id String) engine=MergeTree() order by radar_id" + ) node_ssd.query( - "create table radars_table (radar_id String, radar_ip String, client_id String) engine=MergeTree() order by radar_id") + "create table radars_table (radar_id String, radar_ip String, client_id String) engine=MergeTree() order by radar_id" + ) yield cluster finally: cluster.shutdown() + @pytest.mark.skip(reason="SSD cache test can run on disk only") @pytest.mark.parametrize("type", ["memory", "ssd"]) def test_memory_consumption(started_cluster, type): - node = started_cluster.instances[f'node_{type}'] + node = started_cluster.instances[f"node_{type}"] node.query( - "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format('w' * 8)) + "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format( + "w" * 8 + ) + ) node.query( - "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format('x' * 16)) + "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format( + "x" * 16 + ) + ) node.query( - "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format('y' * 32)) + "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format( + "y" * 32 + ) + ) node.query( - "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format('z' * 64)) + "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format( + "z" * 64 + ) + ) # Fill dictionary - node.query("select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)") + node.query( + "select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)" + ) - allocated_first = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip()) + allocated_first = int( + node.query( + "select bytes_allocated from system.dictionaries where name = 'radars'" + ).strip() + ) alloc_array = [] for i in range(5): - node.query("select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)") + node.query( + "select 
dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)" + ) - allocated = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip()) + allocated = int( + node.query( + "select bytes_allocated from system.dictionaries where name = 'radars'" + ).strip() + ) alloc_array.append(allocated) # size doesn't grow assert all(allocated_first >= a for a in alloc_array) for i in range(5): - node.query("select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)") + node.query( + "select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)" + ) - allocated = int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip()) + allocated = int( + node.query( + "select bytes_allocated from system.dictionaries where name = 'radars'" + ).strip() + ) alloc_array.append(allocated) # size doesn't grow diff --git a/tests/integration/test_dictionaries_config_reload/test.py b/tests/integration/test_dictionaries_config_reload/test.py index 4f338767304..7be179f854b 100644 --- a/tests/integration/test_dictionaries_config_reload/test.py +++ b/tests/integration/test_dictionaries_config_reload/test.py @@ -10,16 +10,22 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True, main_configs=['config/dictionaries_config.xml']) +node = cluster.add_instance( + "node", stay_alive=True, main_configs=["config/dictionaries_config.xml"] +) def copy_file_to_container(local_path, dist_path, container_id): - os.system("docker cp {local} {cont_id}:{dist}".format(local=local_path, cont_id=container_id, dist=dist_path)) + os.system( + "docker cp {local} {cont_id}:{dist}".format( + local=local_path, cont_id=container_id, dist=dist_path + ) + ) -config = ''' +config = """ /etc/clickhouse-server/dictionaries/{dictionaries_config} -''' +""" @pytest.fixture(scope="module") @@ -27,9 +33,15 @@ def started_cluster(): try: cluster.start() - copy_file_to_container(os.path.join(SCRIPT_DIR, 'dictionaries/.'), '/etc/clickhouse-server/dictionaries', node.docker_id) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "dictionaries/."), + "/etc/clickhouse-server/dictionaries", + node.docker_id, + ) - node.query("CREATE TABLE dictionary_values (id UInt64, value_1 String, value_2 String) ENGINE=TinyLog;") + node.query( + "CREATE TABLE dictionary_values (id UInt64, value_1 String, value_2 String) ENGINE=TinyLog;" + ) node.query("INSERT INTO dictionary_values VALUES (0, 'Value_1', 'Value_2')") node.restart_clickhouse() @@ -41,7 +53,10 @@ def started_cluster(): def change_config(dictionaries_config): - node.replace_config("/etc/clickhouse-server/config.d/dictionaries_config.xml", config.format(dictionaries_config=dictionaries_config)) + node.replace_config( + "/etc/clickhouse-server/config.d/dictionaries_config.xml", + config.format(dictionaries_config=dictionaries_config), + ) node.query("SYSTEM RELOAD CONFIG;") @@ -51,7 +66,10 @@ def test(started_cluster): time.sleep(10) - assert node.query("SELECT dictGet('test_dictionary_1', 'value_1', toUInt64(0));") == 'Value_1\n' + assert ( + node.query("SELECT dictGet('test_dictionary_1', 'value_1', toUInt64(0));") + == "Value_1\n" + ) # Change path to the second dictionary in config. 
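# The complex_key_cache_string test above reduces to one invariant: repeated
# dictGetString() lookups must not make bytes_allocated in system.dictionaries grow.
# A sketch of that check under stated assumptions: run_query is a hypothetical
# stand-in for node.query, and the helper name is illustrative, not part of the tests.
from typing import Callable, List


def bytes_allocated_stays_bounded(run_query: Callable[[str], str], rounds: int = 5) -> bool:
    """Return True if repeated dictionary lookups never exceed the first measurement."""
    lookup = (
        "select dictGetString('radars', 'client_id', tuple(toString(number))) "
        "from numbers(0, 5000)"
    )
    size_query = "select bytes_allocated from system.dictionaries where name = 'radars'"

    run_query(lookup)  # warm the cache dictionary
    first = int(run_query(size_query).strip())

    samples: List[int] = []
    for _ in range(rounds):
        run_query(lookup)
        samples.append(int(run_query(size_query).strip()))

    # Same assertion as the test: the cache size must not grow past the first sample.
    return all(first >= s for s in samples)


# Usage would look like: assert bytes_allocated_stays_bounded(node.query)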
change_config("dictionary_config2.xml") @@ -59,7 +77,12 @@ def test(started_cluster): time.sleep(10) # Check that the new dictionary is loaded. - assert node.query("SELECT dictGet('test_dictionary_2', 'value_2', toUInt64(0));") == 'Value_2\n' + assert ( + node.query("SELECT dictGet('test_dictionary_2', 'value_2', toUInt64(0));") + == "Value_2\n" + ) # Check that the previous dictionary was unloaded. - node.query_and_get_error("SELECT dictGet('test_dictionary_1', 'value', toUInt64(0));") + node.query_and_get_error( + "SELECT dictGet('test_dictionary_1', 'value', toUInt64(0));" + ) diff --git a/tests/integration/test_dictionaries_ddl/test.py b/tests/integration/test_dictionaries_ddl/test.py index 6e4d9958fac..cb70deef72b 100644 --- a/tests/integration/test_dictionaries_ddl/test.py +++ b/tests/integration/test_dictionaries_ddl/test.py @@ -11,25 +11,43 @@ from helpers.cluster import ClickHouseCluster SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_mysql=True, dictionaries=['configs/dictionaries/simple_dictionary.xml'], - user_configs=['configs/user_admin.xml', 'configs/user_default.xml']) -node2 = cluster.add_instance('node2', with_mysql=True, dictionaries=['configs/dictionaries/simple_dictionary.xml'], - main_configs=['configs/dictionaries/lazy_load.xml', 'configs/allow_remote_node.xml'], - user_configs=['configs/user_admin.xml', 'configs/user_default.xml']) -node3 = cluster.add_instance('node3', main_configs=['configs/allow_remote_node.xml'], - dictionaries=['configs/dictionaries/dictionary_with_conflict_name.xml', - 'configs/dictionaries/conflict_name_dictionary.xml'], - user_configs=['configs/user_admin.xml']) -node4 = cluster.add_instance('node4', user_configs=['configs/user_admin.xml', 'configs/config_password.xml']) +node1 = cluster.add_instance( + "node1", + with_mysql=True, + dictionaries=["configs/dictionaries/simple_dictionary.xml"], + user_configs=["configs/user_admin.xml", "configs/user_default.xml"], +) +node2 = cluster.add_instance( + "node2", + with_mysql=True, + dictionaries=["configs/dictionaries/simple_dictionary.xml"], + main_configs=[ + "configs/dictionaries/lazy_load.xml", + "configs/allow_remote_node.xml", + ], + user_configs=["configs/user_admin.xml", "configs/user_default.xml"], +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/allow_remote_node.xml"], + dictionaries=[ + "configs/dictionaries/dictionary_with_conflict_name.xml", + "configs/dictionaries/conflict_name_dictionary.xml", + ], + user_configs=["configs/user_admin.xml"], +) +node4 = cluster.add_instance( + "node4", user_configs=["configs/user_admin.xml", "configs/config_password.xml"] +) def create_mysql_conn(user, password, hostname, port): - logging.debug("Created MySQL connection user:{}, password:{}, host:{}, port{}".format(user, password, hostname, port)) - return pymysql.connect( - user=user, - password=password, - host=hostname, - port=port) + logging.debug( + "Created MySQL connection user:{}, password:{}, host:{}, port{}".format( + user, password, hostname, port + ) + ) + return pymysql.connect(user=user, password=password, host=hostname, port=port) def execute_mysql_query(connection, query): @@ -49,35 +67,71 @@ def started_cluster(): clickhouse.query("CREATE DATABASE test", user="admin") clickhouse.query( "CREATE TABLE test.xml_dictionary_table (id UInt64, SomeValue1 UInt8, SomeValue2 String) ENGINE = MergeTree() ORDER BY id", - user="admin") + user="admin", + ) clickhouse.query( "INSERT INTO 
test.xml_dictionary_table SELECT number, number % 23, hex(number) from numbers(1000)", - user="admin") + user="admin", + ) yield cluster finally: cluster.shutdown() -@pytest.mark.parametrize("clickhouse,name,layout", [ - pytest.param(node1, 'complex_node1_hashed', 'LAYOUT(COMPLEX_KEY_HASHED())', id="complex_node1_hashed"), - pytest.param(node1, 'complex_node1_cache', 'LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 10))', id="complex_node1_cache"), - pytest.param(node2, 'complex_node2_hashed', 'LAYOUT(COMPLEX_KEY_HASHED())', id="complex_node2_hashed"), - pytest.param(node2, 'complex_node2_cache', 'LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 10))', id="complex_node2_cache"), -]) +@pytest.mark.parametrize( + "clickhouse,name,layout", + [ + pytest.param( + node1, + "complex_node1_hashed", + "LAYOUT(COMPLEX_KEY_HASHED())", + id="complex_node1_hashed", + ), + pytest.param( + node1, + "complex_node1_cache", + "LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 10))", + id="complex_node1_cache", + ), + pytest.param( + node2, + "complex_node2_hashed", + "LAYOUT(COMPLEX_KEY_HASHED())", + id="complex_node2_hashed", + ), + pytest.param( + node2, + "complex_node2_cache", + "LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 10))", + id="complex_node2_cache", + ), + ], +) def test_create_and_select_mysql(started_cluster, clickhouse, name, layout): - mysql_conn = create_mysql_conn("root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port) + mysql_conn = create_mysql_conn( + "root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port + ) execute_mysql_query(mysql_conn, "DROP DATABASE IF EXISTS create_and_select") execute_mysql_query(mysql_conn, "CREATE DATABASE create_and_select") - execute_mysql_query(mysql_conn, - "CREATE TABLE create_and_select.{} (key_field1 int, key_field2 bigint, value1 text, value2 float, PRIMARY KEY (key_field1, key_field2))".format( - name)) + execute_mysql_query( + mysql_conn, + "CREATE TABLE create_and_select.{} (key_field1 int, key_field2 bigint, value1 text, value2 float, PRIMARY KEY (key_field1, key_field2))".format( + name + ), + ) values = [] for i in range(1000): - values.append('(' + ','.join([str(i), str(i * i), str(i) * 5, str(i * 3.14)]) + ')') - execute_mysql_query(mysql_conn, "INSERT INTO create_and_select.{} VALUES ".format(name) + ','.join(values)) + values.append( + "(" + ",".join([str(i), str(i * i), str(i) * 5, str(i * 3.14)]) + ")" + ) + execute_mysql_query( + mysql_conn, + "INSERT INTO create_and_select.{} VALUES ".format(name) + ",".join(values), + ) - clickhouse.query(""" + clickhouse.query( + """ CREATE DICTIONARY default.{} ( key_field1 Int32, key_field2 Int64, @@ -95,48 +149,76 @@ def test_create_and_select_mysql(started_cluster, clickhouse, name, layout): )) {} LIFETIME(MIN 1 MAX 3) - """.format(name, name, layout)) + """.format( + name, name, layout + ) + ) for i in range(172, 200): - assert clickhouse.query( - "SELECT dictGetString('default.{}', 'value1', tuple(toInt32({}), toInt64({})))".format(name, i, - i * i)) == str( - i) * 5 + '\n' + assert ( + clickhouse.query( + "SELECT dictGetString('default.{}', 'value1', tuple(toInt32({}), toInt64({})))".format( + name, i, i * i + ) + ) + == str(i) * 5 + "\n" + ) stroka = clickhouse.query( - "SELECT dictGetFloat32('default.{}', 'value2', tuple(toInt32({}), toInt64({})))".format(name, i, - i * i)).strip() + "SELECT dictGetFloat32('default.{}', 'value2', tuple(toInt32({}), toInt64({})))".format( + name, i, i * i + ) + ).strip() value = float(stroka) assert int(value) == int(i * 3.14) for i in range(1000): - 
values.append('(' + ','.join([str(i), str(i * i), str(i) * 3, str(i * 2.718)]) + ')') - execute_mysql_query(mysql_conn, "REPLACE INTO create_and_select.{} VALUES ".format(name) + ','.join(values)) + values.append( + "(" + ",".join([str(i), str(i * i), str(i) * 3, str(i * 2.718)]) + ")" + ) + execute_mysql_query( + mysql_conn, + "REPLACE INTO create_and_select.{} VALUES ".format(name) + ",".join(values), + ) clickhouse.query("SYSTEM RELOAD DICTIONARY 'default.{}'".format(name)) for i in range(172, 200): - assert clickhouse.query( - "SELECT dictGetString('default.{}', 'value1', tuple(toInt32({}), toInt64({})))".format(name, i, - i * i)) == str( - i) * 3 + '\n' + assert ( + clickhouse.query( + "SELECT dictGetString('default.{}', 'value1', tuple(toInt32({}), toInt64({})))".format( + name, i, i * i + ) + ) + == str(i) * 3 + "\n" + ) string = clickhouse.query( - "SELECT dictGetFloat32('default.{}', 'value2', tuple(toInt32({}), toInt64({})))".format(name, i, - i * i)).strip() + "SELECT dictGetFloat32('default.{}', 'value2', tuple(toInt32({}), toInt64({})))".format( + name, i, i * i + ) + ).strip() value = float(string) assert int(value) == int(i * 2.718) - clickhouse.query("select dictGetUInt8('xml_dictionary', 'SomeValue1', toUInt64(17))") == "17\n" - clickhouse.query("select dictGetString('xml_dictionary', 'SomeValue2', toUInt64(977))") == str(hex(977))[2:] + '\n' + clickhouse.query( + "select dictGetUInt8('xml_dictionary', 'SomeValue1', toUInt64(17))" + ) == "17\n" + clickhouse.query( + "select dictGetString('xml_dictionary', 'SomeValue2', toUInt64(977))" + ) == str(hex(977))[2:] + "\n" clickhouse.query(f"drop dictionary default.{name}") def test_restricted_database(started_cluster): for node in [node1, node2]: node.query("CREATE DATABASE IF NOT EXISTS restricted_db", user="admin") - node.query("CREATE TABLE restricted_db.table_in_restricted_db AS test.xml_dictionary_table", user="admin") + node.query( + "CREATE TABLE restricted_db.table_in_restricted_db AS test.xml_dictionary_table", + user="admin", + ) with pytest.raises(QueryRuntimeException): - node1.query(""" + node1.query( + """ CREATE DICTIONARY restricted_db.some_dict( id UInt64, SomeValue1 UInt8, @@ -146,10 +228,12 @@ def test_restricted_database(started_cluster): LAYOUT(FLAT()) SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_in_restricted_db' DB 'restricted_db')) LIFETIME(MIN 1 MAX 10) - """) + """ + ) with pytest.raises(QueryRuntimeException): - node1.query(""" + node1.query( + """ CREATE DICTIONARY default.some_dict( id UInt64, SomeValue1 UInt8, @@ -159,13 +243,17 @@ def test_restricted_database(started_cluster): LAYOUT(FLAT()) SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_in_restricted_db' DB 'restricted_db')) LIFETIME(MIN 1 MAX 10) - """) + """ + ) - node1.query("SELECT dictGetUInt8('default.some_dict', 'SomeValue1', toUInt64(17))") == "17\n" + node1.query( + "SELECT dictGetUInt8('default.some_dict', 'SomeValue1', toUInt64(17))" + ) == "17\n" # with lazy load we don't need query to get exception with pytest.raises(QueryRuntimeException): - node2.query(""" + node2.query( + """ CREATE DICTIONARY restricted_db.some_dict( id UInt64, SomeValue1 UInt8, @@ -175,10 +263,12 @@ def test_restricted_database(started_cluster): LAYOUT(FLAT()) SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_in_restricted_db' DB 'restricted_db')) LIFETIME(MIN 1 MAX 10) - """) + """ + ) with pytest.raises(QueryRuntimeException): - node2.query(""" + node2.query( + """ CREATE DICTIONARY 
default.some_dict( id UInt64, SomeValue1 UInt8, @@ -188,17 +278,24 @@ def test_restricted_database(started_cluster): LAYOUT(FLAT()) SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'table_in_restricted_db' DB 'restricted_db')) LIFETIME(MIN 1 MAX 10) - """) + """ + ) for node in [node1, node2]: node.query("DROP DICTIONARY IF EXISTS default.some_dict", user="admin") node.query("DROP DATABASE restricted_db", user="admin") def test_conflicting_name(started_cluster): - assert node3.query("select dictGetUInt8('test.conflicting_dictionary', 'SomeValue1', toUInt64(17))") == '17\n' + assert ( + node3.query( + "select dictGetUInt8('test.conflicting_dictionary', 'SomeValue1', toUInt64(17))" + ) + == "17\n" + ) with pytest.raises(QueryRuntimeException): - node3.query(""" + node3.query( + """ CREATE DICTIONARY test.conflicting_dictionary( id UInt64, SomeValue1 UInt8, @@ -208,15 +305,19 @@ def test_conflicting_name(started_cluster): LAYOUT(FLAT()) SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'xml_dictionary_table' DB 'test')) LIFETIME(MIN 1 MAX 10) - """) + """ + ) # old version still works - node3.query("select dictGetUInt8('test.conflicting_dictionary', 'SomeValue1', toUInt64(17))") == '17\n' + node3.query( + "select dictGetUInt8('test.conflicting_dictionary', 'SomeValue1', toUInt64(17))" + ) == "17\n" def test_http_dictionary_restrictions(started_cluster): try: - node3.query(""" + node3.query( + """ CREATE DICTIONARY test.restricted_http_dictionary ( id UInt64, value String @@ -225,16 +326,20 @@ def test_http_dictionary_restrictions(started_cluster): LAYOUT(FLAT()) SOURCE(HTTP(URL 'http://somehost.net' FORMAT TabSeparated)) LIFETIME(1) - """) - node3.query("SELECT dictGetString('test.restricted_http_dictionary', 'value', toUInt64(1))") + """ + ) + node3.query( + "SELECT dictGetString('test.restricted_http_dictionary', 'value', toUInt64(1))" + ) except QueryRuntimeException as ex: - assert 'is not allowed in configuration file' in str(ex) + assert "is not allowed in configuration file" in str(ex) node3.query("DROP DICTIONARY test.restricted_http_dictionary") def test_file_dictionary_restrictions(started_cluster): try: - node3.query(""" + node3.query( + """ CREATE DICTIONARY test.restricted_file_dictionary ( id UInt64, value String @@ -243,21 +348,34 @@ def test_file_dictionary_restrictions(started_cluster): LAYOUT(FLAT()) SOURCE(FILE(PATH '/usr/bin/cat' FORMAT TabSeparated)) LIFETIME(1) - """) - node3.query("SELECT dictGetString('test.restricted_file_dictionary', 'value', toUInt64(1))") + """ + ) + node3.query( + "SELECT dictGetString('test.restricted_file_dictionary', 'value', toUInt64(1))" + ) except QueryRuntimeException as ex: - assert 'is not inside' in str(ex) + assert "is not inside" in str(ex) node3.query("DROP DICTIONARY test.restricted_file_dictionary") def test_dictionary_with_where(started_cluster): - mysql_conn = create_mysql_conn("root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port) - execute_mysql_query(mysql_conn, "CREATE DATABASE IF NOT EXISTS dictionary_with_where") - execute_mysql_query(mysql_conn, - "CREATE TABLE dictionary_with_where.special_table (key_field1 int, value1 text, PRIMARY KEY (key_field1))") - execute_mysql_query(mysql_conn, "INSERT INTO dictionary_with_where.special_table VALUES (1, 'abcabc'), (2, 'qweqwe')") + mysql_conn = create_mysql_conn( + "root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port + ) + execute_mysql_query( + mysql_conn, "CREATE DATABASE IF NOT EXISTS 
dictionary_with_where" + ) + execute_mysql_query( + mysql_conn, + "CREATE TABLE dictionary_with_where.special_table (key_field1 int, value1 text, PRIMARY KEY (key_field1))", + ) + execute_mysql_query( + mysql_conn, + "INSERT INTO dictionary_with_where.special_table VALUES (1, 'abcabc'), (2, 'qweqwe')", + ) - node1.query(""" + node1.query( + """ CREATE DICTIONARY default.special_dict ( key_field1 Int32, value1 String DEFAULT 'xxx' @@ -273,11 +391,17 @@ def test_dictionary_with_where(started_cluster): )) LAYOUT(FLAT()) LIFETIME(MIN 1 MAX 3) - """) + """ + ) node1.query("SYSTEM RELOAD DICTIONARY default.special_dict") - assert node1.query("SELECT dictGetString('default.special_dict', 'value1', toUInt64(2))") == 'qweqwe\n' + assert ( + node1.query( + "SELECT dictGetString('default.special_dict', 'value1', toUInt64(2))" + ) + == "qweqwe\n" + ) node1.query("DROP DICTIONARY default.special_dict") execute_mysql_query(mysql_conn, "DROP TABLE dictionary_with_where.special_table") execute_mysql_query(mysql_conn, "DROP DATABASE dictionary_with_where") @@ -285,7 +409,8 @@ def test_dictionary_with_where(started_cluster): def test_clickhouse_remote(started_cluster): with pytest.raises(QueryRuntimeException): - node3.query(""" + node3.query( + """ CREATE DICTIONARY test.clickhouse_remote( id UInt64, SomeValue1 UInt8, @@ -295,7 +420,8 @@ def test_clickhouse_remote(started_cluster): LAYOUT(FLAT()) SOURCE(CLICKHOUSE(HOST 'node4' PORT 9000 USER 'default' TABLE 'xml_dictionary_table' DB 'test')) LIFETIME(MIN 1 MAX 10) - """) + """ + ) for i in range(5): node3.query("system reload dictionary test.clickhouse_remote") time.sleep(0.5) @@ -303,7 +429,8 @@ def test_clickhouse_remote(started_cluster): node3.query("detach dictionary if exists test.clickhouse_remote") with pytest.raises(QueryRuntimeException): - node3.query(""" + node3.query( + """ CREATE DICTIONARY test.clickhouse_remote( id UInt64, SomeValue1 UInt8, @@ -313,12 +440,14 @@ def test_clickhouse_remote(started_cluster): LAYOUT(FLAT()) SOURCE(CLICKHOUSE(HOST 'node4' PORT 9000 USER 'default' PASSWORD 'default' TABLE 'xml_dictionary_table' DB 'test')) LIFETIME(MIN 1 MAX 10) - """) + """ + ) node3.query("attach dictionary test.clickhouse_remote") node3.query("drop dictionary test.clickhouse_remote") - node3.query(""" + node3.query( + """ CREATE DICTIONARY test.clickhouse_remote( id UInt64, SomeValue1 UInt8, @@ -328,6 +457,9 @@ def test_clickhouse_remote(started_cluster): LAYOUT(FLAT()) SOURCE(CLICKHOUSE(HOST 'node4' PORT 9000 USER 'default' PASSWORD 'default' TABLE 'xml_dictionary_table' DB 'test')) LIFETIME(MIN 1 MAX 10) - """) + """ + ) - node3.query("select dictGetUInt8('test.clickhouse_remote', 'SomeValue1', toUInt64(17))") == '17\n' + node3.query( + "select dictGetUInt8('test.clickhouse_remote', 'SomeValue1', toUInt64(17))" + ) == "17\n" diff --git a/tests/integration/test_dictionaries_dependency/test.py b/tests/integration/test_dictionaries_dependency/test.py index 7dc7f84d50b..f57d4e42813 100644 --- a/tests/integration/test_dictionaries_dependency/test.py +++ b/tests/integration/test_dictionaries_dependency/test.py @@ -2,8 +2,10 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', stay_alive=True) -node2 = cluster.add_instance('node2', stay_alive=True, main_configs=['configs/disable_lazy_load.xml']) +node1 = cluster.add_instance("node1", stay_alive=True) +node2 = cluster.add_instance( + "node2", stay_alive=True, main_configs=["configs/disable_lazy_load.xml"] +) nodes 
= [node1, node2] @@ -21,9 +23,11 @@ def start_cluster(): node.query("INSERT INTO test.source VALUES (5,6)") for db in ("test", "test_ordinary"): - node.query("CREATE DICTIONARY {}.dict(x UInt64, y UInt64) PRIMARY KEY x " \ - "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'source' DB 'test')) " \ - "LAYOUT(FLAT()) LIFETIME(0)".format(db)) + node.query( + "CREATE DICTIONARY {}.dict(x UInt64, y UInt64) PRIMARY KEY x " + "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'source' DB 'test')) " + "LAYOUT(FLAT()) LIFETIME(0)".format(db) + ) yield cluster finally: @@ -53,13 +57,18 @@ def cleanup_after_test(): def test_dependency_via_implicit_table(node): d_names = ["test.adict", "test.zdict", "atest.dict", "ztest.dict"] for d_name in d_names: - node.query("CREATE DICTIONARY {}(x UInt64, y UInt64) PRIMARY KEY x " \ - "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict' DB 'test')) " \ - "LAYOUT(FLAT()) LIFETIME(0)".format(d_name)) + node.query( + "CREATE DICTIONARY {}(x UInt64, y UInt64) PRIMARY KEY x " + "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'dict' DB 'test')) " + "LAYOUT(FLAT()) LIFETIME(0)".format(d_name) + ) def check(): for d_name in d_names: - assert node.query("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) == "6\n" + assert ( + node.query("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) + == "6\n" + ) check() @@ -74,16 +83,25 @@ def test_dependency_via_explicit_table(node): d_names = ["test.other_{}".format(i) for i in range(0, len(tbl_names))] for i in range(0, len(tbl_names)): tbl_name = tbl_names[i] - tbl_database, tbl_shortname = tbl_name.split('.') + tbl_database, tbl_shortname = tbl_name.split(".") d_name = d_names[i] - node.query("CREATE TABLE {}(x UInt64, y UInt64) ENGINE=Dictionary('test.dict')".format(tbl_name)) - node.query("CREATE DICTIONARY {}(x UInt64, y UInt64) PRIMARY KEY x " \ - "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE '{}' DB '{}')) " \ - "LAYOUT(FLAT()) LIFETIME(0)".format(d_name, tbl_shortname, tbl_database)) + node.query( + "CREATE TABLE {}(x UInt64, y UInt64) ENGINE=Dictionary('test.dict')".format( + tbl_name + ) + ) + node.query( + "CREATE DICTIONARY {}(x UInt64, y UInt64) PRIMARY KEY x " + "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE '{}' DB '{}')) " + "LAYOUT(FLAT()) LIFETIME(0)".format(d_name, tbl_shortname, tbl_database) + ) def check(): for d_name in d_names: - assert node.query("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) == "6\n" + assert ( + node.query("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) + == "6\n" + ) check() @@ -95,30 +113,40 @@ def test_dependency_via_explicit_table(node): for tbl in tbl_names: node.query(f"DROP TABLE {tbl}") + @pytest.mark.parametrize("node", nodes) def test_dependency_via_dictionary_database(node): node.query("CREATE DATABASE dict_db ENGINE=Dictionary") d_names = ["test_ordinary.adict", "test_ordinary.zdict", "atest.dict", "ztest.dict"] for d_name in d_names: - node.query("CREATE DICTIONARY {}(x UInt64, y UInt64) PRIMARY KEY x " \ - "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'test_ordinary.dict' DB 'dict_db')) " \ - "LAYOUT(FLAT()) LIFETIME(0)".format(d_name)) + node.query( + "CREATE DICTIONARY {}(x UInt64, y UInt64) PRIMARY KEY x " + "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'test_ordinary.dict' DB 'dict_db')) " + "LAYOUT(FLAT()) LIFETIME(0)".format(d_name) + ) def check(): for d_name in d_names: - assert 
node.query("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) == "6\n" - + assert ( + node.query("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) + == "6\n" + ) for d_name in d_names: - assert node.query("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) == "6\n" + assert ( + node.query("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) == "6\n" + ) # Restart must not break anything. node.restart_clickhouse() for d_name in d_names: - assert node.query_with_retry("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) == "6\n" + assert ( + node.query_with_retry("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) + == "6\n" + ) - # cleanup + # cleanup for d_name in d_names: node.query(f"DROP DICTIONARY IF EXISTS {d_name} SYNC") node.query("DROP DATABASE dict_db SYNC") diff --git a/tests/integration/test_dictionaries_dependency_xml/test.py b/tests/integration/test_dictionaries_dependency_xml/test.py index 13635c7b969..3f4c3320920 100644 --- a/tests/integration/test_dictionaries_dependency_xml/test.py +++ b/tests/integration/test_dictionaries_dependency_xml/test.py @@ -2,11 +2,17 @@ import pytest from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry -DICTIONARY_FILES = ['configs/dictionaries/dep_x.xml', 'configs/dictionaries/dep_y.xml', - 'configs/dictionaries/dep_z.xml', 'configs/dictionaries/node.xml'] +DICTIONARY_FILES = [ + "configs/dictionaries/dep_x.xml", + "configs/dictionaries/dep_y.xml", + "configs/dictionaries/dep_z.xml", + "configs/dictionaries/node.xml", +] cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', dictionaries=DICTIONARY_FILES, stay_alive=True) +instance = cluster.add_instance( + "instance", dictionaries=DICTIONARY_FILES, stay_alive=True +) @pytest.fixture(scope="module") @@ -14,13 +20,15 @@ def started_cluster(): try: cluster.start() - instance.query(''' + instance.query( + """ CREATE DATABASE IF NOT EXISTS dict ENGINE=Dictionary; CREATE DATABASE IF NOT EXISTS test; DROP TABLE IF EXISTS test.elements; CREATE TABLE test.elements (id UInt64, a String, b Int32, c Float64) ENGINE=Log; INSERT INTO test.elements VALUES (0, 'water', 10, 1), (1, 'air', 40, 0.01), (2, 'earth', 100, 1.7); - ''') + """ + ) yield cluster @@ -29,23 +37,25 @@ def started_cluster(): def get_status(dictionary_name): - return instance.query("SELECT status FROM system.dictionaries WHERE name='" + dictionary_name + "'").rstrip("\n") + return instance.query( + "SELECT status FROM system.dictionaries WHERE name='" + dictionary_name + "'" + ).rstrip("\n") def test_get_data(started_cluster): query = instance.query # dictionaries_lazy_load == false, so these dictionary are not loaded. - assert get_status('dep_x') == 'NOT_LOADED' - assert get_status('dep_y') == 'NOT_LOADED' - assert get_status('dep_z') == 'NOT_LOADED' + assert get_status("dep_x") == "NOT_LOADED" + assert get_status("dep_y") == "NOT_LOADED" + assert get_status("dep_z") == "NOT_LOADED" # Dictionary 'dep_x' depends on 'dep_z', which depends on 'dep_y'. # So they all should be loaded at once. assert query("SELECT dictGetString('dep_x', 'a', toUInt64(1))") == "air\n" - assert get_status('dep_x') == 'LOADED' - assert get_status('dep_y') == 'LOADED' - assert get_status('dep_z') == 'LOADED' + assert get_status("dep_x") == "LOADED" + assert get_status("dep_y") == "LOADED" + assert get_status("dep_z") == "LOADED" # Other dictionaries should work too. 
assert query("SELECT dictGetString('dep_y', 'a', toUInt64(1))") == "air\n" @@ -59,7 +69,13 @@ def test_get_data(started_cluster): query("INSERT INTO test.elements VALUES (3, 'fire', 30, 8)") # Wait for dictionaries to be reloaded. - assert_eq_with_retry(instance, "SELECT dictHas('dep_x', toUInt64(3))", "1", sleep_time=2, retry_count=10) + assert_eq_with_retry( + instance, + "SELECT dictHas('dep_x', toUInt64(3))", + "1", + sleep_time=2, + retry_count=10, + ) assert query("SELECT dictGetString('dep_x', 'a', toUInt64(3))") == "fire\n" assert query("SELECT dictGetString('dep_y', 'a', toUInt64(3))") == "fire\n" assert query("SELECT dictGetString('dep_z', 'a', toUInt64(3))") == "fire\n" @@ -67,7 +83,13 @@ def test_get_data(started_cluster): # dep_z (and hence dep_x) are updated only when there `intDiv(count(), 4)` is changed, now `count()==4`, # so dep_x and dep_z are not going to be updated after the following INSERT. query("INSERT INTO test.elements VALUES (4, 'ether', 404, 0.001)") - assert_eq_with_retry(instance, "SELECT dictHas('dep_y', toUInt64(4))", "1", sleep_time=2, retry_count=10) + assert_eq_with_retry( + instance, + "SELECT dictHas('dep_y', toUInt64(4))", + "1", + sleep_time=2, + retry_count=10, + ) assert query("SELECT dictGetString('dep_x', 'a', toUInt64(4))") == "XX\n" assert query("SELECT dictGetString('dep_y', 'a', toUInt64(4))") == "ether\n" assert query("SELECT dictGetString('dep_z', 'a', toUInt64(4))") == "ZZ\n" @@ -83,28 +105,41 @@ def dependent_tables_assert(): assert "default.join" in res assert "a.t" in res + def test_dependent_tables(started_cluster): query = instance.query query("create database lazy engine=Lazy(10)") query("create database a") query("create table lazy.src (n int, m int) engine=Log") - query("create dictionary a.d (n int default 0, m int default 42) primary key n " - "source(clickhouse(host 'localhost' port tcpPort() user 'default' table 'src' password '' db 'lazy'))" - "lifetime(min 1 max 10) layout(flat())") + query( + "create dictionary a.d (n int default 0, m int default 42) primary key n " + "source(clickhouse(host 'localhost' port tcpPort() user 'default' table 'src' password '' db 'lazy'))" + "lifetime(min 1 max 10) layout(flat())" + ) query("create table system.join (n int, m int) engine=Join(any, left, n)") query("insert into system.join values (1, 1)") - query("create table src (n int, m default joinGet('system.join', 'm', 1::int)," - "t default dictGetOrNull('a.d', 'm', toUInt64(3))," - "k default dictGet('a.d', 'm', toUInt64(4))) engine=MergeTree order by n") - query("create dictionary test.d (n int default 0, m int default 42) primary key n " - "source(clickhouse(host 'localhost' port tcpPort() user 'default' table 'src' password '' db 'default'))" - "lifetime(min 1 max 10) layout(flat())") - query("create table join (n int, m default dictGet('a.d', 'm', toUInt64(3))," - "k default dictGet('test.d', 'm', toUInt64(0))) engine=Join(any, left, n)") - query("create table lazy.log (n default dictGet(test.d, 'm', toUInt64(0))) engine=Log") - query("create table a.t (n default joinGet('system.join', 'm', 1::int)," - "m default dictGet('test.d', 'm', toUInt64(3))," - "k default joinGet(join, 'm', 1::int)) engine=MergeTree order by n") + query( + "create table src (n int, m default joinGet('system.join', 'm', 1::int)," + "t default dictGetOrNull('a.d', 'm', toUInt64(3))," + "k default dictGet('a.d', 'm', toUInt64(4))) engine=MergeTree order by n" + ) + query( + "create dictionary test.d (n int default 0, m int default 42) primary key n " + 
"source(clickhouse(host 'localhost' port tcpPort() user 'default' table 'src' password '' db 'default'))" + "lifetime(min 1 max 10) layout(flat())" + ) + query( + "create table join (n int, m default dictGet('a.d', 'm', toUInt64(3))," + "k default dictGet('test.d', 'm', toUInt64(0))) engine=Join(any, left, n)" + ) + query( + "create table lazy.log (n default dictGet(test.d, 'm', toUInt64(0))) engine=Log" + ) + query( + "create table a.t (n default joinGet('system.join', 'm', 1::int)," + "m default dictGet('test.d', 'm', toUInt64(3))," + "k default joinGet(join, 'm', 1::int)) engine=MergeTree order by n" + ) dependent_tables_assert() instance.restart_clickhouse() @@ -120,7 +155,9 @@ def test_dependent_tables(started_cluster): def test_xml_dict_same_name(started_cluster): - instance.query("create table default.node ( key UInt64, name String ) Engine=Dictionary(node);") + instance.query( + "create table default.node ( key UInt64, name String ) Engine=Dictionary(node);" + ) instance.restart_clickhouse() assert "node" in instance.query("show tables from default") instance.query("drop table default.node") diff --git a/tests/integration/test_dictionaries_mysql/test.py b/tests/integration/test_dictionaries_mysql/test.py index 664fde2baa8..5c67a4c434a 100644 --- a/tests/integration/test_dictionaries_mysql/test.py +++ b/tests/integration/test_dictionaries_mysql/test.py @@ -6,10 +6,12 @@ from helpers.cluster import ClickHouseCluster import time import logging -DICTS = ['configs/dictionaries/mysql_dict1.xml', 'configs/dictionaries/mysql_dict2.xml'] -CONFIG_FILES = ['configs/remote_servers.xml', 'configs/named_collections.xml'] +DICTS = ["configs/dictionaries/mysql_dict1.xml", "configs/dictionaries/mysql_dict2.xml"] +CONFIG_FILES = ["configs/remote_servers.xml", "configs/named_collections.xml"] cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', main_configs=CONFIG_FILES, with_mysql=True, dictionaries=DICTS) +instance = cluster.add_instance( + "instance", main_configs=CONFIG_FILES, with_mysql=True, dictionaries=DICTS +) create_table_mysql_template = """ CREATE TABLE IF NOT EXISTS `test`.`{}` ( @@ -32,14 +34,16 @@ def started_cluster(): # Create a MySQL database mysql_connection = get_mysql_conn(cluster) - create_mysql_db(mysql_connection, 'test') + create_mysql_db(mysql_connection, "test") mysql_connection.close() # Create database in ClickHouse instance.query("CREATE DATABASE IF NOT EXISTS test") # Create database in ClickChouse using MySQL protocol (will be used for data insertion) - instance.query("CREATE DATABASE clickhouse_mysql ENGINE = MySQL('mysql57:3306', 'test', 'root', 'clickhouse')") + instance.query( + "CREATE DATABASE clickhouse_mysql ENGINE = MySQL('mysql57:3306', 'test', 'root', 'clickhouse')" + ) yield cluster @@ -50,13 +54,24 @@ def started_cluster(): def test_mysql_dictionaries_custom_query_full_load(started_cluster): mysql_connection = get_mysql_conn(started_cluster) - execute_mysql_query(mysql_connection, "CREATE TABLE IF NOT EXISTS test.test_table_1 (id Integer, value_1 Text);") - execute_mysql_query(mysql_connection, "CREATE TABLE IF NOT EXISTS test.test_table_2 (id Integer, value_2 Text);") - execute_mysql_query(mysql_connection, "INSERT INTO test.test_table_1 VALUES (1, 'Value_1');") - execute_mysql_query(mysql_connection, "INSERT INTO test.test_table_2 VALUES (1, 'Value_2');") + execute_mysql_query( + mysql_connection, + "CREATE TABLE IF NOT EXISTS test.test_table_1 (id Integer, value_1 Text);", + ) + execute_mysql_query( + mysql_connection, + 
"CREATE TABLE IF NOT EXISTS test.test_table_2 (id Integer, value_2 Text);", + ) + execute_mysql_query( + mysql_connection, "INSERT INTO test.test_table_1 VALUES (1, 'Value_1');" + ) + execute_mysql_query( + mysql_connection, "INSERT INTO test.test_table_2 VALUES (1, 'Value_2');" + ) query = instance.query - query(""" + query( + """ CREATE DICTIONARY test_dictionary_custom_query ( id UInt64, @@ -72,11 +87,12 @@ def test_mysql_dictionaries_custom_query_full_load(started_cluster): PASSWORD 'clickhouse' QUERY $doc$SELECT id, value_1, value_2 FROM test.test_table_1 INNER JOIN test.test_table_2 USING (id);$doc$)) LIFETIME(0) - """) + """ + ) result = query("SELECT id, value_1, value_2 FROM test_dictionary_custom_query") - assert result == '1\tValue_1\tValue_2\n' + assert result == "1\tValue_1\tValue_2\n" query("DROP DICTIONARY test_dictionary_custom_query;") @@ -87,13 +103,24 @@ def test_mysql_dictionaries_custom_query_full_load(started_cluster): def test_mysql_dictionaries_custom_query_partial_load_simple_key(started_cluster): mysql_connection = get_mysql_conn(started_cluster) - execute_mysql_query(mysql_connection, "CREATE TABLE IF NOT EXISTS test.test_table_1 (id Integer, value_1 Text);") - execute_mysql_query(mysql_connection, "CREATE TABLE IF NOT EXISTS test.test_table_2 (id Integer, value_2 Text);") - execute_mysql_query(mysql_connection, "INSERT INTO test.test_table_1 VALUES (1, 'Value_1');") - execute_mysql_query(mysql_connection, "INSERT INTO test.test_table_2 VALUES (1, 'Value_2');") + execute_mysql_query( + mysql_connection, + "CREATE TABLE IF NOT EXISTS test.test_table_1 (id Integer, value_1 Text);", + ) + execute_mysql_query( + mysql_connection, + "CREATE TABLE IF NOT EXISTS test.test_table_2 (id Integer, value_2 Text);", + ) + execute_mysql_query( + mysql_connection, "INSERT INTO test.test_table_1 VALUES (1, 'Value_1');" + ) + execute_mysql_query( + mysql_connection, "INSERT INTO test.test_table_2 VALUES (1, 'Value_2');" + ) query = instance.query - query(""" + query( + """ CREATE DICTIONARY test_dictionary_custom_query ( id UInt64, @@ -108,9 +135,12 @@ def test_mysql_dictionaries_custom_query_partial_load_simple_key(started_cluster USER 'root' PASSWORD 'clickhouse' QUERY $doc$SELECT id, value_1, value_2 FROM test.test_table_1 INNER JOIN test.test_table_2 USING (id) WHERE {condition};$doc$)) - """) + """ + ) - result = query("SELECT dictGet('test_dictionary_custom_query', ('value_1', 'value_2'), toUInt64(1))") + result = query( + "SELECT dictGet('test_dictionary_custom_query', ('value_1', 'value_2'), toUInt64(1))" + ) assert result == "('Value_1','Value_2')\n" @@ -123,13 +153,24 @@ def test_mysql_dictionaries_custom_query_partial_load_simple_key(started_cluster def test_mysql_dictionaries_custom_query_partial_load_complex_key(started_cluster): mysql_connection = get_mysql_conn(started_cluster) - execute_mysql_query(mysql_connection, "CREATE TABLE IF NOT EXISTS test.test_table_1 (id Integer, id_key Text, value_1 Text);") - execute_mysql_query(mysql_connection, "CREATE TABLE IF NOT EXISTS test.test_table_2 (id Integer, id_key Text, value_2 Text);") - execute_mysql_query(mysql_connection, "INSERT INTO test.test_table_1 VALUES (1, 'Key', 'Value_1');") - execute_mysql_query(mysql_connection, "INSERT INTO test.test_table_2 VALUES (1, 'Key', 'Value_2');") + execute_mysql_query( + mysql_connection, + "CREATE TABLE IF NOT EXISTS test.test_table_1 (id Integer, id_key Text, value_1 Text);", + ) + execute_mysql_query( + mysql_connection, + "CREATE TABLE IF NOT EXISTS test.test_table_2 (id 
Integer, id_key Text, value_2 Text);", + ) + execute_mysql_query( + mysql_connection, "INSERT INTO test.test_table_1 VALUES (1, 'Key', 'Value_1');" + ) + execute_mysql_query( + mysql_connection, "INSERT INTO test.test_table_2 VALUES (1, 'Key', 'Value_2');" + ) query = instance.query - query(""" + query( + """ CREATE DICTIONARY test_dictionary_custom_query ( id UInt64, @@ -145,9 +186,12 @@ def test_mysql_dictionaries_custom_query_partial_load_complex_key(started_cluste USER 'root' PASSWORD 'clickhouse' QUERY $doc$SELECT id, id_key, value_1, value_2 FROM test.test_table_1 INNER JOIN test.test_table_2 USING (id, id_key) WHERE {condition};$doc$)) - """) + """ + ) - result = query("SELECT dictGet('test_dictionary_custom_query', ('value_1', 'value_2'), (toUInt64(1), 'Key'))") + result = query( + "SELECT dictGet('test_dictionary_custom_query', ('value_1', 'value_2'), (toUInt64(1), 'Key'))" + ) assert result == "('Value_1','Value_2')\n" @@ -161,82 +205,109 @@ def test_predefined_connection_configuration(started_cluster): mysql_connection = get_mysql_conn(started_cluster) execute_mysql_query(mysql_connection, "DROP TABLE IF EXISTS test.test_table") - execute_mysql_query(mysql_connection, "CREATE TABLE IF NOT EXISTS test.test_table (id Integer, value Integer);") - execute_mysql_query(mysql_connection, "INSERT INTO test.test_table VALUES (100, 200);") + execute_mysql_query( + mysql_connection, + "CREATE TABLE IF NOT EXISTS test.test_table (id Integer, value Integer);", + ) + execute_mysql_query( + mysql_connection, "INSERT INTO test.test_table VALUES (100, 200);" + ) - instance.query(''' + instance.query( + """ DROP DICTIONARY IF EXISTS dict; CREATE DICTIONARY dict (id UInt32, value UInt32) PRIMARY KEY id SOURCE(MYSQL(NAME mysql1)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') + """ + ) result = instance.query("SELECT dictGetUInt32(dict, 'value', toUInt64(100))") - assert(int(result) == 200) + assert int(result) == 200 - instance.query(''' + instance.query( + """ DROP DICTIONARY dict; CREATE DICTIONARY dict (id UInt32, value UInt32) PRIMARY KEY id SOURCE(MYSQL(NAME mysql2)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') - result = instance.query_and_get_error("SELECT dictGetUInt32(dict, 'value', toUInt64(100))") - instance.query(''' + """ + ) + result = instance.query_and_get_error( + "SELECT dictGetUInt32(dict, 'value', toUInt64(100))" + ) + instance.query( + """ DROP DICTIONARY dict; CREATE DICTIONARY dict (id UInt32, value UInt32) PRIMARY KEY id SOURCE(MYSQL(NAME unknown_collection)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') - result = instance.query_and_get_error("SELECT dictGetUInt32(dict, 'value', toUInt64(100))") + """ + ) + result = instance.query_and_get_error( + "SELECT dictGetUInt32(dict, 'value', toUInt64(100))" + ) - instance.query(''' + instance.query( + """ DROP DICTIONARY dict; CREATE DICTIONARY dict (id UInt32, value UInt32) PRIMARY KEY id SOURCE(MYSQL(NAME mysql3 PORT 3306)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') + """ + ) result = instance.query("SELECT dictGetUInt32(dict, 'value', toUInt64(100))") - assert(int(result) == 200) + assert int(result) == 200 - instance.query(''' + instance.query( + """ DROP DICTIONARY IF EXISTS dict; CREATE DICTIONARY dict (id UInt32, value UInt32) PRIMARY KEY id SOURCE(MYSQL(NAME mysql1 connection_pool_size 0)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') - result = instance.query_and_get_error("SELECT dictGetUInt32(dict, 'value', toUInt64(100))") - assert 'Connection pool cannot have zero size' in result + """ + ) + result = 
instance.query_and_get_error( + "SELECT dictGetUInt32(dict, 'value', toUInt64(100))" + ) + assert "Connection pool cannot have zero size" in result - instance.query(''' + instance.query( + """ DROP DICTIONARY IF EXISTS dict; CREATE DICTIONARY dict (id UInt32, value UInt32) PRIMARY KEY id SOURCE(MYSQL(NAME mysql4)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') - result = instance.query_and_get_error("SELECT dictGetUInt32(dict, 'value', toUInt64(100))") - assert 'Connection pool cannot have zero size' in result + """ + ) + result = instance.query_and_get_error( + "SELECT dictGetUInt32(dict, 'value', toUInt64(100))" + ) + assert "Connection pool cannot have zero size" in result - instance.query(''' + instance.query( + """ DROP DICTIONARY IF EXISTS dict; CREATE DICTIONARY dict (id UInt32, value UInt32) PRIMARY KEY id SOURCE(MYSQL(NAME mysql4 connection_pool_size 1)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') + """ + ) result = instance.query("SELECT dictGetUInt32(dict, 'value', toUInt64(100))") - assert(int(result) == 200) + assert int(result) == 200 def create_mysql_db(mysql_connection, name): @@ -255,12 +326,24 @@ def prepare_mysql_table(started_cluster, table_name, index): query = instance.query query( "INSERT INTO `clickhouse_mysql`.{}(id, value) select number, concat('{} value ', toString(number)) from numbers(10000) ".format( - table_name + str(index), table_name + str(index))) - assert query("SELECT count() FROM `clickhouse_mysql`.{}".format(table_name + str(index))).rstrip() == '10000' + table_name + str(index), table_name + str(index) + ) + ) + assert ( + query( + "SELECT count() FROM `clickhouse_mysql`.{}".format(table_name + str(index)) + ).rstrip() + == "10000" + ) mysql_connection.close() # Create CH Dictionary tables based on MySQL tables - query(create_clickhouse_dictionary_table_template.format(table_name + str(index), 'dict' + str(index))) + query( + create_clickhouse_dictionary_table_template.format( + table_name + str(index), "dict" + str(index) + ) + ) + def get_mysql_conn(started_cluster): errors = [] @@ -268,10 +351,17 @@ def get_mysql_conn(started_cluster): for _ in range(5): try: if conn is None: - conn = pymysql.connect(user='root', password='clickhouse', host=started_cluster.mysql_ip, port=started_cluster.mysql_port) + conn = pymysql.connect( + user="root", + password="clickhouse", + host=started_cluster.mysql_ip, + port=started_cluster.mysql_port, + ) else: conn.ping(reconnect=True) - logging.debug(f"MySQL Connection establised: {started_cluster.mysql_ip}:{started_cluster.mysql_port}") + logging.debug( + f"MySQL Connection establised: {started_cluster.mysql_ip}:{started_cluster.mysql_port}" + ) return conn except Exception as e: errors += [str(e)] @@ -279,6 +369,7 @@ def get_mysql_conn(started_cluster): raise Exception("Connection not establised, {}".format(errors)) + def execute_mysql_query(connection, query): logging.debug("Execute MySQL query:{}".format(query)) with warnings.catch_warnings(): @@ -287,6 +378,7 @@ def execute_mysql_query(connection, query): cursor.execute(query) connection.commit() + def create_mysql_table(conn, table_name): with conn.cursor() as cursor: cursor.execute(create_table_mysql_template.format(table_name)) diff --git a/tests/integration/test_dictionaries_null_value/test.py b/tests/integration/test_dictionaries_null_value/test.py index 96ca76f594e..d62b1e6fc49 100644 --- a/tests/integration/test_dictionaries_null_value/test.py +++ b/tests/integration/test_dictionaries_null_value/test.py @@ -1,10 +1,10 @@ import pytest from 
helpers.cluster import ClickHouseCluster -DICTIONARY_FILES = ['configs/dictionaries/cache.xml'] +DICTIONARY_FILES = ["configs/dictionaries/cache.xml"] cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', dictionaries=DICTIONARY_FILES) +instance = cluster.add_instance("instance", dictionaries=DICTIONARY_FILES) @pytest.fixture(scope="module") @@ -12,7 +12,8 @@ def started_cluster(): try: cluster.start() - instance.query(''' + instance.query( + """ CREATE DATABASE IF NOT EXISTS test; DROP TABLE IF EXISTS test.source; CREATE TABLE test.source (id UInt64, key0 UInt8, key0_str String, key1 UInt8, @@ -22,7 +23,8 @@ def started_cluster(): Float32_ Float32, Float64_ Float64, String_ String, Date_ Date, DateTime_ DateTime, Parent UInt64) ENGINE=Log; - ''') + """ + ) yield cluster @@ -34,10 +36,22 @@ def test_null_value(started_cluster): query = instance.query assert query("select dictGetUInt8('cache', 'UInt8_', toUInt64(12121212))") == "1\n" - assert query("select dictGetString('cache', 'String_', toUInt64(12121212))") == "implicit-default\n" - assert query("select dictGetDate('cache', 'Date_', toUInt64(12121212))") == "2015-11-25\n" + assert ( + query("select dictGetString('cache', 'String_', toUInt64(12121212))") + == "implicit-default\n" + ) + assert ( + query("select dictGetDate('cache', 'Date_', toUInt64(12121212))") + == "2015-11-25\n" + ) # Check, that empty null_value interprets as default value - assert query("select dictGetUInt64('cache', 'UInt64_', toUInt64(12121212))") == "0\n" - assert query( - "select toTimeZone(dictGetDateTime('cache', 'DateTime_', toUInt64(12121212)), 'UTC')") == "1970-01-01 00:00:00\n" + assert ( + query("select dictGetUInt64('cache', 'UInt64_', toUInt64(12121212))") == "0\n" + ) + assert ( + query( + "select toTimeZone(dictGetDateTime('cache', 'DateTime_', toUInt64(12121212)), 'UTC')" + ) + == "1970-01-01 00:00:00\n" + ) diff --git a/tests/integration/test_dictionaries_postgresql/test.py b/tests/integration/test_dictionaries_postgresql/test.py index 53333fe2012..49a75a09e4e 100644 --- a/tests/integration/test_dictionaries_postgresql/test.py +++ b/tests/integration/test_dictionaries_postgresql/test.py @@ -7,9 +7,16 @@ from helpers.cluster import ClickHouseCluster from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', - main_configs=['configs/config.xml', 'configs/dictionaries/postgres_dict.xml', 'configs/named_collections.xml'], - with_postgres=True, with_postgres_cluster=True) +node1 = cluster.add_instance( + "node1", + main_configs=[ + "configs/config.xml", + "configs/dictionaries/postgres_dict.xml", + "configs/named_collections.xml", + ], + with_postgres=True, + with_postgres_cluster=True, +) postgres_dict_table_template = """ CREATE TABLE IF NOT EXISTS {} ( @@ -21,35 +28,52 @@ click_dict_table_template = """ ) ENGINE = Dictionary({}) """ + def get_postgres_conn(ip, port, database=False): if database == True: - conn_string = "host={} port={} dbname='clickhouse' user='postgres' password='mysecretpassword'".format(ip, port) + conn_string = "host={} port={} dbname='clickhouse' user='postgres' password='mysecretpassword'".format( + ip, port + ) else: - conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(ip, port) + conn_string = ( + "host={} port={} user='postgres' password='mysecretpassword'".format( + ip, port + ) + ) conn = psycopg2.connect(conn_string) conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) conn.autocommit = 
True return conn + def create_postgres_db(conn, name): cursor = conn.cursor() cursor.execute("CREATE DATABASE {}".format(name)) + def create_postgres_table(cursor, table_name): cursor.execute(postgres_dict_table_template.format(table_name)) + def create_and_fill_postgres_table(cursor, table_name, port, host): create_postgres_table(cursor, table_name) # Fill postgres table using clickhouse postgres table function and check - table_func = '''postgresql('{}:{}', 'clickhouse', '{}', 'postgres', 'mysecretpassword')'''.format(host, port, table_name) - node1.query('''INSERT INTO TABLE FUNCTION {} SELECT number, number, number from numbers(10000) - '''.format(table_func, table_name)) + table_func = """postgresql('{}:{}', 'clickhouse', '{}', 'postgres', 'mysecretpassword')""".format( + host, port, table_name + ) + node1.query( + """INSERT INTO TABLE FUNCTION {} SELECT number, number, number from numbers(10000) + """.format( + table_func, table_name + ) + ) result = node1.query("SELECT count() FROM {}".format(table_func)) - assert result.rstrip() == '10000' + assert result.rstrip() == "10000" + def create_dict(table_name, index=0): - node1.query(click_dict_table_template.format(table_name, 'dict' + str(index))) + node1.query(click_dict_table_template.format(table_name, "dict" + str(index))) @pytest.fixture(scope="module") @@ -58,13 +82,17 @@ def started_cluster(): cluster.start() node1.query("CREATE DATABASE IF NOT EXISTS test") - postgres_conn = get_postgres_conn(ip=cluster.postgres_ip, port=cluster.postgres_port) + postgres_conn = get_postgres_conn( + ip=cluster.postgres_ip, port=cluster.postgres_port + ) print("postgres1 connected") - create_postgres_db(postgres_conn, 'clickhouse') + create_postgres_db(postgres_conn, "clickhouse") - postgres_conn = get_postgres_conn(ip=cluster.postgres2_ip, port=cluster.postgres_port) + postgres_conn = get_postgres_conn( + ip=cluster.postgres2_ip, port=cluster.postgres_port + ) print("postgres2 connected") - create_postgres_db(postgres_conn, 'clickhouse') + create_postgres_db(postgres_conn, "clickhouse") yield cluster @@ -73,17 +101,39 @@ def started_cluster(): def test_load_dictionaries(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, database=True, port=started_cluster.postgres_port) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + database=True, + port=started_cluster.postgres_port, + ) cursor = conn.cursor() - table_name = 'test0' - create_and_fill_postgres_table(cursor, table_name, port=started_cluster.postgres_port, host=started_cluster.postgres_ip) + table_name = "test0" + create_and_fill_postgres_table( + cursor, + table_name, + port=started_cluster.postgres_port, + host=started_cluster.postgres_ip, + ) create_dict(table_name) - dict_name = 'dict0' + dict_name = "dict0" node1.query("SYSTEM RELOAD DICTIONARY {}".format(dict_name)) - assert node1.query("SELECT count() FROM `test`.`dict_table_{}`".format(table_name)).rstrip() == '10000' - assert node1.query("SELECT dictGetUInt32('{}', 'key', toUInt64(0))".format(dict_name)) == '0\n' - assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(9999))".format(dict_name)) == '9999\n' + assert ( + node1.query( + "SELECT count() FROM `test`.`dict_table_{}`".format(table_name) + ).rstrip() + == "10000" + ) + assert ( + node1.query("SELECT dictGetUInt32('{}', 'key', toUInt64(0))".format(dict_name)) + == "0\n" + ) + assert ( + node1.query( + "SELECT dictGetUInt32('{}', 'value', toUInt64(9999))".format(dict_name) + ) + == "9999\n" + ) cursor.execute("DROP TABLE IF 
EXISTS {}".format(table_name)) node1.query("DROP TABLE IF EXISTS {}".format(table_name)) @@ -91,16 +141,25 @@ def test_load_dictionaries(started_cluster): def test_postgres_dictionaries_custom_query_full_load(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, database=True, port=started_cluster.postgres_port) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + database=True, + port=started_cluster.postgres_port, + ) cursor = conn.cursor() - cursor.execute("CREATE TABLE IF NOT EXISTS test_table_1 (id Integer, value_1 Text);") - cursor.execute("CREATE TABLE IF NOT EXISTS test_table_2 (id Integer, value_2 Text);") + cursor.execute( + "CREATE TABLE IF NOT EXISTS test_table_1 (id Integer, value_1 Text);" + ) + cursor.execute( + "CREATE TABLE IF NOT EXISTS test_table_2 (id Integer, value_2 Text);" + ) cursor.execute("INSERT INTO test_table_1 VALUES (1, 'Value_1');") cursor.execute("INSERT INTO test_table_2 VALUES (1, 'Value_2');") query = node1.query - query(""" + query( + """ CREATE DICTIONARY test_dictionary_custom_query ( id UInt64, @@ -117,11 +176,14 @@ def test_postgres_dictionaries_custom_query_full_load(started_cluster): PASSWORD 'mysecretpassword' QUERY $doc$SELECT id, value_1, value_2 FROM test_table_1 INNER JOIN test_table_2 USING (id);$doc$)) LIFETIME(0) - """.format(started_cluster.postgres_ip, started_cluster.postgres_port)) + """.format( + started_cluster.postgres_ip, started_cluster.postgres_port + ) + ) result = query("SELECT id, value_1, value_2 FROM test_dictionary_custom_query") - assert result == '1\tValue_1\tValue_2\n' + assert result == "1\tValue_1\tValue_2\n" query("DROP DICTIONARY test_dictionary_custom_query;") @@ -130,16 +192,25 @@ def test_postgres_dictionaries_custom_query_full_load(started_cluster): def test_postgres_dictionaries_custom_query_partial_load_simple_key(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, database=True, port=started_cluster.postgres_port) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + database=True, + port=started_cluster.postgres_port, + ) cursor = conn.cursor() - cursor.execute("CREATE TABLE IF NOT EXISTS test_table_1 (id Integer, value_1 Text);") - cursor.execute("CREATE TABLE IF NOT EXISTS test_table_2 (id Integer, value_2 Text);") + cursor.execute( + "CREATE TABLE IF NOT EXISTS test_table_1 (id Integer, value_1 Text);" + ) + cursor.execute( + "CREATE TABLE IF NOT EXISTS test_table_2 (id Integer, value_2 Text);" + ) cursor.execute("INSERT INTO test_table_1 VALUES (1, 'Value_1');") cursor.execute("INSERT INTO test_table_2 VALUES (1, 'Value_2');") query = node1.query - query(""" + query( + """ CREATE DICTIONARY test_dictionary_custom_query ( id UInt64, @@ -155,11 +226,16 @@ def test_postgres_dictionaries_custom_query_partial_load_simple_key(started_clus USER 'postgres' PASSWORD 'mysecretpassword' QUERY $doc$SELECT id, value_1, value_2 FROM test_table_1 INNER JOIN test_table_2 USING (id) WHERE {{condition}};$doc$)) - """.format(started_cluster.postgres_ip, started_cluster.postgres_port)) + """.format( + started_cluster.postgres_ip, started_cluster.postgres_port + ) + ) - result = query("SELECT dictGet('test_dictionary_custom_query', ('value_1', 'value_2'), toUInt64(1))") + result = query( + "SELECT dictGet('test_dictionary_custom_query', ('value_1', 'value_2'), toUInt64(1))" + ) - assert result == '(\'Value_1\',\'Value_2\')\n' + assert result == "('Value_1','Value_2')\n" query("DROP DICTIONARY test_dictionary_custom_query;") @@ -168,16 +244,25 @@ def 
test_postgres_dictionaries_custom_query_partial_load_simple_key(started_clus def test_postgres_dictionaries_custom_query_partial_load_complex_key(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, database=True, port=started_cluster.postgres_port) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + database=True, + port=started_cluster.postgres_port, + ) cursor = conn.cursor() - cursor.execute("CREATE TABLE IF NOT EXISTS test_table_1 (id Integer, key Text, value_1 Text);") - cursor.execute("CREATE TABLE IF NOT EXISTS test_table_2 (id Integer, key Text, value_2 Text);") + cursor.execute( + "CREATE TABLE IF NOT EXISTS test_table_1 (id Integer, key Text, value_1 Text);" + ) + cursor.execute( + "CREATE TABLE IF NOT EXISTS test_table_2 (id Integer, key Text, value_2 Text);" + ) cursor.execute("INSERT INTO test_table_1 VALUES (1, 'Key', 'Value_1');") cursor.execute("INSERT INTO test_table_2 VALUES (1, 'Key', 'Value_2');") query = node1.query - query(""" + query( + """ CREATE DICTIONARY test_dictionary_custom_query ( id UInt64, @@ -194,49 +279,101 @@ def test_postgres_dictionaries_custom_query_partial_load_complex_key(started_clu USER 'postgres' PASSWORD 'mysecretpassword' QUERY $doc$SELECT id, key, value_1, value_2 FROM test_table_1 INNER JOIN test_table_2 USING (id, key) WHERE {{condition}};$doc$)) - """.format(started_cluster.postgres_ip, started_cluster.postgres_port)) + """.format( + started_cluster.postgres_ip, started_cluster.postgres_port + ) + ) - result = query("SELECT dictGet('test_dictionary_custom_query', ('value_1', 'value_2'), (toUInt64(1), 'Key'))") + result = query( + "SELECT dictGet('test_dictionary_custom_query', ('value_1', 'value_2'), (toUInt64(1), 'Key'))" + ) - assert result == '(\'Value_1\',\'Value_2\')\n' + assert result == "('Value_1','Value_2')\n" query("DROP DICTIONARY test_dictionary_custom_query;") cursor.execute("DROP TABLE test_table_2;") cursor.execute("DROP TABLE test_table_1;") + def test_invalidate_query(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, database=True, port=started_cluster.postgres_port) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + database=True, + port=started_cluster.postgres_port, + ) cursor = conn.cursor() - table_name = 'test0' - create_and_fill_postgres_table(cursor, table_name, port=started_cluster.postgres_port, host=started_cluster.postgres_ip) + table_name = "test0" + create_and_fill_postgres_table( + cursor, + table_name, + port=started_cluster.postgres_port, + host=started_cluster.postgres_ip, + ) # invalidate query: SELECT value FROM test0 WHERE id = 0 - dict_name = 'dict0' + dict_name = "dict0" create_dict(table_name) node1.query("SYSTEM RELOAD DICTIONARY {}".format(dict_name)) - assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(0))".format(dict_name)) == "0\n" - assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(1))".format(dict_name)) == "1\n" + assert ( + node1.query( + "SELECT dictGetUInt32('{}', 'value', toUInt64(0))".format(dict_name) + ) + == "0\n" + ) + assert ( + node1.query( + "SELECT dictGetUInt32('{}', 'value', toUInt64(1))".format(dict_name) + ) + == "1\n" + ) # update should happen cursor.execute("UPDATE {} SET value=value+1 WHERE id = 0".format(table_name)) while True: - result = node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(0))".format(dict_name)) - if result != '0\n': + result = node1.query( + "SELECT dictGetUInt32('{}', 'value', toUInt64(0))".format(dict_name) + ) + if result != "0\n": break 
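The hunk above reflows the polling loop in test_invalidate_query that waits for the PostgreSQL-backed dictionary to pick up an UPDATE through its invalidate query. As an illustrative sketch only (not part of the diff), the same wait can be expressed with a bounded timeout; here `run_query` is a stand-in for a callable such as `node1.query`, and the timeout/interval values are assumptions, while the test itself uses an unbounded `while True` loop.

import time

def wait_for_query_result(run_query, sql, expected, timeout=30, interval=0.5):
    # Poll run_query(sql) until it returns `expected` or the timeout expires.
    # `run_query` stands for a callable like node1.query used in the test above.
    deadline = time.monotonic() + timeout
    last = None
    while time.monotonic() < deadline:
        last = run_query(sql)
        if last == expected:
            return last
        time.sleep(interval)
    raise AssertionError(
        "timed out waiting for {!r}, last result was {!r}".format(expected, last)
    )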
- assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(0))".format(dict_name)) == '1\n' + assert ( + node1.query( + "SELECT dictGetUInt32('{}', 'value', toUInt64(0))".format(dict_name) + ) + == "1\n" + ) # no update should happen cursor.execute("UPDATE {} SET value=value*2 WHERE id != 0".format(table_name)) time.sleep(5) - assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(0))".format(dict_name)) == '1\n' - assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(1))".format(dict_name)) == '1\n' + assert ( + node1.query( + "SELECT dictGetUInt32('{}', 'value', toUInt64(0))".format(dict_name) + ) + == "1\n" + ) + assert ( + node1.query( + "SELECT dictGetUInt32('{}', 'value', toUInt64(1))".format(dict_name) + ) + == "1\n" + ) # update should happen cursor.execute("UPDATE {} SET value=value+1 WHERE id = 0".format(table_name)) time.sleep(5) - assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(0))".format(dict_name)) == '2\n' - assert node1.query("SELECT dictGetUInt32('{}', 'value', toUInt64(1))".format(dict_name)) == '2\n' + assert ( + node1.query( + "SELECT dictGetUInt32('{}', 'value', toUInt64(0))".format(dict_name) + ) + == "2\n" + ) + assert ( + node1.query( + "SELECT dictGetUInt32('{}', 'value', toUInt64(1))".format(dict_name) + ) + == "2\n" + ) node1.query("DROP TABLE IF EXISTS {}".format(table_name)) node1.query("DROP DICTIONARY IF EXISTS {}".format(dict_name)) @@ -244,27 +381,39 @@ def test_invalidate_query(started_cluster): def test_dictionary_with_replicas(started_cluster): - conn1 = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) + conn1 = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor1 = conn1.cursor() - conn2 = get_postgres_conn(ip=started_cluster.postgres2_ip, port=started_cluster.postgres_port, database=True) + conn2 = get_postgres_conn( + ip=started_cluster.postgres2_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor2 = conn2.cursor() - create_postgres_table(cursor1, 'test1') - create_postgres_table(cursor2, 'test1') + create_postgres_table(cursor1, "test1") + create_postgres_table(cursor2, "test1") - cursor1.execute('INSERT INTO test1 select i, i, i from generate_series(0, 99) as t(i);') - cursor2.execute('INSERT INTO test1 select i, i, i from generate_series(100, 199) as t(i);') + cursor1.execute( + "INSERT INTO test1 select i, i, i from generate_series(0, 99) as t(i);" + ) + cursor2.execute( + "INSERT INTO test1 select i, i, i from generate_series(100, 199) as t(i);" + ) - create_dict('test1', 1) + create_dict("test1", 1) result = node1.query("SELECT * FROM `test`.`dict_table_test1` ORDER BY key") # priority 0 - non running port - assert node1.contains_in_log('PostgreSQLConnectionPool: Connection error*') + assert node1.contains_in_log("PostgreSQLConnectionPool: Connection error*") # priority 1 - postgres2, table contains rows with values 100-200 # priority 2 - postgres1, table contains rows with values 0-100 expected = node1.query("SELECT number, number FROM numbers(100, 100)") - assert(result == expected) + assert result == expected cursor1.execute("DROP TABLE IF EXISTS test1") cursor2.execute("DROP TABLE IF EXISTS test1") @@ -274,14 +423,21 @@ def test_dictionary_with_replicas(started_cluster): def test_postgres_schema(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) + conn = get_postgres_conn( + 
ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - cursor.execute('CREATE SCHEMA test_schema') - cursor.execute('CREATE TABLE test_schema.test_table (id integer, value integer)') - cursor.execute('INSERT INTO test_schema.test_table SELECT i, i FROM generate_series(0, 99) as t(i)') + cursor.execute("CREATE SCHEMA test_schema") + cursor.execute("CREATE TABLE test_schema.test_table (id integer, value integer)") + cursor.execute( + "INSERT INTO test_schema.test_table SELECT i, i FROM generate_series(0, 99) as t(i)" + ) - node1.query(''' + node1.query( + """ DROP DICTIONARY IF EXISTS postgres_dict; CREATE DICTIONARY postgres_dict (id UInt32, value UInt32) PRIMARY KEY id @@ -294,88 +450,114 @@ def test_postgres_schema(started_cluster): table 'test_schema.test_table')) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') + """ + ) result = node1.query("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(1))") - assert(int(result.strip()) == 1) + assert int(result.strip()) == 1 result = node1.query("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))") - assert(int(result.strip()) == 99) + assert int(result.strip()) == 99 node1.query("DROP DICTIONARY IF EXISTS postgres_dict") def test_predefined_connection_configuration(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - cursor.execute('DROP TABLE IF EXISTS test_table') - cursor.execute('CREATE TABLE test_table (id integer, value integer)') - cursor.execute('INSERT INTO test_table SELECT i, i FROM generate_series(0, 99) as t(i)') + cursor.execute("DROP TABLE IF EXISTS test_table") + cursor.execute("CREATE TABLE test_table (id integer, value integer)") + cursor.execute( + "INSERT INTO test_table SELECT i, i FROM generate_series(0, 99) as t(i)" + ) - node1.query(''' + node1.query( + """ DROP DICTIONARY IF EXISTS postgres_dict; CREATE DICTIONARY postgres_dict (id UInt32, value UInt32) PRIMARY KEY id SOURCE(POSTGRESQL(NAME postgres1)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') + """ + ) result = node1.query("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))") - assert(int(result.strip()) == 99) + assert int(result.strip()) == 99 - cursor.execute('DROP SCHEMA IF EXISTS test_schema CASCADE') - cursor.execute('CREATE SCHEMA test_schema') - cursor.execute('CREATE TABLE test_schema.test_table (id integer, value integer)') - cursor.execute('INSERT INTO test_schema.test_table SELECT i, 100 FROM generate_series(0, 99) as t(i)') + cursor.execute("DROP SCHEMA IF EXISTS test_schema CASCADE") + cursor.execute("CREATE SCHEMA test_schema") + cursor.execute("CREATE TABLE test_schema.test_table (id integer, value integer)") + cursor.execute( + "INSERT INTO test_schema.test_table SELECT i, 100 FROM generate_series(0, 99) as t(i)" + ) - node1.query(''' + node1.query( + """ DROP DICTIONARY postgres_dict; CREATE DICTIONARY postgres_dict (id UInt32, value UInt32) PRIMARY KEY id SOURCE(POSTGRESQL(NAME postgres1 SCHEMA test_schema)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') + """ + ) result = node1.query("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))") - assert(int(result.strip()) == 100) + assert int(result.strip()) == 100 - node1.query(''' + node1.query( + """ DROP DICTIONARY postgres_dict; CREATE DICTIONARY postgres_dict (id UInt32, value UInt32) PRIMARY 
KEY id SOURCE(POSTGRESQL(NAME postgres2)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') + """ + ) result = node1.query("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))") - assert(int(result.strip()) == 100) + assert int(result.strip()) == 100 - node1.query('DROP DICTIONARY postgres_dict') - node1.query(''' + node1.query("DROP DICTIONARY postgres_dict") + node1.query( + """ CREATE DICTIONARY postgres_dict (id UInt32, value UInt32) PRIMARY KEY id SOURCE(POSTGRESQL(NAME postgres4)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') - result = node1.query_and_get_error("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))") + """ + ) + result = node1.query_and_get_error( + "SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))" + ) - node1.query(''' + node1.query( + """ DROP DICTIONARY postgres_dict; CREATE DICTIONARY postgres_dict (id UInt32, value UInt32) PRIMARY KEY id SOURCE(POSTGRESQL(NAME postgres1 PORT 5432)) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') + """ + ) result = node1.query("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))") - assert(int(result.strip()) == 99) + assert int(result.strip()) == 99 def test_bad_configuration(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - node1.query(''' + node1.query( + """ DROP DICTIONARY IF EXISTS postgres_dict; CREATE DICTIONARY postgres_dict (id UInt32, value UInt32) PRIMARY KEY id @@ -388,13 +570,16 @@ def test_bad_configuration(started_cluster): table 'test_schema.test_table')) LIFETIME(MIN 1 MAX 2) LAYOUT(HASHED()); - ''') + """ + ) - node1.query_and_get_error("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(1))") - assert node1.contains_in_log('Unexpected key `dbbb`') + node1.query_and_get_error( + "SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(1))" + ) + assert node1.contains_in_log("Unexpected key `dbbb`") -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_dictionaries_redis/test.py b/tests/integration/test_dictionaries_redis/test.py index e5a51bcb88a..bc8170ab08d 100644 --- a/tests/integration/test_dictionaries_redis/test.py +++ b/tests/integration/test_dictionaries_redis/test.py @@ -7,43 +7,36 @@ from helpers.external_sources import SourceRedis cluster = None SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -dict_configs_path = os.path.join(SCRIPT_DIR, 'configs/dictionaries') +dict_configs_path = os.path.join(SCRIPT_DIR, "configs/dictionaries") node = None KEY_FIELDS = { - "simple": [ - Field("KeyField", 'UInt64', is_key=True, default_value_for_get=9999999) - ], + "simple": [Field("KeyField", "UInt64", is_key=True, default_value_for_get=9999999)], "complex": [ - Field("KeyField1", 'UInt64', is_key=True, default_value_for_get=9999999), - Field("KeyField2", 'String', is_key=True, default_value_for_get='xxxxxxxxx'), - ] + Field("KeyField1", "UInt64", is_key=True, default_value_for_get=9999999), + Field("KeyField2", "String", is_key=True, default_value_for_get="xxxxxxxxx"), + ], } -KEY_VALUES = { - "simple": [ - [1], [2] - ], - "complex": [ - [1, 'world'], [2, 'qwerty2'] - ] -} +KEY_VALUES = {"simple": [[1], [2]], "complex": [[1, "world"], [2, "qwerty2"]]} FIELDS = [ - Field("UInt8_", 'UInt8', default_value_for_get=55), - 
Field("UInt16_", 'UInt16', default_value_for_get=66), - Field("UInt32_", 'UInt32', default_value_for_get=77), - Field("UInt64_", 'UInt64', default_value_for_get=88), - Field("Int8_", 'Int8', default_value_for_get=-55), - Field("Int16_", 'Int16', default_value_for_get=-66), - Field("Int32_", 'Int32', default_value_for_get=-77), - Field("Int64_", 'Int64', default_value_for_get=-88), - Field("UUID_", 'UUID', default_value_for_get='550e8400-0000-0000-0000-000000000000'), - Field("Date_", 'Date', default_value_for_get='2018-12-30'), - Field("DateTime_", 'DateTime', default_value_for_get='2018-12-30 00:00:00'), - Field("String_", 'String', default_value_for_get='hi'), - Field("Float32_", 'Float32', default_value_for_get=555.11), - Field("Float64_", 'Float64', default_value_for_get=777.11), + Field("UInt8_", "UInt8", default_value_for_get=55), + Field("UInt16_", "UInt16", default_value_for_get=66), + Field("UInt32_", "UInt32", default_value_for_get=77), + Field("UInt64_", "UInt64", default_value_for_get=88), + Field("Int8_", "Int8", default_value_for_get=-55), + Field("Int16_", "Int16", default_value_for_get=-66), + Field("Int32_", "Int32", default_value_for_get=-77), + Field("Int64_", "Int64", default_value_for_get=-88), + Field( + "UUID_", "UUID", default_value_for_get="550e8400-0000-0000-0000-000000000000" + ), + Field("Date_", "Date", default_value_for_get="2018-12-30"), + Field("DateTime_", "DateTime", default_value_for_get="2018-12-30 00:00:00"), + Field("String_", "String", default_value_for_get="hi"), + Field("Float32_", "Float32", default_value_for_get=555.11), + Field("Float64_", "Float64", default_value_for_get=777.11), ] VALUES = [ @@ -55,10 +48,10 @@ VALUES = [ [-77, -8], [-888, -9], [-999, -10], - ['550e8400-e29b-41d4-a716-446655440003', '550e8400-e29b-41d4-a716-446655440002'], - ['1973-06-28', '1978-06-28'], - ['1985-02-28 23:43:25', '1986-02-28 23:42:25'], - ['hello', 'hello'], + ["550e8400-e29b-41d4-a716-446655440003", "550e8400-e29b-41d4-a716-446655440002"], + ["1973-06-28", "1978-06-28"], + ["1985-02-28 23:43:25", "1986-02-28 23:42:25"], + ["hello", "hello"], [22.543, 21.543], [3332154213.4, 3222154213.4], ] @@ -70,19 +63,21 @@ LAYOUTS = [ Layout("complex_key_hashed"), Layout("complex_key_cache"), Layout("direct"), - Layout("complex_key_direct") + Layout("complex_key_direct"), ] DICTIONARIES = [] -def get_dict(source, layout, fields, suffix_name=''): +def get_dict(source, layout, fields, suffix_name=""): global dict_configs_path structure = DictionaryStructure(layout, fields) - dict_name = source.name + "_" + layout.name + '_' + suffix_name - dict_path = os.path.join(dict_configs_path, dict_name + '.xml') - dictionary = Dictionary(dict_name, structure, source, dict_path, "table_" + dict_name, fields) + dict_name = source.name + "_" + layout.name + "_" + suffix_name + dict_path = os.path.join(dict_configs_path, dict_name + ".xml") + dictionary = Dictionary( + dict_name, structure, source, dict_path, "table_" + dict_name, fields + ) dictionary.generate_config() return dictionary @@ -102,14 +97,38 @@ def setup_module(module): for i, field in enumerate(FIELDS): DICTIONARIES.append([]) sources = [] - sources.append(SourceRedis("RedisSimple", "localhost", cluster.redis_port, cluster.redis_host, "6379", "", "clickhouse", i * 2, - storage_type="simple")) - sources.append(SourceRedis("RedisHash", "localhost", cluster.redis_port, cluster.redis_host, "6379", "", "clickhouse", i * 2 + 1, - storage_type="hash_map")) + sources.append( + SourceRedis( + "RedisSimple", + "localhost", + 
cluster.redis_port, + cluster.redis_host, + "6379", + "", + "clickhouse", + i * 2, + storage_type="simple", + ) + ) + sources.append( + SourceRedis( + "RedisHash", + "localhost", + cluster.redis_port, + cluster.redis_host, + "6379", + "", + "clickhouse", + i * 2 + 1, + storage_type="hash_map", + ) + ) for source in sources: for layout in LAYOUTS: if not source.compatible_with_layout(layout): - print("Source", source.name, "incompatible with layout", layout.name) + print( + "Source", source.name, "incompatible with layout", layout.name + ) continue fields = KEY_FIELDS[layout.layout_type] + [field] @@ -120,7 +139,9 @@ def setup_module(module): for fname in os.listdir(dict_configs_path): dictionaries.append(os.path.join(dict_configs_path, fname)) - node = cluster.add_instance('node', main_configs=main_configs, dictionaries=dictionaries, with_redis=True) + node = cluster.add_instance( + "node", main_configs=main_configs, dictionaries=dictionaries, with_redis=True + ) @pytest.fixture(scope="module", autouse=True) @@ -142,7 +163,7 @@ def started_cluster(): @pytest.mark.parametrize("id", list(range(len(FIELDS)))) def test_redis_dictionaries(started_cluster, id): - print('id:', id) + print("id:", id) dicts = DICTIONARIES[id] values = VALUES[id] @@ -176,7 +197,7 @@ def test_redis_dictionaries(started_cluster, id): for query, answer in queries_with_answers: print(query) - assert node.query(query) == str(answer) + '\n' + assert node.query(query) == str(answer) + "\n" # Checks, that dictionaries can be reloaded. node.query("system reload dictionaries") diff --git a/tests/integration/test_dictionaries_redis/test_long.py b/tests/integration/test_dictionaries_redis/test_long.py index 3f29403df62..094df789704 100644 --- a/tests/integration/test_dictionaries_redis/test_long.py +++ b/tests/integration/test_dictionaries_redis/test_long.py @@ -4,7 +4,8 @@ import redis cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', with_redis=True) +node = cluster.add_instance("node", with_redis=True) + @pytest.fixture(scope="module") def start_cluster(): @@ -12,12 +13,15 @@ def start_cluster(): cluster.start() N = 1000 - client = redis.Redis(host='localhost', port=cluster.redis_port, password='clickhouse', db=0) + client = redis.Redis( + host="localhost", port=cluster.redis_port, password="clickhouse", db=0 + ) client.flushdb() for i in range(N): - client.hset('2020-10-10', i, i) + client.hset("2020-10-10", i, i) - node.query(""" + node.query( + """ CREATE DICTIONARY redis_dict ( date String, @@ -27,10 +31,13 @@ def start_cluster(): PRIMARY KEY date, id SOURCE(REDIS(HOST '{}' PORT 6379 STORAGE_TYPE 'hash_map' DB_INDEX 0 PASSWORD 'clickhouse')) LAYOUT(COMPLEX_KEY_DIRECT()) - """.format(cluster.redis_host) + """.format( + cluster.redis_host + ) ) - node.query(""" + node.query( + """ CREATE TABLE redis_dictionary_test ( date Date, @@ -39,13 +46,24 @@ def start_cluster(): ENGINE = MergeTree ORDER BY id""" ) - node.query("INSERT INTO default.redis_dictionary_test SELECT '2020-10-10', number FROM numbers(1000000)") + node.query( + "INSERT INTO default.redis_dictionary_test SELECT '2020-10-10', number FROM numbers(1000000)" + ) yield cluster finally: cluster.shutdown() + def test_redis_dict_long(start_cluster): - assert node.query("SELECT count(), uniqExact(date), uniqExact(id) FROM redis_dict") == "1000\t1\t1000\n" - assert node.query("SELECT count(DISTINCT dictGet('redis_dict', 'value', tuple(date, id % 1000))) FROM redis_dictionary_test") == "1000\n" + assert ( + node.query("SELECT count(), 
uniqExact(date), uniqExact(id) FROM redis_dict") + == "1000\t1\t1000\n" + ) + assert ( + node.query( + "SELECT count(DISTINCT dictGet('redis_dict', 'value', tuple(date, id % 1000))) FROM redis_dictionary_test" + ) + == "1000\n" + ) diff --git a/tests/integration/test_dictionaries_select_all/generate_dictionaries.py b/tests/integration/test_dictionaries_select_all/generate_dictionaries.py index 4208615bdc3..31480974d86 100644 --- a/tests/integration/test_dictionaries_select_all/generate_dictionaries.py +++ b/tests/integration/test_dictionaries_select_all/generate_dictionaries.py @@ -2,22 +2,38 @@ import difflib import os from functools import reduce -files = ['key_simple.tsv', 'key_complex_integers.tsv', 'key_complex_mixed.tsv'] +files = ["key_simple.tsv", "key_complex_integers.tsv", "key_complex_mixed.tsv"] types = [ - 'UInt8', 'UInt16', 'UInt32', 'UInt64', - 'Int8', 'Int16', 'Int32', 'Int64', - 'Float32', 'Float64', - 'String', - 'Date', 'DateTime' + "UInt8", + "UInt16", + "UInt32", + "UInt64", + "Int8", + "Int16", + "Int32", + "Int64", + "Float32", + "Float64", + "String", + "Date", + "DateTime", ] implicit_defaults = [ - '1', '1', '1', '', - '-1', '-1', '-1', '-1', - '2.71828', '2.71828', - 'implicit-default', - '2015-11-25', '' + "1", + "1", + "1", + "", + "-1", + "-1", + "-1", + "-1", + "2.71828", + "2.71828", + "implicit-default", + "2015-11-25", + "", ] @@ -25,25 +41,22 @@ def generate_structure(): # [ name, key_type, has_parent ] return [ # Simple key dictionaries - ['clickhouse_flat', 0, True], - ['clickhouse_hashed', 0, True], - ['clickhouse_cache', 0, True], - + ["clickhouse_flat", 0, True], + ["clickhouse_hashed", 0, True], + ["clickhouse_cache", 0, True], # Complex key dictionaries with (UInt8, UInt8) key - ['clickhouse_complex_integers_key_hashed', 1, False], - ['clickhouse_complex_integers_key_cache', 1, False], - + ["clickhouse_complex_integers_key_hashed", 1, False], + ["clickhouse_complex_integers_key_cache", 1, False], # Complex key dictionaries with (String, UInt8) key - ['clickhouse_complex_mixed_key_hashed', 2, False], - ['clickhouse_complex_mixed_key_cache', 2, False], - + ["clickhouse_complex_mixed_key_hashed", 2, False], + ["clickhouse_complex_mixed_key_cache", 2, False], # Range hashed dictionary - ['clickhouse_range_hashed', 3, False], + ["clickhouse_range_hashed", 3, False], ] def generate_dictionaries(path, structure): - dictionary_skeleton = ''' + dictionary_skeleton = """ {name} @@ -69,21 +82,23 @@ def generate_dictionaries(path, structure): {parent} - ''' - attribute_skeleton = ''' +
""" + attribute_skeleton = """ %s_ %s %s - ''' + """ - dictionary_skeleton = \ - dictionary_skeleton % reduce( - lambda xml, type_default: xml + attribute_skeleton % (type_default[0], type_default[0], type_default[1]), - list(zip(types, implicit_defaults)), '') + dictionary_skeleton = dictionary_skeleton % reduce( + lambda xml, type_default: xml + + attribute_skeleton % (type_default[0], type_default[0], type_default[1]), + list(zip(types, implicit_defaults)), + "", + ) - source_clickhouse = ''' + source_clickhouse = """ localhost 9000 @@ -92,21 +107,23 @@ def generate_dictionaries(path, structure): test dictionary_source
- ''' + """ - layout_flat = '' - layout_hashed = '' - layout_cache = '128' - layout_complex_key_hashed = '' - layout_complex_key_cache = '128' - layout_range_hashed = '' + layout_flat = "" + layout_hashed = "" + layout_cache = "128" + layout_complex_key_hashed = "" + layout_complex_key_cache = ( + "128" + ) + layout_range_hashed = "" - key_simple = ''' + key_simple = """ id - ''' - key_complex_integers = ''' + """ + key_complex_integers = """ key0 @@ -118,8 +135,8 @@ def generate_dictionaries(path, structure): UInt8 - ''' - key_complex_mixed = ''' + """ + key_complex_mixed = """ key0_str @@ -131,9 +148,9 @@ def generate_dictionaries(path, structure): UInt8 - ''' + """ - key_range_hashed = ''' + key_range_hashed = """ id @@ -143,32 +160,29 @@ def generate_dictionaries(path, structure): EndDate - ''' + """ keys = [key_simple, key_complex_integers, key_complex_mixed, key_range_hashed] - parent_attribute = ''' + parent_attribute = """ Parent UInt64 true 0 - ''' + """ sources_and_layouts = [ # Simple key dictionaries [source_clickhouse, layout_flat], [source_clickhouse, layout_hashed], [source_clickhouse, layout_cache], - # Complex key dictionaries with (UInt8, UInt8) key [source_clickhouse, layout_complex_key_hashed], [source_clickhouse, layout_complex_key_cache], - # Complex key dictionaries with (String, UInt8) key [source_clickhouse, layout_complex_key_hashed], [source_clickhouse, layout_complex_key_cache], - # Range hashed dictionary [source_clickhouse, layout_range_hashed], ] @@ -176,12 +190,17 @@ def generate_dictionaries(path, structure): file_names = [] # Generate dictionaries. - for (name, key_idx, has_parent), (source, layout) in zip(structure, sources_and_layouts): - filename = os.path.join(path, 'dictionary_%s.xml' % name) + for (name, key_idx, has_parent), (source, layout) in zip( + structure, sources_and_layouts + ): + filename = os.path.join(path, "dictionary_%s.xml" % name) file_names.append(filename) - with open(filename, 'w') as file: + with open(filename, "w") as file: dictionary_xml = dictionary_skeleton.format( - key=keys[key_idx], parent=parent_attribute if has_parent else '', **locals()) + key=keys[key_idx], + parent=parent_attribute if has_parent else "", + **locals() + ) file.write(dictionary_xml) return file_names @@ -189,77 +208,99 @@ def generate_dictionaries(path, structure): class DictionaryTestTable: def __init__(self, source_file_name): - self.structure = '''id UInt64, key0 UInt8, key0_str String, key1 UInt8, + self.structure = """id UInt64, key0 UInt8, key0_str String, key1 UInt8, StartDate Date, EndDate Date, UInt8_ UInt8, UInt16_ UInt16, UInt32_ UInt32, UInt64_ UInt64, Int8_ Int8, Int16_ Int16, Int32_ Int32, Int64_ Int64, Float32_ Float32, Float64_ Float64, String_ String, - Date_ Date, DateTime_ DateTime, Parent UInt64''' + Date_ Date, DateTime_ DateTime, Parent UInt64""" - self.names_and_types = list(map(str.split, self.structure.split(','))) + self.names_and_types = list(map(str.split, self.structure.split(","))) self.keys_names_and_types = self.names_and_types[:6] self.values_names_and_types = self.names_and_types[6:] self.source_file_name = source_file_name self.rows = None def create_clickhouse_source(self, instance): - query = ''' + query = """ create database if not exists test; drop table if exists test.dictionary_source; create table test.dictionary_source (%s) engine=Log; insert into test.dictionary_source values %s ; - ''' + """ types = tuple(pair[1] for pair in self.names_and_types) with open(self.source_file_name) as source_file: - lines = 
source_file.read().split('\n') + lines = source_file.read().split("\n") lines = tuple(filter(len, lines)) self.rows = [] def wrap_value(pair): value, type = pair - return "'" + value + "'" if type in ('String', 'Date', 'DateTime') else value + return ( + "'" + value + "'" if type in ("String", "Date", "DateTime") else value + ) def make_tuple(line): - row = tuple(line.split('\t')) + row = tuple(line.split("\t")) self.rows.append(row) - return '(' + ','.join(map(wrap_value, list(zip(row, types)))) + ')' + return "(" + ",".join(map(wrap_value, list(zip(row, types)))) + ")" - values = ','.join(map(make_tuple, lines)) + values = ",".join(map(make_tuple, lines)) print(query % (self.structure, values)) instance.query(query % (self.structure, values)) def get_structure_for_keys(self, keys, enable_parent=True): - structure = ','.join(name + ' ' + type for name, type in self.keys_names_and_types if name in keys) - return structure + ', ' + ','.join(name + ' ' + type for name, type in self.values_names_and_types - if enable_parent or name != 'Parent') + structure = ",".join( + name + " " + type + for name, type in self.keys_names_and_types + if name in keys + ) + return ( + structure + + ", " + + ",".join( + name + " " + type + for name, type in self.values_names_and_types + if enable_parent or name != "Parent" + ) + ) def _build_line_from_row(self, row, names): - return '\t'.join((value for value, (name, type) in zip(row, self.names_and_types) if name in set(names))) + return "\t".join( + ( + value + for value, (name, type) in zip(row, self.names_and_types) + if name in set(names) + ) + ) def compare_rows_by_keys(self, keys, values, lines, add_not_found_rows=True): - rows = [line.rstrip('\n').split('\t') for line in lines] + rows = [line.rstrip("\n").split("\t") for line in lines] diff = [] matched = [] - lines_map = {self._build_line_from_row(row, keys): self._build_line_from_row(row, values) for row in self.rows} + lines_map = { + self._build_line_from_row(row, keys): self._build_line_from_row(row, values) + for row in self.rows + } for row in rows: - key = '\t'.join(row[:len(keys)]) - value = '\t'.join(row[len(keys):]) + key = "\t".join(row[: len(keys)]) + value = "\t".join(row[len(keys) :]) if key in list(lines_map.keys()): pattern_value = lines_map[key] del lines_map[key] if not value == pattern_value: - diff.append((key + '\t' + value, key + '\t' + pattern_value)) + diff.append((key + "\t" + value, key + "\t" + pattern_value)) else: - matched.append((key + '\t' + value, key + '\t' + pattern_value)) + matched.append((key + "\t" + value, key + "\t" + pattern_value)) else: - diff.append((key + '\t' + value, '')) + diff.append((key + "\t" + value, "")) if add_not_found_rows: for key, value in list(lines_map.items()): - diff.append(('', key + '\t' + value)) + diff.append(("", key + "\t" + value)) if not diff: return None @@ -269,13 +310,21 @@ class DictionaryTestTable: right_lines = tuple(pair[1] for pair in diff) return left_lines, right_lines - def compare_by_keys(self, keys, lines, with_parent_column=True, add_not_found_rows=True): - values = [name for name, type in self.values_names_and_types if with_parent_column or name != 'Parent'] + def compare_by_keys( + self, keys, lines, with_parent_column=True, add_not_found_rows=True + ): + values = [ + name + for name, type in self.values_names_and_types + if with_parent_column or name != "Parent" + ] return self.compare_rows_by_keys(keys, values, lines, add_not_found_rows) def process_diff(self, diff): if not diff: - return '' + return "" 
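The compare_rows_by_keys/process_diff hunks above format mismatched rows with difflib.context_diff. A minimal standalone sketch of that rendering step follows; the example rows are made up, and the keyword arguments mirror the ones used in process_diff.

import difflib

def render_diff(received_lines, expected_lines):
    # Same formatting as DictionaryTestTable.process_diff: an empty string
    # means the received rows match the expected rows.
    args = {"fromfile": "received", "tofile": "expected", "lineterm": ""}
    return "\n".join(difflib.context_diff(received_lines, expected_lines, **args))

print(render_diff(["1\tfoo"], ["1\tbar"]))  # prints a context diff
print(render_diff(["1\tfoo"], ["1\tfoo"]))  # prints an empty string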
left_lines, right_lines = diff - args = {'fromfile': 'received', 'tofile': 'expected', 'lineterm': ''} - return '\n'.join(tuple(difflib.context_diff(left_lines, right_lines, **args))[:]) + args = {"fromfile": "received", "tofile": "expected", "lineterm": ""} + return "\n".join( + tuple(difflib.context_diff(left_lines, right_lines, **args))[:] + ) diff --git a/tests/integration/test_dictionaries_select_all/test.py b/tests/integration/test_dictionaries_select_all/test.py index b1bf2e98b25..0a740394129 100644 --- a/tests/integration/test_dictionaries_select_all/test.py +++ b/tests/integration/test_dictionaries_select_all/test.py @@ -4,7 +4,11 @@ import pytest from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV -from .generate_dictionaries import generate_structure, generate_dictionaries, DictionaryTestTable +from .generate_dictionaries import ( + generate_structure, + generate_dictionaries, + DictionaryTestTable, +) SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) @@ -19,11 +23,15 @@ def setup_module(module): global test_table structure = generate_structure() - dictionary_files = generate_dictionaries(os.path.join(SCRIPT_DIR, 'configs/dictionaries'), structure) + dictionary_files = generate_dictionaries( + os.path.join(SCRIPT_DIR, "configs/dictionaries"), structure + ) cluster = ClickHouseCluster(__file__) - instance = cluster.add_instance('instance', dictionaries=dictionary_files) - test_table = DictionaryTestTable(os.path.join(SCRIPT_DIR, 'configs/dictionaries/source.tsv')) + instance = cluster.add_instance("instance", dictionaries=dictionary_files) + test_table = DictionaryTestTable( + os.path.join(SCRIPT_DIR, "configs/dictionaries/source.tsv") + ) @pytest.fixture(scope="module", autouse=True) @@ -31,8 +39,8 @@ def started_cluster(): try: cluster.start() test_table.create_clickhouse_source(instance) - for line in TSV(instance.query('select name from system.dictionaries')).lines: - print(line, end=' ') + for line in TSV(instance.query("select name from system.dictionaries")).lines: + print(line, end=" ") yield cluster @@ -40,18 +48,22 @@ def started_cluster(): cluster.shutdown() -@pytest.fixture(params=[ - # name, keys, use_parent - ('clickhouse_hashed', ('id',), True), - ('clickhouse_flat', ('id',), True), - ('clickhouse_complex_integers_key_hashed', ('key0', 'key1'), False), - ('clickhouse_complex_mixed_key_hashed', ('key0_str', 'key1'), False), - ('clickhouse_range_hashed', ('id', 'StartDate', 'EndDate'), False), -], - ids=['clickhouse_hashed', 'clickhouse_flat', - 'clickhouse_complex_integers_key_hashed', - 'clickhouse_complex_mixed_key_hashed', - 'clickhouse_range_hashed'] +@pytest.fixture( + params=[ + # name, keys, use_parent + ("clickhouse_hashed", ("id",), True), + ("clickhouse_flat", ("id",), True), + ("clickhouse_complex_integers_key_hashed", ("key0", "key1"), False), + ("clickhouse_complex_mixed_key_hashed", ("key0_str", "key1"), False), + ("clickhouse_range_hashed", ("id", "StartDate", "EndDate"), False), + ], + ids=[ + "clickhouse_hashed", + "clickhouse_flat", + "clickhouse_complex_integers_key_hashed", + "clickhouse_complex_mixed_key_hashed", + "clickhouse_range_hashed", + ], ) def dictionary_structure(started_cluster, request): return request.param @@ -62,27 +74,40 @@ def test_select_all(dictionary_structure): query = instance.query structure = test_table.get_structure_for_keys(keys, use_parent) - query(''' + query( + """ DROP TABLE IF EXISTS test.{0} - '''.format(name)) + """.format( + name + ) + ) - create_query = "CREATE TABLE test.{0} 
({1}) engine = Dictionary({0})".format(name, structure) + create_query = "CREATE TABLE test.{0} ({1}) engine = Dictionary({0})".format( + name, structure + ) TSV(query(create_query)) - result = TSV(query('select * from test.{0}'.format(name))) + result = TSV(query("select * from test.{0}".format(name))) - diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=True) + diff = test_table.compare_by_keys( + keys, result.lines, use_parent, add_not_found_rows=True + ) print(test_table.process_diff(diff)) assert not diff -@pytest.fixture(params=[ - # name, keys, use_parent - ('clickhouse_cache', ('id',), True), - ('clickhouse_complex_integers_key_cache', ('key0', 'key1'), False), - ('clickhouse_complex_mixed_key_cache', ('key0_str', 'key1'), False) -], - ids=['clickhouse_cache', 'clickhouse_complex_integers_key_cache', 'clickhouse_complex_mixed_key_cache'] +@pytest.fixture( + params=[ + # name, keys, use_parent + ("clickhouse_cache", ("id",), True), + ("clickhouse_complex_integers_key_cache", ("key0", "key1"), False), + ("clickhouse_complex_mixed_key_cache", ("key0_str", "key1"), False), + ], + ids=[ + "clickhouse_cache", + "clickhouse_complex_integers_key_cache", + "clickhouse_complex_mixed_key_cache", + ], ) def cached_dictionary_structure(started_cluster, request): return request.param @@ -93,32 +118,42 @@ def test_select_all_from_cached(cached_dictionary_structure): query = instance.query structure = test_table.get_structure_for_keys(keys, use_parent) - query(''' + query( + """ DROP TABLE IF EXISTS test.{0} - '''.format(name)) + """.format( + name + ) + ) - create_query = "CREATE TABLE test.{0} ({1}) engine = Dictionary({0})".format(name, structure) + create_query = "CREATE TABLE test.{0} ({1}) engine = Dictionary({0})".format( + name, structure + ) TSV(query(create_query)) for i in range(4): - result = TSV(query('select * from test.{0}'.format(name))) - diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=False) + result = TSV(query("select * from test.{0}".format(name))) + diff = test_table.compare_by_keys( + keys, result.lines, use_parent, add_not_found_rows=False + ) print(test_table.process_diff(diff)) assert not diff key = [] for key_name in keys: - if key_name.endswith('str'): + if key_name.endswith("str"): key.append("'" + str(i) + "'") else: key.append(str(i)) if len(key) == 1: - key = 'toUInt64(' + str(i) + ')' + key = "toUInt64(" + str(i) + ")" else: - key = str('(' + ','.join(key) + ')') + key = str("(" + ",".join(key) + ")") query("select dictGetUInt8('{0}', 'UInt8_', {1})".format(name, key)) - result = TSV(query('select * from test.{0}'.format(name))) - diff = test_table.compare_by_keys(keys, result.lines, use_parent, add_not_found_rows=True) + result = TSV(query("select * from test.{0}".format(name))) + diff = test_table.compare_by_keys( + keys, result.lines, use_parent, add_not_found_rows=True + ) print(test_table.process_diff(diff)) assert not diff diff --git a/tests/integration/test_dictionaries_update_and_reload/test.py b/tests/integration/test_dictionaries_update_and_reload/test.py index 9bee5db8ce1..a973b697d0d 100644 --- a/tests/integration/test_dictionaries_update_and_reload/test.py +++ b/tests/integration/test_dictionaries_update_and_reload/test.py @@ -7,11 +7,16 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -DICTIONARY_FILES = ['configs/dictionaries/cache_xypairs.xml', 
'configs/dictionaries/executable.xml', - 'configs/dictionaries/file.xml', 'configs/dictionaries/file.txt', 'configs/dictionaries/slow.xml'] +DICTIONARY_FILES = [ + "configs/dictionaries/cache_xypairs.xml", + "configs/dictionaries/executable.xml", + "configs/dictionaries/file.xml", + "configs/dictionaries/file.txt", + "configs/dictionaries/slow.xml", +] cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', dictionaries=DICTIONARY_FILES) +instance = cluster.add_instance("instance", dictionaries=DICTIONARY_FILES) @pytest.fixture(scope="module") @@ -27,18 +32,29 @@ def started_cluster(): def get_status(dictionary_name): - return instance.query("SELECT status FROM system.dictionaries WHERE name='" + dictionary_name + "'").rstrip("\n") + return instance.query( + "SELECT status FROM system.dictionaries WHERE name='" + dictionary_name + "'" + ).rstrip("\n") def get_last_exception(dictionary_name): - return instance.query("SELECT last_exception FROM system.dictionaries WHERE name='" + dictionary_name + "'").rstrip( - "\n").replace("\\'", "'") + return ( + instance.query( + "SELECT last_exception FROM system.dictionaries WHERE name='" + + dictionary_name + + "'" + ) + .rstrip("\n") + .replace("\\'", "'") + ) def get_loading_start_time(dictionary_name): s = instance.query( - "SELECT toTimeZone(loading_start_time, 'UTC') FROM system.dictionaries WHERE name='" + dictionary_name + "'").rstrip( - "\n") + "SELECT toTimeZone(loading_start_time, 'UTC') FROM system.dictionaries WHERE name='" + + dictionary_name + + "'" + ).rstrip("\n") if s == "1970-01-01 00:00:00": return None return time.strptime(s, "%Y-%m-%d %H:%M:%S") @@ -46,8 +62,10 @@ def get_loading_start_time(dictionary_name): def get_last_successful_update_time(dictionary_name): s = instance.query( - "SELECT toTimeZone(last_successful_update_time, 'UTC') FROM system.dictionaries WHERE name='" + dictionary_name + "'").rstrip( - "\n") + "SELECT toTimeZone(last_successful_update_time, 'UTC') FROM system.dictionaries WHERE name='" + + dictionary_name + + "'" + ).rstrip("\n") if s == "1970-01-01 00:00:00": return None return time.strptime(s, "%Y-%m-%d %H:%M:%S") @@ -55,60 +73,67 @@ def get_last_successful_update_time(dictionary_name): def get_loading_duration(dictionary_name): return float( - instance.query("SELECT loading_duration FROM system.dictionaries WHERE name='" + dictionary_name + "'")) + instance.query( + "SELECT loading_duration FROM system.dictionaries WHERE name='" + + dictionary_name + + "'" + ) + ) def replace_in_file_in_container(file_name, what, replace_with): - instance.exec_in_container(['sed', '-i', f's/{what}/{replace_with}/g', file_name]) + instance.exec_in_container(["sed", "-i", f"s/{what}/{replace_with}/g", file_name]) def test_reload_while_loading(started_cluster): query = instance.query # dictionaries_lazy_load == false, so this dictionary is not loaded. - assert get_status('slow') == "NOT_LOADED" - assert get_loading_duration('slow') == 0 + assert get_status("slow") == "NOT_LOADED" + assert get_loading_duration("slow") == 0 # It's not possible to get a value from the dictionary within 0.5 second, so the following query fails by timeout. with pytest.raises(QueryTimeoutExceedException): query("SELECT dictGetInt32('slow', 'a', toUInt64(5))", timeout=0.5) # The dictionary is now loading. 
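The helpers reformatted above (get_status, get_last_exception, get_loading_duration, and so on) each issue a separate SELECT against system.dictionaries. As a sketch under the same assumptions (a `run_query` callable such as `instance.query`; the column names are the ones those helpers already read), several fields can be fetched in one query:

def dictionary_info(run_query, name, columns=("status", "loading_duration", "last_exception")):
    # Fetch selected system.dictionaries columns for one dictionary in a
    # single query; the columns listed here are the same ones the helpers
    # above query one by one.
    sql = "SELECT {} FROM system.dictionaries WHERE name = '{}'".format(
        ", ".join(columns), name
    )
    values = run_query(sql).rstrip("\n").split("\t")
    return dict(zip(columns, values))

# dictionary_info(instance.query, "slow")["status"] would return e.g. "LOADING".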
- assert get_status('slow') == "LOADING" - start_time, duration = get_loading_start_time('slow'), get_loading_duration('slow') + assert get_status("slow") == "LOADING" + start_time, duration = get_loading_start_time("slow"), get_loading_duration("slow") assert duration > 0 time.sleep(0.5) # Still loading. - assert get_status('slow') == "LOADING" + assert get_status("slow") == "LOADING" prev_start_time, prev_duration = start_time, duration - start_time, duration = get_loading_start_time('slow'), get_loading_duration('slow') + start_time, duration = get_loading_start_time("slow"), get_loading_duration("slow") assert start_time == prev_start_time assert duration >= prev_duration # SYSTEM RELOAD DICTIONARY should restart loading. with pytest.raises(QueryTimeoutExceedException): query("SYSTEM RELOAD DICTIONARY 'slow'", timeout=0.5) - assert get_status('slow') == "LOADING" + assert get_status("slow") == "LOADING" prev_start_time, prev_duration = start_time, duration - start_time, duration = get_loading_start_time('slow'), get_loading_duration('slow') + start_time, duration = get_loading_start_time("slow"), get_loading_duration("slow") assert start_time > prev_start_time assert duration < prev_duration time.sleep(0.5) # Still loading. - assert get_status('slow') == "LOADING" + assert get_status("slow") == "LOADING" prev_start_time, prev_duration = start_time, duration - start_time, duration = get_loading_start_time('slow'), get_loading_duration('slow') + start_time, duration = get_loading_start_time("slow"), get_loading_duration("slow") assert start_time == prev_start_time assert duration >= prev_duration # Changing the configuration file should restart loading again. - replace_in_file_in_container('/etc/clickhouse-server/dictionaries/slow.xml', 'sleep 100', 'sleep 0') + replace_in_file_in_container( + "/etc/clickhouse-server/dictionaries/slow.xml", "sleep 100", "sleep 0" + ) time.sleep(5) # Configuration files are reloaded once in 5 seconds. # This time loading should finish quickly. - assert get_status('slow') == "LOADED" + assert get_status("slow") == "LOADED" - last_successful_update_time = get_last_successful_update_time('slow') + last_successful_update_time = get_last_successful_update_time("slow") assert last_successful_update_time > start_time assert query("SELECT dictGetInt32('slow', 'a', toUInt64(5))") == "6\n" @@ -124,8 +149,12 @@ def test_reload_after_loading(started_cluster): # for mtime, and clickhouse will miss the update if we change the file too # soon. Should probably be fixed by switching to use std::filesystem. time.sleep(1) - replace_in_file_in_container('/etc/clickhouse-server/dictionaries/executable.xml', '8', '81') - replace_in_file_in_container('/etc/clickhouse-server/dictionaries/file.txt', '10', '101') + replace_in_file_in_container( + "/etc/clickhouse-server/dictionaries/executable.xml", "8", "81" + ) + replace_in_file_in_container( + "/etc/clickhouse-server/dictionaries/file.txt", "10", "101" + ) # SYSTEM RELOAD 'name' reloads only the specified dictionary. query("SYSTEM RELOAD DICTIONARY 'executable'") @@ -138,8 +167,12 @@ def test_reload_after_loading(started_cluster): # SYSTEM RELOAD DICTIONARIES reloads all loaded dictionaries. 
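replace_in_file_in_container, reformatted in the hunk above, shells out to `sed -i` through instance.exec_in_container so the substitution happens inside the server container. A local sketch of the same substitution, assuming GNU sed on PATH; the path and patterns below are illustrative:

import subprocess

def replace_in_file(path, what, replace_with):
    # Local equivalent of replace_in_file_in_container: an in-place sed
    # substitution on a file; the test helper runs the same command inside
    # the container instead of on the host.
    subprocess.check_call(["sed", "-i", "s/{}/{}/g".format(what, replace_with), path])

# replace_in_file("slow.xml", "sleep 100", "sleep 0") mirrors the call above.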
time.sleep(1) # see the comment above - replace_in_file_in_container('/etc/clickhouse-server/dictionaries/executable.xml', '81', '82') - replace_in_file_in_container('/etc/clickhouse-server/dictionaries/file.txt', '101', '102') + replace_in_file_in_container( + "/etc/clickhouse-server/dictionaries/executable.xml", "81", "82" + ) + replace_in_file_in_container( + "/etc/clickhouse-server/dictionaries/file.txt", "101", "102" + ) query("SYSTEM RELOAD DICTIONARY 'file'") query("SYSTEM RELOAD DICTIONARY 'executable'") assert query("SELECT dictGetInt32('executable', 'a', toUInt64(7))") == "82\n" @@ -148,8 +181,12 @@ def test_reload_after_loading(started_cluster): # Configuration files are reloaded and lifetimes are checked automatically once in 5 seconds. # Wait slightly more, to be sure it did reload. time.sleep(1) # see the comment above - replace_in_file_in_container('/etc/clickhouse-server/dictionaries/executable.xml', '82', '83') - replace_in_file_in_container('/etc/clickhouse-server/dictionaries/file.txt', '102', '103') + replace_in_file_in_container( + "/etc/clickhouse-server/dictionaries/executable.xml", "82", "83" + ) + replace_in_file_in_container( + "/etc/clickhouse-server/dictionaries/file.txt", "102", "103" + ) time.sleep(10) assert query("SELECT dictGetInt32('file', 'a', toUInt64(9))") == "103\n" assert query("SELECT dictGetInt32('executable', 'a', toUInt64(7))") == "83\n" @@ -163,24 +200,36 @@ def test_reload_after_fail_by_system_reload(started_cluster): # We expect an error because the file source doesn't exist. no_such_file_error = "No such file" - assert no_such_file_error in instance.query_and_get_error("SELECT dictGetInt32('no_file', 'a', toUInt64(9))") + assert no_such_file_error in instance.query_and_get_error( + "SELECT dictGetInt32('no_file', 'a', toUInt64(9))" + ) assert get_status("no_file") == "FAILED" # SYSTEM RELOAD should not change anything now, the status is still FAILED. - assert no_such_file_error in instance.query_and_get_error("SYSTEM RELOAD DICTIONARY 'no_file'") - assert no_such_file_error in instance.query_and_get_error("SELECT dictGetInt32('no_file', 'a', toUInt64(9))") + assert no_such_file_error in instance.query_and_get_error( + "SYSTEM RELOAD DICTIONARY 'no_file'" + ) + assert no_such_file_error in instance.query_and_get_error( + "SELECT dictGetInt32('no_file', 'a', toUInt64(9))" + ) assert get_status("no_file") == "FAILED" # Creating the file source makes the dictionary able to load. - instance.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/dictionaries/file.txt"), - "/etc/clickhouse-server/dictionaries/no_file.txt") + instance.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/dictionaries/file.txt"), + "/etc/clickhouse-server/dictionaries/no_file.txt", + ) query("SYSTEM RELOAD DICTIONARY 'no_file'") query("SELECT dictGetInt32('no_file', 'a', toUInt64(9))") == "10\n" assert get_status("no_file") == "LOADED" # Removing the file source should not spoil the loaded dictionary. 
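These tests copy configs/dictionaries/file.txt into the container as the dictionary's FILE source, and a later assertion in this file shows it holds the single tab-separated row 9\t10. A sketch that writes such a flat source file; the path and rows are illustrative, not part of the diff:

def write_flat_source(path, rows):
    # Write key/value pairs in the tab-separated layout the FILE dictionary
    # source of these tests expects (file.txt holds the row "9\t10").
    with open(path, "w") as f:
        for key, value in rows:
            f.write("{}\t{}\n".format(key, value))

# write_flat_source("no_file.txt", [(9, 10)]) reproduces that fixture content.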
- instance.exec_in_container(["rm", "/etc/clickhouse-server/dictionaries/no_file.txt"]) - assert no_such_file_error in instance.query_and_get_error("SYSTEM RELOAD DICTIONARY 'no_file'") + instance.exec_in_container( + ["rm", "/etc/clickhouse-server/dictionaries/no_file.txt"] + ) + assert no_such_file_error in instance.query_and_get_error( + "SYSTEM RELOAD DICTIONARY 'no_file'" + ) query("SELECT dictGetInt32('no_file', 'a', toUInt64(9))") == "10\n" assert get_status("no_file") == "LOADED" @@ -191,28 +240,38 @@ def test_reload_after_fail_by_timer(started_cluster): # We expect an error because the file source doesn't exist. expected_error = "No such file" - assert expected_error in instance.query_and_get_error("SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))") + assert expected_error in instance.query_and_get_error( + "SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))" + ) assert get_status("no_file_2") == "FAILED" # Passed time should not change anything now, the status is still FAILED. - time.sleep(6); - assert expected_error in instance.query_and_get_error("SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))") + time.sleep(6) + assert expected_error in instance.query_and_get_error( + "SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))" + ) assert get_status("no_file_2") == "FAILED" # Creating the file source makes the dictionary able to load. - instance.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/dictionaries/file.txt"), - "/etc/clickhouse-server/dictionaries/no_file_2.txt") + instance.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/dictionaries/file.txt"), + "/etc/clickhouse-server/dictionaries/no_file_2.txt", + ) # Check that file appears in container and wait if needed. while not instance.path_exists("/etc/clickhouse-server/dictionaries/no_file_2.txt"): time.sleep(1) - assert("9\t10\n" == instance.exec_in_container(["cat", "/etc/clickhouse-server/dictionaries/no_file_2.txt"])) + assert "9\t10\n" == instance.exec_in_container( + ["cat", "/etc/clickhouse-server/dictionaries/no_file_2.txt"] + ) instance.query("SYSTEM RELOAD DICTIONARY no_file_2") instance.query("SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))") == "10\n" assert get_status("no_file_2") == "LOADED" # Removing the file source should not spoil the loaded dictionary. - instance.exec_in_container(["rm", "/etc/clickhouse-server/dictionaries/no_file_2.txt"]) - time.sleep(6); + instance.exec_in_container( + ["rm", "/etc/clickhouse-server/dictionaries/no_file_2.txt"] + ) + time.sleep(6) instance.query("SELECT dictGetInt32('no_file_2', 'a', toUInt64(9))") == "10\n" assert get_status("no_file_2") == "LOADED" @@ -224,24 +283,33 @@ def test_reload_after_fail_in_cache_dictionary(started_cluster): # Can't get a value from the cache dictionary because the source (table `test.xypairs`) doesn't respond. expected_error = "Table test.xypairs doesn't exist" update_error = "Could not update cache dictionary cache_xypairs now" - assert expected_error in query_and_get_error("SELECT dictGetUInt64('cache_xypairs', 'y', toUInt64(1))") + assert expected_error in query_and_get_error( + "SELECT dictGetUInt64('cache_xypairs', 'y', toUInt64(1))" + ) assert get_status("cache_xypairs") == "LOADED" assert expected_error in get_last_exception("cache_xypairs") # Create table `test.xypairs`. - query(''' + query( + """ DROP TABLE IF EXISTS test.xypairs; CREATE TABLE test.xypairs (x UInt64, y UInt64) ENGINE=Log; INSERT INTO test.xypairs VALUES (1, 56), (3, 78); - ''') + """ + ) # Cache dictionary now works. 
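assert_eq_with_retry used below comes from helpers.test_tools. A simplified standalone version of the same retry-until-equal idea is sketched here; the retry count and sleep are assumptions, and the real helper takes more options than the ignore_error flag shown:

import time

def assert_eq_with_retry_simple(run_query, sql, expected, retries=60, sleep=0.5, ignore_error=False):
    # Re-run the query until its stripped output equals `expected`; swallow
    # query errors between attempts only when ignore_error is set, roughly
    # mirroring how the test tolerates the source table lagging behind.
    last = None
    for _ in range(retries):
        try:
            last = run_query(sql).strip()
        except Exception:
            if not ignore_error:
                raise
        else:
            if last == expected:
                return
        time.sleep(sleep)
    raise AssertionError("expected {!r}, last result {!r}".format(expected, last))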
- assert_eq_with_retry(instance, "SELECT dictGet('cache_xypairs', 'y', toUInt64(1))", "56", ignore_error=True) + assert_eq_with_retry( + instance, + "SELECT dictGet('cache_xypairs', 'y', toUInt64(1))", + "56", + ignore_error=True, + ) query("SELECT dictGet('cache_xypairs', 'y', toUInt64(2))") == "0" assert get_last_exception("cache_xypairs") == "" # Drop table `test.xypairs`. - query('DROP TABLE test.xypairs') + query("DROP TABLE test.xypairs") # Values are cached so we can get them. query("SELECT dictGet('cache_xypairs', 'y', toUInt64(1))") == "56" @@ -249,28 +317,36 @@ def test_reload_after_fail_in_cache_dictionary(started_cluster): assert get_last_exception("cache_xypairs") == "" # But we can't get a value from the source table which isn't cached. - assert expected_error in query_and_get_error("SELECT dictGetUInt64('cache_xypairs', 'y', toUInt64(3))") + assert expected_error in query_and_get_error( + "SELECT dictGetUInt64('cache_xypairs', 'y', toUInt64(3))" + ) assert expected_error in get_last_exception("cache_xypairs") # Passed time should not spoil the cache. time.sleep(5) query("SELECT dictGet('cache_xypairs', 'y', toUInt64(1))") == "56" query("SELECT dictGet('cache_xypairs', 'y', toUInt64(2))") == "0" - error = query_and_get_error("SELECT dictGetUInt64('cache_xypairs', 'y', toUInt64(3))") + error = query_and_get_error( + "SELECT dictGetUInt64('cache_xypairs', 'y', toUInt64(3))" + ) assert (expected_error in error) or (update_error in error) last_exception = get_last_exception("cache_xypairs") assert (expected_error in last_exception) or (update_error in last_exception) # Create table `test.xypairs` again with changed values. - query(''' + query( + """ CREATE TABLE test.xypairs (x UInt64, y UInt64) ENGINE=Log; INSERT INTO test.xypairs VALUES (1, 57), (3, 79); - ''') + """ + ) - query('SYSTEM RELOAD DICTIONARY cache_xypairs') + query("SYSTEM RELOAD DICTIONARY cache_xypairs") # The cache dictionary returns new values now. 
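Several checks in this test, such as query("SELECT dictGet('cache_xypairs', 'y', toUInt64(2))") == "0", compare the query output without an assert, so the comparison result is discarded. A small helper that keeps those one-liners short while actually asserting; it is an illustrative sketch, not part of the diff, and `run_query` again stands for the `query` callable used above:

def check_query(run_query, sql, expected):
    # Evaluate the query and assert on the result; a bare `query(...) == "0"`
    # expression is computed and then thrown away.
    result = run_query(sql).rstrip("\n")
    assert result == expected, "{!r} returned {!r}, expected {!r}".format(sql, result, expected)

# check_query(query, "SELECT dictGet('cache_xypairs', 'y', toUInt64(2))", "0")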
- assert_eq_with_retry(instance, "SELECT dictGet('cache_xypairs', 'y', toUInt64(1))", "57") + assert_eq_with_retry( + instance, "SELECT dictGet('cache_xypairs', 'y', toUInt64(1))", "57" + ) query("SELECT dictGet('cache_xypairs', 'y', toUInt64(2))") == "0" query("SELECT dictGet('cache_xypairs', 'y', toUInt64(3))") == "79" assert get_last_exception("cache_xypairs") == "" diff --git a/tests/integration/test_dictionaries_update_field/test.py b/tests/integration/test_dictionaries_update_field/test.py index 8fb0d67e8b8..a98239e3a40 100644 --- a/tests/integration/test_dictionaries_update_field/test.py +++ b/tests/integration/test_dictionaries_update_field/test.py @@ -7,7 +7,8 @@ from helpers.cluster import ClickHouseKiller cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('main_node', main_configs=[]) +node = cluster.add_instance("main_node", main_configs=[]) + @pytest.fixture(scope="module") def started_cluster(): @@ -31,11 +32,15 @@ def started_cluster(): finally: cluster.shutdown() -@pytest.mark.parametrize("dictionary_name,dictionary_type", [ - ("flat_update_field_dictionary", "FLAT"), - ("simple_key_hashed_update_field_dictionary", "HASHED"), - ("complex_key_hashed_update_field_dictionary", "COMPLEX_KEY_HASHED") -]) + +@pytest.mark.parametrize( + "dictionary_name,dictionary_type", + [ + ("flat_update_field_dictionary", "FLAT"), + ("simple_key_hashed_update_field_dictionary", "HASHED"), + ("complex_key_hashed_update_field_dictionary", "COMPLEX_KEY_HASHED"), + ], +) def test_update_field(started_cluster, dictionary_name, dictionary_type): create_dictionary_query = """ CREATE DICTIONARY {dictionary_name} @@ -48,29 +53,53 @@ def test_update_field(started_cluster, dictionary_name, dictionary_type): SOURCE(CLICKHOUSE(table 'table_for_update_field_dictionary' update_field 'last_insert_time')) LAYOUT({dictionary_type}()) LIFETIME(1); - """.format(dictionary_name=dictionary_name, dictionary_type=dictionary_type) + """.format( + dictionary_name=dictionary_name, dictionary_type=dictionary_type + ) node.query(create_dictionary_query) - node.query("INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());") - query_result = node.query("SELECT key, value FROM {dictionary_name} ORDER BY key ASC".format(dictionary_name=dictionary_name)) - assert query_result == '1\tFirst\n' + node.query( + "INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());" + ) + query_result = node.query( + "SELECT key, value FROM {dictionary_name} ORDER BY key ASC".format( + dictionary_name=dictionary_name + ) + ) + assert query_result == "1\tFirst\n" - node.query("INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());") + node.query( + "INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());" + ) time.sleep(10) - query_result = node.query("SELECT key, value FROM {dictionary_name} ORDER BY key ASC".format(dictionary_name=dictionary_name)) + query_result = node.query( + "SELECT key, value FROM {dictionary_name} ORDER BY key ASC".format( + dictionary_name=dictionary_name + ) + ) - assert query_result == '1\tFirst\n2\tSecond\n' + assert query_result == "1\tFirst\n2\tSecond\n" - node.query("INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());") - node.query("INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());") + node.query( + "INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());" + ) + node.query( + "INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', 
now());" + ) time.sleep(10) - query_result = node.query("SELECT key, value FROM {dictionary_name} ORDER BY key ASC".format(dictionary_name=dictionary_name)) + query_result = node.query( + "SELECT key, value FROM {dictionary_name} ORDER BY key ASC".format( + dictionary_name=dictionary_name + ) + ) - assert query_result == '1\tFirst\n2\tSecondUpdated\n3\tThird\n' + assert query_result == "1\tFirst\n2\tSecondUpdated\n3\tThird\n" node.query("TRUNCATE TABLE table_for_update_field_dictionary") - node.query("DROP DICTIONARY {dictionary_name}".format(dictionary_name=dictionary_name)) + node.query( + "DROP DICTIONARY {dictionary_name}".format(dictionary_name=dictionary_name) + ) diff --git a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py index cfd5f4d5607..bb587efa7e9 100644 --- a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py +++ b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py @@ -1,5 +1,3 @@ - - import time import pytest @@ -9,8 +7,10 @@ from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__, name="reading") -dictionary_node = cluster.add_instance('dictionary_node', stay_alive=True) -main_node = cluster.add_instance('main_node', dictionaries=['configs/dictionaries/cache_ints_dictionary.xml']) +dictionary_node = cluster.add_instance("dictionary_node", stay_alive=True) +main_node = cluster.add_instance( + "main_node", dictionaries=["configs/dictionaries/cache_ints_dictionary.xml"] +) @pytest.fixture(scope="module") @@ -19,13 +19,19 @@ def started_cluster(): cluster.start() dictionary_node.query("create database if not exists test;") dictionary_node.query("drop table if exists test.ints;") - dictionary_node.query("create table test.ints " - "(key UInt64, " - "i8 Int8, i16 Int16, i32 Int32, i64 Int64, " - "u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64) " - "Engine = Memory;") - dictionary_node.query("insert into test.ints values (7, 7, 7, 7, 7, 7, 7, 7, 7);") - dictionary_node.query("insert into test.ints values (5, 5, 5, 5, 5, 5, 5, 5, 5);") + dictionary_node.query( + "create table test.ints " + "(key UInt64, " + "i8 Int8, i16 Int16, i32 Int32, i64 Int64, " + "u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64) " + "Engine = Memory;" + ) + dictionary_node.query( + "insert into test.ints values (7, 7, 7, 7, 7, 7, 7, 7, 7);" + ) + dictionary_node.query( + "insert into test.ints values (5, 5, 5, 5, 5, 5, 5, 5, 5);" + ) yield cluster finally: @@ -34,24 +40,68 @@ def started_cluster(): # @pytest.mark.skip(reason="debugging") def test_default_reading(started_cluster): - assert None != dictionary_node.get_process_pid("clickhouse"), "ClickHouse must be alive" + assert None != dictionary_node.get_process_pid( + "clickhouse" + ), "ClickHouse must be alive" # Key 0 is not in dictionary, so default value will be returned def test_helper(): - assert '42' == main_node.query("select dictGetOrDefault('experimental_dict', 'i8', toUInt64(13), toInt8(42));").rstrip() - assert '42' == main_node.query("select dictGetOrDefault('experimental_dict', 'i16', toUInt64(13), toInt16(42));").rstrip() - assert '42' == main_node.query("select dictGetOrDefault('experimental_dict', 'i32', toUInt64(13), toInt32(42));").rstrip() - assert '42' == main_node.query("select dictGetOrDefault('experimental_dict', 'i64', toUInt64(13), toInt64(42));").rstrip() - assert '42' == main_node.query("select dictGetOrDefault('experimental_dict', 
'u8', toUInt64(13), toUInt8(42));").rstrip() - assert '42' == main_node.query("select dictGetOrDefault('experimental_dict', 'u16', toUInt64(13), toUInt16(42));").rstrip() - assert '42' == main_node.query("select dictGetOrDefault('experimental_dict', 'u32', toUInt64(13), toUInt32(42));").rstrip() - assert '42' == main_node.query("select dictGetOrDefault('experimental_dict', 'u64', toUInt64(13), toUInt64(42));").rstrip() + assert ( + "42" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'i8', toUInt64(13), toInt8(42));" + ).rstrip() + ) + assert ( + "42" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'i16', toUInt64(13), toInt16(42));" + ).rstrip() + ) + assert ( + "42" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'i32', toUInt64(13), toInt32(42));" + ).rstrip() + ) + assert ( + "42" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'i64', toUInt64(13), toInt64(42));" + ).rstrip() + ) + assert ( + "42" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'u8', toUInt64(13), toUInt8(42));" + ).rstrip() + ) + assert ( + "42" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'u16', toUInt64(13), toUInt16(42));" + ).rstrip() + ) + assert ( + "42" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'u32', toUInt64(13), toUInt32(42));" + ).rstrip() + ) + assert ( + "42" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'u64', toUInt64(13), toUInt64(42));" + ).rstrip() + ) test_helper() with PartitionManager() as pm, ClickHouseKiller(dictionary_node): - assert None == dictionary_node.get_process_pid("clickhouse"), "ClickHouse must be alive" + assert None == dictionary_node.get_process_pid( + "clickhouse" + ), "ClickHouse must be alive" # Remove connection between main_node and dictionary for sure pm.heal_all() diff --git a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_string.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_string.py index 3611d382b12..7acc26a66e0 100644 --- a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_string.py +++ b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_string.py @@ -1,5 +1,3 @@ - - import os import random import string @@ -11,14 +9,19 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__, name="string") -dictionary_node = cluster.add_instance('dictionary_node', stay_alive=True) -main_node = cluster.add_instance('main_node', dictionaries=['configs/dictionaries/cache_ints_dictionary.xml', - 'configs/dictionaries/cache_strings_default_settings.xml']) +dictionary_node = cluster.add_instance("dictionary_node", stay_alive=True) +main_node = cluster.add_instance( + "main_node", + dictionaries=[ + "configs/dictionaries/cache_ints_dictionary.xml", + "configs/dictionaries/cache_strings_default_settings.xml", + ], +) def get_random_string(string_length=8): alphabet = string.ascii_letters + string.digits - return ''.join((random.choice(alphabet) for _ in range(string_length))) + return "".join((random.choice(alphabet) for _ in range(string_length))) @pytest.fixture(scope="module") @@ -27,15 +30,23 @@ def started_cluster(): cluster.start() dictionary_node.query("CREATE DATABASE IF NOT EXISTS test;") dictionary_node.query("DROP TABLE IF EXISTS test.strings;") - dictionary_node.query(""" + dictionary_node.query( + """ CREATE TABLE test.strings (key UInt64, value String) ENGINE = Memory; - """) + """ + 
) values_to_insert = ", ".join( - ["({}, '{}')".format(1000000 + number, get_random_string()) for number in range(100)]) - dictionary_node.query("INSERT INTO test.strings VALUES {}".format(values_to_insert)) + [ + "({}, '{}')".format(1000000 + number, get_random_string()) + for number in range(100) + ] + ) + dictionary_node.query( + "INSERT INTO test.strings VALUES {}".format(values_to_insert) + ) yield cluster finally: @@ -44,7 +55,9 @@ def started_cluster(): # @pytest.mark.skip(reason="debugging") def test_return_real_values(started_cluster): - assert None != dictionary_node.get_process_pid("clickhouse"), "ClickHouse must be alive" + assert None != dictionary_node.get_process_pid( + "clickhouse" + ), "ClickHouse must be alive" first_batch = """ SELECT count(*) diff --git a/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py index cf2234c0601..05f638ec337 100644 --- a/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py +++ b/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get.py @@ -1,5 +1,3 @@ - - import time import pytest @@ -9,8 +7,10 @@ from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) -dictionary_node = cluster.add_instance('dictionary_node', stay_alive=True) -main_node = cluster.add_instance('main_node', dictionaries=['configs/dictionaries/cache_ints_dictionary.xml']) +dictionary_node = cluster.add_instance("dictionary_node", stay_alive=True) +main_node = cluster.add_instance( + "main_node", dictionaries=["configs/dictionaries/cache_ints_dictionary.xml"] +) @pytest.fixture(scope="module") @@ -19,13 +19,19 @@ def started_cluster(): cluster.start() dictionary_node.query("create database if not exists test;") dictionary_node.query("drop table if exists test.ints;") - dictionary_node.query("create table test.ints " - "(key UInt64, " - "i8 Int8, i16 Int16, i32 Int32, i64 Int64, " - "u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64) " - "Engine = Memory;") - dictionary_node.query("insert into test.ints values (7, 7, 7, 7, 7, 7, 7, 7, 7);") - dictionary_node.query("insert into test.ints values (5, 5, 5, 5, 5, 5, 5, 5, 5);") + dictionary_node.query( + "create table test.ints " + "(key UInt64, " + "i8 Int8, i16 Int16, i32 Int32, i64 Int64, " + "u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64) " + "Engine = Memory;" + ) + dictionary_node.query( + "insert into test.ints values (7, 7, 7, 7, 7, 7, 7, 7, 7);" + ) + dictionary_node.query( + "insert into test.ints values (5, 5, 5, 5, 5, 5, 5, 5, 5);" + ) yield cluster finally: @@ -34,17 +40,59 @@ def started_cluster(): # @pytest.mark.skip(reason="debugging") def test_simple_dict_get(started_cluster): - assert None != dictionary_node.get_process_pid("clickhouse"), "ClickHouse must be alive" + assert None != dictionary_node.get_process_pid( + "clickhouse" + ), "ClickHouse must be alive" def test_helper(): - assert '7' == main_node.query("select dictGet('experimental_dict', 'i8', toUInt64(7));").rstrip(), "Wrong answer." - assert '7' == main_node.query("select dictGet('experimental_dict', 'i16', toUInt64(7));").rstrip(), "Wrong answer." - assert '7' == main_node.query("select dictGet('experimental_dict', 'i32', toUInt64(7));").rstrip(), "Wrong answer." - assert '7' == main_node.query("select dictGet('experimental_dict', 'i64', toUInt64(7));").rstrip(), "Wrong answer." - assert '7' == main_node.query("select dictGet('experimental_dict', 'u8', toUInt64(7));").rstrip(), "Wrong answer." 
- assert '7' == main_node.query("select dictGet('experimental_dict', 'u16', toUInt64(7));").rstrip(), "Wrong answer." - assert '7' == main_node.query("select dictGet('experimental_dict', 'u32', toUInt64(7));").rstrip(), "Wrong answer." - assert '7' == main_node.query("select dictGet('experimental_dict', 'u64', toUInt64(7));").rstrip(), "Wrong answer." + assert ( + "7" + == main_node.query( + "select dictGet('experimental_dict', 'i8', toUInt64(7));" + ).rstrip() + ), "Wrong answer." + assert ( + "7" + == main_node.query( + "select dictGet('experimental_dict', 'i16', toUInt64(7));" + ).rstrip() + ), "Wrong answer." + assert ( + "7" + == main_node.query( + "select dictGet('experimental_dict', 'i32', toUInt64(7));" + ).rstrip() + ), "Wrong answer." + assert ( + "7" + == main_node.query( + "select dictGet('experimental_dict', 'i64', toUInt64(7));" + ).rstrip() + ), "Wrong answer." + assert ( + "7" + == main_node.query( + "select dictGet('experimental_dict', 'u8', toUInt64(7));" + ).rstrip() + ), "Wrong answer." + assert ( + "7" + == main_node.query( + "select dictGet('experimental_dict', 'u16', toUInt64(7));" + ).rstrip() + ), "Wrong answer." + assert ( + "7" + == main_node.query( + "select dictGet('experimental_dict', 'u32', toUInt64(7));" + ).rstrip() + ), "Wrong answer." + assert ( + "7" + == main_node.query( + "select dictGet('experimental_dict', 'u64', toUInt64(7));" + ).rstrip() + ), "Wrong answer." test_helper() diff --git a/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py index df36218fc7b..54c5976f295 100644 --- a/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py +++ b/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py @@ -1,5 +1,3 @@ - - import time import pytest @@ -9,8 +7,10 @@ from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__, name="default") -dictionary_node = cluster.add_instance('dictionary_node', stay_alive=True) -main_node = cluster.add_instance('main_node', dictionaries=['configs/dictionaries/cache_ints_dictionary.xml']) +dictionary_node = cluster.add_instance("dictionary_node", stay_alive=True) +main_node = cluster.add_instance( + "main_node", dictionaries=["configs/dictionaries/cache_ints_dictionary.xml"] +) @pytest.fixture(scope="module") @@ -19,13 +19,19 @@ def started_cluster(): cluster.start() dictionary_node.query("create database if not exists test;") dictionary_node.query("drop table if exists test.ints;") - dictionary_node.query("create table test.ints " - "(key UInt64, " - "i8 Int8, i16 Int16, i32 Int32, i64 Int64, " - "u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64) " - "Engine = Memory;") - dictionary_node.query("insert into test.ints values (7, 7, 7, 7, 7, 7, 7, 7, 7);") - dictionary_node.query("insert into test.ints values (5, 5, 5, 5, 5, 5, 5, 5, 5);") + dictionary_node.query( + "create table test.ints " + "(key UInt64, " + "i8 Int8, i16 Int16, i32 Int32, i64 Int64, " + "u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64) " + "Engine = Memory;" + ) + dictionary_node.query( + "insert into test.ints values (7, 7, 7, 7, 7, 7, 7, 7, 7);" + ) + dictionary_node.query( + "insert into test.ints values (5, 5, 5, 5, 5, 5, 5, 5, 5);" + ) yield cluster finally: @@ -34,17 +40,59 @@ def started_cluster(): # @pytest.mark.skip(reason="debugging") def test_simple_dict_get_or_default(started_cluster): - assert None != 
dictionary_node.get_process_pid("clickhouse"), "ClickHouse must be alive" + assert None != dictionary_node.get_process_pid( + "clickhouse" + ), "ClickHouse must be alive" def test_helper(): - assert '5' == main_node.query("select dictGetOrDefault('experimental_dict', 'i8', toUInt64(5), toInt8(42));").rstrip() - assert '5' == main_node.query("select dictGetOrDefault('experimental_dict', 'i16', toUInt64(5), toInt16(42));").rstrip() - assert '5' == main_node.query("select dictGetOrDefault('experimental_dict', 'i32', toUInt64(5), toInt32(42));").rstrip() - assert '5' == main_node.query("select dictGetOrDefault('experimental_dict', 'i64', toUInt64(5), toInt64(42));").rstrip() - assert '5' == main_node.query("select dictGetOrDefault('experimental_dict', 'u8', toUInt64(5), toUInt8(42));").rstrip() - assert '5' == main_node.query("select dictGetOrDefault('experimental_dict', 'u16', toUInt64(5), toUInt16(42));").rstrip() - assert '5' == main_node.query("select dictGetOrDefault('experimental_dict', 'u32', toUInt64(5), toUInt32(42));").rstrip() - assert '5' == main_node.query("select dictGetOrDefault('experimental_dict', 'u64', toUInt64(5), toUInt64(42));").rstrip() + assert ( + "5" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'i8', toUInt64(5), toInt8(42));" + ).rstrip() + ) + assert ( + "5" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'i16', toUInt64(5), toInt16(42));" + ).rstrip() + ) + assert ( + "5" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'i32', toUInt64(5), toInt32(42));" + ).rstrip() + ) + assert ( + "5" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'i64', toUInt64(5), toInt64(42));" + ).rstrip() + ) + assert ( + "5" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'u8', toUInt64(5), toUInt8(42));" + ).rstrip() + ) + assert ( + "5" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'u16', toUInt64(5), toUInt16(42));" + ).rstrip() + ) + assert ( + "5" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'u32', toUInt64(5), toUInt32(42));" + ).rstrip() + ) + assert ( + "5" + == main_node.query( + "select dictGetOrDefault('experimental_dict', 'u64', toUInt64(5), toUInt64(42));" + ).rstrip() + ) test_helper() diff --git a/tests/integration/test_dictionary_custom_settings/http_server.py b/tests/integration/test_dictionary_custom_settings/http_server.py index bd5ce22dbac..8683e98af9c 100644 --- a/tests/integration/test_dictionary_custom_settings/http_server.py +++ b/tests/integration/test_dictionary_custom_settings/http_server.py @@ -9,9 +9,14 @@ from http.server import BaseHTTPRequestHandler, HTTPServer # Decorator used to see if authentication works for external dictionary who use a HTTP source. 
def check_auth(fn): def wrapper(req): - auth_header = req.headers.get('authorization', None) - api_key = req.headers.get('api-key', None) - if not auth_header or auth_header != 'Basic Zm9vOmJhcg==' or not api_key or api_key != 'secret': + auth_header = req.headers.get("authorization", None) + api_key = req.headers.get("api-key", None) + if ( + not auth_header + or auth_header != "Basic Zm9vOmJhcg==" + or not api_key + or api_key != "secret" + ): req.send_response(401) else: fn(req) @@ -35,15 +40,15 @@ def start_server(server_address, data_path, schema, cert_path, address_family): def __send_headers(self): self.send_response(200) - self.send_header('Content-type', 'text/csv') + self.send_header("Content-type", "text/csv") self.end_headers() def __send_data(self, only_ids=None): - with open(data_path, 'r') as fl: - reader = csv.reader(fl, delimiter='\t') + with open(data_path, "r") as fl: + reader = csv.reader(fl, delimiter="\t") for row in reader: if not only_ids or (row[0] in only_ids): - self.wfile.write(('\t'.join(row) + '\n').encode()) + self.wfile.write(("\t".join(row) + "\n").encode()) def __read_and_decode_post_ids(self): data = self.__read_and_decode_post_data() @@ -69,19 +74,29 @@ def start_server(server_address, data_path, schema, cert_path, address_family): HTTPServer.address_family = socket.AF_INET6 httpd = HTTPServer(server_address, TSVHTTPHandler) if schema == "https": - httpd.socket = ssl.wrap_socket(httpd.socket, certfile=cert_path, server_side=True) + httpd.socket = ssl.wrap_socket( + httpd.socket, certfile=cert_path, server_side=True + ) httpd.serve_forever() if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Simple HTTP server returns data from file") + parser = argparse.ArgumentParser( + description="Simple HTTP server returns data from file" + ) parser.add_argument("--host", default="localhost") parser.add_argument("--port", default=5555, type=int) parser.add_argument("--data-path", required=True) parser.add_argument("--schema", choices=("http", "https"), required=True) parser.add_argument("--cert-path", default="./fake_cert.pem") - parser.add_argument('--address-family', choices=("ipv4", "ipv6"), default="ipv4") + parser.add_argument("--address-family", choices=("ipv4", "ipv6"), default="ipv4") args = parser.parse_args() - start_server((args.host, args.port), args.data_path, args.schema, args.cert_path, args.address_family) + start_server( + (args.host, args.port), + args.data_path, + args.schema, + args.cert_path, + args.address_family, + ) diff --git a/tests/integration/test_dictionary_custom_settings/test.py b/tests/integration/test_dictionary_custom_settings/test.py index 0d337e8c00e..715219ceb87 100644 --- a/tests/integration/test_dictionary_custom_settings/test.py +++ b/tests/integration/test_dictionary_custom_settings/test.py @@ -4,15 +4,15 @@ import pytest from helpers.cluster import ClickHouseCluster DICTIONARY_FILES = [ - 'configs/dictionaries/FileSourceConfig.xml', - 'configs/dictionaries/ExecutableSourceConfig.xml', - 'configs/dictionaries/source.csv', - 'configs/dictionaries/HTTPSourceConfig.xml', - 'configs/dictionaries/ClickHouseSourceConfig.xml' + "configs/dictionaries/FileSourceConfig.xml", + "configs/dictionaries/ExecutableSourceConfig.xml", + "configs/dictionaries/source.csv", + "configs/dictionaries/HTTPSourceConfig.xml", + "configs/dictionaries/ClickHouseSourceConfig.xml", ] cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('node', dictionaries=DICTIONARY_FILES) +instance = cluster.add_instance("node", 
dictionaries=DICTIONARY_FILES) def prepare(): @@ -20,14 +20,22 @@ def prepare(): path = "/source.csv" script_dir = os.path.dirname(os.path.realpath(__file__)) - node.copy_file_to_container(os.path.join(script_dir, './http_server.py'), '/http_server.py') - node.copy_file_to_container(os.path.join(script_dir, 'configs/dictionaries/source.csv'), './source.csv') - node.exec_in_container([ - "bash", - "-c", - "python3 /http_server.py --data-path={tbl} --schema=http --host=localhost --port=5555".format( - tbl=path) - ], detach=True) + node.copy_file_to_container( + os.path.join(script_dir, "./http_server.py"), "/http_server.py" + ) + node.copy_file_to_container( + os.path.join(script_dir, "configs/dictionaries/source.csv"), "./source.csv" + ) + node.exec_in_container( + [ + "bash", + "-c", + "python3 /http_server.py --data-path={tbl} --schema=http --host=localhost --port=5555".format( + tbl=path + ), + ], + detach=True, + ) @pytest.fixture(scope="module") @@ -45,20 +53,30 @@ def test_work(start_cluster): instance.query("SYSTEM RELOAD DICTIONARIES") - assert query("SELECT dictGetString('test_file', 'first', toUInt64(1))") == "\\\'a\n" - assert query("SELECT dictGetString('test_file', 'second', toUInt64(1))") == "\"b\n" - assert query("SELECT dictGetString('test_executable', 'first', toUInt64(1))") == "\\\'a\n" - assert query("SELECT dictGetString('test_executable', 'second', toUInt64(1))") == "\"b\n" + assert query("SELECT dictGetString('test_file', 'first', toUInt64(1))") == "\\'a\n" + assert query("SELECT dictGetString('test_file', 'second', toUInt64(1))") == '"b\n' + assert ( + query("SELECT dictGetString('test_executable', 'first', toUInt64(1))") + == "\\'a\n" + ) + assert ( + query("SELECT dictGetString('test_executable', 'second', toUInt64(1))") + == '"b\n' + ) - caught_exception = '' + caught_exception = "" try: - instance.query("CREATE TABLE source (id UInt64, first String, second String, third String) ENGINE=TinyLog;") - instance.query("INSERT INTO default.source VALUES (1, 'aaa', 'bbb', 'cccc'), (2, 'ddd', 'eee', 'fff')") + instance.query( + "CREATE TABLE source (id UInt64, first String, second String, third String) ENGINE=TinyLog;" + ) + instance.query( + "INSERT INTO default.source VALUES (1, 'aaa', 'bbb', 'cccc'), (2, 'ddd', 'eee', 'fff')" + ) instance.query("SELECT dictGetString('test_clickhouse', 'second', toUInt64(1))") except Exception as e: caught_exception = str(e) assert caught_exception.find("Limit for result exceeded") != -1 - assert query("SELECT dictGetString('test_http', 'first', toUInt64(1))") == "\\\'a\n" - assert query("SELECT dictGetString('test_http', 'second', toUInt64(1))") == "\"b\n" + assert query("SELECT dictGetString('test_http', 'first', toUInt64(1))") == "\\'a\n" + assert query("SELECT dictGetString('test_http', 'second', toUInt64(1))") == '"b\n' diff --git a/tests/integration/test_dictionary_ddl_on_cluster/test.py b/tests/integration/test_dictionary_ddl_on_cluster/test.py index feca1532974..dc9d31d75bd 100644 --- a/tests/integration/test_dictionary_ddl_on_cluster/test.py +++ b/tests/integration/test_dictionary_ddl_on_cluster/test.py @@ -3,14 +3,26 @@ from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -ch1 = cluster.add_instance('ch1', main_configs=["configs/config.d/clusters.xml", "configs/config.d/ddl.xml"], - with_zookeeper=True) -ch2 = cluster.add_instance('ch2', main_configs=["configs/config.d/clusters.xml", "configs/config.d/ddl.xml"], - with_zookeeper=True) -ch3 = 
cluster.add_instance('ch3', main_configs=["configs/config.d/clusters.xml", "configs/config.d/ddl.xml"], - with_zookeeper=True) -ch4 = cluster.add_instance('ch4', main_configs=["configs/config.d/clusters.xml", "configs/config.d/ddl.xml"], - with_zookeeper=True) +ch1 = cluster.add_instance( + "ch1", + main_configs=["configs/config.d/clusters.xml", "configs/config.d/ddl.xml"], + with_zookeeper=True, +) +ch2 = cluster.add_instance( + "ch2", + main_configs=["configs/config.d/clusters.xml", "configs/config.d/ddl.xml"], + with_zookeeper=True, +) +ch3 = cluster.add_instance( + "ch3", + main_configs=["configs/config.d/clusters.xml", "configs/config.d/ddl.xml"], + with_zookeeper=True, +) +ch4 = cluster.add_instance( + "ch4", + main_configs=["configs/config.d/clusters.xml", "configs/config.d/ddl.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -18,7 +30,8 @@ def started_cluster(): try: cluster.start() ch1.query( - "CREATE TABLE sometbl ON CLUSTER 'cluster' (key UInt64, value String) ENGINE = MergeTree ORDER by key") + "CREATE TABLE sometbl ON CLUSTER 'cluster' (key UInt64, value String) ENGINE = MergeTree ORDER by key" + ) yield cluster finally: @@ -42,12 +55,19 @@ def test_dictionary_ddl_on_cluster(started_cluster): LAYOUT(FLAT()) SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'sometbl' DB 'default')) LIFETIME(10) - """) + """ + ) for num, node in enumerate([ch1, ch2, ch3, ch4]): assert node.query("SELECT count() from sometbl") == "1\n" - assert node.query( - "SELECT dictGetString('default.somedict', 'value', toUInt64({}))".format(num)) == node.name + '\n' + assert ( + node.query( + "SELECT dictGetString('default.somedict', 'value', toUInt64({}))".format( + num + ) + ) + == node.name + "\n" + ) ch1.query("DETACH DICTIONARY default.somedict ON CLUSTER 'cluster'") @@ -59,8 +79,14 @@ def test_dictionary_ddl_on_cluster(started_cluster): for num, node in enumerate([ch1, ch2, ch3, ch4]): assert node.query("SELECT count() from sometbl") == "1\n" - assert node.query( - "SELECT dictGetString('default.somedict', 'value', toUInt64({}))".format(num)) == node.name + '\n' + assert ( + node.query( + "SELECT dictGetString('default.somedict', 'value', toUInt64({}))".format( + num + ) + ) + == node.name + "\n" + ) for num, node in enumerate([ch1, ch2, ch3, ch4]): node.query("ALTER TABLE sometbl UPDATE value = 'new_key' WHERE 1") @@ -68,8 +94,14 @@ def test_dictionary_ddl_on_cluster(started_cluster): ch1.query("SYSTEM RELOAD DICTIONARY ON CLUSTER 'cluster' `default.somedict`") for num, node in enumerate([ch1, ch2, ch3, ch4]): - assert node.query( - "SELECT dictGetString('default.somedict', 'value', toUInt64({}))".format(num)) == 'new_key' + '\n' + assert ( + node.query( + "SELECT dictGetString('default.somedict', 'value', toUInt64({}))".format( + num + ) + ) + == "new_key" + "\n" + ) ch1.query("DROP DICTIONARY default.somedict ON CLUSTER 'cluster'") diff --git a/tests/integration/test_disabled_mysql_server/test.py b/tests/integration/test_disabled_mysql_server/test.py index d7977404c73..6a4df3fc0b4 100644 --- a/tests/integration/test_disabled_mysql_server/test.py +++ b/tests/integration/test_disabled_mysql_server/test.py @@ -10,7 +10,10 @@ from helpers.cluster import ClickHouseCluster, get_docker_compose_path from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) -clickhouse_node = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_mysql=True) +clickhouse_node = cluster.add_instance( + "node1", 
main_configs=["configs/remote_servers.xml"], with_mysql=True +) + @pytest.fixture(scope="module") def started_cluster(): @@ -22,17 +25,22 @@ def started_cluster(): class MySQLNodeInstance: - def __init__(self, started_cluster, user='root', password='clickhouse'): + def __init__(self, started_cluster, user="root", password="clickhouse"): self.user = user self.port = cluster.mysql_port self.hostname = cluster.mysql_ip self.password = password - self.mysql_connection = None # lazy init + self.mysql_connection = None # lazy init def alloc_connection(self): if self.mysql_connection is None: - self.mysql_connection = pymysql.connect(user=self.user, password=self.password, host=self.hostname, - port=self.port, autocommit=True) + self.mysql_connection = pymysql.connect( + user=self.user, + password=self.password, + host=self.hostname, + port=self.port, + autocommit=True, + ) return self.mysql_connection def query(self, execution_query): @@ -48,12 +56,22 @@ def test_disabled_mysql_server(started_cluster): with contextlib.closing(MySQLNodeInstance(started_cluster)) as mysql_node: mysql_node.query("DROP DATABASE IF EXISTS test_db_disabled;") mysql_node.query("CREATE DATABASE test_db_disabled;") - mysql_node.query("CREATE TABLE test_db_disabled.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;") + mysql_node.query( + "CREATE TABLE test_db_disabled.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) with PartitionManager() as pm: - clickhouse_node.query("CREATE DATABASE test_db_disabled ENGINE = MySQL('mysql57:3306', 'test_db_disabled', 'root', 'clickhouse')") - - pm._add_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': 'DROP'}) + clickhouse_node.query( + "CREATE DATABASE test_db_disabled ENGINE = MySQL('mysql57:3306', 'test_db_disabled', 'root', 'clickhouse')" + ) + + pm._add_rule( + { + "source": clickhouse_node.ip_address, + "destination_port": 3306, + "action": "DROP", + } + ) clickhouse_node.query("SELECT * FROM system.parts") clickhouse_node.query("SELECT * FROM system.mutations") clickhouse_node.query("SELECT * FROM system.graphite_retentions") diff --git a/tests/integration/test_disk_access_storage/test.py b/tests/integration/test_disk_access_storage/test.py index ad31be4284a..273a00adffe 100644 --- a/tests/integration/test_disk_access_storage/test.py +++ b/tests/integration/test_disk_access_storage/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', stay_alive=True) +instance = cluster.add_instance("instance", stay_alive=True) @pytest.fixture(scope="module", autouse=True) @@ -16,13 +16,19 @@ def started_cluster(): def create_entities(): - instance.query("CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 123456789 MIN 100000000 MAX 200000000") + instance.query( + "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 123456789 MIN 100000000 MAX 200000000" + ) instance.query("CREATE USER u1 SETTINGS PROFILE s1") instance.query("CREATE ROLE rx SETTINGS PROFILE s1") instance.query("CREATE USER u2 IDENTIFIED BY 'qwerty' HOST LOCAL DEFAULT ROLE rx") instance.query("CREATE SETTINGS PROFILE s2 SETTINGS PROFILE s1 TO u2") - instance.query("CREATE ROW POLICY p ON mydb.mytable FOR SELECT USING a<1000 TO u1, u2") - instance.query("CREATE QUOTA q FOR INTERVAL 1 HOUR MAX QUERIES 100 TO ALL EXCEPT rx") + instance.query( + "CREATE ROW POLICY p ON mydb.mytable FOR SELECT USING a<1000 TO u1, u2" + ) + 
instance.query( + "CREATE QUOTA q FOR INTERVAL 1 HOUR MAX QUERIES 100 TO ALL EXCEPT rx" + ) @pytest.fixture(autouse=True) @@ -38,21 +44,37 @@ def test_create(): create_entities() def check(): - assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1 SETTINGS PROFILE s1\n" - assert instance.query( - "SHOW CREATE USER u2") == "CREATE USER u2 IDENTIFIED WITH sha256_password HOST LOCAL DEFAULT ROLE rx\n" - assert instance.query( - "SHOW CREATE ROW POLICY p ON mydb.mytable") == "CREATE ROW POLICY p ON mydb.mytable FOR SELECT USING a < 1000 TO u1, u2\n" - assert instance.query( - "SHOW CREATE QUOTA q") == "CREATE QUOTA q FOR INTERVAL 1 hour MAX queries = 100 TO ALL EXCEPT rx\n" + assert ( + instance.query("SHOW CREATE USER u1") + == "CREATE USER u1 SETTINGS PROFILE s1\n" + ) + assert ( + instance.query("SHOW CREATE USER u2") + == "CREATE USER u2 IDENTIFIED WITH sha256_password HOST LOCAL DEFAULT ROLE rx\n" + ) + assert ( + instance.query("SHOW CREATE ROW POLICY p ON mydb.mytable") + == "CREATE ROW POLICY p ON mydb.mytable FOR SELECT USING a < 1000 TO u1, u2\n" + ) + assert ( + instance.query("SHOW CREATE QUOTA q") + == "CREATE QUOTA q FOR INTERVAL 1 hour MAX queries = 100 TO ALL EXCEPT rx\n" + ) assert instance.query("SHOW GRANTS FOR u1") == "" assert instance.query("SHOW GRANTS FOR u2") == "GRANT rx TO u2\n" - assert instance.query("SHOW CREATE ROLE rx") == "CREATE ROLE rx SETTINGS PROFILE s1\n" + assert ( + instance.query("SHOW CREATE ROLE rx") + == "CREATE ROLE rx SETTINGS PROFILE s1\n" + ) assert instance.query("SHOW GRANTS FOR rx") == "" - assert instance.query( - "SHOW CREATE SETTINGS PROFILE s1") == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 123456789 MIN 100000000 MAX 200000000\n" - assert instance.query( - "SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2 SETTINGS INHERIT s1 TO u2\n" + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE s1") + == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 123456789 MIN 100000000 MAX 200000000\n" + ) + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE s2") + == "CREATE SETTINGS PROFILE s2 SETTINGS INHERIT s1 TO u2\n" + ) check() instance.restart_clickhouse() # Check persistency @@ -70,22 +92,44 @@ def test_alter(): instance.query("ALTER ROLE rx SETTINGS PROFILE s2") instance.query("GRANT SELECT ON mydb.mytable TO u1") instance.query("GRANT SELECT ON mydb.* TO rx WITH GRANT OPTION") - instance.query("ALTER SETTINGS PROFILE s1 SETTINGS max_memory_usage = 987654321 READONLY") + instance.query( + "ALTER SETTINGS PROFILE s1 SETTINGS max_memory_usage = 987654321 READONLY" + ) def check(): - assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1 SETTINGS PROFILE s1\n" - assert instance.query( - "SHOW CREATE USER u2") == "CREATE USER u2 IDENTIFIED WITH sha256_password HOST LOCAL DEFAULT ROLE ry\n" - assert instance.query("SHOW GRANTS FOR u1") == "GRANT SELECT ON mydb.mytable TO u1\n" + assert ( + instance.query("SHOW CREATE USER u1") + == "CREATE USER u1 SETTINGS PROFILE s1\n" + ) + assert ( + instance.query("SHOW CREATE USER u2") + == "CREATE USER u2 IDENTIFIED WITH sha256_password HOST LOCAL DEFAULT ROLE ry\n" + ) + assert ( + instance.query("SHOW GRANTS FOR u1") + == "GRANT SELECT ON mydb.mytable TO u1\n" + ) assert instance.query("SHOW GRANTS FOR u2") == "GRANT rx, ry TO u2\n" - assert instance.query("SHOW CREATE ROLE rx") == "CREATE ROLE rx SETTINGS PROFILE s2\n" + assert ( + instance.query("SHOW CREATE ROLE rx") + == "CREATE ROLE rx SETTINGS PROFILE s2\n" + ) assert instance.query("SHOW 
CREATE ROLE ry") == "CREATE ROLE ry\n" - assert instance.query("SHOW GRANTS FOR rx") == "GRANT SELECT ON mydb.* TO rx WITH GRANT OPTION\n" - assert instance.query("SHOW GRANTS FOR ry") == "GRANT rx TO ry WITH ADMIN OPTION\n" - assert instance.query( - "SHOW CREATE SETTINGS PROFILE s1") == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 987654321 READONLY\n" - assert instance.query( - "SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2 SETTINGS INHERIT s1 TO u2\n" + assert ( + instance.query("SHOW GRANTS FOR rx") + == "GRANT SELECT ON mydb.* TO rx WITH GRANT OPTION\n" + ) + assert ( + instance.query("SHOW GRANTS FOR ry") == "GRANT rx TO ry WITH ADMIN OPTION\n" + ) + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE s1") + == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 987654321 READONLY\n" + ) + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE s2") + == "CREATE SETTINGS PROFILE s2 SETTINGS INHERIT s1 TO u2\n" + ) check() instance.restart_clickhouse() # Check persistency @@ -104,11 +148,20 @@ def test_drop(): def check(): assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1\n" - assert instance.query("SHOW CREATE SETTINGS PROFILE s2") == "CREATE SETTINGS PROFILE s2\n" - assert "There is no user `u2`" in instance.query_and_get_error("SHOW CREATE USER u2") - assert "There is no row policy `p ON mydb.mytable`" in instance.query_and_get_error( - "SHOW CREATE ROW POLICY p ON mydb.mytable") - assert "There is no quota `q`" in instance.query_and_get_error("SHOW CREATE QUOTA q") + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE s2") + == "CREATE SETTINGS PROFILE s2\n" + ) + assert "There is no user `u2`" in instance.query_and_get_error( + "SHOW CREATE USER u2" + ) + assert ( + "There is no row policy `p ON mydb.mytable`" + in instance.query_and_get_error("SHOW CREATE ROW POLICY p ON mydb.mytable") + ) + assert "There is no quota `q`" in instance.query_and_get_error( + "SHOW CREATE QUOTA q" + ) check() instance.restart_clickhouse() # Check persistency diff --git a/tests/integration/test_disk_over_web_server/test.py b/tests/integration/test_disk_over_web_server/test.py index f80cccac1be..b82c35e617f 100644 --- a/tests/integration/test_disk_over_web_server/test.py +++ b/tests/integration/test_disk_over_web_server/test.py @@ -4,32 +4,59 @@ from helpers.cluster import ClickHouseCluster uuids = [] + @pytest.fixture(scope="module") def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node1", main_configs=["configs/storage_conf.xml"], with_nginx=True) - cluster.add_instance("node2", main_configs=["configs/storage_conf_web.xml"], with_nginx=True) - cluster.add_instance("node3", main_configs=["configs/storage_conf_web.xml"], with_nginx=True) + cluster.add_instance( + "node1", main_configs=["configs/storage_conf.xml"], with_nginx=True + ) + cluster.add_instance( + "node2", main_configs=["configs/storage_conf_web.xml"], with_nginx=True + ) + cluster.add_instance( + "node3", main_configs=["configs/storage_conf_web.xml"], with_nginx=True + ) cluster.start() node1 = cluster.instances["node1"] expected = "" global uuids for i in range(3): - node1.query(""" CREATE TABLE data{} (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'def';""".format(i)) - node1.query("INSERT INTO data{} SELECT number FROM numbers(500000 * {})".format(i, i + 1)) + node1.query( + """ CREATE TABLE data{} (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'def';""".format( + i + ) + ) + node1.query( + "INSERT 
INTO data{} SELECT number FROM numbers(500000 * {})".format( + i, i + 1 + ) + ) expected = node1.query("SELECT * FROM data{} ORDER BY id".format(i)) - metadata_path = node1.query("SELECT data_paths FROM system.tables WHERE name='data{}'".format(i)) - metadata_path = metadata_path[metadata_path.find('/'):metadata_path.rfind('/')+1] - print(f'Metadata: {metadata_path}') + metadata_path = node1.query( + "SELECT data_paths FROM system.tables WHERE name='data{}'".format(i) + ) + metadata_path = metadata_path[ + metadata_path.find("/") : metadata_path.rfind("/") + 1 + ] + print(f"Metadata: {metadata_path}") - node1.exec_in_container(['bash', '-c', - '/usr/bin/clickhouse static-files-disk-uploader --test-mode --url http://nginx:80/test1 --metadata-path {}'.format(metadata_path)], user='root') - parts = metadata_path.split('/') + node1.exec_in_container( + [ + "bash", + "-c", + "/usr/bin/clickhouse static-files-disk-uploader --test-mode --url http://nginx:80/test1 --metadata-path {}".format( + metadata_path + ), + ], + user="root", + ) + parts = metadata_path.split("/") uuids.append(parts[3]) - print(f'UUID: {parts[3]}') + print(f"UUID: {parts[3]}") yield cluster @@ -42,24 +69,40 @@ def test_usage(cluster, node_name): node1 = cluster.instances["node1"] node2 = cluster.instances[node_name] global uuids - assert(len(uuids) == 3) + assert len(uuids) == 3 for i in range(3): - node2.query(""" + node2.query( + """ ATTACH TABLE test{} UUID '{}' (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'web'; - """.format(i, uuids[i], i, i)) + """.format( + i, uuids[i], i, i + ) + ) result = node2.query("SELECT * FROM test{} settings max_threads=20".format(i)) result = node2.query("SELECT count() FROM test{}".format(i)) - assert(int(result) == 500000 * (i+1)) + assert int(result) == 500000 * (i + 1) - result = node2.query("SELECT id FROM test{} WHERE id % 56 = 3 ORDER BY id".format(i)) - assert(result == node1.query("SELECT id FROM data{} WHERE id % 56 = 3 ORDER BY id".format(i))) + result = node2.query( + "SELECT id FROM test{} WHERE id % 56 = 3 ORDER BY id".format(i) + ) + assert result == node1.query( + "SELECT id FROM data{} WHERE id % 56 = 3 ORDER BY id".format(i) + ) - result = node2.query("SELECT id FROM test{} WHERE id > 789999 AND id < 999999 ORDER BY id".format(i)) - assert(result == node1.query("SELECT id FROM data{} WHERE id > 789999 AND id < 999999 ORDER BY id".format(i))) + result = node2.query( + "SELECT id FROM test{} WHERE id > 789999 AND id < 999999 ORDER BY id".format( + i + ) + ) + assert result == node1.query( + "SELECT id FROM data{} WHERE id > 789999 AND id < 999999 ORDER BY id".format( + i + ) + ) node2.query("DROP TABLE test{}".format(i)) print(f"Ok {i}") @@ -69,19 +112,23 @@ def test_incorrect_usage(cluster): node1 = cluster.instances["node1"] node2 = cluster.instances["node3"] global uuids - node2.query(""" + node2.query( + """ ATTACH TABLE test0 UUID '{}' (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'web'; - """.format(uuids[0])) + """.format( + uuids[0] + ) + ) result = node2.query("SELECT count() FROM test0") - assert(int(result) == 500000) + assert int(result) == 500000 result = node2.query_and_get_error("ALTER TABLE test0 ADD COLUMN col1 Int32 first") - assert("Table is read-only" in result) + assert "Table is read-only" in result result = node2.query_and_get_error("TRUNCATE TABLE test0") - assert("Table is read-only" in result) + assert "Table is read-only" in result node2.query("DROP TABLE test0") diff --git 
a/tests/integration/test_disk_types/test.py b/tests/integration/test_disk_types/test.py index 35e900c3c9f..a26f80165e8 100644 --- a/tests/integration/test_disk_types/test.py +++ b/tests/integration/test_disk_types/test.py @@ -14,7 +14,12 @@ disk_types = { def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", main_configs=["configs/storage.xml"], with_minio=True, with_hdfs=True) + cluster.add_instance( + "node", + main_configs=["configs/storage.xml"], + with_minio=True, + with_hdfs=True, + ) cluster.start() yield cluster finally: @@ -26,7 +31,7 @@ def test_different_types(cluster): response = node.query("SELECT * FROM system.disks") disks = response.split("\n") for disk in disks: - if disk == '': # skip empty line (after split at last position) + if disk == "": # skip empty line (after split at last position) continue fields = disk.split("\t") assert len(fields) >= 6 @@ -36,5 +41,7 @@ def test_different_types(cluster): def test_select_by_type(cluster): node = cluster.instances["node"] for name, disk_type in list(disk_types.items()): - assert node.query("SELECT name FROM system.disks WHERE type='" + disk_type + "'") == name + "\n" - + assert ( + node.query("SELECT name FROM system.disks WHERE type='" + disk_type + "'") + == name + "\n" + ) diff --git a/tests/integration/test_distributed_backward_compatability/test.py b/tests/integration/test_distributed_backward_compatability/test.py index 0d36aaa23f4..cb51142d249 100644 --- a/tests/integration/test_distributed_backward_compatability/test.py +++ b/tests/integration/test_distributed_backward_compatability/test.py @@ -4,9 +4,19 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node_old = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], image='yandex/clickhouse-server', - tag='20.8.9.6', stay_alive=True, with_installed_binary=True) -node_new = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], user_configs=['configs/legacy.xml']) +node_old = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml"], + image="yandex/clickhouse-server", + tag="20.8.9.6", + stay_alive=True, + with_installed_binary=True, +) +node_new = cluster.add_instance( + "node2", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/legacy.xml"], +) @pytest.fixture(scope="module") @@ -15,15 +25,19 @@ def started_cluster(): cluster.start() for node in (node_old, node_new): - node.query("CREATE TABLE local_table(id UInt32, val String) ENGINE = MergeTree ORDER BY id") + node.query( + "CREATE TABLE local_table(id UInt32, val String) ENGINE = MergeTree ORDER BY id" + ) node_old.query("INSERT INTO local_table VALUES (1, 'node1')") node_new.query("INSERT INTO local_table VALUES (2, 'node2')") node_old.query( - "CREATE TABLE distributed(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table)") + "CREATE TABLE distributed(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table)" + ) node_new.query( - "CREATE TABLE distributed(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table)") + "CREATE TABLE distributed(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table)" + ) yield cluster @@ -33,13 +47,15 @@ def started_cluster(): def test_distributed_in_tuple(started_cluster): query1 = "SELECT count() FROM distributed WHERE (id, val) IN ((1, 'node1'), (2, 'a'), (3, 'b'))" - query2 = "SELECT sum((id, val) IN ((1, 'node1'), (2, 'a'), (3, 
'b'))) FROM distributed" + query2 = ( + "SELECT sum((id, val) IN ((1, 'node1'), (2, 'a'), (3, 'b'))) FROM distributed" + ) assert node_old.query(query1) == "1\n" assert node_old.query(query2) == "1\n" assert node_new.query(query1) == "1\n" assert node_new.query(query2) == "1\n" - large_set = '(' + ','.join([str(i) for i in range(1000)]) + ')' + large_set = "(" + ",".join([str(i) for i in range(1000)]) + ")" query3 = "SELECT count() FROM distributed WHERE id IN " + large_set query4 = "SELECT sum(id IN {}) FROM distributed".format(large_set) assert node_old.query(query3) == "2\n" diff --git a/tests/integration/test_distributed_ddl/cluster.py b/tests/integration/test_distributed_ddl/cluster.py index f90d15ebd08..9b2f6622ada 100644 --- a/tests/integration/test_distributed_ddl/cluster.py +++ b/tests/integration/test_distributed_ddl/cluster.py @@ -8,6 +8,7 @@ from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager from helpers.test_tools import TSV + class ClickHouseClusterWithDDLHelpers(ClickHouseCluster): def __init__(self, base_path, config_dir, testcase_name): ClickHouseCluster.__init__(self, base_path, name=testcase_name) @@ -16,49 +17,82 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster): def prepare(self, replace_hostnames_with_ips=True): try: - main_configs_files = ["clusters.xml", "zookeeper_session_timeout.xml", "macro.xml", "query_log.xml", - "ddl.xml"] - main_configs = [os.path.join(self.test_config_dir, "config.d", f) for f in main_configs_files] - user_configs = [os.path.join(self.test_config_dir, "users.d", f) for f in - ["restricted_user.xml", "query_log.xml"]] + main_configs_files = [ + "clusters.xml", + "zookeeper_session_timeout.xml", + "macro.xml", + "query_log.xml", + "ddl.xml", + ] + main_configs = [ + os.path.join(self.test_config_dir, "config.d", f) + for f in main_configs_files + ] + user_configs = [ + os.path.join(self.test_config_dir, "users.d", f) + for f in ["restricted_user.xml", "query_log.xml"] + ] if self.test_config_dir == "configs_secure": - main_configs += [os.path.join(self.test_config_dir, f) for f in - ["server.crt", "server.key", "dhparam.pem", "config.d/ssl_conf.xml"]] + main_configs += [ + os.path.join(self.test_config_dir, f) + for f in [ + "server.crt", + "server.key", + "dhparam.pem", + "config.d/ssl_conf.xml", + ] + ] for i in range(4): self.add_instance( - 'ch{}'.format(i + 1), + "ch{}".format(i + 1), main_configs=main_configs, user_configs=user_configs, macros={"layer": 0, "shard": i // 2 + 1, "replica": i % 2 + 1}, - with_zookeeper=True) + with_zookeeper=True, + ) self.start() # Replace config files for testing ability to set host in DNS and IP formats if replace_hostnames_with_ips: - self.replace_domains_to_ip_addresses_in_cluster_config(['ch1', 'ch3']) + self.replace_domains_to_ip_addresses_in_cluster_config(["ch1", "ch3"]) # Select sacrifice instance to test CONNECTION_LOSS and server fail on it - sacrifice = self.instances['ch4'] + sacrifice = self.instances["ch4"] self.pm_random_drops = PartitionManager() self.pm_random_drops._add_rule( - {'probability': 0.01, 'destination': sacrifice.ip_address, 'source_port': 2181, - 'action': 'REJECT --reject-with tcp-reset'}) + { + "probability": 0.01, + "destination": sacrifice.ip_address, + "source_port": 2181, + "action": "REJECT --reject-with tcp-reset", + } + ) self.pm_random_drops._add_rule( - {'probability': 0.01, 'source': sacrifice.ip_address, 'destination_port': 2181, - 'action': 'REJECT --reject-with tcp-reset'}) + { + "probability": 0.01, + "source": 
sacrifice.ip_address, + "destination_port": 2181, + "action": "REJECT --reject-with tcp-reset", + } + ) # Initialize databases and service tables - instance = self.instances['ch1'] + instance = self.instances["ch1"] - self.ddl_check_query(instance, """ + self.ddl_check_query( + instance, + """ CREATE TABLE IF NOT EXISTS all_tables ON CLUSTER 'cluster_no_replicas' (database String, name String, engine String, metadata_modification_time DateTime) ENGINE = Distributed('cluster_no_replicas', 'system', 'tables') - """) + """, + ) - self.ddl_check_query(instance, "CREATE DATABASE IF NOT EXISTS test ON CLUSTER 'cluster'") + self.ddl_check_query( + instance, "CREATE DATABASE IF NOT EXISTS test ON CLUSTER 'cluster'" + ) except Exception as e: print(e) @@ -77,7 +111,9 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster): codes = [l[2] for l in M] messages = [l[3] for l in M] - assert len(hosts) == num_hosts and len(set(hosts)) == num_hosts, "\n" + tsv_content + assert len(hosts) == num_hosts and len(set(hosts)) == num_hosts, ( + "\n" + tsv_content + ) assert len(set(codes)) == 1, "\n" + tsv_content assert codes[0] == "0", "\n" + tsv_content @@ -87,7 +123,11 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster): return contents def replace_domains_to_ip_addresses_in_cluster_config(self, instances_to_replace): - clusters_config = open(p.join(self.base_dir, '{}/config.d/clusters.xml'.format(self.test_config_dir))).read() + clusters_config = open( + p.join( + self.base_dir, "{}/config.d/clusters.xml".format(self.test_config_dir) + ) + ).read() for inst_name, inst in list(self.instances.items()): clusters_config = clusters_config.replace(inst_name, str(inst.ip_address)) @@ -95,16 +135,23 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster): for inst_name in instances_to_replace: inst = self.instances[inst_name] self.instances[inst_name].exec_in_container( - ['bash', '-c', 'echo "$NEW_CONFIG" > /etc/clickhouse-server/config.d/clusters.xml'], - environment={"NEW_CONFIG": clusters_config}, privileged=True) + [ + "bash", + "-c", + 'echo "$NEW_CONFIG" > /etc/clickhouse-server/config.d/clusters.xml', + ], + environment={"NEW_CONFIG": clusters_config}, + privileged=True, + ) # print cluster.instances[inst_name].exec_in_container(['cat', "/etc/clickhouse-server/config.d/clusters.xml"]) @staticmethod def ddl_check_there_are_no_dublicates(instance): query = "SELECT max(c), argMax(q, c) FROM (SELECT lower(query) AS q, count() AS c FROM system.query_log WHERE type=2 AND q LIKE '/* ddl_entry=query-%' GROUP BY query)" rows = instance.query(query) - assert len(rows) > 0 and rows[0][0] == "1", "dublicates on {} {}: {}".format(instance.name, - instance.ip_address, rows) + assert len(rows) > 0 and rows[0][0] == "1", "dublicates on {} {}: {}".format( + instance.name, instance.ip_address, rows + ) @staticmethod def insert_reliable(instance, query_insert): @@ -119,7 +166,10 @@ class ClickHouseClusterWithDDLHelpers(ClickHouseCluster): except Exception as e: last_exception = e s = str(e) - if not (s.find('Unknown status, client must retry') >= 0 or s.find('zkutil::KeeperException')): + if not ( + s.find("Unknown status, client must retry") >= 0 + or s.find("zkutil::KeeperException") + ): raise e raise last_exception diff --git a/tests/integration/test_distributed_ddl/test.py b/tests/integration/test_distributed_ddl/test.py index 18e091de1ec..9270efdd29b 100755 --- a/tests/integration/test_distributed_ddl/test.py +++ b/tests/integration/test_distributed_ddl/test.py @@ -21,9 +21,11 @@ def 
test_cluster(request): yield cluster - instance = cluster.instances['ch1'] + instance = cluster.instances["ch1"] cluster.ddl_check_query(instance, "DROP DATABASE test ON CLUSTER 'cluster'") - cluster.ddl_check_query(instance, "DROP DATABASE IF EXISTS test2 ON CLUSTER 'cluster'") + cluster.ddl_check_query( + instance, "DROP DATABASE IF EXISTS test2 ON CLUSTER 'cluster'" + ) # Check query log to ensure that DDL queries are not executed twice time.sleep(1.5) @@ -37,233 +39,379 @@ def test_cluster(request): def test_default_database(test_cluster): - instance = test_cluster.instances['ch3'] + instance = test_cluster.instances["ch3"] - test_cluster.ddl_check_query(instance, "CREATE DATABASE IF NOT EXISTS test2 ON CLUSTER 'cluster' FORMAT TSV") - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS null ON CLUSTER 'cluster' FORMAT TSV") - test_cluster.ddl_check_query(instance, - "CREATE TABLE null ON CLUSTER 'cluster2' (s String DEFAULT 'escape\t\nme') ENGINE = Null") + test_cluster.ddl_check_query( + instance, "CREATE DATABASE IF NOT EXISTS test2 ON CLUSTER 'cluster' FORMAT TSV" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS null ON CLUSTER 'cluster' FORMAT TSV" + ) + test_cluster.ddl_check_query( + instance, + "CREATE TABLE null ON CLUSTER 'cluster2' (s String DEFAULT 'escape\t\nme') ENGINE = Null", + ) - contents = instance.query("SELECT hostName() AS h, database FROM all_tables WHERE name = 'null' ORDER BY h") + contents = instance.query( + "SELECT hostName() AS h, database FROM all_tables WHERE name = 'null' ORDER BY h" + ) assert TSV(contents) == TSV("ch1\tdefault\nch2\ttest2\nch3\tdefault\nch4\ttest2\n") - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS null ON CLUSTER cluster2") - test_cluster.ddl_check_query(instance, "DROP DATABASE IF EXISTS test2 ON CLUSTER 'cluster'") + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS null ON CLUSTER cluster2" + ) + test_cluster.ddl_check_query( + instance, "DROP DATABASE IF EXISTS test2 ON CLUSTER 'cluster'" + ) def test_create_view(test_cluster): - instance = test_cluster.instances['ch3'] - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test.super_simple_view ON CLUSTER 'cluster'") - test_cluster.ddl_check_query(instance, - "CREATE VIEW test.super_simple_view ON CLUSTER 'cluster' AS SELECT * FROM system.numbers FORMAT TSV") - test_cluster.ddl_check_query(instance, - "CREATE MATERIALIZED VIEW test.simple_mat_view ON CLUSTER 'cluster' ENGINE = Memory AS SELECT * FROM system.numbers FORMAT TSV") - test_cluster.ddl_check_query(instance, "DROP TABLE test.simple_mat_view ON CLUSTER 'cluster' FORMAT TSV") - test_cluster.ddl_check_query(instance, - "DROP TABLE IF EXISTS test.super_simple_view2 ON CLUSTER 'cluster' FORMAT TSV") + instance = test_cluster.instances["ch3"] + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS test.super_simple_view ON CLUSTER 'cluster'" + ) + test_cluster.ddl_check_query( + instance, + "CREATE VIEW test.super_simple_view ON CLUSTER 'cluster' AS SELECT * FROM system.numbers FORMAT TSV", + ) + test_cluster.ddl_check_query( + instance, + "CREATE MATERIALIZED VIEW test.simple_mat_view ON CLUSTER 'cluster' ENGINE = Memory AS SELECT * FROM system.numbers FORMAT TSV", + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE test.simple_mat_view ON CLUSTER 'cluster' FORMAT TSV" + ) + test_cluster.ddl_check_query( + instance, + "DROP TABLE IF EXISTS test.super_simple_view2 ON CLUSTER 'cluster' FORMAT TSV", + ) - test_cluster.ddl_check_query(instance, 
- "CREATE TABLE test.super_simple ON CLUSTER 'cluster' (i Int8) ENGINE = Memory") - test_cluster.ddl_check_query(instance, - "RENAME TABLE test.super_simple TO test.super_simple2 ON CLUSTER 'cluster' FORMAT TSV") - test_cluster.ddl_check_query(instance, "DROP TABLE test.super_simple2 ON CLUSTER 'cluster'") + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test.super_simple ON CLUSTER 'cluster' (i Int8) ENGINE = Memory", + ) + test_cluster.ddl_check_query( + instance, + "RENAME TABLE test.super_simple TO test.super_simple2 ON CLUSTER 'cluster' FORMAT TSV", + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE test.super_simple2 ON CLUSTER 'cluster'" + ) def test_on_server_fail(test_cluster): - instance = test_cluster.instances['ch1'] - kill_instance = test_cluster.instances['ch2'] + instance = test_cluster.instances["ch1"] + kill_instance = test_cluster.instances["ch2"] - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test.test_server_fail ON CLUSTER 'cluster'") + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS test.test_server_fail ON CLUSTER 'cluster'" + ) kill_instance.get_docker_handle().stop() - request = instance.get_query_request("CREATE TABLE test.test_server_fail ON CLUSTER 'cluster' (i Int8) ENGINE=Null", - timeout=180) + request = instance.get_query_request( + "CREATE TABLE test.test_server_fail ON CLUSTER 'cluster' (i Int8) ENGINE=Null", + timeout=180, + ) kill_instance.get_docker_handle().start() - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test.__nope__ ON CLUSTER 'cluster'") + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS test.__nope__ ON CLUSTER 'cluster'" + ) # Check query itself test_cluster.check_all_hosts_successfully_executed(request.get_answer()) # And check query artefacts contents = instance.query( - "SELECT hostName() AS h FROM all_tables WHERE database='test' AND name='test_server_fail' ORDER BY h") + "SELECT hostName() AS h FROM all_tables WHERE database='test' AND name='test_server_fail' ORDER BY h" + ) assert TSV(contents) == TSV("ch1\nch2\nch3\nch4\n") - test_cluster.ddl_check_query(instance, "DROP TABLE test.test_server_fail ON CLUSTER 'cluster'") + test_cluster.ddl_check_query( + instance, "DROP TABLE test.test_server_fail ON CLUSTER 'cluster'" + ) def test_simple_alters(test_cluster): - instance = test_cluster.instances['ch2'] + instance = test_cluster.instances["ch2"] - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS merge ON CLUSTER '{cluster}'") - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_32 ON CLUSTER '{cluster}'") - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_64 ON CLUSTER '{cluster}'") + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS merge ON CLUSTER '{cluster}'" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS all_merge_32 ON CLUSTER '{cluster}'" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS all_merge_64 ON CLUSTER '{cluster}'" + ) - test_cluster.ddl_check_query(instance, """ + test_cluster.ddl_check_query( + instance, + """ CREATE TABLE IF NOT EXISTS merge ON CLUSTER '{cluster}' (p Date, i Int32) ENGINE = MergeTree(p, p, 1) -""") - test_cluster.ddl_check_query(instance, """ +""", + ) + test_cluster.ddl_check_query( + instance, + """ CREATE TABLE IF NOT EXISTS all_merge_32 ON CLUSTER '{cluster}' (p Date, i Int32) ENGINE = Distributed('{cluster}', default, merge, i) -""") - test_cluster.ddl_check_query(instance, """ +""", + ) + 
test_cluster.ddl_check_query( + instance, + """ CREATE TABLE IF NOT EXISTS all_merge_64 ON CLUSTER '{cluster}' (p Date, i Int64, s String) ENGINE = Distributed('{cluster}', default, merge, i) -""") +""", + ) for i in range(0, 4, 2): k = (i / 2) * 2 - test_cluster.instances['ch{}'.format(i + 1)].query("INSERT INTO merge (i) VALUES ({})({})".format(k, k + 1)) + test_cluster.instances["ch{}".format(i + 1)].query( + "INSERT INTO merge (i) VALUES ({})({})".format(k, k + 1) + ) assert TSV(instance.query("SELECT i FROM all_merge_32 ORDER BY i")) == TSV( - ''.join(['{}\n'.format(x) for x in range(4)])) + "".join(["{}\n".format(x) for x in range(4)]) + ) time.sleep(5) - test_cluster.ddl_check_query(instance, "ALTER TABLE merge ON CLUSTER '{cluster}' MODIFY COLUMN i Int64") + test_cluster.ddl_check_query( + instance, "ALTER TABLE merge ON CLUSTER '{cluster}' MODIFY COLUMN i Int64" + ) time.sleep(5) - test_cluster.ddl_check_query(instance, - "ALTER TABLE merge ON CLUSTER '{cluster}' ADD COLUMN s String DEFAULT toString(i) FORMAT TSV") + test_cluster.ddl_check_query( + instance, + "ALTER TABLE merge ON CLUSTER '{cluster}' ADD COLUMN s String DEFAULT toString(i) FORMAT TSV", + ) assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV( - ''.join(['{}\t{}\n'.format(x, x) for x in range(4)])) + "".join(["{}\t{}\n".format(x, x) for x in range(4)]) + ) for i in range(0, 4, 2): k = (i / 2) * 2 + 4 - test_cluster.instances['ch{}'.format(i + 1)].query( - "INSERT INTO merge (p, i) VALUES (31, {})(31, {})".format(k, k + 1)) + test_cluster.instances["ch{}".format(i + 1)].query( + "INSERT INTO merge (p, i) VALUES (31, {})(31, {})".format(k, k + 1) + ) assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV( - ''.join(['{}\t{}\n'.format(x, x) for x in range(8)])) + "".join(["{}\t{}\n".format(x, x) for x in range(8)]) + ) - test_cluster.ddl_check_query(instance, "ALTER TABLE merge ON CLUSTER '{cluster}' DETACH PARTITION 197002") + test_cluster.ddl_check_query( + instance, "ALTER TABLE merge ON CLUSTER '{cluster}' DETACH PARTITION 197002" + ) assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV( - ''.join(['{}\t{}\n'.format(x, x) for x in range(4)])) + "".join(["{}\t{}\n".format(x, x) for x in range(4)]) + ) test_cluster.ddl_check_query(instance, "DROP TABLE merge ON CLUSTER '{cluster}'") - test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_32 ON CLUSTER '{cluster}'") - test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_64 ON CLUSTER '{cluster}'") + test_cluster.ddl_check_query( + instance, "DROP TABLE all_merge_32 ON CLUSTER '{cluster}'" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE all_merge_64 ON CLUSTER '{cluster}'" + ) def test_macro(test_cluster): - instance = test_cluster.instances['ch2'] - test_cluster.ddl_check_query(instance, "CREATE TABLE tab ON CLUSTER '{cluster}' (value UInt8) ENGINE = Memory") + instance = test_cluster.instances["ch2"] + test_cluster.ddl_check_query( + instance, + "CREATE TABLE tab ON CLUSTER '{cluster}' (value UInt8) ENGINE = Memory", + ) for i in range(4): - test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)], - "INSERT INTO tab VALUES ({})".format(i)) + test_cluster.insert_reliable( + test_cluster.instances["ch{}".format(i + 1)], + "INSERT INTO tab VALUES ({})".format(i), + ) - test_cluster.ddl_check_query(instance, - "CREATE TABLE distr ON CLUSTER '{cluster}' (value UInt8) ENGINE = Distributed('{cluster}', 'default', 'tab', value % 4)") + 
test_cluster.ddl_check_query( + instance, + "CREATE TABLE distr ON CLUSTER '{cluster}' (value UInt8) ENGINE = Distributed('{cluster}', 'default', 'tab', value % 4)", + ) - assert TSV(instance.query("SELECT value FROM distr ORDER BY value")) == TSV('0\n1\n2\n3\n') - assert TSV(test_cluster.instances['ch3'].query("SELECT value FROM distr ORDER BY value")) == TSV('0\n1\n2\n3\n') + assert TSV(instance.query("SELECT value FROM distr ORDER BY value")) == TSV( + "0\n1\n2\n3\n" + ) + assert TSV( + test_cluster.instances["ch3"].query("SELECT value FROM distr ORDER BY value") + ) == TSV("0\n1\n2\n3\n") - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS distr ON CLUSTER '{cluster}'") - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS tab ON CLUSTER '{cluster}'") + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS distr ON CLUSTER '{cluster}'" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS tab ON CLUSTER '{cluster}'" + ) def test_implicit_macros(test_cluster): # Temporarily disable random ZK packet drops, they might broke creation if ReplicatedMergeTree replicas firewall_drops_rules = test_cluster.pm_random_drops.pop_rules() - instance = test_cluster.instances['ch2'] + instance = test_cluster.instances["ch2"] - test_cluster.ddl_check_query(instance, "DROP DATABASE IF EXISTS test_db ON CLUSTER '{cluster}' SYNC") - test_cluster.ddl_check_query(instance, "CREATE DATABASE IF NOT EXISTS test_db ON CLUSTER '{cluster}'") + test_cluster.ddl_check_query( + instance, "DROP DATABASE IF EXISTS test_db ON CLUSTER '{cluster}' SYNC" + ) + test_cluster.ddl_check_query( + instance, "CREATE DATABASE IF NOT EXISTS test_db ON CLUSTER '{cluster}'" + ) - test_cluster.ddl_check_query(instance, """ + test_cluster.ddl_check_query( + instance, + """ CREATE TABLE IF NOT EXISTS test_db.test_macro ON CLUSTER '{cluster}' (p Date, i Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/{layer}-{shard}/{table}', '{replica}', p, p, 1) -""") +""", + ) # Check that table was created at correct path in zookeeper - assert test_cluster.get_kazoo_client('zoo1').exists('/clickhouse/tables/test_db/0-1/test_macro') is not None + assert ( + test_cluster.get_kazoo_client("zoo1").exists( + "/clickhouse/tables/test_db/0-1/test_macro" + ) + is not None + ) # Enable random ZK packet drops test_cluster.pm_random_drops.push_rules(firewall_drops_rules) def test_allowed_databases(test_cluster): - instance = test_cluster.instances['ch2'] + instance = test_cluster.instances["ch2"] instance.query("CREATE DATABASE IF NOT EXISTS db1 ON CLUSTER cluster") instance.query("CREATE DATABASE IF NOT EXISTS db2 ON CLUSTER cluster") - instance.query("CREATE TABLE db1.t1 ON CLUSTER cluster (i Int8) ENGINE = Memory", - settings={"user": "restricted_user"}) + instance.query( + "CREATE TABLE db1.t1 ON CLUSTER cluster (i Int8) ENGINE = Memory", + settings={"user": "restricted_user"}, + ) with pytest.raises(Exception): - instance.query("CREATE TABLE db2.t2 ON CLUSTER cluster (i Int8) ENGINE = Memory", - settings={"user": "restricted_user"}) + instance.query( + "CREATE TABLE db2.t2 ON CLUSTER cluster (i Int8) ENGINE = Memory", + settings={"user": "restricted_user"}, + ) with pytest.raises(Exception): - instance.query("CREATE TABLE t3 ON CLUSTER cluster (i Int8) ENGINE = Memory", - settings={"user": "restricted_user"}) + instance.query( + "CREATE TABLE t3 ON CLUSTER cluster (i Int8) ENGINE = Memory", + settings={"user": "restricted_user"}, + ) with pytest.raises(Exception): - instance.query("DROP 
DATABASE db2 ON CLUSTER cluster", settings={"user": "restricted_user"}) + instance.query( + "DROP DATABASE db2 ON CLUSTER cluster", settings={"user": "restricted_user"} + ) - instance.query("DROP DATABASE db1 ON CLUSTER cluster", settings={"user": "restricted_user"}) + instance.query( + "DROP DATABASE db1 ON CLUSTER cluster", settings={"user": "restricted_user"} + ) def test_kill_query(test_cluster): - instance = test_cluster.instances['ch3'] + instance = test_cluster.instances["ch3"] - test_cluster.ddl_check_query(instance, "KILL QUERY ON CLUSTER 'cluster' WHERE NOT elapsed FORMAT TSV") + test_cluster.ddl_check_query( + instance, "KILL QUERY ON CLUSTER 'cluster' WHERE NOT elapsed FORMAT TSV" + ) def test_detach_query(test_cluster): - instance = test_cluster.instances['ch3'] + instance = test_cluster.instances["ch3"] - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test_attach ON CLUSTER cluster FORMAT TSV") - test_cluster.ddl_check_query(instance, "CREATE TABLE test_attach ON CLUSTER cluster (i Int8)ENGINE = Log") - test_cluster.ddl_check_query(instance, "DETACH TABLE test_attach ON CLUSTER cluster FORMAT TSV") - test_cluster.ddl_check_query(instance, "ATTACH TABLE test_attach ON CLUSTER cluster") + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS test_attach ON CLUSTER cluster FORMAT TSV" + ) + test_cluster.ddl_check_query( + instance, "CREATE TABLE test_attach ON CLUSTER cluster (i Int8)ENGINE = Log" + ) + test_cluster.ddl_check_query( + instance, "DETACH TABLE test_attach ON CLUSTER cluster FORMAT TSV" + ) + test_cluster.ddl_check_query( + instance, "ATTACH TABLE test_attach ON CLUSTER cluster" + ) def test_optimize_query(test_cluster): - instance = test_cluster.instances['ch3'] + instance = test_cluster.instances["ch3"] - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test_optimize ON CLUSTER cluster FORMAT TSV") - test_cluster.ddl_check_query(instance, - "CREATE TABLE test_optimize ON CLUSTER cluster (p Date, i Int32) ENGINE = MergeTree(p, p, 8192)") - test_cluster.ddl_check_query(instance, "OPTIMIZE TABLE test_optimize ON CLUSTER cluster FORMAT TSV") + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS test_optimize ON CLUSTER cluster FORMAT TSV" + ) + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test_optimize ON CLUSTER cluster (p Date, i Int32) ENGINE = MergeTree(p, p, 8192)", + ) + test_cluster.ddl_check_query( + instance, "OPTIMIZE TABLE test_optimize ON CLUSTER cluster FORMAT TSV" + ) def test_create_as_select(test_cluster): - instance = test_cluster.instances['ch2'] - test_cluster.ddl_check_query(instance, - "CREATE TABLE test_as_select ON CLUSTER cluster ENGINE = Memory AS (SELECT 1 AS x UNION ALL SELECT 2 AS x)") - assert TSV(instance.query("SELECT x FROM test_as_select ORDER BY x")) == TSV("1\n2\n") - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test_as_select ON CLUSTER cluster") + instance = test_cluster.instances["ch2"] + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test_as_select ON CLUSTER cluster ENGINE = Memory AS (SELECT 1 AS x UNION ALL SELECT 2 AS x)", + ) + assert TSV(instance.query("SELECT x FROM test_as_select ORDER BY x")) == TSV( + "1\n2\n" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS test_as_select ON CLUSTER cluster" + ) def test_create_reserved(test_cluster): - instance = test_cluster.instances['ch2'] - test_cluster.ddl_check_query(instance, - "CREATE TABLE test_reserved ON CLUSTER cluster (`p` Date, `image` Nullable(String), `index` 
Nullable(Float64), `invalidate` Nullable(Int64)) ENGINE = MergeTree(`p`, `p`, 8192)") - test_cluster.ddl_check_query(instance, - "CREATE TABLE test_as_reserved ON CLUSTER cluster ENGINE = Memory AS (SELECT * from test_reserved)") - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test_reserved ON CLUSTER cluster") - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test_as_reserved ON CLUSTER cluster") + instance = test_cluster.instances["ch2"] + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test_reserved ON CLUSTER cluster (`p` Date, `image` Nullable(String), `index` Nullable(Float64), `invalidate` Nullable(Int64)) ENGINE = MergeTree(`p`, `p`, 8192)", + ) + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test_as_reserved ON CLUSTER cluster ENGINE = Memory AS (SELECT * from test_reserved)", + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS test_reserved ON CLUSTER cluster" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS test_as_reserved ON CLUSTER cluster" + ) def test_rename(test_cluster): - instance = test_cluster.instances['ch1'] + instance = test_cluster.instances["ch1"] rules = test_cluster.pm_random_drops.pop_rules() - test_cluster.ddl_check_query(instance, - "DROP TABLE IF EXISTS rename_shard ON CLUSTER cluster SYNC") - test_cluster.ddl_check_query(instance, - "DROP TABLE IF EXISTS rename_new ON CLUSTER cluster SYNC") - test_cluster.ddl_check_query(instance, - "DROP TABLE IF EXISTS rename_old ON CLUSTER cluster SYNC") - test_cluster.ddl_check_query(instance, - "DROP TABLE IF EXISTS rename ON CLUSTER cluster SYNC") + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS rename_shard ON CLUSTER cluster SYNC" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS rename_new ON CLUSTER cluster SYNC" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS rename_old ON CLUSTER cluster SYNC" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS rename ON CLUSTER cluster SYNC" + ) - test_cluster.ddl_check_query(instance, - "CREATE TABLE rename_shard ON CLUSTER cluster (id Int64, sid String DEFAULT concat('old', toString(id))) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/staging/test_shard', '{replica}') ORDER BY (id)") - test_cluster.ddl_check_query(instance, - "CREATE TABLE rename_new ON CLUSTER cluster AS rename_shard ENGINE = Distributed(cluster, default, rename_shard, id % 2)") - test_cluster.ddl_check_query(instance, "RENAME TABLE rename_new TO rename ON CLUSTER cluster;") + test_cluster.ddl_check_query( + instance, + "CREATE TABLE rename_shard ON CLUSTER cluster (id Int64, sid String DEFAULT concat('old', toString(id))) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/staging/test_shard', '{replica}') ORDER BY (id)", + ) + test_cluster.ddl_check_query( + instance, + "CREATE TABLE rename_new ON CLUSTER cluster AS rename_shard ENGINE = Distributed(cluster, default, rename_shard, id % 2)", + ) + test_cluster.ddl_check_query( + instance, "RENAME TABLE rename_new TO rename ON CLUSTER cluster;" + ) for i in range(10): instance.query("insert into rename (id) values ({})".format(i)) @@ -275,87 +423,165 @@ def test_rename(test_cluster): # because path of lock in zk contains shard name, which is list of host names of replicas instance.query( "ALTER TABLE rename_shard ON CLUSTER cluster MODIFY COLUMN sid String DEFAULT concat('new', toString(id))", - ignore_error=True) + ignore_error=True, + ) time.sleep(1) - 
test_cluster.ddl_check_query(instance, - "CREATE TABLE rename_new ON CLUSTER cluster AS rename_shard ENGINE = Distributed(cluster, default, rename_shard, id % 2)") + test_cluster.ddl_check_query( + instance, + "CREATE TABLE rename_new ON CLUSTER cluster AS rename_shard ENGINE = Distributed(cluster, default, rename_shard, id % 2)", + ) instance.query("system stop distributed sends rename") for i in range(10, 20): instance.query("insert into rename (id) values ({})".format(i)) - test_cluster.ddl_check_query(instance, "RENAME TABLE rename TO rename_old, rename_new TO rename ON CLUSTER cluster") + test_cluster.ddl_check_query( + instance, + "RENAME TABLE rename TO rename_old, rename_new TO rename ON CLUSTER cluster", + ) for i in range(20, 30): instance.query("insert into rename (id) values ({})".format(i)) instance.query("system flush distributed rename") - for name in ['ch1', 'ch2', 'ch3', 'ch4']: + for name in ["ch1", "ch2", "ch3", "ch4"]: test_cluster.instances[name].query("system sync replica rename_shard") # system stop distributed sends does not affect inserts into local shard, # so some ids in range (10, 20) will be inserted into rename_shard assert instance.query("select count(id), sum(id) from rename").rstrip() == "25\t360" # assert instance.query("select count(id), sum(id) from rename").rstrip() == "20\t290" - assert instance.query("select count(id), sum(id) from rename where sid like 'old%'").rstrip() == "15\t115" + assert ( + instance.query( + "select count(id), sum(id) from rename where sid like 'old%'" + ).rstrip() + == "15\t115" + ) # assert instance.query("select count(id), sum(id) from rename where sid like 'old%'").rstrip() == "10\t45" - assert instance.query("select count(id), sum(id) from rename where sid like 'new%'").rstrip() == "10\t245" + assert ( + instance.query( + "select count(id), sum(id) from rename where sid like 'new%'" + ).rstrip() + == "10\t245" + ) test_cluster.pm_random_drops.push_rules(rules) def test_socket_timeout(test_cluster): - instance = test_cluster.instances['ch1'] + instance = test_cluster.instances["ch1"] # queries should not fail with "Timeout exceeded while reading from socket" in case of EINTR caused by query profiler for i in range(0, 100): - instance.query("select hostName() as host, count() from cluster('cluster', 'system', 'settings') group by host") + instance.query( + "select hostName() as host, count() from cluster('cluster', 'system', 'settings') group by host" + ) def test_replicated_without_arguments(test_cluster): rules = test_cluster.pm_random_drops.pop_rules() - instance = test_cluster.instances['ch1'] - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test_atomic.rmt ON CLUSTER cluster SYNC") - test_cluster.ddl_check_query(instance, "DROP DATABASE IF EXISTS test_atomic ON CLUSTER cluster SYNC") + instance = test_cluster.instances["ch1"] + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS test_atomic.rmt ON CLUSTER cluster SYNC" + ) + test_cluster.ddl_check_query( + instance, "DROP DATABASE IF EXISTS test_atomic ON CLUSTER cluster SYNC" + ) - test_cluster.ddl_check_query(instance, "CREATE DATABASE test_atomic ON CLUSTER cluster ENGINE=Atomic") - assert "are supported only for ON CLUSTER queries with Atomic database engine" in \ - instance.query_and_get_error("CREATE TABLE test_atomic.rmt (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n") - test_cluster.ddl_check_query(instance, - "CREATE TABLE test_atomic.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree() ORDER BY n") - 
test_cluster.ddl_check_query(instance, "DROP TABLE test_atomic.rmt ON CLUSTER cluster SYNC") - test_cluster.ddl_check_query(instance, - "CREATE TABLE test_atomic.rmt UUID '12345678-0000-4000-8000-000000000001' ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n") - assert instance.query("SHOW CREATE test_atomic.rmt FORMAT TSVRaw") == \ - "CREATE TABLE test_atomic.rmt\n(\n `n` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree('/clickhouse/tables/12345678-0000-4000-8000-000000000001/{shard}', '{replica}')\nORDER BY n\nSETTINGS index_granularity = 8192\n" - test_cluster.ddl_check_query(instance, "RENAME TABLE test_atomic.rmt TO test_atomic.rmt_renamed ON CLUSTER cluster") - test_cluster.ddl_check_query(instance, - "CREATE TABLE test_atomic.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}') ORDER BY n") - test_cluster.ddl_check_query(instance, - "EXCHANGE TABLES test_atomic.rmt AND test_atomic.rmt_renamed ON CLUSTER cluster") - assert instance.query("SELECT countDistinct(uuid) from clusterAllReplicas('cluster', 'system', 'databases') WHERE uuid != '00000000-0000-0000-0000-000000000000' AND name='test_atomic'") == "1\n" - assert instance.query("SELECT countDistinct(uuid) from clusterAllReplicas('cluster', 'system', 'tables') WHERE uuid != '00000000-0000-0000-0000-000000000000' AND name='rmt'") == "1\n" - test_cluster.ddl_check_query(instance, - "CREATE TABLE test_atomic.rrmt ON CLUSTER cluster (n UInt64, m UInt64) ENGINE=ReplicatedReplacingMergeTree(m) ORDER BY n") - test_cluster.ddl_check_query(instance, - "CREATE TABLE test_atomic.rsmt ON CLUSTER cluster (n UInt64, m UInt64, k UInt64) ENGINE=ReplicatedSummingMergeTree((m, k)) ORDER BY n") - test_cluster.ddl_check_query(instance, - "CREATE TABLE test_atomic.rvcmt ON CLUSTER cluster (n UInt64, m Int8, k UInt64) ENGINE=ReplicatedVersionedCollapsingMergeTree(m, k) ORDER BY n") - test_cluster.ddl_check_query(instance, "DROP DATABASE test_atomic ON CLUSTER cluster SYNC") + test_cluster.ddl_check_query( + instance, "CREATE DATABASE test_atomic ON CLUSTER cluster ENGINE=Atomic" + ) + assert ( + "are supported only for ON CLUSTER queries with Atomic database engine" + in instance.query_and_get_error( + "CREATE TABLE test_atomic.rmt (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n" + ) + ) + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test_atomic.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree() ORDER BY n", + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE test_atomic.rmt ON CLUSTER cluster SYNC" + ) + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test_atomic.rmt UUID '12345678-0000-4000-8000-000000000001' ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n", + ) + assert ( + instance.query("SHOW CREATE test_atomic.rmt FORMAT TSVRaw") + == "CREATE TABLE test_atomic.rmt\n(\n `n` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree('/clickhouse/tables/12345678-0000-4000-8000-000000000001/{shard}', '{replica}')\nORDER BY n\nSETTINGS index_granularity = 8192\n" + ) + test_cluster.ddl_check_query( + instance, + "RENAME TABLE test_atomic.rmt TO test_atomic.rmt_renamed ON CLUSTER cluster", + ) + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test_atomic.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}') ORDER BY n", + ) + test_cluster.ddl_check_query( + instance, + "EXCHANGE TABLES 
test_atomic.rmt AND test_atomic.rmt_renamed ON CLUSTER cluster", + ) + assert ( + instance.query( + "SELECT countDistinct(uuid) from clusterAllReplicas('cluster', 'system', 'databases') WHERE uuid != '00000000-0000-0000-0000-000000000000' AND name='test_atomic'" + ) + == "1\n" + ) + assert ( + instance.query( + "SELECT countDistinct(uuid) from clusterAllReplicas('cluster', 'system', 'tables') WHERE uuid != '00000000-0000-0000-0000-000000000000' AND name='rmt'" + ) + == "1\n" + ) + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test_atomic.rrmt ON CLUSTER cluster (n UInt64, m UInt64) ENGINE=ReplicatedReplacingMergeTree(m) ORDER BY n", + ) + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test_atomic.rsmt ON CLUSTER cluster (n UInt64, m UInt64, k UInt64) ENGINE=ReplicatedSummingMergeTree((m, k)) ORDER BY n", + ) + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test_atomic.rvcmt ON CLUSTER cluster (n UInt64, m Int8, k UInt64) ENGINE=ReplicatedVersionedCollapsingMergeTree(m, k) ORDER BY n", + ) + test_cluster.ddl_check_query( + instance, "DROP DATABASE test_atomic ON CLUSTER cluster SYNC" + ) - test_cluster.ddl_check_query(instance, "CREATE DATABASE test_ordinary ON CLUSTER cluster ENGINE=Ordinary") - assert "are supported only for ON CLUSTER queries with Atomic database engine" in \ - instance.query_and_get_error("CREATE TABLE test_ordinary.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n") - assert "are supported only for ON CLUSTER queries with Atomic database engine" in \ - instance.query_and_get_error("CREATE TABLE test_ordinary.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{uuid}/', '{replica}') ORDER BY n") - test_cluster.ddl_check_query(instance, "CREATE TABLE test_ordinary.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{table}/', '{replica}') ORDER BY n") - assert instance.query("SHOW CREATE test_ordinary.rmt FORMAT TSVRaw") == \ - "CREATE TABLE test_ordinary.rmt\n(\n `n` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree('/{shard}/rmt/', '{replica}')\nORDER BY n\nSETTINGS index_granularity = 8192\n" - test_cluster.ddl_check_query(instance, "DROP DATABASE test_ordinary ON CLUSTER cluster SYNC") + test_cluster.ddl_check_query( + instance, "CREATE DATABASE test_ordinary ON CLUSTER cluster ENGINE=Ordinary" + ) + assert ( + "are supported only for ON CLUSTER queries with Atomic database engine" + in instance.query_and_get_error( + "CREATE TABLE test_ordinary.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n" + ) + ) + assert ( + "are supported only for ON CLUSTER queries with Atomic database engine" + in instance.query_and_get_error( + "CREATE TABLE test_ordinary.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{uuid}/', '{replica}') ORDER BY n" + ) + ) + test_cluster.ddl_check_query( + instance, + "CREATE TABLE test_ordinary.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{table}/', '{replica}') ORDER BY n", + ) + assert ( + instance.query("SHOW CREATE test_ordinary.rmt FORMAT TSVRaw") + == "CREATE TABLE test_ordinary.rmt\n(\n `n` UInt64,\n `s` String\n)\nENGINE = ReplicatedMergeTree('/{shard}/rmt/', '{replica}')\nORDER BY n\nSETTINGS index_granularity = 8192\n" + ) + test_cluster.ddl_check_query( + instance, "DROP DATABASE test_ordinary ON CLUSTER cluster SYNC" + ) test_cluster.pm_random_drops.push_rules(rules) -if __name__ == '__main__': +if __name__ == 
"__main__": with contextmanager(test_cluster)() as ctx_cluster: for name, instance in list(ctx_cluster.instances.items()): print(name, instance.ip_address) diff --git a/tests/integration/test_distributed_ddl/test_replicated_alter.py b/tests/integration/test_distributed_ddl/test_replicated_alter.py index 5e7989cb256..08d2c1da278 100644 --- a/tests/integration/test_distributed_ddl/test_replicated_alter.py +++ b/tests/integration/test_distributed_ddl/test_replicated_alter.py @@ -12,7 +12,9 @@ from .cluster import ClickHouseClusterWithDDLHelpers @pytest.fixture(scope="module", params=["configs", "configs_secure"]) def test_cluster(request): - cluster = ClickHouseClusterWithDDLHelpers(__file__, request.param, "alters_" + request.param) + cluster = ClickHouseClusterWithDDLHelpers( + __file__, request.param, "alters_" + request.param + ) try: # TODO: Fix ON CLUSTER alters when nodes have different configs. Need to canonicalize node identity. @@ -20,9 +22,11 @@ def test_cluster(request): yield cluster - instance = cluster.instances['ch1'] + instance = cluster.instances["ch1"] cluster.ddl_check_query(instance, "DROP DATABASE test ON CLUSTER 'cluster'") - cluster.ddl_check_query(instance, "DROP DATABASE IF EXISTS test2 ON CLUSTER 'cluster'") + cluster.ddl_check_query( + instance, "DROP DATABASE IF EXISTS test2 ON CLUSTER 'cluster'" + ) # Check query log to ensure that DDL queries are not executed twice time.sleep(1.5) @@ -36,64 +40,102 @@ def test_cluster(request): def test_replicated_alters(test_cluster): - instance = test_cluster.instances['ch2'] + instance = test_cluster.instances["ch2"] - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS merge_for_alter ON CLUSTER cluster SYNC") - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_32 ON CLUSTER cluster SYNC") - test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_64 ON CLUSTER cluster SYNC") + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS merge_for_alter ON CLUSTER cluster SYNC" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS all_merge_32 ON CLUSTER cluster SYNC" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE IF EXISTS all_merge_64 ON CLUSTER cluster SYNC" + ) # Temporarily disable random ZK packet drops, they might broke creation if ReplicatedMergeTree replicas firewall_drops_rules = test_cluster.pm_random_drops.pop_rules() - test_cluster.ddl_check_query(instance, """ + test_cluster.ddl_check_query( + instance, + """ CREATE TABLE IF NOT EXISTS merge_for_alter ON CLUSTER cluster (p Date, i Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/hits', '{replica}', p, p, 1) -""") +""", + ) - test_cluster.ddl_check_query(instance, """ + test_cluster.ddl_check_query( + instance, + """ CREATE TABLE IF NOT EXISTS all_merge_32 ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(cluster, default, merge_for_alter, i) -""") - test_cluster.ddl_check_query(instance, """ +""", + ) + test_cluster.ddl_check_query( + instance, + """ CREATE TABLE IF NOT EXISTS all_merge_64 ON CLUSTER cluster (p Date, i Int64, s String) ENGINE = Distributed(cluster, default, merge_for_alter, i) -""") +""", + ) for i in range(4): k = (i // 2) * 2 - test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)], - "INSERT INTO merge_for_alter (i) VALUES ({})({})".format(k, k + 1)) + test_cluster.insert_reliable( + test_cluster.instances["ch{}".format(i + 1)], + "INSERT INTO merge_for_alter (i) VALUES ({})({})".format(k, k + 1), + ) 
test_cluster.sync_replicas("merge_for_alter") assert TSV(instance.query("SELECT i FROM all_merge_32 ORDER BY i")) == TSV( - ''.join(['{}\n'.format(x) for x in range(4)])) + "".join(["{}\n".format(x) for x in range(4)]) + ) - test_cluster.ddl_check_query(instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster MODIFY COLUMN i Int64") - test_cluster.ddl_check_query(instance, - "ALTER TABLE merge_for_alter ON CLUSTER cluster ADD COLUMN s String DEFAULT toString(i)") + test_cluster.ddl_check_query( + instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster MODIFY COLUMN i Int64" + ) + test_cluster.ddl_check_query( + instance, + "ALTER TABLE merge_for_alter ON CLUSTER cluster ADD COLUMN s String DEFAULT toString(i)", + ) assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV( - ''.join(['{}\t{}\n'.format(x, x) for x in range(4)])) + "".join(["{}\t{}\n".format(x, x) for x in range(4)]) + ) for i in range(4): k = (i // 2) * 2 + 4 - test_cluster.insert_reliable(test_cluster.instances['ch{}'.format(i + 1)], - "INSERT INTO merge_for_alter (p, i) VALUES (31, {})(31, {})".format(k, k + 1)) + test_cluster.insert_reliable( + test_cluster.instances["ch{}".format(i + 1)], + "INSERT INTO merge_for_alter (p, i) VALUES (31, {})(31, {})".format( + k, k + 1 + ), + ) test_cluster.sync_replicas("merge_for_alter") assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV( - ''.join(['{}\t{}\n'.format(x, x) for x in range(8)])) + "".join(["{}\t{}\n".format(x, x) for x in range(8)]) + ) - test_cluster.ddl_check_query(instance, "ALTER TABLE merge_for_alter ON CLUSTER cluster DETACH PARTITION 197002") + test_cluster.ddl_check_query( + instance, + "ALTER TABLE merge_for_alter ON CLUSTER cluster DETACH PARTITION 197002", + ) assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV( - ''.join(['{}\t{}\n'.format(x, x) for x in range(4)])) + "".join(["{}\t{}\n".format(x, x) for x in range(4)]) + ) - test_cluster.ddl_check_query(instance, "DROP TABLE merge_for_alter ON CLUSTER cluster SYNC") + test_cluster.ddl_check_query( + instance, "DROP TABLE merge_for_alter ON CLUSTER cluster SYNC" + ) # Enable random ZK packet drops test_cluster.pm_random_drops.push_rules(firewall_drops_rules) - test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_32 ON CLUSTER cluster SYNC") - test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_64 ON CLUSTER cluster SYNC") + test_cluster.ddl_check_query( + instance, "DROP TABLE all_merge_32 ON CLUSTER cluster SYNC" + ) + test_cluster.ddl_check_query( + instance, "DROP TABLE all_merge_64 ON CLUSTER cluster SYNC" + ) diff --git a/tests/integration/test_distributed_ddl_on_cross_replication/test.py b/tests/integration/test_distributed_ddl_on_cross_replication/test.py index b61bfc5d83f..b89091d4034 100644 --- a/tests/integration/test_distributed_ddl_on_cross_replication/test.py +++ b/tests/integration/test_distributed_ddl_on_cross_replication/test.py @@ -4,12 +4,24 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True, - macros={"shard": 1, "replica": 1, "shard_bk": 3, "replica_bk": 2}) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True, - macros={"shard": 2, "replica": 1, "shard_bk": 1, "replica_bk": 2}) -node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], with_zookeeper=True, - macros={"shard": 3, 
"replica": 1, "shard_bk": 2, "replica_bk": 2}) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, + macros={"shard": 1, "replica": 1, "shard_bk": 3, "replica_bk": 2}, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, + macros={"shard": 2, "replica": 1, "shard_bk": 1, "replica_bk": 2}, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, + macros={"shard": 3, "replica": 1, "shard_bk": 2, "replica_bk": 2}, +) @pytest.fixture(scope="module") @@ -17,7 +29,8 @@ def started_cluster(): try: cluster.start() - node1.query(''' + node1.query( + """ CREATE DATABASE replica_1 ON CLUSTER cross_3shards_2replicas; CREATE DATABASE replica_2 ON CLUSTER cross_3shards_2replicas; @@ -38,19 +51,23 @@ def started_cluster(): CREATE TABLE replica_2.replicated ON CLUSTER cross_3shards_2replicas as replica_2.replicated_local ENGINE = Distributed(cross_3shards_2replicas, '', replicated_local, shard_id); - ''') + """ + ) - to_insert = '''\ + to_insert = """\ 2017-06-16 10 0 2017-06-17 11 0 2017-06-16 20 1 2017-06-17 21 1 2017-06-16 30 2 2017-06-17 31 2 -''' +""" - node1.query("INSERT INTO replica_1.replicated FORMAT TSV", stdin=to_insert, - settings={"insert_distributed_sync": 1}) + node1.query( + "INSERT INTO replica_1.replicated FORMAT TSV", + stdin=to_insert, + settings={"insert_distributed_sync": 1}, + ) yield cluster finally: @@ -59,56 +76,93 @@ def started_cluster(): def test_alter_ddl(started_cluster): - node1.query("ALTER TABLE replica_1.replicated_local \ + node1.query( + "ALTER TABLE replica_1.replicated_local \ ON CLUSTER cross_3shards_2replicas \ UPDATE shard_id=shard_id+3 \ - WHERE part_key='2017-06-16'") + WHERE part_key='2017-06-16'" + ) node1.query("SYSTEM SYNC REPLICA replica_2.replicated_local;", timeout=5) - assert_eq_with_retry(node1, - "SELECT count(*) FROM replica_2.replicated where shard_id >= 3 and part_key='2017-06-16'", '3') + assert_eq_with_retry( + node1, + "SELECT count(*) FROM replica_2.replicated where shard_id >= 3 and part_key='2017-06-16'", + "3", + ) - node1.query("ALTER TABLE replica_1.replicated_local \ - ON CLUSTER cross_3shards_2replicas DELETE WHERE shard_id >=3;") + node1.query( + "ALTER TABLE replica_1.replicated_local \ + ON CLUSTER cross_3shards_2replicas DELETE WHERE shard_id >=3;" + ) node1.query("SYSTEM SYNC REPLICA replica_2.replicated_local;", timeout=5) - assert_eq_with_retry(node1, "SELECT count(*) FROM replica_2.replicated where shard_id >= 3", '0') + assert_eq_with_retry( + node1, "SELECT count(*) FROM replica_2.replicated where shard_id >= 3", "0" + ) - node2.query("ALTER TABLE replica_1.replicated_local ON CLUSTER cross_3shards_2replicas DROP PARTITION '2017-06-17'") + node2.query( + "ALTER TABLE replica_1.replicated_local ON CLUSTER cross_3shards_2replicas DROP PARTITION '2017-06-17'" + ) node2.query("SYSTEM SYNC REPLICA replica_2.replicated_local;", timeout=5) - assert_eq_with_retry(node1, "SELECT count(*) FROM replica_2.replicated", '0') + assert_eq_with_retry(node1, "SELECT count(*) FROM replica_2.replicated", "0") + def test_atomic_database(started_cluster): - node1.query('''DROP DATABASE IF EXISTS replica_1 ON CLUSTER cross_3shards_2replicas; + node1.query( + """DROP DATABASE IF EXISTS replica_1 ON CLUSTER cross_3shards_2replicas; DROP DATABASE IF EXISTS replica_2 ON CLUSTER cross_3shards_2replicas; CREATE DATABASE replica_1 ON CLUSTER cross_3shards_2replicas ENGINE=Atomic; 
- CREATE DATABASE replica_2 ON CLUSTER cross_3shards_2replicas ENGINE=Atomic;''') + CREATE DATABASE replica_2 ON CLUSTER cross_3shards_2replicas ENGINE=Atomic;""" + ) - assert "It's not supported for cross replication" in \ - node1.query_and_get_error("CREATE TABLE rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n") - assert "It's not supported for cross replication" in \ - node1.query_and_get_error("CREATE TABLE replica_1.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n") - assert "It's not supported for cross replication" in \ - node1.query_and_get_error("CREATE TABLE rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{uuid}/', '{replica}') ORDER BY n") - assert "It's not supported for cross replication" in \ - node1.query_and_get_error("CREATE TABLE replica_2.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{uuid}/', '{replica}') ORDER BY n") - assert "For a distributed DDL on circular replicated cluster its table name must be qualified by database name" in \ - node1.query_and_get_error("CREATE TABLE rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/tables/{shard}/rmt/', '{replica}') ORDER BY n") + assert "It's not supported for cross replication" in node1.query_and_get_error( + "CREATE TABLE rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n" + ) + assert "It's not supported for cross replication" in node1.query_and_get_error( + "CREATE TABLE replica_1.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n" + ) + assert "It's not supported for cross replication" in node1.query_and_get_error( + "CREATE TABLE rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{uuid}/', '{replica}') ORDER BY n" + ) + assert "It's not supported for cross replication" in node1.query_and_get_error( + "CREATE TABLE replica_2.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{uuid}/', '{replica}') ORDER BY n" + ) + assert ( + "For a distributed DDL on circular replicated cluster its table name must be qualified by database name" + in node1.query_and_get_error( + "CREATE TABLE rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/tables/{shard}/rmt/', '{replica}') ORDER BY n" + ) + ) - node1.query("CREATE TABLE replica_1.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/tables/{shard}/rmt/', '{replica}') ORDER BY n") - node1.query("CREATE TABLE replica_2.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/tables/{shard_bk}/rmt/', '{replica_bk}') ORDER BY n") + node1.query( + "CREATE TABLE replica_1.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/tables/{shard}/rmt/', '{replica}') ORDER BY n" + ) + node1.query( + "CREATE TABLE replica_2.rmt ON CLUSTER cross_3shards_2replicas (n UInt64, s String) ENGINE=ReplicatedMergeTree('/tables/{shard_bk}/rmt/', '{replica_bk}') ORDER BY n" + ) - assert node1.query("SELECT countDistinct(uuid) from remote('node1,node2,node3', 'system', 'databases') WHERE uuid != '00000000-0000-0000-0000-000000000000' AND name='replica_1'") == "1\n" - assert node1.query("SELECT countDistinct(uuid) from remote('node1,node2,node3', 'system', 
'tables') WHERE uuid != '00000000-0000-0000-0000-000000000000' AND name='rmt'") == "2\n" + assert ( + node1.query( + "SELECT countDistinct(uuid) from remote('node1,node2,node3', 'system', 'databases') WHERE uuid != '00000000-0000-0000-0000-000000000000' AND name='replica_1'" + ) + == "1\n" + ) + assert ( + node1.query( + "SELECT countDistinct(uuid) from remote('node1,node2,node3', 'system', 'tables') WHERE uuid != '00000000-0000-0000-0000-000000000000' AND name='rmt'" + ) + == "2\n" + ) node1.query("INSERT INTO replica_1.rmt VALUES (1, 'test')") node2.query("SYSTEM SYNC REPLICA replica_2.rmt", timeout=5) - assert_eq_with_retry(node2, "SELECT * FROM replica_2.rmt", '1\ttest') + assert_eq_with_retry(node2, "SELECT * FROM replica_2.rmt", "1\ttest") + def test_non_query_with_table_ddl(started_cluster): node1.query("CREATE USER A ON CLUSTER cross_3shards_2replicas") - assert node1.query("SELECT 1", user='A') == "1\n" - assert node2.query("SELECT 1", user='A') == "1\n" + assert node1.query("SELECT 1", user="A") == "1\n" + assert node2.query("SELECT 1", user="A") == "1\n" node2.query("DROP USER A ON CLUSTER cross_3shards_2replicas") diff --git a/tests/integration/test_distributed_ddl_parallel/test.py b/tests/integration/test_distributed_ddl_parallel/test.py index 44971ca3d9e..a3fe00623ca 100644 --- a/tests/integration/test_distributed_ddl_parallel/test.py +++ b/tests/integration/test_distributed_ddl_parallel/test.py @@ -19,39 +19,43 @@ class SafeThread(threading.Thread): super().__init__() self.target = target self.exception = None + def run(self): try: self.target() - except Exception as e: # pylint: disable=broad-except + except Exception as e: # pylint: disable=broad-except self.exception = e + def join(self, timeout=None): super().join(timeout) if self.exception: raise self.exception + def add_instance(name, ddl_config=None): - main_configs=[ - 'configs/remote_servers.xml', + main_configs = [ + "configs/remote_servers.xml", ] if ddl_config: main_configs.append(ddl_config) - dictionaries=[ - 'configs/dict.xml', + dictionaries = [ + "configs/dict.xml", ] - return cluster.add_instance(name, - main_configs=main_configs, - dictionaries=dictionaries, - with_zookeeper=True) + return cluster.add_instance( + name, main_configs=main_configs, dictionaries=dictionaries, with_zookeeper=True + ) -initiator = add_instance('initiator') + +initiator = add_instance("initiator") # distributed_ddl.pool_size = 2 -n1 = add_instance('n1', 'configs/ddl_a.xml') -n2 = add_instance('n2', 'configs/ddl_a.xml') +n1 = add_instance("n1", "configs/ddl_a.xml") +n2 = add_instance("n2", "configs/ddl_a.xml") # distributed_ddl.pool_size = 20 -n3 = add_instance('n3', 'configs/ddl_b.xml') -n4 = add_instance('n4', 'configs/ddl_b.xml') +n3 = add_instance("n3", "configs/ddl_b.xml") +n4 = add_instance("n4", "configs/ddl_b.xml") -@pytest.fixture(scope='module', autouse=True) + +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -59,6 +63,7 @@ def start_cluster(): finally: cluster.shutdown() + # verifies that functions executes longer then `sec` def longer_then(sec): def wrapper(func): @@ -67,40 +72,61 @@ def longer_then(sec): ts = time.time() result = func(*args, **kwargs) te = time.time() - took = te-ts + took = te - ts assert took >= sec return result + return inner + return wrapper + # It takes 7 seconds to load slow_dict_7. 
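# Editorial aside: a self-contained sketch of the timing-assertion decorator
# pattern (`longer_then`) reformatted above, using only the standard library.
# The names `takes_at_least` and `slow_operation` are illustrative, not part
# of the test suite.
import functools
import time


def takes_at_least(seconds):
    def wrapper(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            started = time.time()
            result = func(*args, **kwargs)
            elapsed = time.time() - started
            # The decorated call must run for at least `seconds` seconds,
            # otherwise the test fails.
            assert elapsed >= seconds, f"took {elapsed:.2f}s, expected >= {seconds}s"
            return result

        return inner

    return wrapper


@takes_at_least(0.1)
def slow_operation():
    time.sleep(0.2)
    return "done"


if __name__ == "__main__":
    assert slow_operation() == "done"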
def execute_reload_dictionary_slow_dict_7(): - initiator.query('SYSTEM RELOAD DICTIONARY ON CLUSTER cluster_a slow_dict_7', settings={ - 'distributed_ddl_task_timeout': 60, - }) + initiator.query( + "SYSTEM RELOAD DICTIONARY ON CLUSTER cluster_a slow_dict_7", + settings={ + "distributed_ddl_task_timeout": 60, + }, + ) + + def execute_reload_dictionary_slow_dict_3(): - initiator.query('SYSTEM RELOAD DICTIONARY ON CLUSTER cluster_b slow_dict_3', settings={ - 'distributed_ddl_task_timeout': 60, - }) + initiator.query( + "SYSTEM RELOAD DICTIONARY ON CLUSTER cluster_b slow_dict_3", + settings={ + "distributed_ddl_task_timeout": 60, + }, + ) + + def execute_smoke_query(): - initiator.query('DROP DATABASE IF EXISTS foo ON CLUSTER cluster_b', settings={ - 'distributed_ddl_task_timeout': 60, - }) + initiator.query( + "DROP DATABASE IF EXISTS foo ON CLUSTER cluster_b", + settings={ + "distributed_ddl_task_timeout": 60, + }, + ) + def check_log(): # ensure that none of tasks processed multiple times for _, instance in list(cluster.instances.items()): - assert not instance.contains_in_log('Coordination::Exception: Node exists') + assert not instance.contains_in_log("Coordination::Exception: Node exists") + # NOTE: uses inner function to exclude slow start_cluster() from timeout. + def test_slow_dict_load_7(): @pytest.mark.timeout(10) @longer_then(7) def inner_test(): - initiator.query('SYSTEM RELOAD DICTIONARY slow_dict_7') + initiator.query("SYSTEM RELOAD DICTIONARY slow_dict_7") + inner_test() + def test_all_in_parallel(): @pytest.mark.timeout(10) @longer_then(7) @@ -112,9 +138,11 @@ def test_all_in_parallel(): thread.start() for thread in threads: thread.join(70) + inner_test() check_log() + def test_two_in_parallel_two_queued(): @pytest.mark.timeout(19) @longer_then(14) @@ -126,14 +154,17 @@ def test_two_in_parallel_two_queued(): thread.start() for thread in threads: thread.join(70) + inner_test() check_log() + def test_smoke(): for _ in range(100): execute_smoke_query() check_log() + def test_smoke_parallel(): threads = [] for _ in range(100): @@ -144,6 +175,7 @@ def test_smoke_parallel(): thread.join(70) check_log() + def test_smoke_parallel_dict_reload(): threads = [] for _ in range(100): diff --git a/tests/integration/test_distributed_ddl_password/test.py b/tests/integration/test_distributed_ddl_password/test.py index 0c061914497..bf2b7979c3c 100644 --- a/tests/integration/test_distributed_ddl_password/test.py +++ b/tests/integration/test_distributed_ddl_password/test.py @@ -4,18 +4,42 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=["configs/config.d/clusters.xml"], - user_configs=["configs/users.d/default_with_password.xml"], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=["configs/config.d/clusters.xml"], - user_configs=["configs/users.d/default_with_password.xml"], with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=["configs/config.d/clusters.xml"], - user_configs=["configs/users.d/default_with_password.xml"], with_zookeeper=True) -node4 = cluster.add_instance('node4', main_configs=["configs/config.d/clusters.xml"], - user_configs=["configs/users.d/default_with_password.xml"], with_zookeeper=True) -node5 = cluster.add_instance('node5', main_configs=["configs/config.d/clusters.xml"], - user_configs=["configs/users.d/default_with_password.xml"], with_zookeeper=True) -node6 = cluster.add_instance('node6', 
main_configs=["configs/config.d/clusters.xml"], - user_configs=["configs/users.d/default_with_password.xml"], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/config.d/clusters.xml"], + user_configs=["configs/users.d/default_with_password.xml"], + with_zookeeper=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/config.d/clusters.xml"], + user_configs=["configs/users.d/default_with_password.xml"], + with_zookeeper=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/config.d/clusters.xml"], + user_configs=["configs/users.d/default_with_password.xml"], + with_zookeeper=True, +) +node4 = cluster.add_instance( + "node4", + main_configs=["configs/config.d/clusters.xml"], + user_configs=["configs/users.d/default_with_password.xml"], + with_zookeeper=True, +) +node5 = cluster.add_instance( + "node5", + main_configs=["configs/config.d/clusters.xml"], + user_configs=["configs/users.d/default_with_password.xml"], + with_zookeeper=True, +) +node6 = cluster.add_instance( + "node6", + main_configs=["configs/config.d/clusters.xml"], + user_configs=["configs/users.d/default_with_password.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -23,14 +47,25 @@ def start_cluster(): try: cluster.start() - for node, shard in [(node1, 1), (node2, 1), (node3, 2), (node4, 2), (node5, 3), (node6, 3)]: + for node, shard in [ + (node1, 1), + (node2, 1), + (node3, 2), + (node4, 2), + (node5, 3), + (node6, 3), + ]: node.query( - ''' + """ CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') PARTITION BY date ORDER BY id - '''.format(shard=shard, replica=node.name), settings={"password": "clickhouse"}) + """.format( + shard=shard, replica=node.name + ), + settings={"password": "clickhouse"}, + ) yield cluster @@ -39,76 +74,191 @@ def start_cluster(): def test_truncate(start_cluster): - node1.query("insert into test_table values ('2019-02-15', 1, 2), ('2019-02-15', 2, 3), ('2019-02-15', 3, 4)", - settings={"password": "clickhouse"}) + node1.query( + "insert into test_table values ('2019-02-15', 1, 2), ('2019-02-15', 2, 3), ('2019-02-15', 3, 4)", + settings={"password": "clickhouse"}, + ) - assert node1.query("select count(*) from test_table", settings={"password": "clickhouse"}) == "3\n" + assert ( + node1.query( + "select count(*) from test_table", settings={"password": "clickhouse"} + ) + == "3\n" + ) node2.query("system sync replica test_table", settings={"password": "clickhouse"}) - assert node2.query("select count(*) from test_table", settings={"password": "clickhouse"}) == "3\n" + assert ( + node2.query( + "select count(*) from test_table", settings={"password": "clickhouse"} + ) + == "3\n" + ) - node3.query("insert into test_table values ('2019-02-16', 1, 2), ('2019-02-16', 2, 3), ('2019-02-16', 3, 4)", - settings={"password": "clickhouse"}) + node3.query( + "insert into test_table values ('2019-02-16', 1, 2), ('2019-02-16', 2, 3), ('2019-02-16', 3, 4)", + settings={"password": "clickhouse"}, + ) - assert node3.query("select count(*) from test_table", settings={"password": "clickhouse"}) == "3\n" + assert ( + node3.query( + "select count(*) from test_table", settings={"password": "clickhouse"} + ) + == "3\n" + ) node4.query("system sync replica test_table", settings={"password": "clickhouse"}) - assert node4.query("select count(*) from test_table", settings={"password": "clickhouse"}) == "3\n" + assert ( + node4.query( + 
"select count(*) from test_table", settings={"password": "clickhouse"} + ) + == "3\n" + ) - node3.query("truncate table test_table on cluster 'awesome_cluster'", settings={"password": "clickhouse"}) + node3.query( + "truncate table test_table on cluster 'awesome_cluster'", + settings={"password": "clickhouse"}, + ) for node in [node1, node2, node3, node4]: - assert_eq_with_retry(node, "select count(*) from test_table", "0", settings={"password": "clickhouse"}) + assert_eq_with_retry( + node, + "select count(*) from test_table", + "0", + settings={"password": "clickhouse"}, + ) - node2.query("drop table test_table on cluster 'awesome_cluster'", settings={"password": "clickhouse"}) + node2.query( + "drop table test_table on cluster 'awesome_cluster'", + settings={"password": "clickhouse"}, + ) for node in [node1, node2, node3, node4]: - assert_eq_with_retry(node, "select count(*) from system.tables where name='test_table'", "0", - settings={"password": "clickhouse"}) + assert_eq_with_retry( + node, + "select count(*) from system.tables where name='test_table'", + "0", + settings={"password": "clickhouse"}, + ) def test_alter(start_cluster): - node5.query("insert into test_table values ('2019-02-15', 1, 2), ('2019-02-15', 2, 3), ('2019-02-15', 3, 4)", - settings={"password": "clickhouse"}) - node6.query("insert into test_table values ('2019-02-15', 4, 2), ('2019-02-15', 5, 3), ('2019-02-15', 6, 4)", - settings={"password": "clickhouse"}) + node5.query( + "insert into test_table values ('2019-02-15', 1, 2), ('2019-02-15', 2, 3), ('2019-02-15', 3, 4)", + settings={"password": "clickhouse"}, + ) + node6.query( + "insert into test_table values ('2019-02-15', 4, 2), ('2019-02-15', 5, 3), ('2019-02-15', 6, 4)", + settings={"password": "clickhouse"}, + ) node5.query("SYSTEM SYNC REPLICA test_table", settings={"password": "clickhouse"}) node6.query("SYSTEM SYNC REPLICA test_table", settings={"password": "clickhouse"}) - assert_eq_with_retry(node5, "select count(*) from test_table", "6", settings={"password": "clickhouse"}) - assert_eq_with_retry(node6, "select count(*) from test_table", "6", settings={"password": "clickhouse"}) + assert_eq_with_retry( + node5, + "select count(*) from test_table", + "6", + settings={"password": "clickhouse"}, + ) + assert_eq_with_retry( + node6, + "select count(*) from test_table", + "6", + settings={"password": "clickhouse"}, + ) - node6.query("OPTIMIZE TABLE test_table ON CLUSTER 'simple_cluster' FINAL", settings={"password": "clickhouse"}) + node6.query( + "OPTIMIZE TABLE test_table ON CLUSTER 'simple_cluster' FINAL", + settings={"password": "clickhouse"}, + ) node5.query("SYSTEM SYNC REPLICA test_table", settings={"password": "clickhouse"}) node6.query("SYSTEM SYNC REPLICA test_table", settings={"password": "clickhouse"}) - assert_eq_with_retry(node5, "select count(*) from test_table", "6", settings={"password": "clickhouse"}) - assert_eq_with_retry(node6, "select count(*) from test_table", "6", settings={"password": "clickhouse"}) + assert_eq_with_retry( + node5, + "select count(*) from test_table", + "6", + settings={"password": "clickhouse"}, + ) + assert_eq_with_retry( + node6, + "select count(*) from test_table", + "6", + settings={"password": "clickhouse"}, + ) - node6.query("ALTER TABLE test_table ON CLUSTER 'simple_cluster' DETACH PARTITION '2019-02-15'", - settings={"password": "clickhouse"}) - assert_eq_with_retry(node5, "select count(*) from test_table", "0", settings={"password": "clickhouse"}) - assert_eq_with_retry(node6, "select count(*) from 
test_table", "0", settings={"password": "clickhouse"}) + node6.query( + "ALTER TABLE test_table ON CLUSTER 'simple_cluster' DETACH PARTITION '2019-02-15'", + settings={"password": "clickhouse"}, + ) + assert_eq_with_retry( + node5, + "select count(*) from test_table", + "0", + settings={"password": "clickhouse"}, + ) + assert_eq_with_retry( + node6, + "select count(*) from test_table", + "0", + settings={"password": "clickhouse"}, + ) with pytest.raises(QueryRuntimeException): - node6.query("ALTER TABLE test_table ON CLUSTER 'simple_cluster' ATTACH PARTITION '2019-02-15'", - settings={"password": "clickhouse"}) + node6.query( + "ALTER TABLE test_table ON CLUSTER 'simple_cluster' ATTACH PARTITION '2019-02-15'", + settings={"password": "clickhouse"}, + ) - node5.query("ALTER TABLE test_table ATTACH PARTITION '2019-02-15'", settings={"password": "clickhouse"}) + node5.query( + "ALTER TABLE test_table ATTACH PARTITION '2019-02-15'", + settings={"password": "clickhouse"}, + ) - assert_eq_with_retry(node5, "select count(*) from test_table", "6", settings={"password": "clickhouse"}) - assert_eq_with_retry(node6, "select count(*) from test_table", "6", settings={"password": "clickhouse"}) + assert_eq_with_retry( + node5, + "select count(*) from test_table", + "6", + settings={"password": "clickhouse"}, + ) + assert_eq_with_retry( + node6, + "select count(*) from test_table", + "6", + settings={"password": "clickhouse"}, + ) - node5.query("ALTER TABLE test_table ON CLUSTER 'simple_cluster' MODIFY COLUMN dummy String", - settings={"password": "clickhouse"}) + node5.query( + "ALTER TABLE test_table ON CLUSTER 'simple_cluster' MODIFY COLUMN dummy String", + settings={"password": "clickhouse"}, + ) - assert_eq_with_retry(node5, "select length(dummy) from test_table ORDER BY dummy LIMIT 1", "1", - settings={"password": "clickhouse"}) - assert_eq_with_retry(node6, "select length(dummy) from test_table ORDER BY dummy LIMIT 1", "1", - settings={"password": "clickhouse"}) + assert_eq_with_retry( + node5, + "select length(dummy) from test_table ORDER BY dummy LIMIT 1", + "1", + settings={"password": "clickhouse"}, + ) + assert_eq_with_retry( + node6, + "select length(dummy) from test_table ORDER BY dummy LIMIT 1", + "1", + settings={"password": "clickhouse"}, + ) - node6.query("ALTER TABLE test_table ON CLUSTER 'simple_cluster' DROP PARTITION '2019-02-15'", - settings={"password": "clickhouse"}) + node6.query( + "ALTER TABLE test_table ON CLUSTER 'simple_cluster' DROP PARTITION '2019-02-15'", + settings={"password": "clickhouse"}, + ) - assert_eq_with_retry(node5, "select count(*) from test_table", "0", settings={"password": "clickhouse"}) - assert_eq_with_retry(node6, "select count(*) from test_table", "0", settings={"password": "clickhouse"}) + assert_eq_with_retry( + node5, + "select count(*) from test_table", + "0", + settings={"password": "clickhouse"}, + ) + assert_eq_with_retry( + node6, + "select count(*) from test_table", + "0", + settings={"password": "clickhouse"}, + ) diff --git a/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py index b0b89fde41f..a47268b06fd 100644 --- a/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py +++ b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py @@ -5,23 +5,27 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) # node1 -- 
distributed_directory_monitor_split_batch_on_failure=on -node1 = cluster.add_instance('node1', - main_configs=['configs/remote_servers.xml'], - user_configs=['configs/overrides_1.xml'], +node1 = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/overrides_1.xml"], ) # node2 -- distributed_directory_monitor_split_batch_on_failure=off -node2 = cluster.add_instance('node2', - main_configs=['configs/remote_servers.xml'], - user_configs=['configs/overrides_2.xml'], +node2 = cluster.add_instance( + "node2", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/overrides_2.xml"], ) -@pytest.fixture(scope='module') + +@pytest.fixture(scope="module") def started_cluster(): try: cluster.start() for _, node in cluster.instances.items(): - node.query(""" + node.query( + """ create table null_ (key Int, value Int) engine=Null(); create table dist as null_ engine=Distributed(test_cluster, currentDatabase(), null_, key); create table data (key Int, uniq_values Int) engine=Memory(); @@ -29,34 +33,46 @@ def started_cluster(): system stop distributed sends dist; create table dist_data as data engine=Distributed(test_cluster, currentDatabase(), data); - """) + """ + ) yield cluster finally: cluster.shutdown() + def test_distributed_directory_monitor_split_batch_on_failure_OFF(started_cluster): for i in range(0, 100): limit = 100e3 - node2.query(f'insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}', settings={ - # max_memory_usage is the limit for the batch on the remote node - # (local query should not be affected since 30MB is enough for 100K rows) - 'max_memory_usage': '30Mi', - 'max_untracked_memory': '0' - }) + node2.query( + f"insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}", + settings={ + # max_memory_usage is the limit for the batch on the remote node + # (local query should not be affected since 30MB is enough for 100K rows) + "max_memory_usage": "30Mi", + "max_untracked_memory": "0", + }, + ) # "Received from" is mandatory, since the exception should be thrown on the remote node. 
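(Editor's aside, not part of the patch.) The assertion reformatted just below relies on pytest.raises(match=...), which re.search()-es the pattern against the exception text; that is why the literal parentheses in the pattern are escaped and why requiring the "Received from" prefix pins the failure to the remote shard rather than the initiator. A self-contained sketch of the same technique, with hypothetical stand-ins for the ClickHouse helpers:

import pytest


class QueryRuntimeException(Exception):
    """Hypothetical stand-in for helpers.client.QueryRuntimeException."""


def flush_distributed():
    # Pretend the remote shard rejected the batch; the "Received from" prefix is what
    # distinguishes a DB::Exception raised remotely from one raised on the local node.
    raise QueryRuntimeException(
        "DB::Exception: Received from n2:9000. DB::Exception: "
        "Memory limit (for query) exceeded: would use 31.00 MiB."
    )


def test_error_comes_from_remote_shard():
    # match= is a regular expression searched with re.search(), so literal
    # parentheses in the expected message must be escaped.
    with pytest.raises(
        QueryRuntimeException,
        match=r"Received from.*Memory limit \(for query\) exceeded",
    ):
        flush_distributed()

Anchoring on a stable prefix of the message rather than the full error text keeps such assertions robust against minor changes in the server's wording.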
- with pytest.raises(QueryRuntimeException, match=r'DB::Exception: Received from.*Memory limit \(for query\) exceeded: .*while pushing to view default\.mv'): - node2.query('system flush distributed dist') - assert int(node2.query('select count() from dist_data')) == 0 + with pytest.raises( + QueryRuntimeException, + match=r"DB::Exception: Received from.*Memory limit \(for query\) exceeded: .*while pushing to view default\.mv", + ): + node2.query("system flush distributed dist") + assert int(node2.query("select count() from dist_data")) == 0 + def test_distributed_directory_monitor_split_batch_on_failure_ON(started_cluster): for i in range(0, 100): limit = 100e3 - node1.query(f'insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}', settings={ - # max_memory_usage is the limit for the batch on the remote node - # (local query should not be affected since 30MB is enough for 100K rows) - 'max_memory_usage': '30Mi', - 'max_untracked_memory': '0' - }) - node1.query('system flush distributed dist') - assert int(node1.query('select count() from dist_data')) == 100000 + node1.query( + f"insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}", + settings={ + # max_memory_usage is the limit for the batch on the remote node + # (local query should not be affected since 30MB is enough for 100K rows) + "max_memory_usage": "30Mi", + "max_untracked_memory": "0", + }, + ) + node1.query("system flush distributed dist") + assert int(node1.query("select count() from dist_data")) == 100000 diff --git a/tests/integration/test_distributed_format/test.py b/tests/integration/test_distributed_format/test.py index d6e1cc03fa8..415141be021 100644 --- a/tests/integration/test_distributed_format/test.py +++ b/tests/integration/test_distributed_format/test.py @@ -6,20 +6,23 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/remote_servers.xml']) +node = cluster.add_instance("node", main_configs=["configs/remote_servers.xml"]) -cluster_param = pytest.mark.parametrize("cluster", [ - ('test_cluster_internal_replication'), - ('test_cluster_no_internal_replication'), -]) +cluster_param = pytest.mark.parametrize( + "cluster", + [ + ("test_cluster_internal_replication"), + ("test_cluster_no_internal_replication"), + ], +) def get_dist_path(cluster, table, dist_format): if dist_format == 0: - return f'/var/lib/clickhouse/data/test/{table}/default@not_existing:9000' - if cluster == 'test_cluster_internal_replication': - return f'/var/lib/clickhouse/data/test/{table}/shard1_all_replicas' - return f'/var/lib/clickhouse/data/test/{table}/shard1_replica1' + return f"/var/lib/clickhouse/data/test/{table}/default@not_existing:9000" + if cluster == "test_cluster_internal_replication": + return f"/var/lib/clickhouse/data/test/{table}/shard1_all_replicas" + return f"/var/lib/clickhouse/data/test/{table}/shard1_replica1" @pytest.fixture(scope="module") @@ -36,23 +39,32 @@ def started_cluster(): @cluster_param def test_single_file(started_cluster, cluster): node.query( - "create table test.distr_1 (x UInt64, s String) engine = Distributed('{}', database, table)".format(cluster)) - node.query("insert into test.distr_1 values (1, 'a'), (2, 'bb'), (3, 'ccc')", - settings={"use_compact_format_in_distributed_parts_names": "1"}) + "create table test.distr_1 (x UInt64, s String) engine = Distributed('{}', database, table)".format( + cluster + ) + ) + 
node.query( + "insert into test.distr_1 values (1, 'a'), (2, 'bb'), (3, 'ccc')", + settings={"use_compact_format_in_distributed_parts_names": "1"}, + ) - path = get_dist_path(cluster, 'distr_1', 1) + path = get_dist_path(cluster, "distr_1", 1) query = f"select * from file('{path}/1.bin', 'Distributed')" - out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) + out = node.exec_in_container( + ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query] + ) - assert out == '1\ta\n2\tbb\n3\tccc\n' + assert out == "1\ta\n2\tbb\n3\tccc\n" query = f""" create table t (x UInt64, s String) engine = File('Distributed', '{path}/1.bin'); select * from t; """ - out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) + out = node.exec_in_container( + ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query] + ) - assert out == '1\ta\n2\tbb\n3\tccc\n' + assert out == "1\ta\n2\tbb\n3\tccc\n" node.query("drop table test.distr_1") @@ -60,27 +72,40 @@ def test_single_file(started_cluster, cluster): @cluster_param def test_two_files(started_cluster, cluster): node.query( - "create table test.distr_2 (x UInt64, s String) engine = Distributed('{}', database, table)".format(cluster)) - node.query("insert into test.distr_2 values (0, '_'), (1, 'a')", settings={ - "use_compact_format_in_distributed_parts_names": "1", - }) - node.query("insert into test.distr_2 values (2, 'bb'), (3, 'ccc')", settings={ - "use_compact_format_in_distributed_parts_names": "1", - }) + "create table test.distr_2 (x UInt64, s String) engine = Distributed('{}', database, table)".format( + cluster + ) + ) + node.query( + "insert into test.distr_2 values (0, '_'), (1, 'a')", + settings={ + "use_compact_format_in_distributed_parts_names": "1", + }, + ) + node.query( + "insert into test.distr_2 values (2, 'bb'), (3, 'ccc')", + settings={ + "use_compact_format_in_distributed_parts_names": "1", + }, + ) - path = get_dist_path(cluster, 'distr_2', 1) + path = get_dist_path(cluster, "distr_2", 1) query = f"select * from file('{path}/{{1,2,3,4}}.bin', 'Distributed') order by x" - out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) + out = node.exec_in_container( + ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query] + ) - assert out == '0\t_\n1\ta\n2\tbb\n3\tccc\n' + assert out == "0\t_\n1\ta\n2\tbb\n3\tccc\n" query = f""" create table t (x UInt64, s String) engine = File('Distributed', '{path}/{{1,2,3,4}}.bin'); select * from t order by x; """ - out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) + out = node.exec_in_container( + ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query] + ) - assert out == '0\t_\n1\ta\n2\tbb\n3\tccc\n' + assert out == "0\t_\n1\ta\n2\tbb\n3\tccc\n" node.query("drop table test.distr_2") @@ -88,23 +113,33 @@ def test_two_files(started_cluster, cluster): @cluster_param def test_single_file_old(started_cluster, cluster): node.query( - "create table test.distr_3 (x UInt64, s String) engine = Distributed('{}', database, table)".format(cluster)) - node.query("insert into test.distr_3 values (1, 'a'), (2, 'bb'), (3, 'ccc')", settings={ - "use_compact_format_in_distributed_parts_names": "0", - }) + "create table test.distr_3 (x UInt64, s String) engine = Distributed('{}', database, table)".format( + cluster + ) + ) + node.query( + "insert into test.distr_3 values (1, 'a'), (2, 'bb'), (3, 'ccc')", + settings={ + 
"use_compact_format_in_distributed_parts_names": "0", + }, + ) - path = get_dist_path(cluster, 'distr_3', 0) + path = get_dist_path(cluster, "distr_3", 0) query = f"select * from file('{path}/1.bin', 'Distributed')" - out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) + out = node.exec_in_container( + ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query] + ) - assert out == '1\ta\n2\tbb\n3\tccc\n' + assert out == "1\ta\n2\tbb\n3\tccc\n" query = f""" create table t (x UInt64, s String) engine = File('Distributed', '{path}/1.bin'); select * from t; """ - out = node.exec_in_container(['/usr/bin/clickhouse', 'local', '--stacktrace', '-q', query]) + out = node.exec_in_container( + ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query] + ) - assert out == '1\ta\n2\tbb\n3\tccc\n' + assert out == "1\ta\n2\tbb\n3\tccc\n" node.query("drop table test.distr_3") diff --git a/tests/integration/test_distributed_insert_backward_compatibility/test.py b/tests/integration/test_distributed_insert_backward_compatibility/test.py index ba7d8e0a25d..ad61a2ad6f5 100644 --- a/tests/integration/test_distributed_insert_backward_compatibility/test.py +++ b/tests/integration/test_distributed_insert_backward_compatibility/test.py @@ -5,19 +5,32 @@ from helpers.client import QueryRuntimeException cluster = ClickHouseCluster(__file__) -node_shard = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml']) +node_shard = cluster.add_instance("node1", main_configs=["configs/remote_servers.xml"]) + +node_dist = cluster.add_instance( + "node2", + main_configs=["configs/remote_servers.xml"], + image="yandex/clickhouse-server", + tag="21.11.9.1", + stay_alive=True, + with_installed_binary=True, +) -node_dist = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], image='yandex/clickhouse-server', - tag='21.11.9.1', stay_alive=True, with_installed_binary=True) @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - node_shard.query("CREATE TABLE local_table(id UInt32, val String) ENGINE = MergeTree ORDER BY id") - node_dist.query("CREATE TABLE local_table(id UInt32, val String) ENGINE = MergeTree ORDER BY id") - node_dist.query("CREATE TABLE dist_table(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table, rand())") + node_shard.query( + "CREATE TABLE local_table(id UInt32, val String) ENGINE = MergeTree ORDER BY id" + ) + node_dist.query( + "CREATE TABLE local_table(id UInt32, val String) ENGINE = MergeTree ORDER BY id" + ) + node_dist.query( + "CREATE TABLE dist_table(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table, rand())" + ) yield cluster diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py index 2601163d790..8d344834c50 100644 --- a/tests/integration/test_distributed_inter_server_secret/test.py +++ b/tests/integration/test_distributed_inter_server_secret/test.py @@ -11,45 +11,63 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -def make_instance(name, cfg): - return cluster.add_instance(name, - with_zookeeper=True, - main_configs=['configs/remote_servers.xml', cfg], - user_configs=['configs/users.xml']) -# _n1/_n2 contains cluster with different -- should fail -n1 = make_instance('n1', 'configs/remote_servers_n1.xml') -n2 = make_instance('n2', 'configs/remote_servers_n2.xml') -users = pytest.mark.parametrize('user,password', [ - 
('default', '' ), - ('nopass', '' ), - ('pass', 'foo'), -]) +def make_instance(name, cfg): + return cluster.add_instance( + name, + with_zookeeper=True, + main_configs=["configs/remote_servers.xml", cfg], + user_configs=["configs/users.xml"], + ) + + +# _n1/_n2 contains cluster with different -- should fail +n1 = make_instance("n1", "configs/remote_servers_n1.xml") +n2 = make_instance("n2", "configs/remote_servers_n2.xml") + +users = pytest.mark.parametrize( + "user,password", + [ + ("default", ""), + ("nopass", ""), + ("pass", "foo"), + ], +) + def bootstrap(): for n in list(cluster.instances.values()): - n.query('DROP TABLE IF EXISTS data') - n.query('DROP TABLE IF EXISTS data_from_buffer') - n.query('DROP TABLE IF EXISTS dist') - n.query('CREATE TABLE data (key Int) Engine=Memory()') - n.query('CREATE TABLE data_from_buffer (key Int) Engine=Memory()') - n.query(""" + n.query("DROP TABLE IF EXISTS data") + n.query("DROP TABLE IF EXISTS data_from_buffer") + n.query("DROP TABLE IF EXISTS dist") + n.query("CREATE TABLE data (key Int) Engine=Memory()") + n.query("CREATE TABLE data_from_buffer (key Int) Engine=Memory()") + n.query( + """ CREATE TABLE dist_insecure AS data Engine=Distributed(insecure, currentDatabase(), data, key) - """) - n.query(""" + """ + ) + n.query( + """ CREATE TABLE dist_secure AS data Engine=Distributed(secure, currentDatabase(), data, key) - """) - n.query(""" + """ + ) + n.query( + """ CREATE TABLE dist_secure_from_buffer AS data_from_buffer Engine=Distributed(secure, currentDatabase(), data_from_buffer, key) - """) - n.query(""" + """ + ) + n.query( + """ CREATE TABLE dist_secure_disagree AS data Engine=Distributed(secure_disagree, currentDatabase(), data, key) - """) - n.query(""" + """ + ) + n.query( + """ CREATE TABLE dist_secure_buffer AS dist_secure_from_buffer Engine=Buffer(currentDatabase(), dist_secure_from_buffer, /* settings for manual flush only */ @@ -61,9 +79,11 @@ def bootstrap(): 0, /* min_bytes */ 0 /* max_bytes */ ) - """) + """ + ) -@pytest.fixture(scope='module', autouse=True) + +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -72,36 +92,57 @@ def start_cluster(): finally: cluster.shutdown() + def query_with_id(node, id_, query, **kwargs): return node.query("WITH '{}' AS __id {}".format(id_, query), **kwargs) + # @return -- [user, initial_user] def get_query_user_info(node, query_pattern): node.query("SYSTEM FLUSH LOGS") - return node.query(""" + return ( + node.query( + """ SELECT user, initial_user FROM system.query_log WHERE query LIKE '%{}%' AND query NOT LIKE '%system.query_log%' AND type = 'QueryFinish' - """.format(query_pattern)).strip().split('\t') + """.format( + query_pattern + ) + ) + .strip() + .split("\t") + ) + # @return -- [user, initial_user] def get_query_user_info_by_id(node, query_id): node.query("SYSTEM FLUSH LOGS") - return node.query(""" + return ( + node.query( + """ SELECT user, initial_user FROM system.query_log WHERE query_id = '{}' AND type = 'QueryFinish' - """.format(query_id)).strip().split('\t') + """.format( + query_id + ) + ) + .strip() + .split("\t") + ) + # @return -- settings def get_query_setting_on_shard(node, query_pattern, setting): node.query("SYSTEM FLUSH LOGS") - return node.query(""" + return node.query( + """ SELECT Settings['{}'] FROM system.query_log WHERE @@ -110,39 +151,55 @@ def get_query_setting_on_shard(node, query_pattern, setting): query NOT LIKE '%system.query_log%' AND type = 'QueryFinish' LIMIT 1 - """.format(setting, query_pattern)).strip() + 
""".format( + setting, query_pattern + ) + ).strip() + def test_insecure(): - n1.query('SELECT * FROM dist_insecure') + n1.query("SELECT * FROM dist_insecure") + def test_insecure_insert_async(): n1.query("TRUNCATE TABLE data") - n1.query('INSERT INTO dist_insecure SELECT * FROM numbers(2)') - n1.query('SYSTEM FLUSH DISTRIBUTED ON CLUSTER insecure dist_insecure') - assert int(n1.query('SELECT count() FROM dist_insecure')) == 2 - n1.query('TRUNCATE TABLE data ON CLUSTER insecure') + n1.query("INSERT INTO dist_insecure SELECT * FROM numbers(2)") + n1.query("SYSTEM FLUSH DISTRIBUTED ON CLUSTER insecure dist_insecure") + assert int(n1.query("SELECT count() FROM dist_insecure")) == 2 + n1.query("TRUNCATE TABLE data ON CLUSTER insecure") + def test_insecure_insert_sync(): n1.query("TRUNCATE TABLE data") - n1.query('INSERT INTO dist_insecure SELECT * FROM numbers(2)', settings={'insert_distributed_sync': 1}) - assert int(n1.query('SELECT count() FROM dist_insecure')) == 2 - n1.query('TRUNCATE TABLE data ON CLUSTER secure') + n1.query( + "INSERT INTO dist_insecure SELECT * FROM numbers(2)", + settings={"insert_distributed_sync": 1}, + ) + assert int(n1.query("SELECT count() FROM dist_insecure")) == 2 + n1.query("TRUNCATE TABLE data ON CLUSTER secure") + def test_secure(): - n1.query('SELECT * FROM dist_secure') + n1.query("SELECT * FROM dist_secure") + def test_secure_insert_async(): n1.query("TRUNCATE TABLE data") - n1.query('INSERT INTO dist_secure SELECT * FROM numbers(2)') - n1.query('SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure dist_secure') - assert int(n1.query('SELECT count() FROM dist_secure')) == 2 - n1.query('TRUNCATE TABLE data ON CLUSTER secure') + n1.query("INSERT INTO dist_secure SELECT * FROM numbers(2)") + n1.query("SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure dist_secure") + assert int(n1.query("SELECT count() FROM dist_secure")) == 2 + n1.query("TRUNCATE TABLE data ON CLUSTER secure") + def test_secure_insert_sync(): n1.query("TRUNCATE TABLE data") - n1.query('INSERT INTO dist_secure SELECT * FROM numbers(2)', settings={'insert_distributed_sync': 1}) - assert int(n1.query('SELECT count() FROM dist_secure')) == 2 - n1.query('TRUNCATE TABLE data ON CLUSTER secure') + n1.query( + "INSERT INTO dist_secure SELECT * FROM numbers(2)", + settings={"insert_distributed_sync": 1}, + ) + assert int(n1.query("SELECT count() FROM dist_secure")) == 2 + n1.query("TRUNCATE TABLE data ON CLUSTER secure") + # INSERT w/o initial_user # @@ -180,28 +237,40 @@ def test_secure_insert_sync(): def test_secure_insert_buffer_async(): # Change cluster definition so that the SELECT will always creates new connection priority = int(time.time()) - n1.exec_in_container(['bash', '-c', f'sed -i "s#.*#{priority}#" /etc/clickhouse-server/config.d/remote_servers.xml']) - n1.query('SYSTEM RELOAD CONFIG') + n1.exec_in_container( + [ + "bash", + "-c", + f'sed -i "s#.*#{priority}#" /etc/clickhouse-server/config.d/remote_servers.xml', + ] + ) + n1.query("SYSTEM RELOAD CONFIG") # ensure that SELECT creates new connection (we need separate table for # this, so that separate distributed pool will be used) query_id = uuid.uuid4().hex - n1.query('SELECT * FROM dist_secure_from_buffer', user='ro', query_id=query_id) - assert n1.contains_in_log('{' + query_id + '} Connection (n2:9000): Connecting.') + n1.query("SELECT * FROM dist_secure_from_buffer", user="ro", query_id=query_id) + assert n1.contains_in_log( + "{" + query_id + "} Connection (n2:9000): Connecting." 
+ ) query_id = uuid.uuid4().hex - n1.query('INSERT INTO dist_secure_buffer SELECT * FROM numbers(2)', query_id=query_id) + n1.query( + "INSERT INTO dist_secure_buffer SELECT * FROM numbers(2)", query_id=query_id + ) # ensure that INSERT does not creates new connection, so that it will use # previous connection that was instantiated with "ro" user (using # interserver secret) - assert not n1.contains_in_log('{' + query_id + '} Connection (n2:9000): Connecting.') - assert get_query_user_info_by_id(n1, query_id) == ['default', 'default'] + assert not n1.contains_in_log( + "{" + query_id + "} Connection (n2:9000): Connecting." + ) + assert get_query_user_info_by_id(n1, query_id) == ["default", "default"] # And before the bug was fixed this query will fail with the following error: # # Code: 164. DB::Exception: Received from 172.16.2.5:9000. DB::Exception: There was an error on [n1:9000]: Code: 164. DB::Exception: Received from n2:9000. DB::Exception: ro: Cannot execute query in readonly mode. (READONLY) - n1.query('SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure dist_secure_from_buffer') - n1.query('OPTIMIZE TABLE dist_secure_buffer') - n1.query('SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure dist_secure_from_buffer') + n1.query("SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure dist_secure_from_buffer") + n1.query("OPTIMIZE TABLE dist_secure_buffer") + n1.query("SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure dist_secure_from_buffer") # Check user from which the INSERT on the remote node will be executed # @@ -213,76 +282,124 @@ def test_secure_insert_buffer_async(): # # {2c55669f-71ad-48fe-98fa-7b475b80718e} executeQuery: (from 0.0.0.0:0, user: ) INSERT INTO default.data_from_buffer (key) VALUES # - assert n2.contains_in_log('executeQuery: (from 0.0.0.0:0, user: ) INSERT INTO default.data_from_buffer (key) VALUES') + assert n2.contains_in_log( + "executeQuery: (from 0.0.0.0:0, user: ) INSERT INTO default.data_from_buffer (key) VALUES" + ) + + assert int(n1.query("SELECT count() FROM dist_secure_from_buffer")) == 2 + n1.query("TRUNCATE TABLE data_from_buffer ON CLUSTER secure") - assert int(n1.query('SELECT count() FROM dist_secure_from_buffer')) == 2 - n1.query('TRUNCATE TABLE data_from_buffer ON CLUSTER secure') def test_secure_disagree(): - with pytest.raises(QueryRuntimeException, match='.*Hash mismatch.*'): - n1.query('SELECT * FROM dist_secure_disagree') + with pytest.raises(QueryRuntimeException, match=".*Hash mismatch.*"): + n1.query("SELECT * FROM dist_secure_disagree") + def test_secure_disagree_insert(): n1.query("TRUNCATE TABLE data") - n1.query('INSERT INTO dist_secure_disagree SELECT * FROM numbers(2)') - with pytest.raises(QueryRuntimeException, match='.*Hash mismatch.*'): - n1.query('SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure_disagree dist_secure_disagree') + n1.query("INSERT INTO dist_secure_disagree SELECT * FROM numbers(2)") + with pytest.raises(QueryRuntimeException, match=".*Hash mismatch.*"): + n1.query( + "SYSTEM FLUSH DISTRIBUTED ON CLUSTER secure_disagree dist_secure_disagree" + ) # check the the connection will be re-established # IOW that we will not get "Unknown BlockInfo field" - with pytest.raises(QueryRuntimeException, match='.*Hash mismatch.*'): - assert int(n1.query('SELECT count() FROM dist_secure_disagree')) == 0 + with pytest.raises(QueryRuntimeException, match=".*Hash mismatch.*"): + assert int(n1.query("SELECT count() FROM dist_secure_disagree")) == 0 + @users def test_user_insecure_cluster(user, password): - id_ = 'query-dist_insecure-' + user - query_with_id(n1, id_, 
'SELECT * FROM dist_insecure', user=user, password=password) - assert get_query_user_info(n1, id_) == [user, user] # due to prefer_localhost_replica - assert get_query_user_info(n2, id_) == ['default', user] + id_ = "query-dist_insecure-" + user + query_with_id(n1, id_, "SELECT * FROM dist_insecure", user=user, password=password) + assert get_query_user_info(n1, id_) == [ + user, + user, + ] # due to prefer_localhost_replica + assert get_query_user_info(n2, id_) == ["default", user] + @users def test_user_secure_cluster(user, password): - id_ = 'query-dist_secure-' + user - query_with_id(n1, id_, 'SELECT * FROM dist_secure', user=user, password=password) + id_ = "query-dist_secure-" + user + query_with_id(n1, id_, "SELECT * FROM dist_secure", user=user, password=password) assert get_query_user_info(n1, id_) == [user, user] assert get_query_user_info(n2, id_) == [user, user] + @users def test_per_user_inline_settings_insecure_cluster(user, password): - id_ = 'query-ddl-settings-dist_insecure-' + user - query_with_id(n1, id_, """ + id_ = "query-ddl-settings-dist_insecure-" + user + query_with_id( + n1, + id_, + """ SELECT * FROM dist_insecure SETTINGS prefer_localhost_replica=0, max_memory_usage_for_user=1e9, max_untracked_memory=0 - """, user=user, password=password) - assert get_query_setting_on_shard(n1, id_, 'max_memory_usage_for_user') == '' + """, + user=user, + password=password, + ) + assert get_query_setting_on_shard(n1, id_, "max_memory_usage_for_user") == "" + + @users def test_per_user_inline_settings_secure_cluster(user, password): - id_ = 'query-ddl-settings-dist_secure-' + user - query_with_id(n1, id_, """ + id_ = "query-ddl-settings-dist_secure-" + user + query_with_id( + n1, + id_, + """ SELECT * FROM dist_secure SETTINGS prefer_localhost_replica=0, max_memory_usage_for_user=1e9, max_untracked_memory=0 - """, user=user, password=password) - assert int(get_query_setting_on_shard(n1, id_, 'max_memory_usage_for_user')) == int(1e9) + """, + user=user, + password=password, + ) + assert int(get_query_setting_on_shard(n1, id_, "max_memory_usage_for_user")) == int( + 1e9 + ) + + @users def test_per_user_protocol_settings_insecure_cluster(user, password): - id_ = 'query-protocol-settings-dist_insecure-' + user - query_with_id(n1, id_, 'SELECT * FROM dist_insecure', user=user, password=password, settings={ - 'prefer_localhost_replica': 0, - 'max_memory_usage_for_user': int(1e9), - 'max_untracked_memory': 0, - }) - assert get_query_setting_on_shard(n1, id_, 'max_memory_usage_for_user') == '' + id_ = "query-protocol-settings-dist_insecure-" + user + query_with_id( + n1, + id_, + "SELECT * FROM dist_insecure", + user=user, + password=password, + settings={ + "prefer_localhost_replica": 0, + "max_memory_usage_for_user": int(1e9), + "max_untracked_memory": 0, + }, + ) + assert get_query_setting_on_shard(n1, id_, "max_memory_usage_for_user") == "" + + @users def test_per_user_protocol_settings_secure_cluster(user, password): - id_ = 'query-protocol-settings-dist_secure-' + user - query_with_id(n1, id_, 'SELECT * FROM dist_secure', user=user, password=password, settings={ - 'prefer_localhost_replica': 0, - 'max_memory_usage_for_user': int(1e9), - 'max_untracked_memory': 0, - }) - assert int(get_query_setting_on_shard(n1, id_, 'max_memory_usage_for_user')) == int(1e9) + id_ = "query-protocol-settings-dist_secure-" + user + query_with_id( + n1, + id_, + "SELECT * FROM dist_secure", + user=user, + password=password, + settings={ + "prefer_localhost_replica": 0, + "max_memory_usage_for_user": 
int(1e9), + "max_untracked_memory": 0, + }, + ) + assert int(get_query_setting_on_shard(n1, id_, "max_memory_usage_for_user")) == int( + 1e9 + ) diff --git a/tests/integration/test_distributed_load_balancing/test.py b/tests/integration/test_distributed_load_balancing/test.py index 8a1c282eff2..90771c027dc 100644 --- a/tests/integration/test_distributed_load_balancing/test.py +++ b/tests/integration/test_distributed_load_balancing/test.py @@ -9,9 +9,9 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -n1 = cluster.add_instance('n1', main_configs=['configs/remote_servers.xml']) -n2 = cluster.add_instance('n2', main_configs=['configs/remote_servers.xml']) -n3 = cluster.add_instance('n3', main_configs=['configs/remote_servers.xml']) +n1 = cluster.add_instance("n1", main_configs=["configs/remote_servers.xml"]) +n2 = cluster.add_instance("n2", main_configs=["configs/remote_servers.xml"]) +n3 = cluster.add_instance("n3", main_configs=["configs/remote_servers.xml"]) nodes = len(cluster.instances) queries = nodes * 10 @@ -33,38 +33,44 @@ def bootstrap(): # And if the reload will happen during round_robin test it will start # querying from the beginning, so let's issue config reload just after # start to avoid reload in the middle of the test execution. - n.query('SYSTEM RELOAD CONFIG') - n.query('DROP TABLE IF EXISTS data') - n.query('DROP TABLE IF EXISTS dist') - n.query('CREATE TABLE data (key Int) Engine=Memory()') - n.query(""" + n.query("SYSTEM RELOAD CONFIG") + n.query("DROP TABLE IF EXISTS data") + n.query("DROP TABLE IF EXISTS dist") + n.query("CREATE TABLE data (key Int) Engine=Memory()") + n.query( + """ CREATE TABLE dist AS data Engine=Distributed( replicas_cluster, currentDatabase(), data) - """) - n.query(""" + """ + ) + n.query( + """ CREATE TABLE dist_priority AS data Engine=Distributed( replicas_priority_cluster, currentDatabase(), data) - """) - n.query(""" + """ + ) + n.query( + """ CREATE TABLE dist_priority_negative AS data Engine=Distributed( replicas_priority_negative_cluster, currentDatabase(), data) - """) + """ + ) def make_uuid(): return uuid.uuid4().hex -@pytest.fixture(scope='module', autouse=True) +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -74,26 +80,27 @@ def start_cluster(): cluster.shutdown() -def get_node(query_node, table='dist', *args, **kwargs): +def get_node(query_node, table="dist", *args, **kwargs): query_id = make_uuid() settings = { - 'query_id': query_id, - 'log_queries': 1, - 'log_queries_min_type': 'QUERY_START', - 'prefer_localhost_replica': 0, + "query_id": query_id, + "log_queries": 1, + "log_queries_min_type": "QUERY_START", + "prefer_localhost_replica": 0, } - if 'settings' not in kwargs: - kwargs['settings'] = settings + if "settings" not in kwargs: + kwargs["settings"] = settings else: - kwargs['settings'].update(settings) + kwargs["settings"].update(settings) - query_node.query('SELECT * FROM ' + table, *args, **kwargs) + query_node.query("SELECT * FROM " + table, *args, **kwargs) for n in list(cluster.instances.values()): - n.query('SYSTEM FLUSH LOGS') + n.query("SYSTEM FLUSH LOGS") - rows = query_node.query(""" + rows = query_node.query( + """ SELECT c.host_name FROM ( SELECT _shard_num @@ -107,7 +114,10 @@ def get_node(query_node, table='dist', *args, **kwargs): ) a JOIN system.clusters c ON a._shard_num = c.shard_num WHERE cluster = 'shards_cluster' - """.format(query_id=query_id)) + """.format( + query_id=query_id + ) + ) return rows.strip() @@ -115,88 
+125,100 @@ def get_node(query_node, table='dist', *args, **kwargs): def test_load_balancing_default(): unique_nodes = set() for _ in range(0, queries): - unique_nodes.add(get_node(n1, settings={'load_balancing': 'random'})) + unique_nodes.add(get_node(n1, settings={"load_balancing": "random"})) assert len(unique_nodes) == nodes, unique_nodes def test_load_balancing_nearest_hostname(): unique_nodes = set() for _ in range(0, queries): - unique_nodes.add(get_node(n1, settings={'load_balancing': 'nearest_hostname'})) + unique_nodes.add(get_node(n1, settings={"load_balancing": "nearest_hostname"})) assert len(unique_nodes) == 1, unique_nodes - assert unique_nodes == set(['n1']) + assert unique_nodes == set(["n1"]) def test_load_balancing_in_order(): unique_nodes = set() for _ in range(0, queries): - unique_nodes.add(get_node(n1, settings={'load_balancing': 'in_order'})) + unique_nodes.add(get_node(n1, settings={"load_balancing": "in_order"})) assert len(unique_nodes) == 1, unique_nodes - assert unique_nodes == set(['n1']) + assert unique_nodes == set(["n1"]) def test_load_balancing_first_or_random(): unique_nodes = set() for _ in range(0, queries): - unique_nodes.add(get_node(n1, settings={'load_balancing': 'first_or_random'})) + unique_nodes.add(get_node(n1, settings={"load_balancing": "first_or_random"})) assert len(unique_nodes) == 1, unique_nodes - assert unique_nodes == set(['n1']) + assert unique_nodes == set(["n1"]) def test_load_balancing_round_robin(): unique_nodes = set() for _ in range(0, nodes): - unique_nodes.add(get_node(n1, settings={'load_balancing': 'round_robin'})) + unique_nodes.add(get_node(n1, settings={"load_balancing": "round_robin"})) assert len(unique_nodes) == nodes, unique_nodes - assert unique_nodes == set(['n1', 'n2', 'n3']) + assert unique_nodes == set(["n1", "n2", "n3"]) -@pytest.mark.parametrize('dist_table', [ - ('dist_priority'), - ('dist_priority_negative'), -]) +@pytest.mark.parametrize( + "dist_table", + [ + ("dist_priority"), + ("dist_priority_negative"), + ], +) def test_load_balancing_priority_round_robin(dist_table): unique_nodes = set() for _ in range(0, nodes): - unique_nodes.add(get_node(n1, dist_table, settings={'load_balancing': 'round_robin'})) + unique_nodes.add( + get_node(n1, dist_table, settings={"load_balancing": "round_robin"}) + ) assert len(unique_nodes) == 2, unique_nodes # n2 has bigger priority in config - assert unique_nodes == set(['n1', 'n3']) + assert unique_nodes == set(["n1", "n3"]) def test_distributed_replica_max_ignored_errors(): settings = { - 'use_hedged_requests' : 0, - 'load_balancing': 'in_order', - 'prefer_localhost_replica': 0, - 'connect_timeout': 2, - 'receive_timeout': 2, - 'send_timeout': 2, - 'idle_connection_timeout': 2, - 'tcp_keep_alive_timeout': 2, - - 'distributed_replica_max_ignored_errors': 0, - 'distributed_replica_error_half_life': 60, + "use_hedged_requests": 0, + "load_balancing": "in_order", + "prefer_localhost_replica": 0, + "connect_timeout": 2, + "receive_timeout": 2, + "send_timeout": 2, + "idle_connection_timeout": 2, + "tcp_keep_alive_timeout": 2, + "distributed_replica_max_ignored_errors": 0, + "distributed_replica_error_half_life": 60, } # initiate connection (if started only this test) - n2.query('SELECT * FROM dist', settings=settings) - cluster.pause_container('n1') + n2.query("SELECT * FROM dist", settings=settings) + cluster.pause_container("n1") # n1 paused -- skipping, and increment error_count for n1 # but the query succeeds, no need in query_and_get_error() - n2.query('SELECT * FROM 
dist', settings=settings) + n2.query("SELECT * FROM dist", settings=settings) # XXX: due to config reloading we need second time (sigh) - n2.query('SELECT * FROM dist', settings=settings) + n2.query("SELECT * FROM dist", settings=settings) # check error_count for n1 - assert int(n2.query(""" + assert ( + int( + n2.query( + """ SELECT errors_count FROM system.clusters WHERE cluster = 'replicas_cluster' AND host_name = 'n1' - """, settings=settings)) == 1 + """, + settings=settings, + ) + ) + == 1 + ) - cluster.unpause_container('n1') + cluster.unpause_container("n1") # still n2 - assert get_node(n2, settings=settings) == 'n2' + assert get_node(n2, settings=settings) == "n2" # now n1 - settings['distributed_replica_max_ignored_errors'] = 1 - assert get_node(n2, settings=settings) == 'n1' + settings["distributed_replica_max_ignored_errors"] = 1 + assert get_node(n2, settings=settings) == "n1" diff --git a/tests/integration/test_distributed_over_distributed/test.py b/tests/integration/test_distributed_over_distributed/test.py index ae86a70f31b..c000005e55a 100644 --- a/tests/integration/test_distributed_over_distributed/test.py +++ b/tests/integration/test_distributed_over_distributed/test.py @@ -7,13 +7,17 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -NODES = {'node' + str(i): cluster.add_instance( - 'node' + str(i), - main_configs=['configs/remote_servers.xml'], - user_configs=['configs/set_distributed_defaults.xml'], -) for i in (1, 2)} +NODES = { + "node" + + str(i): cluster.add_instance( + "node" + str(i), + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/set_distributed_defaults.xml"], + ) + for i in (1, 2) +} -CREATE_TABLES_SQL = ''' +CREATE_TABLES_SQL = """ CREATE TABLE base_table( node String, @@ -31,7 +35,7 @@ CREATE TABLE distributed_over_distributed_table AS distributed_table ENGINE = Distributed('test_cluster', default, distributed_table); -''' +""" INSERT_SQL_TEMPLATE = "INSERT INTO base_table VALUES ('{node_id}', {key}, {value})" @@ -43,24 +47,45 @@ def started_cluster(): for node_index, (node_name, node) in enumerate(NODES.items()): node.query(CREATE_TABLES_SQL) for i in range(0, 2): - node.query(INSERT_SQL_TEMPLATE.format(node_id=node_name, key=i, value=i + (node_index * 10))) + node.query( + INSERT_SQL_TEMPLATE.format( + node_id=node_name, key=i, value=i + (node_index * 10) + ) + ) yield cluster finally: cluster.shutdown() -@pytest.mark.parametrize("node,source", [ - pytest.param(NODES["node1"], "distributed_over_distributed_table", id="dod_node1"), - pytest.param(NODES["node1"], "cluster('test_cluster', default, distributed_table)", id="cluster_node1"), - pytest.param(NODES["node2"], "distributed_over_distributed_table", id="dod_node2"), - pytest.param(NODES["node2"], "cluster('test_cluster', default, distributed_table)", id="cluster_node2"), -] +@pytest.mark.parametrize( + "node,source", + [ + pytest.param( + NODES["node1"], "distributed_over_distributed_table", id="dod_node1" + ), + pytest.param( + NODES["node1"], + "cluster('test_cluster', default, distributed_table)", + id="cluster_node1", + ), + pytest.param( + NODES["node2"], "distributed_over_distributed_table", id="dod_node2" + ), + pytest.param( + NODES["node2"], + "cluster('test_cluster', default, distributed_table)", + id="cluster_node2", + ), + ], ) class TestDistributedOverDistributedSuite: def test_select_with_order_by_node(self, started_cluster, node, source): - assert node.query("SELECT * FROM {source} ORDER BY node, key".format(source=source)) \ 
- == """node1 0 0 + assert ( + node.query( + "SELECT * FROM {source} ORDER BY node, key".format(source=source) + ) + == """node1 0 0 node1 0 0 node1 1 1 node1 1 1 @@ -69,10 +94,14 @@ node2 0 10 node2 1 11 node2 1 11 """ + ) def test_select_with_order_by_key(self, started_cluster, node, source): - assert node.query("SELECT * FROM {source} ORDER BY key, node".format(source=source)) \ - == """node1 0 0 + assert ( + node.query( + "SELECT * FROM {source} ORDER BY key, node".format(source=source) + ) + == """node1 0 0 node1 0 0 node2 0 10 node2 0 10 @@ -81,15 +110,30 @@ node1 1 1 node2 1 11 node2 1 11 """ + ) def test_select_with_group_by_node(self, started_cluster, node, source): - assert node.query("SELECT node, SUM(value) FROM {source} GROUP BY node ORDER BY node".format(source=source)) \ - == "node1 2\nnode2 42\n" + assert ( + node.query( + "SELECT node, SUM(value) FROM {source} GROUP BY node ORDER BY node".format( + source=source + ) + ) + == "node1 2\nnode2 42\n" + ) def test_select_with_group_by_key(self, started_cluster, node, source): - assert node.query("SELECT key, SUM(value) FROM {source} GROUP BY key ORDER BY key".format(source=source)) \ - == "0 20\n1 24\n" + assert ( + node.query( + "SELECT key, SUM(value) FROM {source} GROUP BY key ORDER BY key".format( + source=source + ) + ) + == "0 20\n1 24\n" + ) def test_select_sum(self, started_cluster, node, source): - assert node.query("SELECT SUM(value) FROM {source}".format(source=source)) \ - == "44\n" + assert ( + node.query("SELECT SUM(value) FROM {source}".format(source=source)) + == "44\n" + ) diff --git a/tests/integration/test_distributed_queries_stress/test.py b/tests/integration/test_distributed_queries_stress/test.py index 45a1b714cc4..fce42b4e58b 100644 --- a/tests/integration/test_distributed_queries_stress/test.py +++ b/tests/integration/test_distributed_queries_stress/test.py @@ -8,35 +8,46 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1_r1 = cluster.add_instance('node1_r1', main_configs=['configs/remote_servers.xml']) -node2_r1 = cluster.add_instance('node2_r1', main_configs=['configs/remote_servers.xml']) -node1_r2 = cluster.add_instance('node1_r2', main_configs=['configs/remote_servers.xml']) -node2_r2 = cluster.add_instance('node2_r2', main_configs=['configs/remote_servers.xml']) +node1_r1 = cluster.add_instance("node1_r1", main_configs=["configs/remote_servers.xml"]) +node2_r1 = cluster.add_instance("node2_r1", main_configs=["configs/remote_servers.xml"]) +node1_r2 = cluster.add_instance("node1_r2", main_configs=["configs/remote_servers.xml"]) +node2_r2 = cluster.add_instance("node2_r2", main_configs=["configs/remote_servers.xml"]) + def run_benchmark(payload, settings): - node1_r1.exec_in_container([ - 'bash', '-c', 'echo {} | '.format(shlex.quote(payload.strip())) + ' '.join([ - 'clickhouse', 'benchmark', - '--concurrency=100', - '--cumulative', - '--delay=0', - # NOTE: with current matrix even 3 seconds it huge... - '--timelimit=3', - # tune some basic timeouts - '--hedged_connection_timeout_ms=200', - '--connect_timeout_with_failover_ms=200', - '--connections_with_failover_max_tries=5', - *settings, - ]) - ]) + node1_r1.exec_in_container( + [ + "bash", + "-c", + "echo {} | ".format(shlex.quote(payload.strip())) + + " ".join( + [ + "clickhouse", + "benchmark", + "--concurrency=100", + "--cumulative", + "--delay=0", + # NOTE: with current matrix even 3 seconds it huge... 
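                        # (Editor's aside, hypothetical illustration, not part of the patch.)
                        # Every entry in this list is passed as a plain `clickhouse benchmark`
                        # command-line switch: run_benchmark() shlex.quote()s the SQL payload,
                        # prepends `echo {payload} | `, joins the list with spaces and hands the
                        # resulting string to `bash -c` inside the container, so the entries
                        # below (--timelimit, the failover timeouts, *settings) end up appended
                        # to that same pipeline. The NOTE above refers to the --timelimit=3
                        # switch that immediately follows.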
+ "--timelimit=3", + # tune some basic timeouts + "--hedged_connection_timeout_ms=200", + "--connect_timeout_with_failover_ms=200", + "--connections_with_failover_max_tries=5", + *settings, + ] + ), + ] + ) -@pytest.fixture(scope='module') + +@pytest.fixture(scope="module") def started_cluster(): try: cluster.start() for _, instance in cluster.instances.items(): - instance.query(""" + instance.query( + """ create table if not exists data ( key Int, /* just to increase block size */ @@ -56,46 +67,55 @@ def started_cluster(): insert into data (key) select * from numbers(10); create table if not exists dist_one as data engine=Distributed(one_shard, currentDatabase(), data, key); - create table if not exists dist_one_over_dist as data engine=Distributed(one_shard, currentDatabase(), dist_one, yandexConsistentHash(key, 2)); + create table if not exists dist_one_over_dist as data engine=Distributed(one_shard, currentDatabase(), dist_one, kostikConsistentHash(key, 2)); create table if not exists dist_two as data engine=Distributed(two_shards, currentDatabase(), data, key); - create table if not exists dist_two_over_dist as data engine=Distributed(two_shards, currentDatabase(), dist_two, yandexConsistentHash(key, 2)); - """) + create table if not exists dist_two_over_dist as data engine=Distributed(two_shards, currentDatabase(), dist_two, kostikConsistentHash(key, 2)); + """ + ) yield cluster finally: cluster.shutdown() -@pytest.mark.parametrize('table,settings', itertools.product( - [ # tables - 'dist_one', - 'dist_one_over_dist', - 'dist_two', - 'dist_two_over_dist', - ], - [ # settings - *list(itertools.combinations([ - '', # defaults - '--prefer_localhost_replica=0', - '--async_socket_for_remote=0', - '--use_hedged_requests=0', - '--optimize_skip_unused_shards=1', - '--distributed_group_by_no_merge=2', - '--optimize_distributed_group_by_sharding_key=1', - # TODO: enlarge test matrix (but first those values to accept ms): - # - # - sleep_in_send_tables_status - # - sleep_in_send_data - ], 2)) - # TODO: more combinations that just 2 - ], -)) +@pytest.mark.parametrize( + "table,settings", + itertools.product( + [ # tables + "dist_one", + "dist_one_over_dist", + "dist_two", + "dist_two_over_dist", + ], + [ # settings + *list( + itertools.combinations( + [ + "", # defaults + "--prefer_localhost_replica=0", + "--async_socket_for_remote=0", + "--use_hedged_requests=0", + "--optimize_skip_unused_shards=1", + "--distributed_group_by_no_merge=2", + "--optimize_distributed_group_by_sharding_key=1", + # TODO: enlarge test matrix (but first those values to accept ms): + # + # - sleep_in_send_tables_status + # - sleep_in_send_data + ], + 2, + ) + ) + # TODO: more combinations that just 2 + ], + ), +) def test_stress_distributed(table, settings, started_cluster): - payload = f''' + payload = f""" select * from {table} where key = 0; select * from {table} where key = 1; select * from {table} where key = 2; select * from {table} where key = 3; select * from {table}; - ''' + """ run_benchmark(payload, settings) diff --git a/tests/integration/test_distributed_respect_user_timeouts/test.py b/tests/integration/test_distributed_respect_user_timeouts/test.py index a774a01e3c2..9cf7082d63a 100644 --- a/tests/integration/test_distributed_respect_user_timeouts/test.py +++ b/tests/integration/test_distributed_respect_user_timeouts/test.py @@ -10,11 +10,11 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -NODES = {'node' + str(i): None for i in (1, 2)} +NODES = {"node" + str(i): None for i in 
(1, 2)} IS_DEBUG = False -CREATE_TABLES_SQL = ''' +CREATE_TABLES_SQL = """ CREATE DATABASE test; CREATE TABLE base_table( @@ -26,68 +26,70 @@ ORDER BY node; CREATE TABLE distributed_table ENGINE = Distributed(test_cluster, default, base_table) AS base_table; -''' +""" INSERT_SQL_TEMPLATE = "INSERT INTO base_table VALUES ('{node_id}')" SELECTS_SQL = { - 'distributed': 'SELECT node FROM distributed_table ORDER BY node', - 'remote': ("SELECT node FROM remote('node1,node2', default.base_table) " - "ORDER BY node"), + "distributed": "SELECT node FROM distributed_table ORDER BY node", + "remote": ( + "SELECT node FROM remote('node1,node2', default.base_table) " "ORDER BY node" + ), } -EXCEPTION_NETWORK = 'DB::NetException: ' -EXCEPTION_TIMEOUT = 'Timeout exceeded while reading from socket (' -EXCEPTION_CONNECT = 'Timeout: connect timed out: ' +EXCEPTION_NETWORK = "DB::NetException: " +EXCEPTION_TIMEOUT = "Timeout exceeded while reading from socket (" +EXCEPTION_CONNECT = "Timeout: connect timed out: " TIMEOUT_MEASUREMENT_EPS = 0.01 EXPECTED_BEHAVIOR = { - 'default': { - 'times': 3, - 'timeout': 1, + "default": { + "times": 3, + "timeout": 1, }, - 'ready_to_wait': { - 'times': 5, - 'timeout': 3, + "ready_to_wait": { + "times": 5, + "timeout": 3, }, } TIMEOUT_DIFF_UPPER_BOUND = { - 'default': { - 'distributed': 5.5, - 'remote': 2.5, + "default": { + "distributed": 5.5, + "remote": 2.5, }, - 'ready_to_wait': { - 'distributed': 3, - 'remote': 2.0, + "ready_to_wait": { + "distributed": 3, + "remote": 2.0, }, } def _check_exception(exception, expected_tries=3): - lines = exception.split('\n') + lines = exception.split("\n") assert len(lines) > 4, "Unexpected exception (expected: timeout info)" - assert lines[0].startswith('Received exception from server (version') + assert lines[0].startswith("Received exception from server (version") - assert lines[1].startswith('Code: 279') - assert lines[1].endswith('All connection tries failed. Log: ') + assert lines[1].startswith("Code: 279") + assert lines[1].endswith("All connection tries failed. Log: ") - assert lines[2] == '', "Unexpected exception text (expected: empty line)" + assert lines[2] == "", "Unexpected exception text (expected: empty line)" - for i, line in enumerate(lines[3:3 + expected_tries]): + for i, line in enumerate(lines[3 : 3 + expected_tries]): expected_lines = ( - 'Code: 209. ' + EXCEPTION_NETWORK + EXCEPTION_TIMEOUT, - 'Code: 209. ' + EXCEPTION_NETWORK + EXCEPTION_CONNECT, + "Code: 209. " + EXCEPTION_NETWORK + EXCEPTION_TIMEOUT, + "Code: 209. 
" + EXCEPTION_NETWORK + EXCEPTION_CONNECT, EXCEPTION_TIMEOUT, ) - assert any(line.startswith(expected) for expected in expected_lines), \ - 'Unexpected exception "{}" at one of the connection attempts'.format(line) + assert any( + line.startswith(expected) for expected in expected_lines + ), 'Unexpected exception "{}" at one of the connection attempts'.format(line) - assert lines[3 + expected_tries] == '', 'Wrong number of connect attempts' + assert lines[3 + expected_tries] == "", "Wrong number of connect attempts" @pytest.fixture(scope="module", params=["configs", "configs_secure"]) @@ -103,14 +105,18 @@ def started_cluster(request): main_configs += [os.path.join(request.param, "config.d/ssl_conf.xml")] user_configs = [os.path.join(request.param, "users.d/set_distributed_defaults.xml")] for name in NODES: - NODES[name] = cluster.add_instance(name, main_configs=main_configs, user_configs=user_configs) + NODES[name] = cluster.add_instance( + name, main_configs=main_configs, user_configs=user_configs + ) try: cluster.start() if cluster.instances["node1"].is_debug_build(): global IS_DEBUG IS_DEBUG = True - logging.warning("Debug build is too slow to show difference in timings. We disable checks.") + logging.warning( + "Debug build is too slow to show difference in timings. We disable checks." + ) for node_id, node in list(NODES.items()): node.query(CREATE_TABLES_SQL) @@ -123,17 +129,17 @@ def started_cluster(request): def _check_timeout_and_exception(node, user, query_base, query): - repeats = EXPECTED_BEHAVIOR[user]['times'] + repeats = EXPECTED_BEHAVIOR[user]["times"] extra_repeats = 1 # Table function remote() are executed two times. # It tries to get table structure from remote shards. # On 'node2' it will firstly try to get structure from 'node1' (which is not available), # so there are 1 extra connection attempts for 'node2' and 'remote' - if node.name == 'node2' and query_base == 'remote': + if node.name == "node2" and query_base == "remote": extra_repeats = 2 - expected_timeout = EXPECTED_BEHAVIOR[user]['timeout'] * repeats * extra_repeats + expected_timeout = EXPECTED_BEHAVIOR[user]["timeout"] * repeats * extra_repeats start = timeit.default_timer() exception = node.query_and_get_error(query, user=user) @@ -143,25 +149,27 @@ def _check_timeout_and_exception(node, user, query_base, query): if not IS_DEBUG: assert expected_timeout - measured_timeout <= TIMEOUT_MEASUREMENT_EPS - assert measured_timeout - expected_timeout <= TIMEOUT_DIFF_UPPER_BOUND[user][query_base] + assert ( + measured_timeout - expected_timeout + <= TIMEOUT_DIFF_UPPER_BOUND[user][query_base] + ) # And exception should reflect connection attempts: _check_exception(exception, repeats) @pytest.mark.parametrize( - ('first_user', 'node_name', 'query_base'), + ("first_user", "node_name", "query_base"), tuple(itertools.product(EXPECTED_BEHAVIOR, NODES, SELECTS_SQL)), ) def test_reconnect(started_cluster, node_name, first_user, query_base): node = NODES[node_name] query = SELECTS_SQL[query_base] if started_cluster.__with_ssl_config: - query = query.replace('remote(', 'remoteSecure(') + query = query.replace("remote(", "remoteSecure(") # Everything is up, select should work: - assert TSV(node.query(query, - user=first_user)) == TSV('node1\nnode2') + assert TSV(node.query(query, user=first_user)) == TSV("node1\nnode2") with PartitionManager() as pm: # Break the connection. 
@@ -173,11 +181,10 @@ def test_reconnect(started_cluster, node_name, first_user, query_base): # Other user should have different timeout and exception _check_timeout_and_exception( node, - 'default' if first_user != 'default' else 'ready_to_wait', + "default" if first_user != "default" else "ready_to_wait", query_base, query, ) # select should work again: - assert TSV(node.query(query, - user=first_user)) == TSV('node1\nnode2') + assert TSV(node.query(query, user=first_user)) == TSV("node1\nnode2") diff --git a/tests/integration/test_distributed_storage_configuration/test.py b/tests/integration/test_distributed_storage_configuration/test.py index 94beb7b57ca..fa4e01bb7b3 100644 --- a/tests/integration/test_distributed_storage_configuration/test.py +++ b/tests/integration/test_distributed_storage_configuration/test.py @@ -8,34 +8,44 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', - main_configs=["configs/config.d/storage_configuration.xml"], - tmpfs=['/disk1:size=100M', '/disk2:size=100M']) +node = cluster.add_instance( + "node", + main_configs=["configs/config.d/storage_configuration.xml"], + tmpfs=["/disk1:size=100M", "/disk2:size=100M"], +) -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def start_cluster(): try: cluster.start() - node.query('CREATE DATABASE IF NOT EXISTS test ENGINE=Ordinary') # Different paths with Atomic + node.query( + "CREATE DATABASE IF NOT EXISTS test ENGINE=Ordinary" + ) # Different paths with Atomic yield cluster finally: cluster.shutdown() def _files_in_dist_mon(node, root, table): - return int(node.exec_in_container([ - 'bash', - '-c', - # `-maxdepth 1` to avoid /tmp/ subdirectory - 'find /{root}/data/test/{table}/default@127%2E0%2E0%2E2:9000 -maxdepth 1 -type f 2>/dev/null | wc -l'.format( - root=root, table=table) - ]).split('\n')[0]) + return int( + node.exec_in_container( + [ + "bash", + "-c", + # `-maxdepth 1` to avoid /tmp/ subdirectory + "find /{root}/data/test/{table}/default@127%2E0%2E0%2E2:9000 -maxdepth 1 -type f 2>/dev/null | wc -l".format( + root=root, table=table + ), + ] + ).split("\n")[0] + ) def test_insert(start_cluster): - node.query('CREATE TABLE test.foo (key Int) Engine=Memory()') - node.query(""" + node.query("CREATE TABLE test.foo (key Int) Engine=Memory()") + node.query( + """ CREATE TABLE test.dist_foo (key Int) Engine=Distributed( test_cluster_two_shards, @@ -44,41 +54,47 @@ def test_insert(start_cluster): key%2, 'default' ) - """) + """ + ) # manual only (but only for remote node) - node.query('SYSTEM STOP DISTRIBUTED SENDS test.dist_foo') + node.query("SYSTEM STOP DISTRIBUTED SENDS test.dist_foo") - node.query('INSERT INTO test.dist_foo SELECT * FROM numbers(100)', settings={ - 'use_compact_format_in_distributed_parts_names': '0', - }) - assert _files_in_dist_mon(node, 'disk1', 'dist_foo') == 1 - assert _files_in_dist_mon(node, 'disk2', 'dist_foo') == 0 + node.query( + "INSERT INTO test.dist_foo SELECT * FROM numbers(100)", + settings={ + "use_compact_format_in_distributed_parts_names": "0", + }, + ) + assert _files_in_dist_mon(node, "disk1", "dist_foo") == 1 + assert _files_in_dist_mon(node, "disk2", "dist_foo") == 0 - assert node.query('SELECT count() FROM test.dist_foo') == '100\n' - node.query('SYSTEM FLUSH DISTRIBUTED test.dist_foo') - assert node.query('SELECT count() FROM test.dist_foo') == '200\n' + assert node.query("SELECT count() FROM test.dist_foo") == "100\n" + node.query("SYSTEM FLUSH DISTRIBUTED test.dist_foo") + assert 
node.query("SELECT count() FROM test.dist_foo") == "200\n" # # RENAME # - node.query('RENAME TABLE test.dist_foo TO test.dist2_foo') + node.query("RENAME TABLE test.dist_foo TO test.dist2_foo") - node.query('INSERT INTO test.dist2_foo SELECT * FROM numbers(100)', settings={ - 'use_compact_format_in_distributed_parts_names': '0', - }) - assert _files_in_dist_mon(node, 'disk1', 'dist2_foo') == 0 - assert _files_in_dist_mon(node, 'disk2', 'dist2_foo') == 1 + node.query( + "INSERT INTO test.dist2_foo SELECT * FROM numbers(100)", + settings={ + "use_compact_format_in_distributed_parts_names": "0", + }, + ) + assert _files_in_dist_mon(node, "disk1", "dist2_foo") == 0 + assert _files_in_dist_mon(node, "disk2", "dist2_foo") == 1 - assert node.query('SELECT count() FROM test.dist2_foo') == '300\n' - node.query('SYSTEM FLUSH DISTRIBUTED test.dist2_foo') - assert node.query('SELECT count() FROM test.dist2_foo') == '400\n' + assert node.query("SELECT count() FROM test.dist2_foo") == "300\n" + node.query("SYSTEM FLUSH DISTRIBUTED test.dist2_foo") + assert node.query("SELECT count() FROM test.dist2_foo") == "400\n" # # DROP # - node.query('DROP TABLE test.dist2_foo') - for disk in ['disk1', 'disk2']: - node.exec_in_container([ - 'bash', '-c', - 'test ! -e /{}/data/test/dist2_foo'.format(disk) - ]) + node.query("DROP TABLE test.dist2_foo") + for disk in ["disk1", "disk2"]: + node.exec_in_container( + ["bash", "-c", "test ! -e /{}/data/test/dist2_foo".format(disk)] + ) diff --git a/tests/integration/test_distributed_system_query/test.py b/tests/integration/test_distributed_system_query/test.py index bf643fabf86..d221aa90dcb 100644 --- a/tests/integration/test_distributed_system_query/test.py +++ b/tests/integration/test_distributed_system_query/test.py @@ -4,8 +4,8 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml']) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml']) +node1 = cluster.add_instance("node1", main_configs=["configs/remote_servers.xml"]) +node2 = cluster.add_instance("node2", main_configs=["configs/remote_servers.xml"]) @pytest.fixture(scope="module") @@ -14,10 +14,13 @@ def started_cluster(): cluster.start() for node in (node1, node2): - node.query('''CREATE TABLE local_table(id UInt32, val String) ENGINE = MergeTree ORDER BY id;''') + node.query( + """CREATE TABLE local_table(id UInt32, val String) ENGINE = MergeTree ORDER BY id;""" + ) node1.query( - '''CREATE TABLE distributed_table(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table, id);''') + """CREATE TABLE distributed_table(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table, id);""" + ) yield cluster @@ -32,8 +35,8 @@ def test_start_and_stop_replica_send(started_cluster): node1.query("INSERT INTO distributed_table VALUES (1, 'node2')") # Write only to this node when stop distributed sends - assert node1.query("SELECT COUNT() FROM distributed_table").rstrip() == '1' + assert node1.query("SELECT COUNT() FROM distributed_table").rstrip() == "1" node1.query("SYSTEM START DISTRIBUTED SENDS distributed_table;") node1.query("SYSTEM FLUSH DISTRIBUTED distributed_table;") - assert node1.query("SELECT COUNT() FROM distributed_table").rstrip() == '2' + assert node1.query("SELECT COUNT() FROM distributed_table").rstrip() == "2" diff --git a/tests/integration/test_distributed_type_object/test.py 
b/tests/integration/test_distributed_type_object/test.py index faf509c46cd..b2179af8a3f 100644 --- a/tests/integration/test_distributed_type_object/test.py +++ b/tests/integration/test_distributed_type_object/test.py @@ -5,8 +5,9 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml']) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml']) +node1 = cluster.add_instance("node1", main_configs=["configs/remote_servers.xml"]) +node2 = cluster.add_instance("node2", main_configs=["configs/remote_servers.xml"]) + @pytest.fixture(scope="module") def started_cluster(): @@ -14,8 +15,14 @@ def started_cluster(): cluster.start() for node in (node1, node2): - node.query("CREATE TABLE local_table(id UInt32, data JSON) ENGINE = MergeTree ORDER BY id", settings={"allow_experimental_object_type": 1}) - node.query("CREATE TABLE dist_table AS local_table ENGINE = Distributed(test_cluster, default, local_table)", settings={"allow_experimental_object_type": 1}) + node.query( + "CREATE TABLE local_table(id UInt32, data JSON) ENGINE = MergeTree ORDER BY id", + settings={"allow_experimental_object_type": 1}, + ) + node.query( + "CREATE TABLE dist_table AS local_table ENGINE = Distributed(test_cluster, default, local_table)", + settings={"allow_experimental_object_type": 1}, + ) yield cluster @@ -24,21 +31,32 @@ def started_cluster(): def test_distributed_type_object(started_cluster): - node1.query('INSERT INTO local_table FORMAT JSONEachRow {"id": 1, "data": {"k1": 10}}') - node2.query('INSERT INTO local_table FORMAT JSONEachRow {"id": 2, "data": {"k1": 20}}') + node1.query( + 'INSERT INTO local_table FORMAT JSONEachRow {"id": 1, "data": {"k1": 10}}' + ) + node2.query( + 'INSERT INTO local_table FORMAT JSONEachRow {"id": 2, "data": {"k1": 20}}' + ) expected = TSV("10\n20\n") assert TSV(node1.query("SELECT data.k1 FROM dist_table ORDER BY id")) == expected - node1.query('INSERT INTO local_table FORMAT JSONEachRow {"id": 3, "data": {"k1": "str1"}}') + node1.query( + 'INSERT INTO local_table FORMAT JSONEachRow {"id": 3, "data": {"k1": "str1"}}' + ) expected = TSV("10\n20\nstr1\n") assert TSV(node1.query("SELECT data.k1 FROM dist_table ORDER BY id")) == expected - node1.query('INSERT INTO local_table FORMAT JSONEachRow {"id": 4, "data": {"k2": 30}}') + node1.query( + 'INSERT INTO local_table FORMAT JSONEachRow {"id": 4, "data": {"k2": 30}}' + ) expected = TSV("10\t0\n20\t0\nstr1\t0\n\t30") - assert TSV(node1.query("SELECT data.k1, data.k2 FROM dist_table ORDER BY id")) == expected + assert ( + TSV(node1.query("SELECT data.k1, data.k2 FROM dist_table ORDER BY id")) + == expected + ) expected = TSV("120\n") assert TSV(node1.query("SELECT sum(data.k2 * id) FROM dist_table")) == expected @@ -46,12 +64,25 @@ def test_distributed_type_object(started_cluster): node1.query("TRUNCATE TABLE local_table") node2.query("TRUNCATE TABLE local_table") - node1.query('INSERT INTO local_table FORMAT JSONEachRow {"id": 1, "data": {"k1": "aa", "k2": {"k3": "bb", "k4": "c"}}} {"id": 2, "data": {"k1": "ee", "k5": "ff"}};') - node2.query('INSERT INTO local_table FORMAT JSONEachRow {"id": 3, "data": {"k5":"foo"}};') + node1.query( + 'INSERT INTO local_table FORMAT JSONEachRow {"id": 1, "data": {"k1": "aa", "k2": {"k3": "bb", "k4": "c"}}} {"id": 2, "data": {"k1": "ee", "k5": "ff"}};' + ) + node2.query( + 'INSERT INTO local_table FORMAT JSONEachRow {"id": 3, "data": {"k5":"foo"}};' + ) - expected = TSV(""" + 
expected = TSV( + """ 1\taa\tbb\tc\t 2\tee\t\t\tff -3\t\t\t\tfoo""") +3\t\t\t\tfoo""" + ) - assert TSV(node1.query("SELECT id, data.k1, data.k2.k3, data.k2.k4, data.k5 FROM dist_table ORDER BY id")) == expected + assert ( + TSV( + node1.query( + "SELECT id, data.k1, data.k2.k3, data.k2.k4, data.k5 FROM dist_table ORDER BY id" + ) + ) + == expected + ) diff --git a/tests/integration/test_dotnet_client/test.py b/tests/integration/test_dotnet_client/test.py index 4cc16ac826e..b147688c099 100644 --- a/tests/integration/test_dotnet_client/test.py +++ b/tests/integration/test_dotnet_client/test.py @@ -15,8 +15,12 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) DOCKER_COMPOSE_PATH = get_docker_compose_path() cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', - user_configs=["configs/users.xml"], env_variables={'UBSAN_OPTIONS': 'print_stacktrace=1'}) +node = cluster.add_instance( + "node", + user_configs=["configs/users.xml"], + env_variables={"UBSAN_OPTIONS": "print_stacktrace=1"}, +) + @pytest.fixture(scope="module") def started_cluster(): @@ -27,21 +31,37 @@ def started_cluster(): cluster.shutdown() -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def dotnet_container(): - docker_compose = os.path.join(DOCKER_COMPOSE_PATH, 'docker_compose_dotnet_client.yml') + docker_compose = os.path.join( + DOCKER_COMPOSE_PATH, "docker_compose_dotnet_client.yml" + ) run_and_check( - ['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--no-build']) - yield docker.from_env().containers.get(cluster.project_name + '_dotnet1_1') + [ + "docker-compose", + "-p", + cluster.project_name, + "-f", + docker_compose, + "up", + "--no-recreate", + "-d", + "--no-build", + ] + ) + yield docker.from_env().containers.get(cluster.project_name + "_dotnet1_1") def test_dotnet_client(started_cluster, dotnet_container): - with open(os.path.join(SCRIPT_DIR, 'dotnet.reference'), 'rb') as fp: + with open(os.path.join(SCRIPT_DIR, "dotnet.reference"), "rb") as fp: reference = fp.read() code, (stdout, stderr) = dotnet_container.exec_run( - 'dotnet run --host {host} --port {port} --user default --password 123 --database default' - .format(host=started_cluster.get_instance_ip('node'), port=8123), demux=True) + "dotnet run --host {host} --port {port} --user default --password 123 --database default".format( + host=started_cluster.get_instance_ip("node"), port=8123 + ), + demux=True, + ) assert code == 0 assert stdout == reference diff --git a/tests/integration/test_drop_replica/test.py b/tests/integration/test_drop_replica/test.py index eb67a25f9f5..1fa086a4217 100644 --- a/tests/integration/test_drop_replica/test.py +++ b/tests/integration/test_drop_replica/test.py @@ -8,50 +8,71 @@ from helpers.network import PartitionManager def fill_nodes(nodes, shard): for node in nodes: node.query( - ''' + """ CREATE DATABASE test; CREATE TABLE test.test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; - '''.format(shard=shard, replica=node.name)) + """.format( + shard=shard, replica=node.name + ) + ) node.query( - ''' + """ CREATE DATABASE test1; CREATE TABLE test1.test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test1/{shard}/replicated/test_table', '{replica}') ORDER BY id 
PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; - '''.format(shard=shard, replica=node.name)) + """.format( + shard=shard, replica=node.name + ) + ) node.query( - ''' + """ CREATE DATABASE test2; CREATE TABLE test2.test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test2/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; - '''.format(shard=shard, replica=node.name)) + """.format( + shard=shard, replica=node.name + ) + ) node.query( - ''' + """ CREATE DATABASE test3; CREATE TABLE test3.test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test3/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; - '''.format(shard=shard, replica=node.name)) + """.format( + shard=shard, replica=node.name + ) + ) node.query( - ''' + """ CREATE DATABASE test4; CREATE TABLE test4.test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test4/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; - '''.format(shard=shard, replica=node.name)) + """.format( + shard=shard, replica=node.name + ) + ) cluster = ClickHouseCluster(__file__) -node_1_1 = cluster.add_instance('node_1_1', with_zookeeper=True, main_configs=['configs/remote_servers.xml']) -node_1_2 = cluster.add_instance('node_1_2', with_zookeeper=True, main_configs=['configs/remote_servers.xml']) -node_1_3 = cluster.add_instance('node_1_3', with_zookeeper=True, main_configs=['configs/remote_servers.xml']) +node_1_1 = cluster.add_instance( + "node_1_1", with_zookeeper=True, main_configs=["configs/remote_servers.xml"] +) +node_1_2 = cluster.add_instance( + "node_1_2", with_zookeeper=True, main_configs=["configs/remote_servers.xml"] +) +node_1_3 = cluster.add_instance( + "node_1_3", with_zookeeper=True, main_configs=["configs/remote_servers.xml"] +) @pytest.fixture(scope="module") @@ -71,82 +92,125 @@ def start_cluster(): def test_drop_replica(start_cluster): - node_1_1.query("INSERT INTO test.test_table SELECT number, toString(number) FROM numbers(100)") - node_1_1.query("INSERT INTO test1.test_table SELECT number, toString(number) FROM numbers(100)") - node_1_1.query("INSERT INTO test2.test_table SELECT number, toString(number) FROM numbers(100)") - node_1_1.query("INSERT INTO test3.test_table SELECT number, toString(number) FROM numbers(100)") - node_1_1.query("INSERT INTO test4.test_table SELECT number, toString(number) FROM numbers(100)") + node_1_1.query( + "INSERT INTO test.test_table SELECT number, toString(number) FROM numbers(100)" + ) + node_1_1.query( + "INSERT INTO test1.test_table SELECT number, toString(number) FROM numbers(100)" + ) + node_1_1.query( + "INSERT INTO test2.test_table SELECT number, toString(number) FROM numbers(100)" + ) + node_1_1.query( + "INSERT INTO test3.test_table SELECT number, toString(number) FROM numbers(100)" + ) + node_1_1.query( + "INSERT INTO test4.test_table SELECT number, toString(number) FROM numbers(100)" + ) - zk = 
cluster.get_kazoo_client('zoo1') - assert "can't drop local replica" in node_1_1.query_and_get_error("SYSTEM DROP REPLICA 'node_1_1'") + zk = cluster.get_kazoo_client("zoo1") assert "can't drop local replica" in node_1_1.query_and_get_error( - "SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test") + "SYSTEM DROP REPLICA 'node_1_1'" + ) assert "can't drop local replica" in node_1_1.query_and_get_error( - "SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table") - assert "it's active" in node_1_2.query_and_get_error("SYSTEM DROP REPLICA 'node_1_1'") - assert "it's active" in node_1_2.query_and_get_error("SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test") - assert "it's active" in node_1_2.query_and_get_error("SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table") - assert "it's active" in \ - node_1_3.query_and_get_error( - "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test/{shard}/replicated/test_table'".format( - shard=1)) - assert "There is a local table" in \ - node_1_2.query_and_get_error( - "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test/{shard}/replicated/test_table'".format( - shard=1)) - assert "There is a local table" in \ - node_1_1.query_and_get_error( - "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test/{shard}/replicated/test_table'".format( - shard=1)) - assert "does not look like a table path" in \ - node_1_3.query_and_get_error("SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test'") + "SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test" + ) + assert "can't drop local replica" in node_1_1.query_and_get_error( + "SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table" + ) + assert "it's active" in node_1_2.query_and_get_error( + "SYSTEM DROP REPLICA 'node_1_1'" + ) + assert "it's active" in node_1_2.query_and_get_error( + "SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test" + ) + assert "it's active" in node_1_2.query_and_get_error( + "SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table" + ) + assert "it's active" in node_1_3.query_and_get_error( + "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test/{shard}/replicated/test_table'".format( + shard=1 + ) + ) + assert "There is a local table" in node_1_2.query_and_get_error( + "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test/{shard}/replicated/test_table'".format( + shard=1 + ) + ) + assert "There is a local table" in node_1_1.query_and_get_error( + "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test/{shard}/replicated/test_table'".format( + shard=1 + ) + ) + assert "does not look like a table path" in node_1_3.query_and_get_error( + "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test'" + ) node_1_1.query("DETACH DATABASE test") for i in range(1, 5): node_1_1.query("DETACH DATABASE test{}".format(i)) assert "doesn't exist" in node_1_3.query_and_get_error( - "SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table") + "SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table" + ) - assert "doesn't exist" in node_1_3.query_and_get_error("SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test1") + assert "doesn't exist" in node_1_3.query_and_get_error( + "SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test1" + ) node_1_3.query("SYSTEM DROP REPLICA 'node_1_1'") exists_replica_1_1 = zk.exists( - "/clickhouse/tables/test3/{shard}/replicated/test_table/replicas/{replica}".format(shard=1, - replica='node_1_1')) - assert (exists_replica_1_1 != None) + 
"/clickhouse/tables/test3/{shard}/replicated/test_table/replicas/{replica}".format( + shard=1, replica="node_1_1" + ) + ) + assert exists_replica_1_1 != None ## If you want to drop a inactive/stale replicate table that does not have a local replica, you can following syntax(ZKPATH): node_1_3.query( "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test2/{shard}/replicated/test_table'".format( - shard=1)) + shard=1 + ) + ) exists_replica_1_1 = zk.exists( - "/clickhouse/tables/test2/{shard}/replicated/test_table/replicas/{replica}".format(shard=1, - replica='node_1_1')) - assert (exists_replica_1_1 == None) + "/clickhouse/tables/test2/{shard}/replicated/test_table/replicas/{replica}".format( + shard=1, replica="node_1_1" + ) + ) + assert exists_replica_1_1 == None node_1_2.query("SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table") exists_replica_1_1 = zk.exists( - "/clickhouse/tables/test/{shard}/replicated/test_table/replicas/{replica}".format(shard=1, - replica='node_1_1')) - assert (exists_replica_1_1 == None) + "/clickhouse/tables/test/{shard}/replicated/test_table/replicas/{replica}".format( + shard=1, replica="node_1_1" + ) + ) + assert exists_replica_1_1 == None node_1_2.query("SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test1") exists_replica_1_1 = zk.exists( - "/clickhouse/tables/test1/{shard}/replicated/test_table/replicas/{replica}".format(shard=1, - replica='node_1_1')) - assert (exists_replica_1_1 == None) + "/clickhouse/tables/test1/{shard}/replicated/test_table/replicas/{replica}".format( + shard=1, replica="node_1_1" + ) + ) + assert exists_replica_1_1 == None node_1_3.query( "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test3/{shard}/replicated/test_table'".format( - shard=1)) + shard=1 + ) + ) exists_replica_1_1 = zk.exists( - "/clickhouse/tables/test3/{shard}/replicated/test_table/replicas/{replica}".format(shard=1, - replica='node_1_1')) - assert (exists_replica_1_1 == None) + "/clickhouse/tables/test3/{shard}/replicated/test_table/replicas/{replica}".format( + shard=1, replica="node_1_1" + ) + ) + assert exists_replica_1_1 == None node_1_2.query("SYSTEM DROP REPLICA 'node_1_1'") exists_replica_1_1 = zk.exists( - "/clickhouse/tables/test4/{shard}/replicated/test_table/replicas/{replica}".format(shard=1, - replica='node_1_1')) - assert (exists_replica_1_1 == None) + "/clickhouse/tables/test4/{shard}/replicated/test_table/replicas/{replica}".format( + shard=1, replica="node_1_1" + ) + ) + assert exists_replica_1_1 == None diff --git a/tests/integration/test_enabling_access_management/test.py b/tests/integration/test_enabling_access_management/test.py index e93a643cd16..0b8c1771a40 100644 --- a/tests/integration/test_enabling_access_management/test.py +++ b/tests/integration/test_enabling_access_management/test.py @@ -2,7 +2,9 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', user_configs=["configs/users.d/extra_users.xml"]) +instance = cluster.add_instance( + "instance", user_configs=["configs/users.d/extra_users.xml"] +) @pytest.fixture(scope="module", autouse=True) @@ -16,10 +18,20 @@ def started_cluster(): def test_enabling_access_management(): - instance.query("CREATE USER Alex", user='default') - assert instance.query("SHOW CREATE USER Alex", user='default') == "CREATE USER Alex\n" - assert instance.query("SHOW CREATE USER Alex", user='readonly') == "CREATE USER Alex\n" - assert "Not enough privileges" in 
instance.query_and_get_error("SHOW CREATE USER Alex", user='xyz') + instance.query("CREATE USER Alex", user="default") + assert ( + instance.query("SHOW CREATE USER Alex", user="default") == "CREATE USER Alex\n" + ) + assert ( + instance.query("SHOW CREATE USER Alex", user="readonly") == "CREATE USER Alex\n" + ) + assert "Not enough privileges" in instance.query_and_get_error( + "SHOW CREATE USER Alex", user="xyz" + ) - assert "Cannot execute query in readonly mode" in instance.query_and_get_error("CREATE USER Robin", user='readonly') - assert "Not enough privileges" in instance.query_and_get_error("CREATE USER Robin", user='xyz') + assert "Cannot execute query in readonly mode" in instance.query_and_get_error( + "CREATE USER Robin", user="readonly" + ) + assert "Not enough privileges" in instance.query_and_get_error( + "CREATE USER Robin", user="xyz" + ) diff --git a/tests/integration/test_encrypted_disk/test.py b/tests/integration/test_encrypted_disk/test.py index 7d94f7ccdc5..4e6d1db9e99 100644 --- a/tests/integration/test_encrypted_disk/test.py +++ b/tests/integration/test_encrypted_disk/test.py @@ -7,10 +7,12 @@ from helpers.test_tools import assert_eq_with_retry FIRST_PART_NAME = "all_1_1_0" cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", - main_configs=["configs/storage.xml"], - tmpfs=["/disk:size=100M"], - with_minio=True) +node = cluster.add_instance( + "node", + main_configs=["configs/storage.xml"], + tmpfs=["/disk:size=100M"], + with_minio=True, +) @pytest.fixture(scope="module", autouse=True) @@ -30,7 +32,10 @@ def cleanup_after_test(): node.query("DROP TABLE IF EXISTS encrypted_test NO DELAY") -@pytest.mark.parametrize("policy", ["encrypted_policy", "encrypted_policy_key192b", "local_policy", "s3_policy"]) +@pytest.mark.parametrize( + "policy", + ["encrypted_policy", "encrypted_policy_key192b", "local_policy", "s3_policy"], +) def test_encrypted_disk(policy): node.query( """ @@ -40,7 +45,9 @@ def test_encrypted_disk(policy): ) ENGINE=MergeTree() ORDER BY id SETTINGS storage_policy='{}' - """.format(policy) + """.format( + policy + ) ) node.query("INSERT INTO encrypted_test VALUES (0,'data'),(1,'data')") @@ -52,7 +59,21 @@ def test_encrypted_disk(policy): assert node.query(select_query) == "(0,'data'),(1,'data'),(2,'data'),(3,'data')" -@pytest.mark.parametrize("policy, destination_disks", [("local_policy", ["disk_local_encrypted", "disk_local_encrypted2", "disk_local_encrypted_key192b", "disk_local"]), ("s3_policy", ["disk_s3_encrypted", "disk_s3"])]) +@pytest.mark.parametrize( + "policy, destination_disks", + [ + ( + "local_policy", + [ + "disk_local_encrypted", + "disk_local_encrypted2", + "disk_local_encrypted_key192b", + "disk_local", + ], + ), + ("s3_policy", ["disk_s3_encrypted", "disk_s3"]), + ], +) def test_part_move(policy, destination_disks): node.query( """ @@ -62,7 +83,9 @@ def test_part_move(policy, destination_disks): ) ENGINE=MergeTree() ORDER BY id SETTINGS storage_policy='{}' - """.format(policy) + """.format( + policy + ) ) node.query("INSERT INTO encrypted_test VALUES (0,'data'),(1,'data')") @@ -70,16 +93,29 @@ def test_part_move(policy, destination_disks): assert node.query(select_query) == "(0,'data'),(1,'data')" for destination_disk in destination_disks: - node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, destination_disk)) + node.query( + "ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format( + FIRST_PART_NAME, destination_disk + ) + ) assert node.query(select_query) == 
"(0,'data'),(1,'data')" with pytest.raises(QueryRuntimeException) as exc: - node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, destination_disk)) - assert("Part '{}' is already on disk '{}'".format(FIRST_PART_NAME, destination_disk) in str(exc.value)) + node.query( + "ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format( + FIRST_PART_NAME, destination_disk + ) + ) + assert "Part '{}' is already on disk '{}'".format( + FIRST_PART_NAME, destination_disk + ) in str(exc.value) assert node.query(select_query) == "(0,'data'),(1,'data')" -@pytest.mark.parametrize("policy,encrypted_disk", [("local_policy", "disk_local_encrypted"), ("s3_policy", "disk_s3_encrypted")]) +@pytest.mark.parametrize( + "policy,encrypted_disk", + [("local_policy", "disk_local_encrypted"), ("s3_policy", "disk_s3_encrypted")], +) def test_optimize_table(policy, encrypted_disk): node.query( """ @@ -89,23 +125,35 @@ def test_optimize_table(policy, encrypted_disk): ) ENGINE=MergeTree() ORDER BY id SETTINGS storage_policy='{}' - """.format(policy) + """.format( + policy + ) ) node.query("INSERT INTO encrypted_test VALUES (0,'data'),(1,'data')") select_query = "SELECT * FROM encrypted_test ORDER BY id FORMAT Values" assert node.query(select_query) == "(0,'data'),(1,'data')" - node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, encrypted_disk)) + node.query( + "ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format( + FIRST_PART_NAME, encrypted_disk + ) + ) assert node.query(select_query) == "(0,'data'),(1,'data')" node.query("INSERT INTO encrypted_test VALUES (2,'data'),(3,'data')") node.query("OPTIMIZE TABLE encrypted_test FINAL") with pytest.raises(QueryRuntimeException) as exc: - node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, encrypted_disk)) + node.query( + "ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format( + FIRST_PART_NAME, encrypted_disk + ) + ) - assert("Part {} is not exists or not active".format(FIRST_PART_NAME) in str(exc.value)) + assert "Part {} is not exists or not active".format(FIRST_PART_NAME) in str( + exc.value + ) assert node.query(select_query) == "(0,'data'),(1,'data'),(2,'data'),(3,'data')" @@ -113,7 +161,11 @@ def test_optimize_table(policy, encrypted_disk): # Test adding encryption key on the fly. def test_add_key(): def make_storage_policy_with_keys(policy_name, keys): - node.exec_in_container(["bash", "-c" , """cat > /etc/clickhouse-server/config.d/storage_policy_{policy_name}.xml << EOF + node.exec_in_container( + [ + "bash", + "-c", + """cat > /etc/clickhouse-server/config.d/storage_policy_{policy_name}.xml << EOF @@ -136,33 +188,48 @@ def test_add_key(): -EOF""".format(policy_name=policy_name, keys=keys)]) +EOF""".format( + policy_name=policy_name, keys=keys + ), + ] + ) node.query("SYSTEM RELOAD CONFIG") # Add some data to an encrypted disk. 
node.query("SELECT policy_name FROM system.storage_policies") - make_storage_policy_with_keys("encrypted_policy_multikeys", "firstfirstfirstf") - assert_eq_with_retry(node, "SELECT policy_name FROM system.storage_policies WHERE policy_name='encrypted_policy_multikeys'", "encrypted_policy_multikeys") - - node.query(""" + make_storage_policy_with_keys( + "encrypted_policy_multikeys", "firstfirstfirstf" + ) + assert_eq_with_retry( + node, + "SELECT policy_name FROM system.storage_policies WHERE policy_name='encrypted_policy_multikeys'", + "encrypted_policy_multikeys", + ) + + node.query( + """ CREATE TABLE encrypted_test ( id Int64, data String ) ENGINE=MergeTree() ORDER BY id SETTINGS storage_policy='encrypted_policy_multikeys' - """) + """ + ) node.query("INSERT INTO encrypted_test VALUES (0,'data'),(1,'data')") select_query = "SELECT * FROM encrypted_test ORDER BY id FORMAT Values" assert node.query(select_query) == "(0,'data'),(1,'data')" # Add a second key and start using it. - make_storage_policy_with_keys("encrypted_policy_multikeys", """ + make_storage_policy_with_keys( + "encrypted_policy_multikeys", + """ firstfirstfirstf secondsecondseco 1 - """) + """, + ) node.query("INSERT INTO encrypted_test VALUES (2,'data'),(3,'data')") # Now "(0,'data'),(1,'data')" is encrypted with the first key and "(2,'data'),(3,'data')" is encrypted with the second key. @@ -170,11 +237,14 @@ EOF""".format(policy_name=policy_name, keys=keys)]) assert node.query(select_query) == "(0,'data'),(1,'data'),(2,'data'),(3,'data')" # Try to replace the first key with something wrong, and check that "(0,'data'),(1,'data')" cannot be read. - make_storage_policy_with_keys("encrypted_policy_multikeys", """ + make_storage_policy_with_keys( + "encrypted_policy_multikeys", + """ wrongwrongwrongw secondsecondseco 1 - """) + """, + ) expected_error = "Wrong key" assert expected_error in node.query_and_get_error(select_query) diff --git a/tests/integration/test_executable_dictionary/test.py b/tests/integration/test_executable_dictionary/test.py index 5e50a092a29..43e6ec0a800 100644 --- a/tests/integration/test_executable_dictionary/test.py +++ b/tests/integration/test_executable_dictionary/test.py @@ -10,29 +10,46 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True, main_configs=[]) +node = cluster.add_instance("node", stay_alive=True, main_configs=[]) def skip_test_msan(instance): if instance.is_built_with_memory_sanitizer(): pytest.skip("Memory Sanitizer cannot work with vfork") -def copy_file_to_container(local_path, dist_path, container_id): - os.system("docker cp {local} {cont_id}:{dist}".format(local=local_path, cont_id=container_id, dist=dist_path)) -config = ''' +def copy_file_to_container(local_path, dist_path, container_id): + os.system( + "docker cp {local} {cont_id}:{dist}".format( + local=local_path, cont_id=container_id, dist=dist_path + ) + ) + + +config = """ /etc/clickhouse-server/dictionaries/*_dictionary.xml -''' +""" + @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - node.replace_config("/etc/clickhouse-server/config.d/dictionaries_config.xml", config) + node.replace_config( + "/etc/clickhouse-server/config.d/dictionaries_config.xml", config + ) - copy_file_to_container(os.path.join(SCRIPT_DIR, 'dictionaries/.'), '/etc/clickhouse-server/dictionaries', node.docker_id) - copy_file_to_container(os.path.join(SCRIPT_DIR, 
'user_scripts/.'), '/var/lib/clickhouse/user_scripts', node.docker_id) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "dictionaries/."), + "/etc/clickhouse-server/dictionaries", + node.docker_id, + ) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "user_scripts/."), + "/var/lib/clickhouse/user_scripts", + node.docker_id, + ) node.restart_clickhouse() @@ -41,135 +58,427 @@ def started_cluster(): finally: cluster.shutdown() + def test_executable_input_bash(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_input_bash', 'result', toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT dictGet('executable_input_pool_bash', 'result', toUInt64(1))") == 'Key 1\n' + assert ( + node.query("SELECT dictGet('executable_input_bash', 'result', toUInt64(1))") + == "Key 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_input_pool_bash', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + def test_executable_implicit_input_bash(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_implicit_input_bash', 'result', toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT dictGet('executable_implicit_input_pool_bash', 'result', toUInt64(1))") == 'Key 1\n' + assert ( + node.query( + "SELECT dictGet('executable_implicit_input_bash', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_implicit_input_pool_bash', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + def test_executable_input_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_input_python', 'result', toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT dictGet('executable_input_pool_python', 'result', toUInt64(1))") == 'Key 1\n' + assert ( + node.query("SELECT dictGet('executable_input_python', 'result', toUInt64(1))") + == "Key 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_input_pool_python', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + def test_executable_implicit_input_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_implicit_input_python', 'result', toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT dictGet('executable_implicit_input_pool_python', 'result', toUInt64(1))") == 'Key 1\n' + assert ( + node.query( + "SELECT dictGet('executable_implicit_input_python', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_implicit_input_pool_python', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + def test_executable_input_send_chunk_header_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_input_send_chunk_header_python', 'result', toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT dictGet('executable_input_send_chunk_header_pool_python', 'result', toUInt64(1))") == 'Key 1\n' + assert ( + node.query( + "SELECT dictGet('executable_input_send_chunk_header_python', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_input_send_chunk_header_pool_python', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + def test_executable_implicit_input_send_chunk_header_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_implicit_input_send_chunk_header_python', 'result', toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT dictGet('executable_implicit_input_send_chunk_header_pool_python', 'result', toUInt64(1))") == 'Key 1\n' + 
assert ( + node.query( + "SELECT dictGet('executable_implicit_input_send_chunk_header_python', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_implicit_input_send_chunk_header_pool_python', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + def test_executable_input_sum_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_input_sum_python', 'result', tuple(toUInt64(1), toUInt64(1)))") == '2\n' - assert node.query("SELECT dictGet('executable_input_sum_pool_python', 'result', tuple(toUInt64(1), toUInt64(1)))") == '2\n' + assert ( + node.query( + "SELECT dictGet('executable_input_sum_python', 'result', tuple(toUInt64(1), toUInt64(1)))" + ) + == "2\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_input_sum_pool_python', 'result', tuple(toUInt64(1), toUInt64(1)))" + ) + == "2\n" + ) + def test_executable_implicit_input_sum_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_implicit_input_sum_python', 'result', tuple(toUInt64(1), toUInt64(1)))") == '2\n' - assert node.query("SELECT dictGet('executable_implicit_input_sum_pool_python', 'result', tuple(toUInt64(1), toUInt64(1)))") == '2\n' + assert ( + node.query( + "SELECT dictGet('executable_implicit_input_sum_python', 'result', tuple(toUInt64(1), toUInt64(1)))" + ) + == "2\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_implicit_input_sum_pool_python', 'result', tuple(toUInt64(1), toUInt64(1)))" + ) + == "2\n" + ) + def test_executable_input_argument_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_input_argument_python', 'result', toUInt64(1))") == 'Key 1 1\n' - assert node.query("SELECT dictGet('executable_input_argument_pool_python', 'result', toUInt64(1))") == 'Key 1 1\n' + assert ( + node.query( + "SELECT dictGet('executable_input_argument_python', 'result', toUInt64(1))" + ) + == "Key 1 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_input_argument_pool_python', 'result', toUInt64(1))" + ) + == "Key 1 1\n" + ) + def test_executable_implicit_input_argument_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_implicit_input_argument_python', 'result', toUInt64(1))") == 'Key 1 1\n' - assert node.query("SELECT dictGet('executable_implicit_input_argument_pool_python', 'result', toUInt64(1))") == 'Key 1 1\n' + assert ( + node.query( + "SELECT dictGet('executable_implicit_input_argument_python', 'result', toUInt64(1))" + ) + == "Key 1 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_implicit_input_argument_pool_python', 'result', toUInt64(1))" + ) + == "Key 1 1\n" + ) + def test_executable_input_signalled_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_input_signalled_python', 'result', toUInt64(1))") == 'Default result\n' - assert node.query("SELECT dictGet('executable_input_signalled_pool_python', 'result', toUInt64(1))") == 'Default result\n' + assert ( + node.query( + "SELECT dictGet('executable_input_signalled_python', 'result', toUInt64(1))" + ) + == "Default result\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_input_signalled_pool_python', 'result', toUInt64(1))" + ) + == "Default result\n" + ) + def test_executable_implicit_input_signalled_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_implicit_input_signalled_python', 'result', toUInt64(1))") == 'Default 
result\n' - assert node.query("SELECT dictGet('executable_implicit_input_signalled_pool_python', 'result', toUInt64(1))") == 'Default result\n' + assert ( + node.query( + "SELECT dictGet('executable_implicit_input_signalled_python', 'result', toUInt64(1))" + ) + == "Default result\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_implicit_input_signalled_pool_python', 'result', toUInt64(1))" + ) + == "Default result\n" + ) + def test_executable_input_slow_python(started_cluster): skip_test_msan(node) - assert node.query_and_get_error("SELECT dictGet('executable_input_slow_python', 'result', toUInt64(1))") - assert node.query_and_get_error("SELECT dictGet('executable_input_slow_pool_python', 'result', toUInt64(1))") + assert node.query_and_get_error( + "SELECT dictGet('executable_input_slow_python', 'result', toUInt64(1))" + ) + assert node.query_and_get_error( + "SELECT dictGet('executable_input_slow_pool_python', 'result', toUInt64(1))" + ) + def test_executable_implicit_input_slow_python(started_cluster): skip_test_msan(node) - assert node.query_and_get_error("SELECT dictGet('executable_implicit_input_slow_python', 'result', toUInt64(1))") - assert node.query_and_get_error("SELECT dictGet('executable_implicit_input_slow_pool_python', 'result', toUInt64(1))") + assert node.query_and_get_error( + "SELECT dictGet('executable_implicit_input_slow_python', 'result', toUInt64(1))" + ) + assert node.query_and_get_error( + "SELECT dictGet('executable_implicit_input_slow_pool_python', 'result', toUInt64(1))" + ) + def test_executable_input_slow_python(started_cluster): skip_test_msan(node) - assert node.query_and_get_error("SELECT dictGet('executable_input_slow_python', 'result', toUInt64(1))") - assert node.query_and_get_error("SELECT dictGet('executable_input_slow_pool_python', 'result', toUInt64(1))") + assert node.query_and_get_error( + "SELECT dictGet('executable_input_slow_python', 'result', toUInt64(1))" + ) + assert node.query_and_get_error( + "SELECT dictGet('executable_input_slow_pool_python', 'result', toUInt64(1))" + ) + def test_executable_implicit_input_slow_python(started_cluster): skip_test_msan(node) - assert node.query_and_get_error("SELECT dictGet('executable_implicit_input_slow_python', 'result', toUInt64(1))") - assert node.query_and_get_error("SELECT dictGet('executable_implicit_input_slow_pool_python', 'result', toUInt64(1))") + assert node.query_and_get_error( + "SELECT dictGet('executable_implicit_input_slow_python', 'result', toUInt64(1))" + ) + assert node.query_and_get_error( + "SELECT dictGet('executable_implicit_input_slow_pool_python', 'result', toUInt64(1))" + ) + def test_executable_non_direct_input_bash(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_input_non_direct_bash', 'result', toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT dictGet('executable_input_non_direct_pool_bash', 'result', toUInt64(1))") == 'Key 1\n' + assert ( + node.query( + "SELECT dictGet('executable_input_non_direct_bash', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_input_non_direct_pool_bash', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + def test_executable_implicit_non_direct_input_bash(started_cluster): skip_test_msan(node) - assert node.query("SELECT dictGet('executable_input_implicit_non_direct_bash', 'result', toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT dictGet('executable_input_implicit_non_direct_pool_bash', 'result', toUInt64(1))") == 'Key 1\n' + assert 
( + node.query( + "SELECT dictGet('executable_input_implicit_non_direct_bash', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_input_implicit_non_direct_pool_bash', 'result', toUInt64(1))" + ) + == "Key 1\n" + ) + def test_executable_source_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT * FROM dictionary(executable_source_simple_key_python) ORDER BY input") == '1\tValue 1\n2\tValue 2\n3\tValue 3\n' - assert node.query("SELECT dictGet('executable_source_simple_key_python', 'result', toUInt64(1))") == 'Value 1\n' - assert node.query("SELECT dictGet('executable_source_simple_key_python', 'result', toUInt64(2))") == 'Value 2\n' - assert node.query("SELECT dictGet('executable_source_simple_key_python', 'result', toUInt64(3))") == 'Value 3\n' + assert ( + node.query( + "SELECT * FROM dictionary(executable_source_simple_key_python) ORDER BY input" + ) + == "1\tValue 1\n2\tValue 2\n3\tValue 3\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_simple_key_python', 'result', toUInt64(1))" + ) + == "Value 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_simple_key_python', 'result', toUInt64(2))" + ) + == "Value 2\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_simple_key_python', 'result', toUInt64(3))" + ) + == "Value 3\n" + ) + + assert ( + node.query( + "SELECT * FROM dictionary('executable_source_complex_key_python') ORDER BY input" + ) + == "1\tValue 1\n2\tValue 2\n3\tValue 3\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_complex_key_python', 'result', tuple(toUInt64(1)))" + ) + == "Value 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_complex_key_python', 'result', tuple(toUInt64(2)))" + ) + == "Value 2\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_complex_key_python', 'result', tuple(toUInt64(3)))" + ) + == "Value 3\n" + ) - assert node.query("SELECT * FROM dictionary('executable_source_complex_key_python') ORDER BY input") == '1\tValue 1\n2\tValue 2\n3\tValue 3\n' - assert node.query("SELECT dictGet('executable_source_complex_key_python', 'result', tuple(toUInt64(1)))") == 'Value 1\n' - assert node.query("SELECT dictGet('executable_source_complex_key_python', 'result', tuple(toUInt64(2)))") == 'Value 2\n' - assert node.query("SELECT dictGet('executable_source_complex_key_python', 'result', tuple(toUInt64(3)))") == 'Value 3\n' def test_executable_source_argument_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT * FROM dictionary(executable_source_simple_key_argument_python) ORDER BY input") == '1\tValue 1 1\n2\tValue 1 2\n3\tValue 1 3\n' - assert node.query("SELECT dictGet('executable_source_simple_key_argument_python', 'result', toUInt64(1))") == 'Value 1 1\n' - assert node.query("SELECT dictGet('executable_source_simple_key_argument_python', 'result', toUInt64(2))") == 'Value 1 2\n' - assert node.query("SELECT dictGet('executable_source_simple_key_argument_python', 'result', toUInt64(3))") == 'Value 1 3\n' + assert ( + node.query( + "SELECT * FROM dictionary(executable_source_simple_key_argument_python) ORDER BY input" + ) + == "1\tValue 1 1\n2\tValue 1 2\n3\tValue 1 3\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_simple_key_argument_python', 'result', toUInt64(1))" + ) + == "Value 1 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_simple_key_argument_python', 'result', toUInt64(2))" + ) + == "Value 1 2\n" + ) + 
assert ( + node.query( + "SELECT dictGet('executable_source_simple_key_argument_python', 'result', toUInt64(3))" + ) + == "Value 1 3\n" + ) + + assert ( + node.query( + "SELECT * FROM dictionary(executable_source_complex_key_argument_python) ORDER BY input" + ) + == "1\tValue 1 1\n2\tValue 1 2\n3\tValue 1 3\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_complex_key_argument_python', 'result', toUInt64(1))" + ) + == "Value 1 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_complex_key_argument_python', 'result', toUInt64(2))" + ) + == "Value 1 2\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_complex_key_argument_python', 'result', toUInt64(3))" + ) + == "Value 1 3\n" + ) - assert node.query("SELECT * FROM dictionary(executable_source_complex_key_argument_python) ORDER BY input") == '1\tValue 1 1\n2\tValue 1 2\n3\tValue 1 3\n' - assert node.query("SELECT dictGet('executable_source_complex_key_argument_python', 'result', toUInt64(1))") == 'Value 1 1\n' - assert node.query("SELECT dictGet('executable_source_complex_key_argument_python', 'result', toUInt64(2))") == 'Value 1 2\n' - assert node.query("SELECT dictGet('executable_source_complex_key_argument_python', 'result', toUInt64(3))") == 'Value 1 3\n' def test_executable_source_updated_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT * FROM dictionary(executable_source_simple_key_update_python) ORDER BY input") == '1\tValue 0 1\n' - assert node.query("SELECT dictGet('executable_source_simple_key_update_python', 'result', toUInt64(1))") == 'Value 0 1\n' + assert ( + node.query( + "SELECT * FROM dictionary(executable_source_simple_key_update_python) ORDER BY input" + ) + == "1\tValue 0 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_simple_key_update_python', 'result', toUInt64(1))" + ) + == "Value 0 1\n" + ) time.sleep(10) - assert node.query("SELECT * FROM dictionary(executable_source_simple_key_update_python) ORDER BY input") == '1\tValue 1 1\n' - assert node.query("SELECT dictGet('executable_source_simple_key_update_python', 'result', toUInt64(1))") == 'Value 1 1\n' + assert ( + node.query( + "SELECT * FROM dictionary(executable_source_simple_key_update_python) ORDER BY input" + ) + == "1\tValue 1 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_simple_key_update_python', 'result', toUInt64(1))" + ) + == "Value 1 1\n" + ) - assert node.query("SELECT * FROM dictionary(executable_source_complex_key_update_python) ORDER BY input") == '1\tValue 0 1\n' - assert node.query("SELECT dictGet('executable_source_complex_key_update_python', 'result', toUInt64(1))") == 'Value 0 1\n' + assert ( + node.query( + "SELECT * FROM dictionary(executable_source_complex_key_update_python) ORDER BY input" + ) + == "1\tValue 0 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_complex_key_update_python', 'result', toUInt64(1))" + ) + == "Value 0 1\n" + ) time.sleep(10) - assert node.query("SELECT * FROM dictionary(executable_source_complex_key_update_python) ORDER BY input") == '1\tValue 1 1\n' - assert node.query("SELECT dictGet('executable_source_complex_key_update_python', 'result', toUInt64(1))") == 'Value 1 1\n' - + assert ( + node.query( + "SELECT * FROM dictionary(executable_source_complex_key_update_python) ORDER BY input" + ) + == "1\tValue 1 1\n" + ) + assert ( + node.query( + "SELECT dictGet('executable_source_complex_key_update_python', 'result', toUInt64(1))" + ) + == "Value 1 1\n" + ) diff --git 
a/tests/integration/test_executable_dictionary/user_scripts/input.py b/tests/integration/test_executable_dictionary/user_scripts/input.py index e711dd8e306..75a3ccac52c 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input.py @@ -4,8 +4,8 @@ import sys import os import signal -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: - updated_line = line.replace('\n', '') - print(updated_line + '\t' + "Key " + updated_line, end='\n') + updated_line = line.replace("\n", "") + print(updated_line + "\t" + "Key " + updated_line, end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/input_argument.py b/tests/integration/test_executable_dictionary/user_scripts/input_argument.py index 163f9c4183f..349650fad6e 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input_argument.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input_argument.py @@ -2,10 +2,10 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": arg = int(sys.argv[1]) for line in sys.stdin: - updated_line = line.replace('\n', '') - print(updated_line + '\t' + "Key " + str(arg) + " " + updated_line, end='\n') + updated_line = line.replace("\n", "") + print(updated_line + "\t" + "Key " + str(arg) + " " + updated_line, end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/input_chunk_header.py b/tests/integration/test_executable_dictionary/user_scripts/input_chunk_header.py index 4eb00f64eb3..f8a60a771ea 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input_chunk_header.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input_chunk_header.py @@ -2,14 +2,14 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": for chunk_header in sys.stdin: chunk_length = int(chunk_header) while chunk_length != 0: line = sys.stdin.readline() - updated_line = line.replace('\n', '') + updated_line = line.replace("\n", "") chunk_length -= 1 - print(updated_line + '\t' + "Key " + updated_line, end='\n') + print(updated_line + "\t" + "Key " + updated_line, end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/input_implicit.py b/tests/integration/test_executable_dictionary/user_scripts/input_implicit.py index 835ab1f441a..3ace4f73611 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input_implicit.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input_implicit.py @@ -2,7 +2,7 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/input_implicit_argument.py b/tests/integration/test_executable_dictionary/user_scripts/input_implicit_argument.py index c1b2e5966d7..b9b7f5065b2 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input_implicit_argument.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input_implicit_argument.py @@ -2,9 +2,9 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": arg = int(sys.argv[1]) for line in sys.stdin: - print("Key " + str(arg) + " " + line, end='') + print("Key " + str(arg) + " " + line, end="") sys.stdout.flush() diff --git 
a/tests/integration/test_executable_dictionary/user_scripts/input_implicit_chunk_header.py b/tests/integration/test_executable_dictionary/user_scripts/input_implicit_chunk_header.py index 5dc03e1c507..90c8bfd9a2f 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input_implicit_chunk_header.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input_implicit_chunk_header.py @@ -2,13 +2,13 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": for chunk_header in sys.stdin: chunk_length = int(chunk_header) while chunk_length != 0: line = sys.stdin.readline() chunk_length -= 1 - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/input_implicit_signalled.py b/tests/integration/test_executable_dictionary/user_scripts/input_implicit_signalled.py index 27c8bc4840e..11a86737966 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input_implicit_signalled.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input_implicit_signalled.py @@ -5,9 +5,9 @@ import os import signal import time -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: os.signal(os.getpid(), signal.SIGTERM) - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/input_implicit_slow.py b/tests/integration/test_executable_dictionary/user_scripts/input_implicit_slow.py index 648a9eac918..cbe47041712 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input_implicit_slow.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input_implicit_slow.py @@ -5,8 +5,8 @@ import os import signal import time -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: time.sleep(5) - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/input_implicit_sum.py b/tests/integration/test_executable_dictionary/user_scripts/input_implicit_sum.py index 432d7a13a2f..b8297cc42bc 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input_implicit_sum.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input_implicit_sum.py @@ -3,8 +3,8 @@ import sys import re -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: - line_split = re.split(r'\t+', line) - print(int(line_split[0]) + int(line_split[1]), end='\n') + line_split = re.split(r"\t+", line) + print(int(line_split[0]) + int(line_split[1]), end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/input_signalled.py b/tests/integration/test_executable_dictionary/user_scripts/input_signalled.py index a3a99f1e71e..4c131ddffd0 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input_signalled.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input_signalled.py @@ -5,9 +5,9 @@ import os import signal import time -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: os.signal(os.getpid(), signal.SIGTERM) - updated_line = line.replace('\n', '') - print(updated_line + '\t' + "Key " + updated_line, end='\n') + updated_line = line.replace("\n", "") + print(updated_line + "\t" + "Key " + updated_line, end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/input_slow.py 
b/tests/integration/test_executable_dictionary/user_scripts/input_slow.py index a3b8c484b29..aa8ec0101e2 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input_slow.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input_slow.py @@ -5,9 +5,9 @@ import os import signal import time -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: time.sleep(5) - updated_line = line.replace('\n', '') - print(updated_line + '\t' + "Key " + updated_line, end='\n') + updated_line = line.replace("\n", "") + print(updated_line + "\t" + "Key " + updated_line, end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/input_sum.py b/tests/integration/test_executable_dictionary/user_scripts/input_sum.py index e9ec5028701..ffdf599c886 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/input_sum.py +++ b/tests/integration/test_executable_dictionary/user_scripts/input_sum.py @@ -3,10 +3,10 @@ import sys import re -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: - updated_line = line.replace('\n', '') - line_split = re.split(r'\t+', line) + updated_line = line.replace("\n", "") + line_split = re.split(r"\t+", line) sum = int(line_split[0]) + int(line_split[1]) - print(updated_line + '\t' + str(sum), end='\n') + print(updated_line + "\t" + str(sum), end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/source.py b/tests/integration/test_executable_dictionary/user_scripts/source.py index e105773c467..7af4d950f44 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/source.py +++ b/tests/integration/test_executable_dictionary/user_scripts/source.py @@ -2,9 +2,9 @@ import sys -if __name__ == '__main__': - print('1' + '\t' + 'Value 1', end='\n') - print('2' + '\t' + 'Value 2', end='\n') - print('3' + '\t' + 'Value 3', end='\n') +if __name__ == "__main__": + print("1" + "\t" + "Value 1", end="\n") + print("2" + "\t" + "Value 2", end="\n") + print("3" + "\t" + "Value 3", end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/source_argument.py b/tests/integration/test_executable_dictionary/user_scripts/source_argument.py index 881e73adc97..decb0482fac 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/source_argument.py +++ b/tests/integration/test_executable_dictionary/user_scripts/source_argument.py @@ -2,11 +2,11 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": arg = int(sys.argv[1]) - print('1' + '\t' + 'Value ' + str(arg) + ' 1', end='\n') - print('2' + '\t' + 'Value ' + str(arg) + ' 2', end='\n') - print('3' + '\t' + 'Value ' + str(arg) + ' 3', end='\n') + print("1" + "\t" + "Value " + str(arg) + " 1", end="\n") + print("2" + "\t" + "Value " + str(arg) + " 2", end="\n") + print("3" + "\t" + "Value " + str(arg) + " 3", end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_dictionary/user_scripts/source_update.py b/tests/integration/test_executable_dictionary/user_scripts/source_update.py index 99388f9ada3..1090dac85b9 100755 --- a/tests/integration/test_executable_dictionary/user_scripts/source_update.py +++ b/tests/integration/test_executable_dictionary/user_scripts/source_update.py @@ -2,11 +2,11 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": update_field_value = 0 if len(sys.argv) >= 2: update_field_value = int(sys.argv[1]) - print('1' + '\t' + 'Value ' + 
str(update_field_value) + ' 1', end='\n') + print("1" + "\t" + "Value " + str(update_field_value) + " 1", end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/test.py b/tests/integration/test_executable_table_function/test.py index 7820396d20f..868e056993b 100644 --- a/tests/integration/test_executable_table_function/test.py +++ b/tests/integration/test_executable_table_function/test.py @@ -9,7 +9,7 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True, main_configs=[]) +node = cluster.add_instance("node", stay_alive=True, main_configs=[]) # Something like https://reviews.llvm.org/D33325 @@ -19,14 +19,23 @@ def skip_test_msan(instance): def copy_file_to_container(local_path, dist_path, container_id): - os.system("docker cp {local} {cont_id}:{dist}".format(local=local_path, cont_id=container_id, dist=dist_path)) + os.system( + "docker cp {local} {cont_id}:{dist}".format( + local=local_path, cont_id=container_id, dist=dist_path + ) + ) + @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - copy_file_to_container(os.path.join(SCRIPT_DIR, 'user_scripts/.'), '/var/lib/clickhouse/user_scripts', node.docker_id) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "user_scripts/."), + "/var/lib/clickhouse/user_scripts", + node.docker_id, + ) node.restart_clickhouse() node.query("CREATE TABLE test_data_table (id UInt64) ENGINE=TinyLog;") @@ -37,266 +46,331 @@ def started_cluster(): finally: cluster.shutdown() + def test_executable_function_no_input_bash(started_cluster): skip_test_msan(node) - assert node.query("SELECT * FROM executable('no_input.sh', 'TabSeparated', 'value String')") == 'Key 0\nKey 1\nKey 2\n' + assert ( + node.query( + "SELECT * FROM executable('no_input.sh', 'TabSeparated', 'value String')" + ) + == "Key 0\nKey 1\nKey 2\n" + ) + def test_executable_function_no_input_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT * FROM executable('no_input.py', 'TabSeparated', 'value String')") == 'Key 0\nKey 1\nKey 2\n' + assert ( + node.query( + "SELECT * FROM executable('no_input.py', 'TabSeparated', 'value String')" + ) + == "Key 0\nKey 1\nKey 2\n" + ) + def test_executable_function_input_bash(started_cluster): skip_test_msan(node) - query = "SELECT * FROM executable('input.sh', 'TabSeparated', 'value String', {source})" - assert node.query(query.format(source='(SELECT 1)')) == 'Key 1\n' - assert node.query(query.format(source='(SELECT id FROM test_data_table)')) == 'Key 0\nKey 1\nKey 2\n' + query = ( + "SELECT * FROM executable('input.sh', 'TabSeparated', 'value String', {source})" + ) + assert node.query(query.format(source="(SELECT 1)")) == "Key 1\n" + assert ( + node.query(query.format(source="(SELECT id FROM test_data_table)")) + == "Key 0\nKey 1\nKey 2\n" + ) + def test_executable_function_input_python(started_cluster): skip_test_msan(node) - query = "SELECT * FROM executable('input.py', 'TabSeparated', 'value String', {source})" - assert node.query(query.format(source='(SELECT 1)')) == 'Key 1\n' - assert node.query(query.format(source='(SELECT id FROM test_data_table)')) == 'Key 0\nKey 1\nKey 2\n' + query = ( + "SELECT * FROM executable('input.py', 'TabSeparated', 'value String', {source})" + ) + assert node.query(query.format(source="(SELECT 1)")) == "Key 1\n" + assert ( + node.query(query.format(source="(SELECT id FROM test_data_table)")) + == "Key 
0\nKey 1\nKey 2\n" + ) + def test_executable_function_input_sum_python(started_cluster): skip_test_msan(node) query = "SELECT * FROM executable('input_sum.py', 'TabSeparated', 'value UInt64', {source})" - assert node.query(query.format(source='(SELECT 1, 1)')) == '2\n' - assert node.query(query.format(source='(SELECT id, id FROM test_data_table)')) == '0\n2\n4\n' + assert node.query(query.format(source="(SELECT 1, 1)")) == "2\n" + assert ( + node.query(query.format(source="(SELECT id, id FROM test_data_table)")) + == "0\n2\n4\n" + ) + def test_executable_function_input_argument_python(started_cluster): skip_test_msan(node) query = "SELECT * FROM executable('input_argument.py 1', 'TabSeparated', 'value String', {source})" - assert node.query(query.format(source='(SELECT 1)')) == 'Key 1 1\n' - assert node.query(query.format(source='(SELECT id FROM test_data_table)')) == 'Key 1 0\nKey 1 1\nKey 1 2\n' + assert node.query(query.format(source="(SELECT 1)")) == "Key 1 1\n" + assert ( + node.query(query.format(source="(SELECT id FROM test_data_table)")) + == "Key 1 0\nKey 1 1\nKey 1 2\n" + ) + def test_executable_function_input_signalled_python(started_cluster): skip_test_msan(node) query = "SELECT * FROM executable('input_signalled.py', 'TabSeparated', 'value String', {source})" - assert node.query(query.format(source='(SELECT 1)')) == '' - assert node.query(query.format(source='(SELECT id FROM test_data_table)')) == '' + assert node.query(query.format(source="(SELECT 1)")) == "" + assert node.query(query.format(source="(SELECT id FROM test_data_table)")) == "" + def test_executable_function_input_slow_python(started_cluster): skip_test_msan(node) query = "SELECT * FROM executable('input_slow.py', 'TabSeparated', 'value String', {source})" - assert node.query_and_get_error(query.format(source='(SELECT 1)')) - assert node.query_and_get_error(query.format(source='(SELECT id FROM test_data_table)')) + assert node.query_and_get_error(query.format(source="(SELECT 1)")) + assert node.query_and_get_error( + query.format(source="(SELECT id FROM test_data_table)") + ) + def test_executable_function_input_multiple_pipes_python(started_cluster): skip_test_msan(node) query = "SELECT * FROM executable('input_multiple_pipes.py', 'TabSeparated', 'value String', {source})" - actual = node.query(query.format(source='(SELECT 1), (SELECT 2), (SELECT 3)')) - expected = 'Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 1\n' + actual = node.query(query.format(source="(SELECT 1), (SELECT 2), (SELECT 3)")) + expected = "Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 1\n" assert actual == expected - actual = node.query(query.format(source='(SELECT id FROM test_data_table), (SELECT 2), (SELECT 3)')) - expected = 'Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 0\nKey from 0 fd 1\nKey from 0 fd 2\n' + actual = node.query( + query.format(source="(SELECT id FROM test_data_table), (SELECT 2), (SELECT 3)") + ) + expected = "Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 0\nKey from 0 fd 1\nKey from 0 fd 2\n" assert actual == expected + def test_executable_storage_no_input_bash(started_cluster): skip_test_msan(node) node.query("DROP TABLE IF EXISTS test_table") - node.query("CREATE TABLE test_table (value String) ENGINE=Executable('no_input.sh', 'TabSeparated')") - assert node.query("SELECT * FROM test_table") == 'Key 0\nKey 1\nKey 2\n' + node.query( + "CREATE TABLE test_table (value String) ENGINE=Executable('no_input.sh', 'TabSeparated')" + ) + assert node.query("SELECT * FROM test_table") == "Key 0\nKey 1\nKey 2\n" 
node.query("DROP TABLE test_table") + def test_executable_storage_no_input_python(started_cluster): skip_test_msan(node) node.query("DROP TABLE IF EXISTS test_table") - node.query("CREATE TABLE test_table (value String) ENGINE=Executable('no_input.py', 'TabSeparated')") - assert node.query("SELECT * FROM test_table") == 'Key 0\nKey 1\nKey 2\n' + node.query( + "CREATE TABLE test_table (value String) ENGINE=Executable('no_input.py', 'TabSeparated')" + ) + assert node.query("SELECT * FROM test_table") == "Key 0\nKey 1\nKey 2\n" node.query("DROP TABLE test_table") + def test_executable_storage_input_bash(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=Executable('input.sh', 'TabSeparated', {source})" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1)')) - assert node.query("SELECT * FROM test_table") == 'Key 1\n' + node.query(query.format(source="(SELECT 1)")) + assert node.query("SELECT * FROM test_table") == "Key 1\n" node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table)')) - assert node.query("SELECT * FROM test_table") == 'Key 0\nKey 1\nKey 2\n' + node.query(query.format(source="(SELECT id FROM test_data_table)")) + assert node.query("SELECT * FROM test_table") == "Key 0\nKey 1\nKey 2\n" node.query("DROP TABLE test_table") + def test_executable_storage_input_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=Executable('input.py', 'TabSeparated', {source})" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1)')) - assert node.query("SELECT * FROM test_table") == 'Key 1\n' + node.query(query.format(source="(SELECT 1)")) + assert node.query("SELECT * FROM test_table") == "Key 1\n" node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table)')) - assert node.query("SELECT * FROM test_table") == 'Key 0\nKey 1\nKey 2\n' + node.query(query.format(source="(SELECT id FROM test_data_table)")) + assert node.query("SELECT * FROM test_table") == "Key 0\nKey 1\nKey 2\n" node.query("DROP TABLE test_table") + def test_executable_storage_input_send_chunk_header_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=Executable('input_chunk_header.py', 'TabSeparated', {source}) SETTINGS send_chunk_header=1" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1)')) - assert node.query("SELECT * FROM test_table") == 'Key 1\n' + node.query(query.format(source="(SELECT 1)")) + assert node.query("SELECT * FROM test_table") == "Key 1\n" node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table)')) - assert node.query("SELECT * FROM test_table") == 'Key 0\nKey 1\nKey 2\n' + node.query(query.format(source="(SELECT id FROM test_data_table)")) + assert node.query("SELECT * FROM test_table") == "Key 0\nKey 1\nKey 2\n" node.query("DROP TABLE test_table") + def test_executable_storage_input_sum_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value UInt64) ENGINE=Executable('input_sum.py', 'TabSeparated', {source})" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1, 1)')) - assert node.query("SELECT * FROM test_table") == '2\n' + node.query(query.format(source="(SELECT 1, 1)")) + assert node.query("SELECT * FROM test_table") == "2\n" node.query("DROP TABLE 
test_table") - node.query(query.format(source='(SELECT id, id FROM test_data_table)')) - assert node.query("SELECT * FROM test_table") == '0\n2\n4\n' + node.query(query.format(source="(SELECT id, id FROM test_data_table)")) + assert node.query("SELECT * FROM test_table") == "0\n2\n4\n" node.query("DROP TABLE test_table") + def test_executable_storage_input_argument_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=Executable('input_argument.py 1', 'TabSeparated', {source})" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1)')) - assert node.query("SELECT * FROM test_table") == 'Key 1 1\n' + node.query(query.format(source="(SELECT 1)")) + assert node.query("SELECT * FROM test_table") == "Key 1 1\n" node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table)')) - assert node.query("SELECT * FROM test_table") == 'Key 1 0\nKey 1 1\nKey 1 2\n' + node.query(query.format(source="(SELECT id FROM test_data_table)")) + assert node.query("SELECT * FROM test_table") == "Key 1 0\nKey 1 1\nKey 1 2\n" node.query("DROP TABLE test_table") + def test_executable_storage_input_signalled_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=Executable('input_signalled.py', 'TabSeparated', {source})" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1)')) - assert node.query("SELECT * FROM test_table") == '' + node.query(query.format(source="(SELECT 1)")) + assert node.query("SELECT * FROM test_table") == "" node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table)')) - assert node.query("SELECT * FROM test_table") == '' + node.query(query.format(source="(SELECT id FROM test_data_table)")) + assert node.query("SELECT * FROM test_table") == "" node.query("DROP TABLE test_table") + def test_executable_storage_input_slow_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=Executable('input_slow.py', 'TabSeparated', {source}) SETTINGS command_read_timeout=2500" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1)')) + node.query(query.format(source="(SELECT 1)")) assert node.query_and_get_error("SELECT * FROM test_table") node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table)')) + node.query(query.format(source="(SELECT id FROM test_data_table)")) assert node.query_and_get_error("SELECT * FROM test_table") node.query("DROP TABLE test_table") + def test_executable_function_input_multiple_pipes_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=Executable('input_multiple_pipes.py', 'TabSeparated', {source})" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1), (SELECT 2), (SELECT 3)')) - assert node.query("SELECT * FROM test_table") == 'Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 1\n' + node.query(query.format(source="(SELECT 1), (SELECT 2), (SELECT 3)")) + assert ( + node.query("SELECT * FROM test_table") + == "Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 1\n" + ) node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table), (SELECT 2), (SELECT 3)')) - assert node.query("SELECT * FROM test_table") == 'Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 0\nKey from 0 fd 1\nKey from 0 fd 2\n' + 
node.query( + query.format(source="(SELECT id FROM test_data_table), (SELECT 2), (SELECT 3)") + ) + assert ( + node.query("SELECT * FROM test_table") + == "Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 0\nKey from 0 fd 1\nKey from 0 fd 2\n" + ) node.query("DROP TABLE test_table") + def test_executable_pool_storage_input_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=ExecutablePool('input_pool.py', 'TabSeparated', {source}) SETTINGS send_chunk_header=1, pool_size=1" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1)')) + node.query(query.format(source="(SELECT 1)")) - assert node.query("SELECT * FROM test_table") == 'Key 1\n' - assert node.query("SELECT * FROM test_table") == 'Key 1\n' - assert node.query("SELECT * FROM test_table") == 'Key 1\n' + assert node.query("SELECT * FROM test_table") == "Key 1\n" + assert node.query("SELECT * FROM test_table") == "Key 1\n" + assert node.query("SELECT * FROM test_table") == "Key 1\n" node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table)')) + node.query(query.format(source="(SELECT id FROM test_data_table)")) - assert node.query("SELECT * FROM test_table") == 'Key 0\nKey 1\nKey 2\n' - assert node.query("SELECT * FROM test_table") == 'Key 0\nKey 1\nKey 2\n' - assert node.query("SELECT * FROM test_table") == 'Key 0\nKey 1\nKey 2\n' + assert node.query("SELECT * FROM test_table") == "Key 0\nKey 1\nKey 2\n" + assert node.query("SELECT * FROM test_table") == "Key 0\nKey 1\nKey 2\n" + assert node.query("SELECT * FROM test_table") == "Key 0\nKey 1\nKey 2\n" node.query("DROP TABLE test_table") + def test_executable_pool_storage_input_sum_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value UInt64) ENGINE=ExecutablePool('input_sum_pool.py', 'TabSeparated', {source}) SETTINGS send_chunk_header=1, pool_size=1" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1, 1)')) + node.query(query.format(source="(SELECT 1, 1)")) - assert node.query("SELECT * FROM test_table") == '2\n' - assert node.query("SELECT * FROM test_table") == '2\n' - assert node.query("SELECT * FROM test_table") == '2\n' + assert node.query("SELECT * FROM test_table") == "2\n" + assert node.query("SELECT * FROM test_table") == "2\n" + assert node.query("SELECT * FROM test_table") == "2\n" node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id, id FROM test_data_table)')) + node.query(query.format(source="(SELECT id, id FROM test_data_table)")) - assert node.query("SELECT * FROM test_table") == '0\n2\n4\n' - assert node.query("SELECT * FROM test_table") == '0\n2\n4\n' - assert node.query("SELECT * FROM test_table") == '0\n2\n4\n' + assert node.query("SELECT * FROM test_table") == "0\n2\n4\n" + assert node.query("SELECT * FROM test_table") == "0\n2\n4\n" + assert node.query("SELECT * FROM test_table") == "0\n2\n4\n" node.query("DROP TABLE test_table") + def test_executable_pool_storage_input_argument_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=ExecutablePool('input_argument_pool.py 1', 'TabSeparated', {source}) SETTINGS send_chunk_header=1, pool_size=1" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1)')) + node.query(query.format(source="(SELECT 1)")) - assert node.query("SELECT * FROM test_table") == 'Key 1 1\n' - assert node.query("SELECT * FROM test_table") == 
'Key 1 1\n' - assert node.query("SELECT * FROM test_table") == 'Key 1 1\n' + assert node.query("SELECT * FROM test_table") == "Key 1 1\n" + assert node.query("SELECT * FROM test_table") == "Key 1 1\n" + assert node.query("SELECT * FROM test_table") == "Key 1 1\n" node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table)')) + node.query(query.format(source="(SELECT id FROM test_data_table)")) - assert node.query("SELECT * FROM test_table") == 'Key 1 0\nKey 1 1\nKey 1 2\n' - assert node.query("SELECT * FROM test_table") == 'Key 1 0\nKey 1 1\nKey 1 2\n' - assert node.query("SELECT * FROM test_table") == 'Key 1 0\nKey 1 1\nKey 1 2\n' + assert node.query("SELECT * FROM test_table") == "Key 1 0\nKey 1 1\nKey 1 2\n" + assert node.query("SELECT * FROM test_table") == "Key 1 0\nKey 1 1\nKey 1 2\n" + assert node.query("SELECT * FROM test_table") == "Key 1 0\nKey 1 1\nKey 1 2\n" node.query("DROP TABLE test_table") + def test_executable_pool_storage_input_signalled_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=ExecutablePool('input_signalled_pool.py', 'TabSeparated', {source}) SETTINGS send_chunk_header=1, pool_size=1" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1)')) + node.query(query.format(source="(SELECT 1)")) assert node.query_and_get_error("SELECT * FROM test_table") assert node.query_and_get_error("SELECT * FROM test_table") @@ -304,7 +378,7 @@ def test_executable_pool_storage_input_signalled_python(started_cluster): node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table)')) + node.query(query.format(source="(SELECT id FROM test_data_table)")) assert node.query_and_get_error("SELECT * FROM test_table") assert node.query_and_get_error("SELECT * FROM test_table") @@ -312,6 +386,7 @@ def test_executable_pool_storage_input_signalled_python(started_cluster): node.query("DROP TABLE test_table") + def test_executable_pool_storage_input_slow_python(started_cluster): skip_test_msan(node) @@ -320,7 +395,7 @@ def test_executable_pool_storage_input_slow_python(started_cluster): SETTINGS send_chunk_header=1, pool_size=1, command_read_timeout=2500""" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1)')) + node.query(query.format(source="(SELECT 1)")) assert node.query_and_get_error("SELECT * FROM test_table") assert node.query_and_get_error("SELECT * FROM test_table") @@ -328,7 +403,7 @@ def test_executable_pool_storage_input_slow_python(started_cluster): node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table)')) + node.query(query.format(source="(SELECT id FROM test_data_table)")) assert node.query_and_get_error("SELECT * FROM test_table") assert node.query_and_get_error("SELECT * FROM test_table") @@ -336,46 +411,68 @@ def test_executable_pool_storage_input_slow_python(started_cluster): node.query("DROP TABLE test_table") + def test_executable_pool_storage_input_multiple_pipes_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=ExecutablePool('input_multiple_pipes_pool.py', 'TabSeparated', {source}) SETTINGS send_chunk_header=1, pool_size=1" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1), (SELECT 2), (SELECT 3)')) + node.query(query.format(source="(SELECT 1), (SELECT 2), (SELECT 3)")) - assert node.query("SELECT * FROM test_table") == 'Key 
from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 1\n' - assert node.query("SELECT * FROM test_table") == 'Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 1\n' - assert node.query("SELECT * FROM test_table") == 'Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 1\n' + assert ( + node.query("SELECT * FROM test_table") + == "Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 1\n" + ) + assert ( + node.query("SELECT * FROM test_table") + == "Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 1\n" + ) + assert ( + node.query("SELECT * FROM test_table") + == "Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 1\n" + ) node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT id FROM test_data_table), (SELECT 2), (SELECT 3)')) + node.query( + query.format(source="(SELECT id FROM test_data_table), (SELECT 2), (SELECT 3)") + ) - assert node.query("SELECT * FROM test_table") == 'Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 0\nKey from 0 fd 1\nKey from 0 fd 2\n' - assert node.query("SELECT * FROM test_table") == 'Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 0\nKey from 0 fd 1\nKey from 0 fd 2\n' - assert node.query("SELECT * FROM test_table") == 'Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 0\nKey from 0 fd 1\nKey from 0 fd 2\n' + assert ( + node.query("SELECT * FROM test_table") + == "Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 0\nKey from 0 fd 1\nKey from 0 fd 2\n" + ) + assert ( + node.query("SELECT * FROM test_table") + == "Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 0\nKey from 0 fd 1\nKey from 0 fd 2\n" + ) + assert ( + node.query("SELECT * FROM test_table") + == "Key from 4 fd 3\nKey from 3 fd 2\nKey from 0 fd 0\nKey from 0 fd 1\nKey from 0 fd 2\n" + ) node.query("DROP TABLE test_table") + def test_executable_pool_storage_input_count_python(started_cluster): skip_test_msan(node) query = "CREATE TABLE test_table (value String) ENGINE=ExecutablePool('input_count_pool.py', 'TabSeparated', {source}) SETTINGS send_chunk_header=1, pool_size=1" node.query("DROP TABLE IF EXISTS test_table") - node.query(query.format(source='(SELECT 1)')) + node.query(query.format(source="(SELECT 1)")) - assert node.query("SELECT * FROM test_table") == '1\n' - assert node.query("SELECT * FROM test_table") == '1\n' - assert node.query("SELECT * FROM test_table") == '1\n' + assert node.query("SELECT * FROM test_table") == "1\n" + assert node.query("SELECT * FROM test_table") == "1\n" + assert node.query("SELECT * FROM test_table") == "1\n" node.query("DROP TABLE test_table") - node.query(query.format(source='(SELECT number FROM system.numbers LIMIT 250000)')) + node.query(query.format(source="(SELECT number FROM system.numbers LIMIT 250000)")) - assert node.query("SELECT * FROM test_table") == '250000\n' - assert node.query("SELECT * FROM test_table") == '250000\n' - assert node.query("SELECT * FROM test_table") == '250000\n' + assert node.query("SELECT * FROM test_table") == "250000\n" + assert node.query("SELECT * FROM test_table") == "250000\n" + assert node.query("SELECT * FROM test_table") == "250000\n" node.query("DROP TABLE test_table") diff --git a/tests/integration/test_executable_table_function/user_scripts/input.py b/tests/integration/test_executable_table_function/user_scripts/input.py index 835ab1f441a..3ace4f73611 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input.py +++ b/tests/integration/test_executable_table_function/user_scripts/input.py @@ -2,7 +2,7 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: - 
print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_argument.py b/tests/integration/test_executable_table_function/user_scripts/input_argument.py index c1b2e5966d7..b9b7f5065b2 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_argument.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_argument.py @@ -2,9 +2,9 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": arg = int(sys.argv[1]) for line in sys.stdin: - print("Key " + str(arg) + " " + line, end='') + print("Key " + str(arg) + " " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_argument_pool.py b/tests/integration/test_executable_table_function/user_scripts/input_argument_pool.py index 378a6ef4391..13cad8e01d4 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_argument_pool.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_argument_pool.py @@ -2,16 +2,16 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": arg = int(sys.argv[1]) for chunk_header in sys.stdin: chunk_length = int(chunk_header) - print(str(chunk_length), end='\n') + print(str(chunk_length), end="\n") while chunk_length != 0: line = sys.stdin.readline() chunk_length -= 1 - print("Key " + str(arg) + " " + line, end='') + print("Key " + str(arg) + " " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_chunk_header.py b/tests/integration/test_executable_table_function/user_scripts/input_chunk_header.py index 5dc03e1c507..90c8bfd9a2f 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_chunk_header.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_chunk_header.py @@ -2,13 +2,13 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": for chunk_header in sys.stdin: chunk_length = int(chunk_header) while chunk_length != 0: line = sys.stdin.readline() chunk_length -= 1 - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_count_pool.py b/tests/integration/test_executable_table_function/user_scripts/input_count_pool.py index 8b744168a82..b80c4832ab1 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_count_pool.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_count_pool.py @@ -2,11 +2,11 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": for chunk_header in sys.stdin: chunk_length = int(chunk_header) - print(1, end='\n') - print(str(chunk_length), end='\n') + print(1, end="\n") + print(str(chunk_length), end="\n") while chunk_length != 0: line = sys.stdin.readline() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_multiple_pipes.py b/tests/integration/test_executable_table_function/user_scripts/input_multiple_pipes.py index 64590cbc16a..4c7a03eee80 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_multiple_pipes.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_multiple_pipes.py @@ -3,17 +3,17 @@ import sys import os -if __name__ == '__main__': +if __name__ == "__main__": fd3 = os.fdopen(3) fd4 = os.fdopen(4) for line in fd4: - print("Key from 4 fd " + line, 
end='') + print("Key from 4 fd " + line, end="") for line in fd3: - print("Key from 3 fd " + line, end='') + print("Key from 3 fd " + line, end="") for line in sys.stdin: - print("Key from 0 fd " + line, end='') + print("Key from 0 fd " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_multiple_pipes_pool.py b/tests/integration/test_executable_table_function/user_scripts/input_multiple_pipes_pool.py index a3a515899f9..412e7d95299 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_multiple_pipes_pool.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_multiple_pipes_pool.py @@ -3,7 +3,7 @@ import sys import os -if __name__ == '__main__': +if __name__ == "__main__": fd3 = os.fdopen(3) fd4 = os.fdopen(4) @@ -36,10 +36,10 @@ if __name__ == '__main__': break break - print(str(len(lines)), end='\n') + print(str(len(lines)), end="\n") for line in lines: - print(line, end='') + print(line, end="") lines.clear() - sys.stdout.flush() \ No newline at end of file + sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_pool.py b/tests/integration/test_executable_table_function/user_scripts/input_pool.py index ec4e9af23cd..fe991be1417 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_pool.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_pool.py @@ -2,14 +2,14 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": for chunk_header in sys.stdin: chunk_length = int(chunk_header) - print(str(chunk_length), end='\n') + print(str(chunk_length), end="\n") while chunk_length != 0: line = sys.stdin.readline() chunk_length -= 1 - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_signalled.py b/tests/integration/test_executable_table_function/user_scripts/input_signalled.py index 93ce20fa8e7..fd3ad19039d 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_signalled.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_signalled.py @@ -4,9 +4,9 @@ import sys import os import signal -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: os.signal(os.getpid(), signal.SIGTERM) - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_signalled_pool.py b/tests/integration/test_executable_table_function/user_scripts/input_signalled_pool.py index 1ea0eddbd8d..79813c2e9c7 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_signalled_pool.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_signalled_pool.py @@ -4,16 +4,16 @@ import sys import os import signal -if __name__ == '__main__': +if __name__ == "__main__": for chunk_header in sys.stdin: os.signal(os.getpid(), signal.SIGTERM) chunk_length = int(chunk_header) - print(str(chunk_length), end='\n') + print(str(chunk_length), end="\n") while chunk_length != 0: line = sys.stdin.readline() chunk_length -= 1 - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_slow.py b/tests/integration/test_executable_table_function/user_scripts/input_slow.py index 4c2abe89e33..e007a58dfb4 100755 --- 
a/tests/integration/test_executable_table_function/user_scripts/input_slow.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_slow.py @@ -3,8 +3,8 @@ import sys import time -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: time.sleep(25) - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_slow_pool.py b/tests/integration/test_executable_table_function/user_scripts/input_slow_pool.py index c8df7e18c4c..7cbf8950826 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_slow_pool.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_slow_pool.py @@ -3,16 +3,16 @@ import sys import time -if __name__ == '__main__': +if __name__ == "__main__": for chunk_header in sys.stdin: time.sleep(25) chunk_length = int(chunk_header) - print(str(chunk_length), end='\n') + print(str(chunk_length), end="\n") while chunk_length != 0: line = sys.stdin.readline() chunk_length -= 1 - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_sum.py b/tests/integration/test_executable_table_function/user_scripts/input_sum.py index 432d7a13a2f..b8297cc42bc 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_sum.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_sum.py @@ -3,8 +3,8 @@ import sys import re -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: - line_split = re.split(r'\t+', line) - print(int(line_split[0]) + int(line_split[1]), end='\n') + line_split = re.split(r"\t+", line) + print(int(line_split[0]) + int(line_split[1]), end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/input_sum_pool.py b/tests/integration/test_executable_table_function/user_scripts/input_sum_pool.py index cd0de25fe87..a04dc9a1b26 100755 --- a/tests/integration/test_executable_table_function/user_scripts/input_sum_pool.py +++ b/tests/integration/test_executable_table_function/user_scripts/input_sum_pool.py @@ -3,15 +3,15 @@ import sys import re -if __name__ == '__main__': +if __name__ == "__main__": for chunk_header in sys.stdin: chunk_length = int(chunk_header) - print(str(chunk_length), end='\n') + print(str(chunk_length), end="\n") while chunk_length != 0: line = sys.stdin.readline() - line_split = re.split(r'\t+', line) - print(int(line_split[0]) + int(line_split[1]), end='\n') + line_split = re.split(r"\t+", line) + print(int(line_split[0]) + int(line_split[1]), end="\n") chunk_length -= 1 sys.stdout.flush() diff --git a/tests/integration/test_executable_table_function/user_scripts/no_input.py b/tests/integration/test_executable_table_function/user_scripts/no_input.py index 65b78f3d755..062032924ac 100755 --- a/tests/integration/test_executable_table_function/user_scripts/no_input.py +++ b/tests/integration/test_executable_table_function/user_scripts/no_input.py @@ -2,7 +2,7 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": print("Key 0") print("Key 1") print("Key 2") diff --git a/tests/integration/test_executable_user_defined_function/test.py b/tests/integration/test_executable_user_defined_function/test.py index e6542d79e4b..10993e9c5dd 100644 --- a/tests/integration/test_executable_user_defined_function/test.py +++ 
b/tests/integration/test_executable_user_defined_function/test.py @@ -10,29 +10,47 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True, main_configs=[]) +node = cluster.add_instance("node", stay_alive=True, main_configs=[]) def skip_test_msan(instance): if instance.is_built_with_memory_sanitizer(): pytest.skip("Memory Sanitizer cannot work with vfork") -def copy_file_to_container(local_path, dist_path, container_id): - os.system("docker cp {local} {cont_id}:{dist}".format(local=local_path, cont_id=container_id, dist=dist_path)) -config = ''' +def copy_file_to_container(local_path, dist_path, container_id): + os.system( + "docker cp {local} {cont_id}:{dist}".format( + local=local_path, cont_id=container_id, dist=dist_path + ) + ) + + +config = """ /etc/clickhouse-server/functions/test_function_config.xml -''' +""" + @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - node.replace_config("/etc/clickhouse-server/config.d/executable_user_defined_functions_config.xml", config) + node.replace_config( + "/etc/clickhouse-server/config.d/executable_user_defined_functions_config.xml", + config, + ) - copy_file_to_container(os.path.join(SCRIPT_DIR, 'functions/.'), '/etc/clickhouse-server/functions', node.docker_id) - copy_file_to_container(os.path.join(SCRIPT_DIR, 'user_scripts/.'), '/var/lib/clickhouse/user_scripts', node.docker_id) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "functions/."), + "/etc/clickhouse-server/functions", + node.docker_id, + ) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "user_scripts/."), + "/var/lib/clickhouse/user_scripts", + node.docker_id, + ) node.restart_clickhouse() @@ -41,69 +59,105 @@ def started_cluster(): finally: cluster.shutdown() + def test_executable_function_bash(started_cluster): skip_test_msan(node) - assert node.query("SELECT test_function_bash(toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT test_function_bash(1)") == 'Key 1\n' + assert node.query("SELECT test_function_bash(toUInt64(1))") == "Key 1\n" + assert node.query("SELECT test_function_bash(1)") == "Key 1\n" + + assert node.query("SELECT test_function_pool_bash(toUInt64(1))") == "Key 1\n" + assert node.query("SELECT test_function_pool_bash(1)") == "Key 1\n" - assert node.query("SELECT test_function_pool_bash(toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT test_function_pool_bash(1)") == 'Key 1\n' def test_executable_function_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT test_function_python(toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT test_function_python(1)") == 'Key 1\n' + assert node.query("SELECT test_function_python(toUInt64(1))") == "Key 1\n" + assert node.query("SELECT test_function_python(1)") == "Key 1\n" + + assert node.query("SELECT test_function_pool_python(toUInt64(1))") == "Key 1\n" + assert node.query("SELECT test_function_pool_python(1)") == "Key 1\n" - assert node.query("SELECT test_function_pool_python(toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT test_function_pool_python(1)") == 'Key 1\n' def test_executable_function_send_chunk_header_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT test_function_send_chunk_header_python(toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT test_function_send_chunk_header_python(1)") == 'Key 1\n' + assert ( + node.query("SELECT 
test_function_send_chunk_header_python(toUInt64(1))") + == "Key 1\n" + ) + assert node.query("SELECT test_function_send_chunk_header_python(1)") == "Key 1\n" + + assert ( + node.query("SELECT test_function_send_chunk_header_pool_python(toUInt64(1))") + == "Key 1\n" + ) + assert ( + node.query("SELECT test_function_send_chunk_header_pool_python(1)") == "Key 1\n" + ) - assert node.query("SELECT test_function_send_chunk_header_pool_python(toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT test_function_send_chunk_header_pool_python(1)") == 'Key 1\n' def test_executable_function_sum_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT test_function_sum_python(toUInt64(1), toUInt64(1))") == '2\n' - assert node.query("SELECT test_function_sum_python(1, 1)") == '2\n' + assert ( + node.query("SELECT test_function_sum_python(toUInt64(1), toUInt64(1))") == "2\n" + ) + assert node.query("SELECT test_function_sum_python(1, 1)") == "2\n" + + assert ( + node.query("SELECT test_function_sum_pool_python(toUInt64(1), toUInt64(1))") + == "2\n" + ) + assert node.query("SELECT test_function_sum_pool_python(1, 1)") == "2\n" - assert node.query("SELECT test_function_sum_pool_python(toUInt64(1), toUInt64(1))") == '2\n' - assert node.query("SELECT test_function_sum_pool_python(1, 1)") == '2\n' def test_executable_function_argument_python(started_cluster): skip_test_msan(node) - assert node.query("SELECT test_function_argument_python(toUInt64(1))") == 'Key 1 1\n' - assert node.query("SELECT test_function_argument_python(1)") == 'Key 1 1\n' + assert ( + node.query("SELECT test_function_argument_python(toUInt64(1))") == "Key 1 1\n" + ) + assert node.query("SELECT test_function_argument_python(1)") == "Key 1 1\n" + + assert ( + node.query("SELECT test_function_argument_pool_python(toUInt64(1))") + == "Key 1 1\n" + ) + assert node.query("SELECT test_function_argument_pool_python(1)") == "Key 1 1\n" - assert node.query("SELECT test_function_argument_pool_python(toUInt64(1))") == 'Key 1 1\n' - assert node.query("SELECT test_function_argument_pool_python(1)") == 'Key 1 1\n' def test_executable_function_signalled_python(started_cluster): skip_test_msan(node) - assert node.query_and_get_error("SELECT test_function_signalled_python(toUInt64(1))") + assert node.query_and_get_error( + "SELECT test_function_signalled_python(toUInt64(1))" + ) assert node.query_and_get_error("SELECT test_function_signalled_python(1)") - assert node.query_and_get_error("SELECT test_function_signalled_pool_python(toUInt64(1))") + assert node.query_and_get_error( + "SELECT test_function_signalled_pool_python(toUInt64(1))" + ) assert node.query_and_get_error("SELECT test_function_signalled_pool_python(1)") + def test_executable_function_slow_python(started_cluster): skip_test_msan(node) assert node.query_and_get_error("SELECT test_function_slow_python(toUInt64(1))") assert node.query_and_get_error("SELECT test_function_slow_python(1)") - assert node.query_and_get_error("SELECT test_function_slow_pool_python(toUInt64(1))") + assert node.query_and_get_error( + "SELECT test_function_slow_pool_python(toUInt64(1))" + ) assert node.query_and_get_error("SELECT test_function_slow_pool_python(1)") + def test_executable_function_non_direct_bash(started_cluster): skip_test_msan(node) - assert node.query("SELECT test_function_non_direct_bash(toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT test_function_non_direct_bash(1)") == 'Key 1\n' + assert node.query("SELECT test_function_non_direct_bash(toUInt64(1))") == "Key 1\n" + 
assert node.query("SELECT test_function_non_direct_bash(1)") == "Key 1\n" + + assert ( + node.query("SELECT test_function_non_direct_pool_bash(toUInt64(1))") + == "Key 1\n" + ) + assert node.query("SELECT test_function_non_direct_pool_bash(1)") == "Key 1\n" - assert node.query("SELECT test_function_non_direct_pool_bash(toUInt64(1))") == 'Key 1\n' - assert node.query("SELECT test_function_non_direct_pool_bash(1)") == 'Key 1\n' def test_executable_function_sum_json_python(started_cluster): skip_test_msan(node) @@ -111,23 +165,66 @@ def test_executable_function_sum_json_python(started_cluster): node.query("CREATE TABLE test_table (lhs UInt64, rhs UInt64) ENGINE=TinyLog;") node.query("INSERT INTO test_table VALUES (0, 0), (1, 1), (2, 2);") - assert node.query("SELECT test_function_sum_json_unnamed_args_python(1, 2);") == '3\n' - assert node.query("SELECT test_function_sum_json_unnamed_args_python(lhs, rhs) FROM test_table;") == '0\n2\n4\n' + assert ( + node.query("SELECT test_function_sum_json_unnamed_args_python(1, 2);") == "3\n" + ) + assert ( + node.query( + "SELECT test_function_sum_json_unnamed_args_python(lhs, rhs) FROM test_table;" + ) + == "0\n2\n4\n" + ) - assert node.query("SELECT test_function_sum_json_partially_named_args_python(1, 2);") == '3\n' - assert node.query("SELECT test_function_sum_json_partially_named_args_python(lhs, rhs) FROM test_table;") == '0\n2\n4\n' + assert ( + node.query("SELECT test_function_sum_json_partially_named_args_python(1, 2);") + == "3\n" + ) + assert ( + node.query( + "SELECT test_function_sum_json_partially_named_args_python(lhs, rhs) FROM test_table;" + ) + == "0\n2\n4\n" + ) - assert node.query("SELECT test_function_sum_json_named_args_python(1, 2);") == '3\n' - assert node.query("SELECT test_function_sum_json_named_args_python(lhs, rhs) FROM test_table;") == '0\n2\n4\n' + assert node.query("SELECT test_function_sum_json_named_args_python(1, 2);") == "3\n" + assert ( + node.query( + "SELECT test_function_sum_json_named_args_python(lhs, rhs) FROM test_table;" + ) + == "0\n2\n4\n" + ) - assert node.query("SELECT test_function_sum_json_unnamed_args_pool_python(1, 2);") == '3\n' - assert node.query("SELECT test_function_sum_json_unnamed_args_pool_python(lhs, rhs) FROM test_table;") == '0\n2\n4\n' + assert ( + node.query("SELECT test_function_sum_json_unnamed_args_pool_python(1, 2);") + == "3\n" + ) + assert ( + node.query( + "SELECT test_function_sum_json_unnamed_args_pool_python(lhs, rhs) FROM test_table;" + ) + == "0\n2\n4\n" + ) - assert node.query("SELECT test_function_sum_json_partially_named_args_python(1, 2);") == '3\n' - assert node.query("SELECT test_function_sum_json_partially_named_args_python(lhs, rhs) FROM test_table;") == '0\n2\n4\n' + assert ( + node.query("SELECT test_function_sum_json_partially_named_args_python(1, 2);") + == "3\n" + ) + assert ( + node.query( + "SELECT test_function_sum_json_partially_named_args_python(lhs, rhs) FROM test_table;" + ) + == "0\n2\n4\n" + ) - assert node.query("SELECT test_function_sum_json_named_args_pool_python(1, 2);") == '3\n' - assert node.query("SELECT test_function_sum_json_named_args_pool_python(lhs, rhs) FROM test_table;") == '0\n2\n4\n' + assert ( + node.query("SELECT test_function_sum_json_named_args_pool_python(1, 2);") + == "3\n" + ) + assert ( + node.query( + "SELECT test_function_sum_json_named_args_pool_python(lhs, rhs) FROM test_table;" + ) + == "0\n2\n4\n" + ) node.query("DROP TABLE test_table;") - diff --git 
a/tests/integration/test_executable_user_defined_function/user_scripts/input.py b/tests/integration/test_executable_user_defined_function/user_scripts/input.py index 835ab1f441a..3ace4f73611 100755 --- a/tests/integration/test_executable_user_defined_function/user_scripts/input.py +++ b/tests/integration/test_executable_user_defined_function/user_scripts/input.py @@ -2,7 +2,7 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_user_defined_function/user_scripts/input_argument.py b/tests/integration/test_executable_user_defined_function/user_scripts/input_argument.py index c1b2e5966d7..b9b7f5065b2 100755 --- a/tests/integration/test_executable_user_defined_function/user_scripts/input_argument.py +++ b/tests/integration/test_executable_user_defined_function/user_scripts/input_argument.py @@ -2,9 +2,9 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": arg = int(sys.argv[1]) for line in sys.stdin: - print("Key " + str(arg) + " " + line, end='') + print("Key " + str(arg) + " " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_user_defined_function/user_scripts/input_chunk_header.py b/tests/integration/test_executable_user_defined_function/user_scripts/input_chunk_header.py index 5dc03e1c507..90c8bfd9a2f 100755 --- a/tests/integration/test_executable_user_defined_function/user_scripts/input_chunk_header.py +++ b/tests/integration/test_executable_user_defined_function/user_scripts/input_chunk_header.py @@ -2,13 +2,13 @@ import sys -if __name__ == '__main__': +if __name__ == "__main__": for chunk_header in sys.stdin: chunk_length = int(chunk_header) while chunk_length != 0: line = sys.stdin.readline() chunk_length -= 1 - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_user_defined_function/user_scripts/input_signalled.py b/tests/integration/test_executable_user_defined_function/user_scripts/input_signalled.py index 27c8bc4840e..11a86737966 100755 --- a/tests/integration/test_executable_user_defined_function/user_scripts/input_signalled.py +++ b/tests/integration/test_executable_user_defined_function/user_scripts/input_signalled.py @@ -5,9 +5,9 @@ import os import signal import time -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: os.signal(os.getpid(), signal.SIGTERM) - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_user_defined_function/user_scripts/input_slow.py b/tests/integration/test_executable_user_defined_function/user_scripts/input_slow.py index 648a9eac918..cbe47041712 100755 --- a/tests/integration/test_executable_user_defined_function/user_scripts/input_slow.py +++ b/tests/integration/test_executable_user_defined_function/user_scripts/input_slow.py @@ -5,8 +5,8 @@ import os import signal import time -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: time.sleep(5) - print("Key " + line, end='') + print("Key " + line, end="") sys.stdout.flush() diff --git a/tests/integration/test_executable_user_defined_function/user_scripts/input_sum.py b/tests/integration/test_executable_user_defined_function/user_scripts/input_sum.py index 432d7a13a2f..b8297cc42bc 100755 --- a/tests/integration/test_executable_user_defined_function/user_scripts/input_sum.py +++ 
b/tests/integration/test_executable_user_defined_function/user_scripts/input_sum.py @@ -3,8 +3,8 @@ import sys import re -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: - line_split = re.split(r'\t+', line) - print(int(line_split[0]) + int(line_split[1]), end='\n') + line_split = re.split(r"\t+", line) + print(int(line_split[0]) + int(line_split[1]), end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_named_args.py b/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_named_args.py index 6154abf665a..955196397d3 100755 --- a/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_named_args.py +++ b/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_named_args.py @@ -3,11 +3,11 @@ import sys import json -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: value = json.loads(line) - first_arg = int(value['argument_1']) - second_arg = int(value['argument_2']) - result = {'result_name': first_arg + second_arg} - print(json.dumps(result), end='\n') + first_arg = int(value["argument_1"]) + second_arg = int(value["argument_2"]) + result = {"result_name": first_arg + second_arg} + print(json.dumps(result), end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_partially_named_args.py b/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_partially_named_args.py index 1408418eb4a..9f3e3c091c2 100755 --- a/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_partially_named_args.py +++ b/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_partially_named_args.py @@ -3,11 +3,11 @@ import sys import json -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: value = json.loads(line) - first_arg = int(value['argument_1']) - second_arg = int(value['c2']) - result = {'result_name': first_arg + second_arg} - print(json.dumps(result), end='\n') + first_arg = int(value["argument_1"]) + second_arg = int(value["c2"]) + result = {"result_name": first_arg + second_arg} + print(json.dumps(result), end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_unnamed_args.py b/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_unnamed_args.py index 7e0c68510c8..0aad7b1b435 100755 --- a/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_unnamed_args.py +++ b/tests/integration/test_executable_user_defined_function/user_scripts/input_sum_json_unnamed_args.py @@ -3,11 +3,11 @@ import sys import json -if __name__ == '__main__': +if __name__ == "__main__": for line in sys.stdin: value = json.loads(line) - first_arg = int(value['c1']) - second_arg = int(value['c2']) - result = {'result_name': first_arg + second_arg} - print(json.dumps(result), end='\n') + first_arg = int(value["c1"]) + second_arg = int(value["c2"]) + result = {"result_name": first_arg + second_arg} + print(json.dumps(result), end="\n") sys.stdout.flush() diff --git a/tests/integration/test_executable_user_defined_functions_config_reload/test.py b/tests/integration/test_executable_user_defined_functions_config_reload/test.py index 629c426a28c..91c93c4593b 100644 --- 
a/tests/integration/test_executable_user_defined_functions_config_reload/test.py +++ b/tests/integration/test_executable_user_defined_functions_config_reload/test.py @@ -10,16 +10,24 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True, main_configs=['config/executable_user_defined_functions_config.xml']) +node = cluster.add_instance( + "node", + stay_alive=True, + main_configs=["config/executable_user_defined_functions_config.xml"], +) def copy_file_to_container(local_path, dist_path, container_id): - os.system("docker cp {local} {cont_id}:{dist}".format(local=local_path, cont_id=container_id, dist=dist_path)) + os.system( + "docker cp {local} {cont_id}:{dist}".format( + local=local_path, cont_id=container_id, dist=dist_path + ) + ) -config = ''' +config = """ /etc/clickhouse-server/functions/{user_defined_executable_functions_config} -''' +""" @pytest.fixture(scope="module") @@ -27,8 +35,16 @@ def started_cluster(): try: cluster.start() - copy_file_to_container(os.path.join(SCRIPT_DIR, 'functions/.'), '/etc/clickhouse-server/functions', node.docker_id) - copy_file_to_container(os.path.join(SCRIPT_DIR, 'user_scripts/.'), '/var/lib/clickhouse/user_scripts', node.docker_id) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "functions/."), + "/etc/clickhouse-server/functions", + node.docker_id, + ) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "user_scripts/."), + "/var/lib/clickhouse/user_scripts", + node.docker_id, + ) node.restart_clickhouse() @@ -39,7 +55,12 @@ def started_cluster(): def change_config(user_defined_executable_functions_config): - node.replace_config("/etc/clickhouse-server/config.d/executable_user_defined_functions_config.xml", config.format(user_defined_executable_functions_config=user_defined_executable_functions_config)) + node.replace_config( + "/etc/clickhouse-server/config.d/executable_user_defined_functions_config.xml", + config.format( + user_defined_executable_functions_config=user_defined_executable_functions_config + ), + ) node.query("SYSTEM RELOAD CONFIG;") @@ -49,7 +70,7 @@ def test(started_cluster): time.sleep(10) - assert node.query("SELECT test_function_1(toUInt64(1));") == 'Key_1 1\n' + assert node.query("SELECT test_function_1(toUInt64(1));") == "Key_1 1\n" # Change path to the second executable user defined function in config. change_config("test_function_config2.xml") @@ -57,7 +78,7 @@ def test(started_cluster): time.sleep(10) # Check that the new executable user defined function is loaded. - assert node.query("SELECT test_function_2(toUInt64(1))") == 'Key_2 1\n' + assert node.query("SELECT test_function_2(toUInt64(1))") == "Key_2 1\n" # Check that the previous executable user defined function was unloaded. 
node.query_and_get_error("SELECT test_function_1(toUInt64(1));") diff --git a/tests/integration/test_explain_estimates/test.py b/tests/integration/test_explain_estimates/test.py index 7bccfb11a37..9ccce61cf68 100644 --- a/tests/integration/test_explain_estimates/test.py +++ b/tests/integration/test_explain_estimates/test.py @@ -3,7 +3,8 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('instance') +node1 = cluster.add_instance("instance") + @pytest.fixture(scope="module") def start_cluster(): @@ -14,12 +15,16 @@ def start_cluster(): finally: cluster.shutdown() + def test_explain_estimates(start_cluster): - node1.query("CREATE TABLE test (i Int64) ENGINE = MergeTree() ORDER BY i SETTINGS index_granularity = 16") + node1.query( + "CREATE TABLE test (i Int64) ENGINE = MergeTree() ORDER BY i SETTINGS index_granularity = 16" + ) node1.query("INSERT INTO test SELECT number FROM numbers(128)") node1.query("OPTIMIZE TABLE test") # sum(marks) - 1 because EXPLAIN ESIMATES does not include final mark. - system_parts_result = node1.query("SELECT any(database), any(table), count() as parts, sum(rows) as rows, sum(marks)-1 as marks FROM system.parts WHERE database = 'default' AND table = 'test' and active = 1 GROUP BY (database, table)") + system_parts_result = node1.query( + "SELECT any(database), any(table), count() as parts, sum(rows) as rows, sum(marks)-1 as marks FROM system.parts WHERE database = 'default' AND table = 'test' and active = 1 GROUP BY (database, table)" + ) explain_estimates_result = node1.query("EXPLAIN ESTIMATE SELECT * FROM test") - assert(system_parts_result == explain_estimates_result) - + assert system_parts_result == explain_estimates_result diff --git a/tests/integration/test_extreme_deduplication/test.py b/tests/integration/test_extreme_deduplication/test.py index d0d4b83d10f..2c8772aad4e 100644 --- a/tests/integration/test_extreme_deduplication/test.py +++ b/tests/integration/test_extreme_deduplication/test.py @@ -8,12 +8,18 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', - main_configs=["configs/conf.d/merge_tree.xml", "configs/conf.d/remote_servers.xml"], - with_zookeeper=True, macros={"layer": 0, "shard": 0, "replica": 1}) -node2 = cluster.add_instance('node2', - main_configs=["configs/conf.d/merge_tree.xml", "configs/conf.d/remote_servers.xml"], - with_zookeeper=True, macros={"layer": 0, "shard": 0, "replica": 2}) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/conf.d/merge_tree.xml", "configs/conf.d/remote_servers.xml"], + with_zookeeper=True, + macros={"layer": 0, "shard": 0, "replica": 1}, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/conf.d/merge_tree.xml", "configs/conf.d/remote_servers.xml"], + with_zookeeper=True, + macros={"layer": 0, "shard": 0, "replica": 2}, +) nodes = [node1, node2] @@ -31,9 +37,11 @@ def started_cluster(): def test_deduplication_window_in_seconds(started_cluster): node = node1 - node1.query(""" + node1.query( + """ CREATE TABLE simple ON CLUSTER test_cluster (date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/simple', '{replica}', date, id, 8192)""") + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/simple', '{replica}', date, id, 8192)""" + ) node.query("INSERT INTO simple VALUES (0, 0)") time.sleep(1) @@ -44,11 +52,17 @@ def test_deduplication_window_in_seconds(started_cluster): # wait clean thread time.sleep(2) 
- assert \ - TSV.toMat(node.query("SELECT count() FROM system.zookeeper WHERE path='/clickhouse/tables/0/simple/blocks'"))[ - 0][ - 0] == "1" - node.query("INSERT INTO simple VALUES (0, 0)") # deduplication doesn't works here, the first hash node was deleted + assert ( + TSV.toMat( + node.query( + "SELECT count() FROM system.zookeeper WHERE path='/clickhouse/tables/0/simple/blocks'" + ) + )[0][0] + == "1" + ) + node.query( + "INSERT INTO simple VALUES (0, 0)" + ) # deduplication doesn't works here, the first hash node was deleted assert TSV.toMat(node.query("SELECT count() FROM simple"))[0][0] == "3" node1.query("""DROP TABLE simple ON CLUSTER test_cluster""") @@ -60,23 +74,37 @@ def test_deduplication_works_in_case_of_intensive_inserts(started_cluster): inserters = [] fetchers = [] - node1.query(""" + node1.query( + """ CREATE TABLE simple ON CLUSTER test_cluster (date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/simple', '{replica}', date, id, 8192)""") + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/simple', '{replica}', date, id, 8192)""" + ) node1.query("INSERT INTO simple VALUES (0, 0)") for node in nodes: host = node.ip_address - inserters.append(CommandRequest(['/bin/bash'], timeout=10, stdin=""" + inserters.append( + CommandRequest( + ["/bin/bash"], + timeout=10, + stdin=""" set -e for i in `seq 1000`; do {} --host {} -q "INSERT INTO simple VALUES (0, 0)" done -""".format(cluster.get_client_cmd(), host))) +""".format( + cluster.get_client_cmd(), host + ), + ) + ) - fetchers.append(CommandRequest(['/bin/bash'], timeout=10, stdin=""" + fetchers.append( + CommandRequest( + ["/bin/bash"], + timeout=10, + stdin=""" set -e for i in `seq 1000`; do res=`{} --host {} -q "SELECT count() FROM simple"` @@ -85,7 +113,11 @@ for i in `seq 1000`; do exit -1 fi; done -""".format(cluster.get_client_cmd(), host, node.name))) +""".format( + cluster.get_client_cmd(), host, node.name + ), + ) + ) # There were not errors during INSERTs for inserter in inserters: diff --git a/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py b/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py index 7bce2d50011..582748046f9 100644 --- a/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py +++ b/tests/integration/test_fetch_partition_from_auxiliary_zookeeper/test.py @@ -3,7 +3,9 @@ from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", main_configs=["configs/zookeeper_config.xml"], with_zookeeper=True) +node = cluster.add_instance( + "node", main_configs=["configs/zookeeper_config.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -17,11 +19,11 @@ def start_cluster(): @pytest.mark.parametrize( - ('part', 'date', 'part_name'), + ("part", "date", "part_name"), [ - ('PARTITION', '2020-08-27', '2020-08-27'), - ('PART', '2020-08-28', '20200828_0_0_0'), - ] + ("PARTITION", "2020-08-27", "2020-08-27"), + ("PART", "2020-08-28", "20200828_0_0_0"), + ], ) def test_fetch_part_from_allowed_zookeeper(start_cluster, part, date, part_name): node.query( @@ -36,13 +38,26 @@ def test_fetch_part_from_allowed_zookeeper(start_cluster, part, date, part_name) node.query( """ALTER TABLE simple2 FETCH {part} '{part_name}' FROM 'zookeeper2:/clickhouse/tables/0/simple';""".format( - part=part, part_name=part_name)) + part=part, part_name=part_name + ) + ) - node.query("""ALTER TABLE simple2 ATTACH {part} 
'{part_name}';""".format(part=part, part_name=part_name)) + node.query( + """ALTER TABLE simple2 ATTACH {part} '{part_name}';""".format( + part=part, part_name=part_name + ) + ) with pytest.raises(QueryRuntimeException): node.query( """ALTER TABLE simple2 FETCH {part} '{part_name}' FROM 'zookeeper:/clickhouse/tables/0/simple';""".format( - part=part, part_name=part_name)) + part=part, part_name=part_name + ) + ) - assert node.query("""SELECT id FROM simple2 where date = '{date}'""".format(date=date)).strip() == "1" + assert ( + node.query( + """SELECT id FROM simple2 where date = '{date}'""".format(date=date) + ).strip() + == "1" + ) diff --git a/tests/integration/test_fetch_partition_should_reset_mutation/test.py b/tests/integration/test_fetch_partition_should_reset_mutation/test.py index 14a91a42031..7037393a3d2 100644 --- a/tests/integration/test_fetch_partition_should_reset_mutation/test.py +++ b/tests/integration/test_fetch_partition_should_reset_mutation/test.py @@ -4,7 +4,9 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", main_configs=["configs/zookeeper_config.xml"], with_zookeeper=True) +node = cluster.add_instance( + "node", main_configs=["configs/zookeeper_config.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -25,36 +27,45 @@ def test_part_should_reset_mutation(start_cluster): node.query("optimize table test final") node.query("optimize table test final") + expected = TSV("""all_0_0_2\t1\ta""") + assert TSV(node.query("SELECT _part, * FROM test")) == expected - expected = TSV('''all_0_0_2\t1\ta''') - assert TSV(node.query('SELECT _part, * FROM test')) == expected + node.query( + "ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"} + ) + node.query( + "ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"} + ) + node.query( + "ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"} + ) + node.query( + "ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"} + ) - node.query("ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"}) - node.query("ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"}) - node.query("ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"}) - node.query("ALTER TABLE test UPDATE s='xxx' WHERE 1", settings={"mutations_sync": "2"}) - - expected = TSV('''all_0_0_2_4\t1\txxx''') - assert TSV(node.query('SELECT _part, * FROM test')) == expected + expected = TSV("""all_0_0_2_4\t1\txxx""") + assert TSV(node.query("SELECT _part, * FROM test")) == expected node.query( "CREATE TABLE restore (i Int64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/restore', 'node') ORDER BY i;" ) - node.query("ALTER TABLE restore FETCH PARTITION tuple() FROM '/clickhouse/tables/test/'") + node.query( + "ALTER TABLE restore FETCH PARTITION tuple() FROM '/clickhouse/tables/test/'" + ) node.query("ALTER TABLE restore ATTACH PART 'all_0_0_2_4'") node.query("INSERT INTO restore select 2, 'a'") - print(TSV(node.query('SELECT _part, * FROM restore'))) - expected = TSV('''all_0_0_0\t1\txxx\nall_1_1_0\t2\ta''') - assert TSV(node.query('SELECT _part, * FROM restore ORDER BY i')) == expected + print(TSV(node.query("SELECT _part, * FROM restore"))) + expected = TSV("""all_0_0_0\t1\txxx\nall_1_1_0\t2\ta""") + assert TSV(node.query("SELECT _part, * FROM restore ORDER BY i")) == expected - node.query("ALTER TABLE restore 
UPDATE s='yyy' WHERE 1", settings={"mutations_sync": "2"}) + node.query( + "ALTER TABLE restore UPDATE s='yyy' WHERE 1", settings={"mutations_sync": "2"} + ) - - expected = TSV('''all_0_0_0_2\t1\tyyy\nall_1_1_0_2\t2\tyyy''') - assert TSV(node.query('SELECT _part, * FROM restore ORDER BY i')) == expected + expected = TSV("""all_0_0_0_2\t1\tyyy\nall_1_1_0_2\t2\tyyy""") + assert TSV(node.query("SELECT _part, * FROM restore ORDER BY i")) == expected node.query("ALTER TABLE restore DELETE WHERE 1", settings={"mutations_sync": "2"}) - assert node.query("SELECT count() FROM restore").strip() == "0" diff --git a/tests/integration/test_fetch_partition_with_outdated_parts/test.py b/tests/integration/test_fetch_partition_with_outdated_parts/test.py index 08d5e53e41e..b78d09b0316 100644 --- a/tests/integration/test_fetch_partition_with_outdated_parts/test.py +++ b/tests/integration/test_fetch_partition_with_outdated_parts/test.py @@ -6,8 +6,9 @@ import pytest cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", main_configs=["configs/zookeeper_config.xml"], with_zookeeper=True) - +node = cluster.add_instance( + "node", main_configs=["configs/zookeeper_config.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") diff --git a/tests/integration/test_filesystem_layout/test.py b/tests/integration/test_filesystem_layout/test.py index 2519d0e5ac3..34e377e0ae4 100644 --- a/tests/integration/test_filesystem_layout/test.py +++ b/tests/integration/test_filesystem_layout/test.py @@ -16,30 +16,65 @@ def started_cluster(): def test_file_path_escaping(started_cluster): - node.query('CREATE DATABASE IF NOT EXISTS test ENGINE = Ordinary') - node.query(''' + node.query("CREATE DATABASE IF NOT EXISTS test ENGINE = Ordinary") + node.query( + """ CREATE TABLE test.`T.a_b,l-e!` (`~Id` UInt32) ENGINE = MergeTree() PARTITION BY `~Id` ORDER BY `~Id` SETTINGS min_bytes_for_wide_part = 0; - ''') - node.query('''INSERT INTO test.`T.a_b,l-e!` VALUES (1);''') - node.query('''ALTER TABLE test.`T.a_b,l-e!` FREEZE;''') + """ + ) + node.query("""INSERT INTO test.`T.a_b,l-e!` VALUES (1);""") + node.query("""ALTER TABLE test.`T.a_b,l-e!` FREEZE;""") - node.exec_in_container(["bash", "-c", "test -f /var/lib/clickhouse/data/test/T%2Ea_b%2Cl%2De%21/1_1_1_0/%7EId.bin"]) node.exec_in_container( - ["bash", "-c", "test -f /var/lib/clickhouse/shadow/1/data/test/T%2Ea_b%2Cl%2De%21/1_1_1_0/%7EId.bin"]) + [ + "bash", + "-c", + "test -f /var/lib/clickhouse/data/test/T%2Ea_b%2Cl%2De%21/1_1_1_0/%7EId.bin", + ] + ) + node.exec_in_container( + [ + "bash", + "-c", + "test -f /var/lib/clickhouse/shadow/1/data/test/T%2Ea_b%2Cl%2De%21/1_1_1_0/%7EId.bin", + ] + ) + def test_file_path_escaping_atomic_db(started_cluster): - node.query('CREATE DATABASE IF NOT EXISTS `test 2` ENGINE = Atomic') - node.query(''' + node.query("CREATE DATABASE IF NOT EXISTS `test 2` ENGINE = Atomic") + node.query( + """ CREATE TABLE `test 2`.`T.a_b,l-e!` UUID '12345678-1000-4000-8000-000000000001' (`~Id` UInt32) ENGINE = MergeTree() PARTITION BY `~Id` ORDER BY `~Id` SETTINGS min_bytes_for_wide_part = 0; - ''') - node.query('''INSERT INTO `test 2`.`T.a_b,l-e!` VALUES (1);''') - node.query('''ALTER TABLE `test 2`.`T.a_b,l-e!` FREEZE;''') + """ + ) + node.query("""INSERT INTO `test 2`.`T.a_b,l-e!` VALUES (1);""") + node.query("""ALTER TABLE `test 2`.`T.a_b,l-e!` FREEZE;""") - node.exec_in_container(["bash", "-c", "test -f /var/lib/clickhouse/store/123/12345678-1000-4000-8000-000000000001/1_1_1_0/%7EId.bin"]) - # Check symlink - 
node.exec_in_container(["bash", "-c", "test -L /var/lib/clickhouse/data/test%202/T%2Ea_b%2Cl%2De%21"]) - node.exec_in_container(["bash", "-c", "test -f /var/lib/clickhouse/data/test%202/T%2Ea_b%2Cl%2De%21/1_1_1_0/%7EId.bin"]) node.exec_in_container( - ["bash", "-c", "test -f /var/lib/clickhouse/shadow/2/store/123/12345678-1000-4000-8000-000000000001/1_1_1_0/%7EId.bin"]) + [ + "bash", + "-c", + "test -f /var/lib/clickhouse/store/123/12345678-1000-4000-8000-000000000001/1_1_1_0/%7EId.bin", + ] + ) + # Check symlink + node.exec_in_container( + ["bash", "-c", "test -L /var/lib/clickhouse/data/test%202/T%2Ea_b%2Cl%2De%21"] + ) + node.exec_in_container( + [ + "bash", + "-c", + "test -f /var/lib/clickhouse/data/test%202/T%2Ea_b%2Cl%2De%21/1_1_1_0/%7EId.bin", + ] + ) + node.exec_in_container( + [ + "bash", + "-c", + "test -f /var/lib/clickhouse/shadow/2/store/123/12345678-1000-4000-8000-000000000001/1_1_1_0/%7EId.bin", + ] + ) diff --git a/tests/integration/test_force_deduplication/test.py b/tests/integration/test_force_deduplication/test.py index 991e289f912..87b2c45bbc5 100644 --- a/tests/integration/test_force_deduplication/test.py +++ b/tests/integration/test_force_deduplication/test.py @@ -7,10 +7,10 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', with_zookeeper=True) +node = cluster.add_instance("node", with_zookeeper=True) -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def start_cluster(): try: cluster.start() @@ -30,21 +30,21 @@ def get_counts(): def test_basic(start_cluster): node.query( - ''' + """ CREATE TABLE test (A Int64) ENGINE = ReplicatedMergeTree ('/clickhouse/test/tables/test','1') ORDER BY tuple(); CREATE MATERIALIZED VIEW test_mv_a Engine=ReplicatedMergeTree ('/clickhouse/test/tables/test_mv_a','1') order by tuple() AS SELECT A FROM test; CREATE MATERIALIZED VIEW test_mv_b Engine=ReplicatedMergeTree ('/clickhouse/test/tables/test_mv_b','1') partition by A order by tuple() AS SELECT A FROM test; CREATE MATERIALIZED VIEW test_mv_c Engine=ReplicatedMergeTree ('/clickhouse/test/tables/test_mv_c','1') order by tuple() AS SELECT A FROM test; INSERT INTO test values(999); INSERT INTO test values(999); - ''' + """ ) with pytest.raises(QueryRuntimeException): node.query( - ''' + """ SET max_partitions_per_insert_block = 3; INSERT INTO test SELECT number FROM numbers(10); - ''' + """ ) old_src, old_a, old_b, old_c = get_counts() @@ -63,10 +63,10 @@ def test_basic(start_cluster): assert c == old_c node.query( - ''' + """ SET deduplicate_blocks_in_dependent_materialized_views = 1; INSERT INTO test SELECT number FROM numbers(10); - ''' + """ ) src, a, b, c = get_counts() assert src == 11 @@ -76,18 +76,18 @@ def test_basic(start_cluster): with pytest.raises(QueryRuntimeException): node.query( - ''' + """ SET max_partitions_per_insert_block = 3; SET deduplicate_blocks_in_dependent_materialized_views = 1; INSERT INTO test SELECT number FROM numbers(100,10); - ''' + """ ) node.query( - ''' + """ SET deduplicate_blocks_in_dependent_materialized_views = 1; INSERT INTO test SELECT number FROM numbers(100,10); - ''' + """ ) src, a, b, c = get_counts() diff --git a/tests/integration/test_force_drop_table/test.py b/tests/integration/test_force_drop_table/test.py index ad8316493e4..c1eec1cd277 100644 --- a/tests/integration/test_force_drop_table/test.py +++ b/tests/integration/test_force_drop_table/test.py @@ -3,7 +3,9 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = 
ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=["configs/config.xml"], with_zookeeper=True) +node = cluster.add_instance( + "node", main_configs=["configs/config.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -14,28 +16,47 @@ def started_cluster(): finally: cluster.shutdown() + def create_force_drop_flag(node): force_drop_flag_path = "/var/lib/clickhouse/flags/force_drop_table" - node.exec_in_container(["bash", "-c", "touch {} && chmod a=rw {}".format(force_drop_flag_path, force_drop_flag_path)], user="root") + node.exec_in_container( + [ + "bash", + "-c", + "touch {} && chmod a=rw {}".format( + force_drop_flag_path, force_drop_flag_path + ), + ], + user="root", + ) -@pytest.mark.parametrize("engine", ['Ordinary', 'Atomic']) + +@pytest.mark.parametrize("engine", ["Ordinary", "Atomic"]) def test_drop_materialized_view(started_cluster, engine): node.query("CREATE DATABASE d ENGINE={}".format(engine)) - node.query("CREATE TABLE d.rmt (n UInt64) ENGINE=ReplicatedMergeTree('/test/rmt', 'r1') ORDER BY n PARTITION BY n % 2") - node.query("CREATE MATERIALIZED VIEW d.mv (n UInt64, s String) ENGINE=MergeTree ORDER BY n PARTITION BY n % 2 AS SELECT n, toString(n) AS s FROM d.rmt") + node.query( + "CREATE TABLE d.rmt (n UInt64) ENGINE=ReplicatedMergeTree('/test/rmt', 'r1') ORDER BY n PARTITION BY n % 2" + ) + node.query( + "CREATE MATERIALIZED VIEW d.mv (n UInt64, s String) ENGINE=MergeTree ORDER BY n PARTITION BY n % 2 AS SELECT n, toString(n) AS s FROM d.rmt" + ) node.query("INSERT INTO d.rmt VALUES (1), (2)") assert "is greater than max" in node.query_and_get_error("DROP TABLE d.rmt") assert "is greater than max" in node.query_and_get_error("DROP TABLE d.mv") assert "is greater than max" in node.query_and_get_error("TRUNCATE TABLE d.rmt") assert "is greater than max" in node.query_and_get_error("TRUNCATE TABLE d.mv") - assert "is greater than max" in node.query_and_get_error("ALTER TABLE d.rmt DROP PARTITION '0'") + assert "is greater than max" in node.query_and_get_error( + "ALTER TABLE d.rmt DROP PARTITION '0'" + ) assert node.query("SELECT * FROM d.rmt ORDER BY n") == "1\n2\n" assert node.query("SELECT * FROM d.mv ORDER BY n") == "1\t1\n2\t2\n" create_force_drop_flag(node) node.query("ALTER TABLE d.rmt DROP PARTITION '0'") assert node.query("SELECT * FROM d.rmt ORDER BY n") == "1\n" - assert "is greater than max" in node.query_and_get_error("ALTER TABLE d.mv DROP PARTITION '0'") + assert "is greater than max" in node.query_and_get_error( + "ALTER TABLE d.mv DROP PARTITION '0'" + ) create_force_drop_flag(node) node.query("ALTER TABLE d.mv DROP PARTITION '0'") assert node.query("SELECT * FROM d.mv ORDER BY n") == "1\t1\n" @@ -46,4 +67,3 @@ def test_drop_materialized_view(started_cluster, engine): create_force_drop_flag(node) node.query("DROP TABLE d.mv SYNC") node.query("DROP DATABASE d") - diff --git a/tests/integration/test_format_avro_confluent/test.py b/tests/integration/test_format_avro_confluent/test.py index 23e2d8d8c47..42b7ddce193 100644 --- a/tests/integration/test_format_avro_confluent/test.py +++ b/tests/integration/test_format_avro_confluent/test.py @@ -3,10 +3,13 @@ import logging import avro.schema import pytest -from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient +from confluent_kafka.avro.cached_schema_registry_client import ( + CachedSchemaRegistryClient, +) from confluent_kafka.avro.serializer.message_serializer import MessageSerializer from helpers.cluster import ClickHouseCluster, 
ClickHouseInstance + @pytest.fixture(scope="module") def started_cluster(): try: @@ -37,36 +40,34 @@ def run_query(instance, query, data=None, settings=None): def test_select(started_cluster): # type: (ClickHouseCluster) -> None - schema_registry_client = CachedSchemaRegistryClient('http://localhost:{}'.format(started_cluster.schema_registry_port)) + schema_registry_client = CachedSchemaRegistryClient( + "http://localhost:{}".format(started_cluster.schema_registry_port) + ) serializer = MessageSerializer(schema_registry_client) - schema = avro.schema.make_avsc_object({ - 'name': 'test_record', - 'type': 'record', - 'fields': [ - { - 'name': 'value', - 'type': 'long' - } - ] - }) + schema = avro.schema.make_avsc_object( + { + "name": "test_record", + "type": "record", + "fields": [{"name": "value", "type": "long"}], + } + ) buf = io.BytesIO() for x in range(0, 3): message = serializer.encode_record_with_schema( - 'test_subject', schema, {'value': x} + "test_subject", schema, {"value": x} ) buf.write(message) data = buf.getvalue() instance = started_cluster.instances["dummy"] # type: ClickHouseInstance schema_registry_url = "http://{}:{}".format( - started_cluster.schema_registry_host, - 8081 + started_cluster.schema_registry_host, 8081 ) run_query(instance, "create table avro_data(value Int64) engine = Memory()") - settings = {'format_avro_schema_registry_url': schema_registry_url} + settings = {"format_avro_schema_registry_url": schema_registry_url} run_query(instance, "insert into avro_data format AvroConfluent", data, settings) stdout = run_query(instance, "select * from avro_data") assert list(map(str.split, stdout.splitlines())) == [ diff --git a/tests/integration/test_format_schema_on_server/test.py b/tests/integration/test_format_schema_on_server/test.py index 3b53a897dc0..7001d53ccf2 100644 --- a/tests/integration/test_format_schema_on_server/test.py +++ b/tests/integration/test_format_schema_on_server/test.py @@ -2,15 +2,14 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', - clickhouse_path_dir='clickhouse_path') +instance = cluster.add_instance("instance", clickhouse_path_dir="clickhouse_path") @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - instance.query('CREATE DATABASE test') + instance.query("CREATE DATABASE test") yield cluster finally: @@ -19,23 +18,29 @@ def started_cluster(): def create_simple_table(): instance.query("DROP TABLE IF EXISTS test.simple") - instance.query(''' + instance.query( + """ CREATE TABLE test.simple (key UInt64, value String) ENGINE = MergeTree ORDER BY tuple(); - ''') + """ + ) def test_protobuf_format_input(started_cluster): create_simple_table() instance.http_query( "INSERT INTO test.simple FORMAT Protobuf SETTINGS format_schema='simple:KeyValuePair'", - "\x07\x08\x01\x12\x03abc\x07\x08\x02\x12\x03def") + "\x07\x08\x01\x12\x03abc\x07\x08\x02\x12\x03def", + ) assert instance.query("SELECT * from test.simple") == "1\tabc\n2\tdef\n" def test_protobuf_format_output(started_cluster): create_simple_table() - instance.query("INSERT INTO test.simple VALUES (1, 'abc'), (2, 'def')"); - assert instance.http_query( - "SELECT * FROM test.simple FORMAT Protobuf SETTINGS format_schema='simple:KeyValuePair'") == \ - "\x07\x08\x01\x12\x03abc\x07\x08\x02\x12\x03def" + instance.query("INSERT INTO test.simple VALUES (1, 'abc'), (2, 'def')") + assert ( + instance.http_query( + "SELECT * FROM test.simple FORMAT Protobuf SETTINGS 
format_schema='simple:KeyValuePair'" + ) + == "\x07\x08\x01\x12\x03abc\x07\x08\x02\x12\x03def" + ) diff --git a/tests/integration/test_freeze_table/test.py b/tests/integration/test_freeze_table/test.py index 4d4aa22d4e2..8b9d1b58360 100644 --- a/tests/integration/test_freeze_table/test.py +++ b/tests/integration/test_freeze_table/test.py @@ -17,7 +17,8 @@ def started_cluster(): def test_freeze_table(started_cluster): - node.query(''' + node.query( + """ CREATE TABLE table_for_freeze ( key UInt64, @@ -26,38 +27,45 @@ def test_freeze_table(started_cluster): ENGINE = MergeTree() ORDER BY key PARTITION BY key % 10; - ''') - node.query(''' + """ + ) + node.query( + """ INSERT INTO table_for_freeze SELECT number, toString(number) from numbers(10); - ''') + """ + ) - freeze_result = TSV.toMat(node.query(''' + freeze_result = TSV.toMat( + node.query( + """ ALTER TABLE table_for_freeze FREEZE WITH NAME 'test_01417' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1; - ''')) + """ + ) + ) assert 11 == len(freeze_result) - path_col_ix = freeze_result[0].index('part_backup_path') + path_col_ix = freeze_result[0].index("part_backup_path") for row in freeze_result[1:]: # skip header part_backup_path = row[path_col_ix] - node.exec_in_container( - ["bash", "-c", "test -d {}".format(part_backup_path)] - ) + node.exec_in_container(["bash", "-c", "test -d {}".format(part_backup_path)]) - freeze_result = TSV.toMat(node.query(''' + freeze_result = TSV.toMat( + node.query( + """ ALTER TABLE table_for_freeze FREEZE PARTITION '3' WITH NAME 'test_01417_single_part' FORMAT TSVWithNames SETTINGS alter_partition_verbose_result = 1; - ''')) + """ + ) + ) assert 2 == len(freeze_result) - path_col_ix = freeze_result[0].index('part_backup_path') + path_col_ix = freeze_result[0].index("part_backup_path") for row in freeze_result[1:]: # skip header part_backup_path = row[path_col_ix] - assert 'test_01417_single_part' in part_backup_path - node.exec_in_container( - ["bash", "-c", "test -d {}".format(part_backup_path)] - ) + assert "test_01417_single_part" in part_backup_path + node.exec_in_container(["bash", "-c", "test -d {}".format(part_backup_path)]) diff --git a/tests/integration/test_global_overcommit_tracker/test.py b/tests/integration/test_global_overcommit_tracker/test.py index 4bcd9961330..c2f3a22915f 100644 --- a/tests/integration/test_global_overcommit_tracker/test.py +++ b/tests/integration/test_global_overcommit_tracker/test.py @@ -4,9 +4,12 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/global_overcommit_tracker.xml']) +node = cluster.add_instance( + "node", main_configs=["configs/global_overcommit_tracker.xml"] +) -@pytest.fixture(scope='module', autouse=True) + +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -14,8 +17,10 @@ def start_cluster(): finally: cluster.shutdown() -TEST_QUERY_A = 'SELECT number FROM numbers(1000) GROUP BY number SETTINGS max_guaranteed_memory_usage_for_user=1' -TEST_QUERY_B = 'SELECT number FROM numbers(1000) GROUP BY number SETTINGS max_guaranteed_memory_usage_for_user=2' + +TEST_QUERY_A = "SELECT number FROM numbers(1000) GROUP BY number SETTINGS max_guaranteed_memory_usage_for_user=1" +TEST_QUERY_B = "SELECT number FROM numbers(1000) GROUP BY number SETTINGS max_guaranteed_memory_usage_for_user=2" + def test_overcommited_is_killed(): node.query("CREATE USER A") @@ -40,7 +45,9 @@ def test_overcommited_is_killed(): if err == "": 
finished = True - assert overcommited_killed and finished, "no overcommited task was killed or all tasks are killed" + assert ( + overcommited_killed and finished + ), "no overcommited task was killed or all tasks are killed" node.query("DROP USER IF EXISTS A") node.query("DROP USER IF EXISTS B") diff --git a/tests/integration/test_globs_in_filepath/test.py b/tests/integration/test_globs_in_filepath/test.py index 7e534dd69bc..6f5368e0243 100644 --- a/tests/integration/test_globs_in_filepath/test.py +++ b/tests/integration/test_globs_in_filepath/test.py @@ -3,8 +3,10 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node') -path_to_userfiles_from_defaut_config = "/var/lib/clickhouse/user_files/" # should be the same as in config file +node = cluster.add_instance("node") +path_to_userfiles_from_defaut_config = ( + "/var/lib/clickhouse/user_files/" # should be the same as in config file +) @pytest.fixture(scope="module") @@ -25,85 +27,174 @@ def test_strange_filenames(start_cluster): # 2 rows data some_data = "\t111.222\nData\t333.444" - node.exec_in_container(['bash', '-c', 'mkdir {}strange_names/'.format(path_to_userfiles_from_defaut_config)], - privileged=True, user='root') + node.exec_in_container( + [ + "bash", + "-c", + "mkdir {}strange_names/".format(path_to_userfiles_from_defaut_config), + ], + privileged=True, + user="root", + ) - files = ["p.o.i.n.t.s", - "b}{ra{ces", - "b}.o{t.h"] + files = ["p.o.i.n.t.s", "b}{ra{ces", "b}.o{t.h"] # filename inside testing data for debug simplicity for filename in files: - node.exec_in_container(['bash', '-c', 'echo "{}{}" > {}strange_names/{}'.format(filename, some_data, - path_to_userfiles_from_defaut_config, - filename)], privileged=True, - user='root') + node.exec_in_container( + [ + "bash", + "-c", + 'echo "{}{}" > {}strange_names/{}'.format( + filename, some_data, path_to_userfiles_from_defaut_config, filename + ), + ], + privileged=True, + user="root", + ) - test_requests = [("p.o.??n.t.s", "2"), - ("p.o.*t.s", "2"), - ("b}{r?{ces", "2"), - ("b}*ces", "2"), - ("b}.?{t.h", "2")] + test_requests = [ + ("p.o.??n.t.s", "2"), + ("p.o.*t.s", "2"), + ("b}{r?{ces", "2"), + ("b}*ces", "2"), + ("b}.?{t.h", "2"), + ] for pattern, value in test_requests: - assert node.query(''' + assert ( + node.query( + """ select count(*) from file('strange_names/{}', 'TSV', 'text String, number Float64') - '''.format(pattern)) == '{}\n'.format(value) - assert node.query(''' + """.format( + pattern + ) + ) + == "{}\n".format(value) + ) + assert ( + node.query( + """ select count(*) from file('{}strange_names/{}', 'TSV', 'text String, number Float64') - '''.format(path_to_userfiles_from_defaut_config, pattern)) == '{}\n'.format(value) + """.format( + path_to_userfiles_from_defaut_config, pattern + ) + ) + == "{}\n".format(value) + ) def test_linear_structure(start_cluster): # 2 rows data some_data = "\t123.456\nData\t789.012" - files = ["file1", "file2", "file3", "file4", "file5", - "file000", "file111", "file222", "file333", "file444", - "a_file", "b_file", "c_file", "d_file", "e_file", - "a_data", "b_data", "c_data", "d_data", "e_data"] + files = [ + "file1", + "file2", + "file3", + "file4", + "file5", + "file000", + "file111", + "file222", + "file333", + "file444", + "a_file", + "b_file", + "c_file", + "d_file", + "e_file", + "a_data", + "b_data", + "c_data", + "d_data", + "e_data", + ] # filename inside testing data for debug simplicity for filename in files: - 
node.exec_in_container(['bash', '-c', - 'echo "{}{}" > {}{}'.format(filename, some_data, path_to_userfiles_from_defaut_config, - filename)], privileged=True, user='root') + node.exec_in_container( + [ + "bash", + "-c", + 'echo "{}{}" > {}{}'.format( + filename, some_data, path_to_userfiles_from_defaut_config, filename + ), + ], + privileged=True, + user="root", + ) - test_requests = [("file{0..9}", "10"), - ("file?", "10"), - ("nothing*", "0"), - ("file{0..9}{0..9}{0..9}", "10"), - ("file{000..999}", "10"), - ("file???", "10"), - ("file*", "20"), - ("a_{file,data}", "4"), - ("?_{file,data}", "20"), - ("{a,b,c,d,e}_{file,data}", "20"), - ("{a,b,c,d,e}?{file,data}", "20"), - ("*", "40")] + test_requests = [ + ("file{0..9}", "10"), + ("file?", "10"), + ("nothing*", "0"), + ("file{0..9}{0..9}{0..9}", "10"), + ("file{000..999}", "10"), + ("file???", "10"), + ("file*", "20"), + ("a_{file,data}", "4"), + ("?_{file,data}", "20"), + ("{a,b,c,d,e}_{file,data}", "20"), + ("{a,b,c,d,e}?{file,data}", "20"), + ("*", "40"), + ] for pattern, value in test_requests: - assert node.query(''' + assert ( + node.query( + """ select count(*) from file('{}', 'TSV', 'text String, number Float64') - '''.format(pattern)) == '{}\n'.format(value) - assert node.query(''' + """.format( + pattern + ) + ) + == "{}\n".format(value) + ) + assert ( + node.query( + """ select count(*) from file('{}{}', 'TSV', 'text String, number Float64') - '''.format(path_to_userfiles_from_defaut_config, pattern)) == '{}\n'.format(value) + """.format( + path_to_userfiles_from_defaut_config, pattern + ) + ) + == "{}\n".format(value) + ) def test_deep_structure(start_cluster): # 2 rows data some_data = "\t135.791\nData\t246.802" - dirs = ["directory1/", "directory2/", "some_more_dir/", "we/", - "directory1/big_dir/", - "directory1/dir1/", "directory1/dir2/", "directory1/dir3/", - "directory2/dir1/", "directory2/dir2/", "directory2/one_more_dir/", - "some_more_dir/yet_another_dir/", - "we/need/", "we/need/to/", "we/need/to/go/", "we/need/to/go/deeper/"] + dirs = [ + "directory1/", + "directory2/", + "some_more_dir/", + "we/", + "directory1/big_dir/", + "directory1/dir1/", + "directory1/dir2/", + "directory1/dir3/", + "directory2/dir1/", + "directory2/dir2/", + "directory2/one_more_dir/", + "some_more_dir/yet_another_dir/", + "we/need/", + "we/need/to/", + "we/need/to/go/", + "we/need/to/go/deeper/", + ] for dir in dirs: - node.exec_in_container(['bash', '-c', 'mkdir {}{}'.format(path_to_userfiles_from_defaut_config, dir)], - privileged=True, user='root') + node.exec_in_container( + [ + "bash", + "-c", + "mkdir {}{}".format(path_to_userfiles_from_defaut_config, dir), + ], + privileged=True, + user="root", + ) # all directories appeared in files must be listed in dirs files = [] @@ -117,34 +208,106 @@ def test_deep_structure(start_cluster): # filename inside testing data for debug simplicity for filename in files: - node.exec_in_container(['bash', '-c', - 'echo "{}{}" > {}{}'.format(filename, some_data, path_to_userfiles_from_defaut_config, - filename)], privileged=True, user='root') + node.exec_in_container( + [ + "bash", + "-c", + 'echo "{}{}" > {}{}'.format( + filename, some_data, path_to_userfiles_from_defaut_config, filename + ), + ], + privileged=True, + user="root", + ) - test_requests = [("directory{1..5}/big_dir/*", "2002"), ("directory{0..6}/big_dir/*{0..9}{0..9}{0..9}", "2000"), - ("?", "0"), - ("directory{0..5}/dir{1..3}/file", "10"), ("directory{0..5}/dir?/file", "10"), - ("we/need/to/go/deeper/file", "2"), ("*/*/*/*/*/*", "2"), 
("we/need/??/go/deeper/*?*?*?*?*", "2")] + test_requests = [ + ("directory{1..5}/big_dir/*", "2002"), + ("directory{0..6}/big_dir/*{0..9}{0..9}{0..9}", "2000"), + ("?", "0"), + ("directory{0..5}/dir{1..3}/file", "10"), + ("directory{0..5}/dir?/file", "10"), + ("we/need/to/go/deeper/file", "2"), + ("*/*/*/*/*/*", "2"), + ("we/need/??/go/deeper/*?*?*?*?*", "2"), + ] for pattern, value in test_requests: - assert node.query(''' + assert ( + node.query( + """ select count(*) from file('{}', 'TSV', 'text String, number Float64') - '''.format(pattern)) == '{}\n'.format(value) - assert node.query(''' + """.format( + pattern + ) + ) + == "{}\n".format(value) + ) + assert ( + node.query( + """ select count(*) from file('{}{}', 'TSV', 'text String, number Float64') - '''.format(path_to_userfiles_from_defaut_config, pattern)) == '{}\n'.format(value) + """.format( + path_to_userfiles_from_defaut_config, pattern + ) + ) + == "{}\n".format(value) + ) def test_table_function_and_virtual_columns(start_cluster): - node.exec_in_container(['bash', '-c', 'mkdir -p {}some/path/to/'.format(path_to_userfiles_from_defaut_config)]) - node.exec_in_container(['bash', '-c', 'touch {}some/path/to/data.CSV'.format(path_to_userfiles_from_defaut_config)]) + node.exec_in_container( + [ + "bash", + "-c", + "mkdir -p {}some/path/to/".format(path_to_userfiles_from_defaut_config), + ] + ) + node.exec_in_container( + [ + "bash", + "-c", + "touch {}some/path/to/data.CSV".format( + path_to_userfiles_from_defaut_config + ), + ] + ) node.query( - "insert into table function file('some/path/to/data.CSV', CSV, 'n UInt8, s String') select number, concat('str_', toString(number)) from numbers(100000)") - assert node.query( - "select count() from file('some/path/to/data.CSV', CSV, 'n UInt8, s String')").rstrip() == '100000' - node.query("insert into table function file('nonexist.csv', 'CSV', 'val1 UInt32') values (1)") - assert node.query("select * from file('nonexist.csv', 'CSV', 'val1 UInt32')").rstrip() == '1' - assert "nonexist.csv" in node.query("select _path from file('nonexis?.csv', 'CSV', 'val1 UInt32')").rstrip() - assert "nonexist.csv" in node.query("select _path from file('nonexist.csv', 'CSV', 'val1 UInt32')").rstrip() - assert "nonexist.csv" == node.query("select _file from file('nonexis?.csv', 'CSV', 'val1 UInt32')").rstrip() - assert "nonexist.csv" == node.query("select _file from file('nonexist.csv', 'CSV', 'val1 UInt32')").rstrip() + "insert into table function file('some/path/to/data.CSV', CSV, 'n UInt8, s String') select number, concat('str_', toString(number)) from numbers(100000)" + ) + assert ( + node.query( + "select count() from file('some/path/to/data.CSV', CSV, 'n UInt8, s String')" + ).rstrip() + == "100000" + ) + node.query( + "insert into table function file('nonexist.csv', 'CSV', 'val1 UInt32') values (1)" + ) + assert ( + node.query("select * from file('nonexist.csv', 'CSV', 'val1 UInt32')").rstrip() + == "1" + ) + assert ( + "nonexist.csv" + in node.query( + "select _path from file('nonexis?.csv', 'CSV', 'val1 UInt32')" + ).rstrip() + ) + assert ( + "nonexist.csv" + in node.query( + "select _path from file('nonexist.csv', 'CSV', 'val1 UInt32')" + ).rstrip() + ) + assert ( + "nonexist.csv" + == node.query( + "select _file from file('nonexis?.csv', 'CSV', 'val1 UInt32')" + ).rstrip() + ) + assert ( + "nonexist.csv" + == node.query( + "select _file from file('nonexist.csv', 'CSV', 'val1 UInt32')" + ).rstrip() + ) diff --git a/tests/integration/test_grant_and_revoke/test.py 
b/tests/integration/test_grant_and_revoke/test.py index 196141f9bfe..2988db24d74 100644 --- a/tests/integration/test_grant_and_revoke/test.py +++ b/tests/integration/test_grant_and_revoke/test.py @@ -3,7 +3,7 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance("instance") @pytest.fixture(scope="module", autouse=True) @@ -12,7 +12,9 @@ def start_cluster(): cluster.start() instance.query("CREATE DATABASE test") - instance.query("CREATE TABLE test.table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()") + instance.query( + "CREATE TABLE test.table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()" + ) instance.query("INSERT INTO test.table VALUES (1,5), (2,10)") yield cluster @@ -32,28 +34,34 @@ def cleanup_after_test(): def test_smoke(): instance.query("CREATE USER A") - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test.table", user='A') + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM test.table", user="A" + ) - instance.query('GRANT SELECT ON test.table TO A') - assert instance.query("SELECT * FROM test.table", user='A') == "1\t5\n2\t10\n" + instance.query("GRANT SELECT ON test.table TO A") + assert instance.query("SELECT * FROM test.table", user="A") == "1\t5\n2\t10\n" - instance.query('REVOKE SELECT ON test.table FROM A') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test.table", user='A') + instance.query("REVOKE SELECT ON test.table FROM A") + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM test.table", user="A" + ) def test_grant_option(): instance.query("CREATE USER A") instance.query("CREATE USER B") - instance.query('GRANT SELECT ON test.table TO A') - assert instance.query("SELECT * FROM test.table", user='A') == "1\t5\n2\t10\n" - assert "Not enough privileges" in instance.query_and_get_error("GRANT SELECT ON test.table TO B", user='A') + instance.query("GRANT SELECT ON test.table TO A") + assert instance.query("SELECT * FROM test.table", user="A") == "1\t5\n2\t10\n" + assert "Not enough privileges" in instance.query_and_get_error( + "GRANT SELECT ON test.table TO B", user="A" + ) - instance.query('GRANT SELECT ON test.table TO A WITH GRANT OPTION') - instance.query("GRANT SELECT ON test.table TO B", user='A') - assert instance.query("SELECT * FROM test.table", user='B') == "1\t5\n2\t10\n" + instance.query("GRANT SELECT ON test.table TO A WITH GRANT OPTION") + instance.query("GRANT SELECT ON test.table TO B", user="A") + assert instance.query("SELECT * FROM test.table", user="B") == "1\t5\n2\t10\n" - instance.query('REVOKE SELECT ON test.table FROM A, B') + instance.query("REVOKE SELECT ON test.table FROM A, B") def test_revoke_requires_grant_option(): @@ -64,45 +72,51 @@ def test_revoke_requires_grant_option(): assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" expected_error = "Not enough privileges" - assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM B", user='A') + assert expected_error in instance.query_and_get_error( + "REVOKE SELECT ON test.table FROM B", user="A" + ) assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" instance.query("GRANT SELECT ON test.table TO A") expected_error = "privileges have been granted, but without grant option" - assert expected_error in 
instance.query_and_get_error("REVOKE SELECT ON test.table FROM B", user='A') + assert expected_error in instance.query_and_get_error( + "REVOKE SELECT ON test.table FROM B", user="A" + ) assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" instance.query("GRANT SELECT ON test.table TO A WITH GRANT OPTION") assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" - instance.query("REVOKE SELECT ON test.table FROM B", user='A') + instance.query("REVOKE SELECT ON test.table FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" instance.query("GRANT SELECT ON test.table TO B") assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" - instance.query("REVOKE SELECT ON test.* FROM B", user='A') + instance.query("REVOKE SELECT ON test.* FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" instance.query("GRANT SELECT ON test.table TO B") assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" - instance.query("REVOKE ALL ON test.* FROM B", user='A') + instance.query("REVOKE ALL ON test.* FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" instance.query("GRANT SELECT ON test.table TO B") assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" - instance.query("REVOKE ALL ON *.* FROM B", user='A') + instance.query("REVOKE ALL ON *.* FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" instance.query("REVOKE GRANT OPTION FOR ALL ON *.* FROM A") instance.query("GRANT SELECT ON test.table TO B") assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" expected_error = "privileges have been granted, but without grant option" - assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM B", user='A') + assert expected_error in instance.query_and_get_error( + "REVOKE SELECT ON test.table FROM B", user="A" + ) assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" instance.query("GRANT SELECT ON test.* TO A WITH GRANT OPTION") instance.query("GRANT SELECT ON test.table TO B") assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" - instance.query("REVOKE SELECT ON test.table FROM B", user='A') + instance.query("REVOKE SELECT ON test.table FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" @@ -110,101 +124,213 @@ def test_allowed_grantees(): instance.query("CREATE USER A") instance.query("CREATE USER B") - instance.query('GRANT SELECT ON test.table TO A WITH GRANT OPTION') - instance.query("GRANT SELECT ON test.table TO B", user='A') - assert instance.query("SELECT * FROM test.table", user='B') == "1\t5\n2\t10\n" - instance.query("REVOKE SELECT ON test.table FROM B", user='A') + instance.query("GRANT SELECT ON test.table TO A WITH GRANT OPTION") + instance.query("GRANT SELECT ON test.table TO B", user="A") + assert instance.query("SELECT * FROM test.table", user="B") == "1\t5\n2\t10\n" + instance.query("REVOKE SELECT ON test.table FROM B", user="A") - instance.query('ALTER USER A GRANTEES NONE') + instance.query("ALTER USER A GRANTEES NONE") expected_error = "user `B` is not allowed as grantee" - assert expected_error in instance.query_and_get_error("GRANT SELECT ON test.table TO B", user='A') + assert expected_error in instance.query_and_get_error( + "GRANT SELECT ON test.table TO B", user="A" + ) - instance.query('ALTER USER A GRANTEES ANY EXCEPT B') - assert instance.query('SHOW CREATE USER 
A') == "CREATE USER A GRANTEES ANY EXCEPT B\n" + instance.query("ALTER USER A GRANTEES ANY EXCEPT B") + assert ( + instance.query("SHOW CREATE USER A") == "CREATE USER A GRANTEES ANY EXCEPT B\n" + ) expected_error = "user `B` is not allowed as grantee" - assert expected_error in instance.query_and_get_error("GRANT SELECT ON test.table TO B", user='A') + assert expected_error in instance.query_and_get_error( + "GRANT SELECT ON test.table TO B", user="A" + ) - instance.query('ALTER USER A GRANTEES B') - instance.query("GRANT SELECT ON test.table TO B", user='A') - assert instance.query("SELECT * FROM test.table", user='B') == "1\t5\n2\t10\n" - instance.query("REVOKE SELECT ON test.table FROM B", user='A') + instance.query("ALTER USER A GRANTEES B") + instance.query("GRANT SELECT ON test.table TO B", user="A") + assert instance.query("SELECT * FROM test.table", user="B") == "1\t5\n2\t10\n" + instance.query("REVOKE SELECT ON test.table FROM B", user="A") - instance.query('ALTER USER A GRANTEES ANY') - assert instance.query('SHOW CREATE USER A') == "CREATE USER A\n" - instance.query("GRANT SELECT ON test.table TO B", user='A') - assert instance.query("SELECT * FROM test.table", user='B') == "1\t5\n2\t10\n" + instance.query("ALTER USER A GRANTEES ANY") + assert instance.query("SHOW CREATE USER A") == "CREATE USER A\n" + instance.query("GRANT SELECT ON test.table TO B", user="A") + assert instance.query("SELECT * FROM test.table", user="B") == "1\t5\n2\t10\n" - instance.query('ALTER USER A GRANTEES NONE') + instance.query("ALTER USER A GRANTEES NONE") expected_error = "user `B` is not allowed as grantee" - assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM B", user='A') + assert expected_error in instance.query_and_get_error( + "REVOKE SELECT ON test.table FROM B", user="A" + ) instance.query("CREATE USER C GRANTEES ANY EXCEPT C") - assert instance.query('SHOW CREATE USER C') == "CREATE USER C GRANTEES ANY EXCEPT C\n" - instance.query('GRANT SELECT ON test.table TO C WITH GRANT OPTION') - assert instance.query("SELECT * FROM test.table", user='C') == "1\t5\n2\t10\n" + assert ( + instance.query("SHOW CREATE USER C") == "CREATE USER C GRANTEES ANY EXCEPT C\n" + ) + instance.query("GRANT SELECT ON test.table TO C WITH GRANT OPTION") + assert instance.query("SELECT * FROM test.table", user="C") == "1\t5\n2\t10\n" expected_error = "user `C` is not allowed as grantee" - assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM C", user='C') + assert expected_error in instance.query_and_get_error( + "REVOKE SELECT ON test.table FROM C", user="C" + ) def test_grant_all_on_table(): instance.query("CREATE USER A, B") instance.query("GRANT ALL ON test.table TO A WITH GRANT OPTION") - instance.query("GRANT ALL ON test.table TO B", user='A') - assert instance.query("SHOW GRANTS FOR B") ==\ - "GRANT SHOW TABLES, SHOW COLUMNS, SHOW DICTIONARIES, SELECT, INSERT, ALTER TABLE, ALTER VIEW, CREATE TABLE, CREATE VIEW, CREATE DICTIONARY, "\ - "DROP TABLE, DROP VIEW, DROP DICTIONARY, TRUNCATE, OPTIMIZE, CREATE ROW POLICY, ALTER ROW POLICY, DROP ROW POLICY, SHOW ROW POLICIES, "\ - "SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, "\ + instance.query("GRANT ALL ON test.table TO B", user="A") + assert ( + instance.query("SHOW GRANTS FOR B") + == "GRANT SHOW TABLES, SHOW COLUMNS, SHOW DICTIONARIES, SELECT, INSERT, ALTER TABLE, ALTER VIEW, CREATE TABLE, CREATE 
VIEW, CREATE DICTIONARY, " + "DROP TABLE, DROP VIEW, DROP DICTIONARY, TRUNCATE, OPTIMIZE, CREATE ROW POLICY, ALTER ROW POLICY, DROP ROW POLICY, SHOW ROW POLICIES, " + "SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, " "SYSTEM RESTART REPLICA, SYSTEM RESTORE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON test.table TO B\n" - instance.query("REVOKE ALL ON test.table FROM B", user='A') + ) + instance.query("REVOKE ALL ON test.table FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" def test_implicit_show_grants(): instance.query("CREATE USER A") - assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "0\n" - assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "0\n" - assert instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", - user="A") == "0\n" + assert ( + instance.query( + "select count() FROM system.databases WHERE name='test'", user="A" + ) + == "0\n" + ) + assert ( + instance.query( + "select count() FROM system.tables WHERE database='test' AND name='table'", + user="A", + ) + == "0\n" + ) + assert ( + instance.query( + "select count() FROM system.columns WHERE database='test' AND table='table'", + user="A", + ) + == "0\n" + ) instance.query("GRANT SELECT(x) ON test.table TO A") assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT(x) ON test.table TO A\n" - assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "1\n" - assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "1\n" - assert instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", - user="A") == "1\n" + assert ( + instance.query( + "select count() FROM system.databases WHERE name='test'", user="A" + ) + == "1\n" + ) + assert ( + instance.query( + "select count() FROM system.tables WHERE database='test' AND name='table'", + user="A", + ) + == "1\n" + ) + assert ( + instance.query( + "select count() FROM system.columns WHERE database='test' AND table='table'", + user="A", + ) + == "1\n" + ) instance.query("GRANT SELECT ON test.table TO A") assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT ON test.table TO A\n" - assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "1\n" - assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "1\n" - assert instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", - user="A") == "2\n" + assert ( + instance.query( + "select count() FROM system.databases WHERE name='test'", user="A" + ) + == "1\n" + ) + assert ( + instance.query( + "select count() FROM system.tables WHERE database='test' AND name='table'", + user="A", + ) + == "1\n" + ) + assert ( + instance.query( + "select count() FROM system.columns WHERE database='test' AND table='table'", + user="A", + ) + == "2\n" + ) instance.query("GRANT SELECT ON test.* TO A") assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT ON test.* TO A\n" - assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "1\n" - assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "1\n" - assert instance.query("select count() FROM system.columns 
WHERE database='test' AND table='table'", - user="A") == "2\n" + assert ( + instance.query( + "select count() FROM system.databases WHERE name='test'", user="A" + ) + == "1\n" + ) + assert ( + instance.query( + "select count() FROM system.tables WHERE database='test' AND name='table'", + user="A", + ) + == "1\n" + ) + assert ( + instance.query( + "select count() FROM system.columns WHERE database='test' AND table='table'", + user="A", + ) + == "2\n" + ) instance.query("GRANT SELECT ON *.* TO A") assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT ON *.* TO A\n" - assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "1\n" - assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "1\n" - assert instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", - user="A") == "2\n" + assert ( + instance.query( + "select count() FROM system.databases WHERE name='test'", user="A" + ) + == "1\n" + ) + assert ( + instance.query( + "select count() FROM system.tables WHERE database='test' AND name='table'", + user="A", + ) + == "1\n" + ) + assert ( + instance.query( + "select count() FROM system.columns WHERE database='test' AND table='table'", + user="A", + ) + == "2\n" + ) instance.query("REVOKE ALL ON *.* FROM A") - assert instance.query("select count() FROM system.databases WHERE name='test'", user="A") == "0\n" - assert instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A") == "0\n" - assert instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", - user="A") == "0\n" + assert ( + instance.query( + "select count() FROM system.databases WHERE name='test'", user="A" + ) + == "0\n" + ) + assert ( + instance.query( + "select count() FROM system.tables WHERE database='test' AND name='table'", + user="A", + ) + == "0\n" + ) + assert ( + instance.query( + "select count() FROM system.columns WHERE database='test' AND table='table'", + user="A", + ) + == "0\n" + ) def test_implicit_create_view_grant(): instance.query("CREATE USER A") expected_error = "Not enough privileges" - assert expected_error in instance.query_and_get_error("CREATE VIEW test.view_1 AS SELECT 1", user="A") + assert expected_error in instance.query_and_get_error( + "CREATE VIEW test.view_1 AS SELECT 1", user="A" + ) instance.query("GRANT CREATE TABLE ON test.* TO A") instance.query("CREATE VIEW test.view_1 AS SELECT 1", user="A") @@ -212,118 +338,230 @@ def test_implicit_create_view_grant(): instance.query("REVOKE CREATE TABLE ON test.* FROM A") instance.query("DROP TABLE test.view_1") - assert expected_error in instance.query_and_get_error("CREATE VIEW test.view_1 AS SELECT 1", user="A") + assert expected_error in instance.query_and_get_error( + "CREATE VIEW test.view_1 AS SELECT 1", user="A" + ) def test_implicit_create_temporary_table_grant(): instance.query("CREATE USER A") expected_error = "Not enough privileges" - assert expected_error in instance.query_and_get_error("CREATE TEMPORARY TABLE tmp(name String)", user="A") + assert expected_error in instance.query_and_get_error( + "CREATE TEMPORARY TABLE tmp(name String)", user="A" + ) instance.query("GRANT CREATE TABLE ON test.* TO A") instance.query("CREATE TEMPORARY TABLE tmp(name String)", user="A") instance.query("REVOKE CREATE TABLE ON *.* FROM A") - assert expected_error in instance.query_and_get_error("CREATE TEMPORARY TABLE tmp(name String)", user="A") + assert 
expected_error in instance.query_and_get_error( + "CREATE TEMPORARY TABLE tmp(name String)", user="A" + ) def test_introspection(): instance.query("CREATE USER A") instance.query("CREATE USER B") - instance.query('GRANT SELECT ON test.table TO A') - instance.query('GRANT CREATE ON *.* TO B WITH GRANT OPTION') + instance.query("GRANT SELECT ON test.table TO A") + instance.query("GRANT CREATE ON *.* TO B WITH GRANT OPTION") assert instance.query("SHOW USERS") == TSV(["A", "B", "default"]) assert instance.query("SHOW CREATE USERS A") == TSV(["CREATE USER A"]) assert instance.query("SHOW CREATE USERS B") == TSV(["CREATE USER B"]) - assert instance.query("SHOW CREATE USERS A,B") == TSV(["CREATE USER A", "CREATE USER B"]) - assert instance.query("SHOW CREATE USERS") == TSV(["CREATE USER A", "CREATE USER B", - "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default"]) + assert instance.query("SHOW CREATE USERS A,B") == TSV( + ["CREATE USER A", "CREATE USER B"] + ) + assert instance.query("SHOW CREATE USERS") == TSV( + [ + "CREATE USER A", + "CREATE USER B", + "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default", + ] + ) - assert instance.query("SHOW GRANTS FOR A") == TSV(["GRANT SELECT ON test.table TO A"]) - assert instance.query("SHOW GRANTS FOR B") == TSV(["GRANT CREATE ON *.* TO B WITH GRANT OPTION"]) + assert instance.query("SHOW GRANTS FOR A") == TSV( + ["GRANT SELECT ON test.table TO A"] + ) + assert instance.query("SHOW GRANTS FOR B") == TSV( + ["GRANT CREATE ON *.* TO B WITH GRANT OPTION"] + ) assert instance.query("SHOW GRANTS FOR A,B") == TSV( - ["GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION"]) + [ + "GRANT SELECT ON test.table TO A", + "GRANT CREATE ON *.* TO B WITH GRANT OPTION", + ] + ) assert instance.query("SHOW GRANTS FOR B,A") == TSV( - ["GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION"]) + [ + "GRANT SELECT ON test.table TO A", + "GRANT CREATE ON *.* TO B WITH GRANT OPTION", + ] + ) assert instance.query("SHOW GRANTS FOR ALL") == TSV( - ["GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION", - "GRANT ALL ON *.* TO default WITH GRANT OPTION"]) + [ + "GRANT SELECT ON test.table TO A", + "GRANT CREATE ON *.* TO B WITH GRANT OPTION", + "GRANT ALL ON *.* TO default WITH GRANT OPTION", + ] + ) - assert instance.query("SHOW GRANTS", user='A') == TSV(["GRANT SELECT ON test.table TO A"]) - assert instance.query("SHOW GRANTS", user='B') == TSV(["GRANT CREATE ON *.* TO B WITH GRANT OPTION"]) + assert instance.query("SHOW GRANTS", user="A") == TSV( + ["GRANT SELECT ON test.table TO A"] + ) + assert instance.query("SHOW GRANTS", user="B") == TSV( + ["GRANT CREATE ON *.* TO B WITH GRANT OPTION"] + ) + + assert instance.query("SHOW GRANTS FOR ALL", user="A") == TSV( + ["GRANT SELECT ON test.table TO A"] + ) + assert instance.query("SHOW GRANTS FOR ALL", user="B") == TSV( + ["GRANT CREATE ON *.* TO B WITH GRANT OPTION"] + ) + assert instance.query("SHOW GRANTS FOR ALL") == TSV( + [ + "GRANT SELECT ON test.table TO A", + "GRANT CREATE ON *.* TO B WITH GRANT OPTION", + "GRANT ALL ON *.* TO default WITH GRANT OPTION", + ] + ) - assert instance.query("SHOW GRANTS FOR ALL", user='A') == TSV(["GRANT SELECT ON test.table TO A"]) - assert instance.query("SHOW GRANTS FOR ALL", user='B') == TSV(["GRANT CREATE ON *.* TO B WITH GRANT OPTION"]) - assert instance.query("SHOW GRANTS FOR ALL") == TSV(["GRANT SELECT ON test.table TO A", - "GRANT CREATE ON *.* TO B WITH 
GRANT OPTION", - "GRANT ALL ON *.* TO default WITH GRANT OPTION"]) - expected_error = "necessary to have grant SHOW USERS" - assert expected_error in instance.query_and_get_error("SHOW GRANTS FOR B", user='A') - - expected_access1 = "CREATE USER A\n" \ - "CREATE USER B\n" \ - "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default" - expected_access2 = "GRANT SELECT ON test.table TO A\n" \ - "GRANT CREATE ON *.* TO B WITH GRANT OPTION\n" \ - "GRANT ALL ON *.* TO default WITH GRANT OPTION\n" + assert expected_error in instance.query_and_get_error("SHOW GRANTS FOR B", user="A") + + expected_access1 = ( + "CREATE USER A\n" + "CREATE USER B\n" + "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default" + ) + expected_access2 = ( + "GRANT SELECT ON test.table TO A\n" + "GRANT CREATE ON *.* TO B WITH GRANT OPTION\n" + "GRANT ALL ON *.* TO default WITH GRANT OPTION\n" + ) assert expected_access1 in instance.query("SHOW ACCESS") assert expected_access2 in instance.query("SHOW ACCESS") assert instance.query( - "SELECT name, storage, auth_type, auth_params, host_ip, host_names, host_names_regexp, host_names_like, default_roles_all, default_roles_list, default_roles_except from system.users WHERE name IN ('A', 'B') ORDER BY name") == \ - TSV([["A", "local directory", "no_password", "{}", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]"], - ["B", "local directory", "no_password", "{}", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]"]]) + "SELECT name, storage, auth_type, auth_params, host_ip, host_names, host_names_regexp, host_names_like, default_roles_all, default_roles_list, default_roles_except from system.users WHERE name IN ('A', 'B') ORDER BY name" + ) == TSV( + [ + [ + "A", + "local directory", + "no_password", + "{}", + "['::/0']", + "[]", + "[]", + "[]", + 1, + "[]", + "[]", + ], + [ + "B", + "local directory", + "no_password", + "{}", + "['::/0']", + "[]", + "[]", + "[]", + 1, + "[]", + "[]", + ], + ] + ) assert instance.query( - "SELECT * from system.grants WHERE user_name IN ('A', 'B') ORDER BY user_name, access_type, grant_option") == \ - TSV([["A", "\\N", "SELECT", "test", "table", "\\N", 0, 0], - ["B", "\\N", "CREATE", "\\N", "\\N", "\\N", 0, 1]]) + "SELECT * from system.grants WHERE user_name IN ('A', 'B') ORDER BY user_name, access_type, grant_option" + ) == TSV( + [ + ["A", "\\N", "SELECT", "test", "table", "\\N", 0, 0], + ["B", "\\N", "CREATE", "\\N", "\\N", "\\N", 0, 1], + ] + ) def test_current_database(): instance.query("CREATE USER A") instance.query("GRANT SELECT ON table TO A", database="test") - assert instance.query("SHOW GRANTS FOR A") == TSV(["GRANT SELECT ON test.table TO A"]) - assert instance.query("SHOW GRANTS FOR A", database="test") == TSV(["GRANT SELECT ON test.table TO A"]) + assert instance.query("SHOW GRANTS FOR A") == TSV( + ["GRANT SELECT ON test.table TO A"] + ) + assert instance.query("SHOW GRANTS FOR A", database="test") == TSV( + ["GRANT SELECT ON test.table TO A"] + ) - assert instance.query("SELECT * FROM test.table", user='A') == "1\t5\n2\t10\n" - assert instance.query("SELECT * FROM table", user='A', database='test') == "1\t5\n2\t10\n" + assert instance.query("SELECT * FROM test.table", user="A") == "1\t5\n2\t10\n" + assert ( + instance.query("SELECT * FROM table", user="A", database="test") + == "1\t5\n2\t10\n" + ) - instance.query("CREATE TABLE default.table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()") - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM table", user='A') 
+ instance.query( + "CREATE TABLE default.table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()" + ) + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM table", user="A" + ) def test_grant_with_replace_option(): instance.query("CREATE USER A") - instance.query('GRANT SELECT ON test.table TO A') - assert instance.query("SHOW GRANTS FOR A") == TSV(["GRANT SELECT ON test.table TO A"]) + instance.query("GRANT SELECT ON test.table TO A") + assert instance.query("SHOW GRANTS FOR A") == TSV( + ["GRANT SELECT ON test.table TO A"] + ) - instance.query('GRANT INSERT ON test.table TO A WITH REPLACE OPTION') - assert instance.query("SHOW GRANTS FOR A") == TSV(["GRANT INSERT ON test.table TO A"]) + instance.query("GRANT INSERT ON test.table TO A WITH REPLACE OPTION") + assert instance.query("SHOW GRANTS FOR A") == TSV( + ["GRANT INSERT ON test.table TO A"] + ) - instance.query('GRANT NONE ON *.* TO A WITH REPLACE OPTION') + instance.query("GRANT NONE ON *.* TO A WITH REPLACE OPTION") assert instance.query("SHOW GRANTS FOR A") == TSV([]) - instance.query('CREATE USER B') - instance.query('GRANT SELECT ON test.table TO B') + instance.query("CREATE USER B") + instance.query("GRANT SELECT ON test.table TO B") assert instance.query("SHOW GRANTS FOR A") == TSV([]) - assert instance.query("SHOW GRANTS FOR B") == TSV(["GRANT SELECT ON test.table TO B"]) + assert instance.query("SHOW GRANTS FOR B") == TSV( + ["GRANT SELECT ON test.table TO B"] + ) - expected_error = "it's necessary to have grant INSERT ON test.table WITH GRANT OPTION" - assert expected_error in instance.query_and_get_error("GRANT INSERT ON test.table TO B WITH REPLACE OPTION", user='A') + expected_error = ( + "it's necessary to have grant INSERT ON test.table WITH GRANT OPTION" + ) + assert expected_error in instance.query_and_get_error( + "GRANT INSERT ON test.table TO B WITH REPLACE OPTION", user="A" + ) assert instance.query("SHOW GRANTS FOR A") == TSV([]) - assert instance.query("SHOW GRANTS FOR B") == TSV(["GRANT SELECT ON test.table TO B"]) + assert instance.query("SHOW GRANTS FOR B") == TSV( + ["GRANT SELECT ON test.table TO B"] + ) instance.query("GRANT INSERT ON test.table TO A WITH GRANT OPTION") - expected_error = "it's necessary to have grant SELECT ON test.table WITH GRANT OPTION" - assert expected_error in instance.query_and_get_error("GRANT INSERT ON test.table TO B WITH REPLACE OPTION", user='A') - assert instance.query("SHOW GRANTS FOR A") == TSV(["GRANT INSERT ON test.table TO A WITH GRANT OPTION"]) - assert instance.query("SHOW GRANTS FOR B") == TSV(["GRANT SELECT ON test.table TO B"]) + expected_error = ( + "it's necessary to have grant SELECT ON test.table WITH GRANT OPTION" + ) + assert expected_error in instance.query_and_get_error( + "GRANT INSERT ON test.table TO B WITH REPLACE OPTION", user="A" + ) + assert instance.query("SHOW GRANTS FOR A") == TSV( + ["GRANT INSERT ON test.table TO A WITH GRANT OPTION"] + ) + assert instance.query("SHOW GRANTS FOR B") == TSV( + ["GRANT SELECT ON test.table TO B"] + ) instance.query("GRANT SELECT ON test.table TO A WITH GRANT OPTION") - instance.query("GRANT INSERT ON test.table TO B WITH REPLACE OPTION", user='A') - assert instance.query("SHOW GRANTS FOR A") == TSV(["GRANT SELECT, INSERT ON test.table TO A WITH GRANT OPTION"]) - assert instance.query("SHOW GRANTS FOR B") == TSV(["GRANT INSERT ON test.table TO B"]) + instance.query("GRANT INSERT ON test.table TO B WITH REPLACE OPTION", user="A") + assert instance.query("SHOW GRANTS FOR A") == 
TSV( + ["GRANT SELECT, INSERT ON test.table TO A WITH GRANT OPTION"] + ) + assert instance.query("SHOW GRANTS FOR B") == TSV( + ["GRANT INSERT ON test.table TO B"] + ) diff --git a/tests/integration/test_graphite_merge_tree/test.py b/tests/integration/test_graphite_merge_tree/test.py index 9e48f12f007..c4364a03fd9 100644 --- a/tests/integration/test_graphite_merge_tree/test.py +++ b/tests/integration/test_graphite_merge_tree/test.py @@ -9,9 +9,11 @@ from helpers.test_tools import TSV from helpers.test_tools import csv_compare cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', - main_configs=['configs/graphite_rollup.xml'], - user_configs=["configs/users.xml"]) +instance = cluster.add_instance( + "instance", + main_configs=["configs/graphite_rollup.xml"], + user_configs=["configs/users.xml"], +) q = instance.query @@ -19,7 +21,7 @@ q = instance.query def started_cluster(): try: cluster.start() - q('CREATE DATABASE test') + q("CREATE DATABASE test") yield cluster @@ -29,7 +31,8 @@ def started_cluster(): @pytest.fixture def graphite_table(started_cluster): - q(''' + q( + """ DROP TABLE IF EXISTS test.graphite; CREATE TABLE test.graphite (metric String, value Float64, timestamp UInt32, date Date, updated UInt32) @@ -37,11 +40,12 @@ CREATE TABLE test.graphite PARTITION BY toYYYYMM(date) ORDER BY (metric, timestamp) SETTINGS index_granularity=8192; -''') +""" + ) yield - q('DROP TABLE test.graphite') + q("DROP TABLE test.graphite") def test_rollup_versions(graphite_table): @@ -52,35 +56,42 @@ def test_rollup_versions(graphite_table): # Insert rows with timestamps relative to the current time so that the # first retention clause is active. # Two parts are created. - q(''' + q( + """ INSERT INTO test.graphite (metric, value, timestamp, date, updated) VALUES ('one_min.x1', 100, {timestamp}, '{date}', 1); INSERT INTO test.graphite (metric, value, timestamp, date, updated) VALUES ('one_min.x1', 200, {timestamp}, '{date}', 2); -'''.format(timestamp=timestamp, date=date)) +""".format( + timestamp=timestamp, date=date + ) + ) - expected1 = '''\ + expected1 = """\ one_min.x1 100 {timestamp} {date} 1 one_min.x1 200 {timestamp} {date} 2 -'''.format(timestamp=timestamp, date=date) +""".format( + timestamp=timestamp, date=date + ) - assert TSV( - q('SELECT * FROM test.graphite ORDER BY updated') - ) == TSV(expected1) + assert TSV(q("SELECT * FROM test.graphite ORDER BY updated")) == TSV(expected1) - q('OPTIMIZE TABLE test.graphite') + q("OPTIMIZE TABLE test.graphite") # After rollup only the row with max version is retained. - expected2 = '''\ + expected2 = """\ one_min.x1 200 {timestamp} {date} 2 -'''.format(timestamp=rounded_timestamp, date=date) +""".format( + timestamp=rounded_timestamp, date=date + ) - assert TSV(q('SELECT * FROM test.graphite')) == TSV(expected2) + assert TSV(q("SELECT * FROM test.graphite")) == TSV(expected2) def test_rollup_aggregation(graphite_table): # This query essentially emulates what rollup does. - result1 = q(''' + result1 = q( + """ SELECT avg(v), max(upd) FROM (SELECT timestamp, argMax(value, (updated, number)) AS v, @@ -94,16 +105,18 @@ FROM (SELECT timestamp, FROM system.numbers LIMIT 1000000) WHERE intDiv(timestamp, 600) * 600 = 1111444200 GROUP BY timestamp) -''') +""" + ) - expected1 = '''\ + expected1 = """\ 999634.9918367347 499999 -''' +""" assert TSV(result1) == TSV(expected1) # Timestamp 1111111111 is in sufficiently distant past # so that the last retention clause is active. 
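# Editor's sketch (not part of the patch): why timestamp 1111111111 hits the
# "last retention clause" mentioned in the comment above -- it falls in March
# 2005, so at test time its age far exceeds the one-year (31536000 s) age
# threshold of the ^one_min pattern listed in the system.graphite_retentions
# expectations later in this file.
import datetime
print(datetime.datetime.utcfromtimestamp(1111111111))  # 2005-03-18 01:58:31 UTC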
- result2 = q(''' + result2 = q( + """ INSERT INTO test.graphite SELECT 'one_min.x' AS metric, toFloat64(number) AS value, @@ -115,17 +128,19 @@ INSERT INTO test.graphite OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL; SELECT * FROM test.graphite; -''') +""" + ) - expected2 = '''\ + expected2 = """\ one_min.x 999634.9918367347 1111444200 2017-02-02 499999 -''' +""" assert TSV(result2) == TSV(expected2) def test_rollup_aggregation_2(graphite_table): - result = q(''' + result = q( + """ INSERT INTO test.graphite SELECT 'one_min.x' AS metric, toFloat64(number) AS value, @@ -137,17 +152,19 @@ INSERT INTO test.graphite OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL; SELECT * FROM test.graphite; -''') +""" + ) - expected = '''\ + expected = """\ one_min.x 24 1111110600 2017-02-02 100 -''' +""" assert TSV(result) == TSV(expected) def test_multiple_paths_and_versions(graphite_table): - result = q(''' + result = q( + """ INSERT INTO test.graphite SELECT 'one_min.x' AS metric, toFloat64(number) AS value, @@ -172,69 +189,72 @@ INSERT INTO test.graphite OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL; SELECT * FROM test.graphite; -''') +""" + ) - with open(p.join(p.dirname(__file__), - 'test_multiple_paths_and_versions.reference') - ) as reference: + with open( + p.join(p.dirname(__file__), "test_multiple_paths_and_versions.reference") + ) as reference: assert TSV(result) == TSV(reference) def test_multiple_output_blocks(graphite_table): MERGED_BLOCK_SIZE = 8192 - to_insert = '' - expected = '' + to_insert = "" + expected = "" for i in range(2 * MERGED_BLOCK_SIZE + 1): rolled_up_time = 1000000200 + 600 * i for j in range(3): cur_time = rolled_up_time + 100 * j - to_insert += 'one_min.x1 {} {} 2001-09-09 1\n'.format( - 10 * j, cur_time - ) - to_insert += 'one_min.x1 {} {} 2001-09-09 2\n'.format( + to_insert += "one_min.x1 {} {} 2001-09-09 1\n".format(10 * j, cur_time) + to_insert += "one_min.x1 {} {} 2001-09-09 2\n".format( 10 * (j + 1), cur_time ) - expected += 'one_min.x1 20 {} 2001-09-09 2\n'.format(rolled_up_time) + expected += "one_min.x1 20 {} 2001-09-09 2\n".format(rolled_up_time) - q('INSERT INTO test.graphite FORMAT TSV', to_insert) + q("INSERT INTO test.graphite FORMAT TSV", to_insert) - result = q(''' + result = q( + """ OPTIMIZE TABLE test.graphite PARTITION 200109 FINAL; SELECT * FROM test.graphite; -''') +""" + ) assert TSV(result) == TSV(expected) def test_paths_not_matching_any_pattern(graphite_table): - to_insert = '''\ + to_insert = """\ one_min.x1 100 1000000000 2001-09-09 1 zzzzzzzz 100 1000000001 2001-09-09 1 zzzzzzzz 200 1000000001 2001-09-09 2 -''' +""" - q('INSERT INTO test.graphite FORMAT TSV', to_insert) + q("INSERT INTO test.graphite FORMAT TSV", to_insert) - expected = '''\ + expected = """\ one_min.x1 100 999999600 2001-09-09 1 zzzzzzzz 200 1000000001 2001-09-09 2 -''' +""" - result = q(''' + result = q( + """ OPTIMIZE TABLE test.graphite PARTITION 200109 FINAL; SELECT * FROM test.graphite; -''') +""" + ) assert TSV(result) == TSV(expected) def test_system_graphite_retentions(graphite_table): - expected = ''' + expected = """ graphite_rollup all \\\\.count$ sum 0 0 1 0 ['test'] ['graphite'] graphite_rollup all \\\\.max$ max 0 0 2 0 ['test'] ['graphite'] graphite_rollup all ^five_min\\\\. 31536000 14400 3 0 ['test'] ['graphite'] @@ -243,13 +263,14 @@ graphite_rollup all ^five_min\\\\. 
0 300 3 0 ['test'] ['graphite'] graphite_rollup all ^one_min avg 31536000 600 4 0 ['test'] ['graphite'] graphite_rollup all ^one_min avg 7776000 300 4 0 ['test'] ['graphite'] graphite_rollup all ^one_min avg 0 60 4 0 ['test'] ['graphite'] - ''' - result = q('SELECT * from system.graphite_retentions') + """ + result = q("SELECT * from system.graphite_retentions") mismatch = csv_compare(result, expected) assert len(mismatch) == 0, f"got\n{result}\nwant\n{expected}\ndiff\n{mismatch}\n" - q(''' + q( + """ DROP TABLE IF EXISTS test.graphite2; CREATE TABLE test.graphite2 (metric String, value Float64, timestamp UInt32, date Date, updated UInt32) @@ -257,8 +278,9 @@ CREATE TABLE test.graphite2 PARTITION BY toYYYYMM(date) ORDER BY (metric, timestamp) SETTINGS index_granularity=8192; - ''') - expected = ''' + """ + ) + expected = """ graphite_rollup ['test','test'] ['graphite','graphite2'] graphite_rollup ['test','test'] ['graphite','graphite2'] graphite_rollup ['test','test'] ['graphite','graphite2'] @@ -267,19 +289,22 @@ graphite_rollup ['test','test'] ['graphite','graphite2'] graphite_rollup ['test','test'] ['graphite','graphite2'] graphite_rollup ['test','test'] ['graphite','graphite2'] graphite_rollup ['test','test'] ['graphite','graphite2'] - ''' - result = q(''' + """ + result = q( + """ SELECT config_name, Tables.database, Tables.table FROM system.graphite_retentions - ''') + """ + ) assert TSV(result) == TSV(expected) def test_path_dangling_pointer(graphite_table): - q(''' + q( + """ DROP TABLE IF EXISTS test.graphite2; CREATE TABLE test.graphite2 (metric String, value Float64, timestamp UInt32, date Date, updated UInt32) @@ -287,37 +312,48 @@ CREATE TABLE test.graphite2 PARTITION BY toYYYYMM(date) ORDER BY (metric, timestamp) SETTINGS index_granularity=1; - ''') + """ + ) - path = 'abcd' * 4000000 # 16MB - q('INSERT INTO test.graphite2 FORMAT TSV', - "{}\t0.0\t0\t2018-01-01\t100\n".format(path)) - q('INSERT INTO test.graphite2 FORMAT TSV', - "{}\t0.0\t0\t2018-01-01\t101\n".format(path)) + path = "abcd" * 4000000 # 16MB + q( + "INSERT INTO test.graphite2 FORMAT TSV", + "{}\t0.0\t0\t2018-01-01\t100\n".format(path), + ) + q( + "INSERT INTO test.graphite2 FORMAT TSV", + "{}\t0.0\t0\t2018-01-01\t101\n".format(path), + ) for version in range(10): - q('INSERT INTO test.graphite2 FORMAT TSV', - "{}\t0.0\t0\t2018-01-01\t{}\n".format(path, version)) + q( + "INSERT INTO test.graphite2 FORMAT TSV", + "{}\t0.0\t0\t2018-01-01\t{}\n".format(path, version), + ) while True: - q('OPTIMIZE TABLE test.graphite2 PARTITION 201801 FINAL') - parts = int(q("SELECT count() FROM system.parts " - "WHERE active AND database='test' " - "AND table='graphite2'")) + q("OPTIMIZE TABLE test.graphite2 PARTITION 201801 FINAL") + parts = int( + q( + "SELECT count() FROM system.parts " + "WHERE active AND database='test' " + "AND table='graphite2'" + ) + ) if parts == 1: break - print(('Parts', parts)) + print(("Parts", parts)) - assert TSV( - q("SELECT value, timestamp, date, updated FROM test.graphite2") - ) == TSV("0\t0\t2018-01-01\t101\n") + assert TSV(q("SELECT value, timestamp, date, updated FROM test.graphite2")) == TSV( + "0\t0\t2018-01-01\t101\n" + ) - q('DROP TABLE test.graphite2') + q("DROP TABLE test.graphite2") def test_combined_rules(graphite_table): # 1487970000 ~ Sat 25 Feb 00:00:00 MSK 2017 - to_insert = 'INSERT INTO test.graphite VALUES ' - expected_unmerged = '' + to_insert = "INSERT INTO test.graphite VALUES " + expected_unmerged = "" for i in range(384): to_insert += "('five_min.count', {v}, {t}, 
toDate({t}), 1), ".format( v=1, t=1487970000 + (i * 300) @@ -325,18 +361,20 @@ def test_combined_rules(graphite_table): to_insert += "('five_min.max', {v}, {t}, toDate({t}), 1), ".format( v=i, t=1487970000 + (i * 300) ) - expected_unmerged += ("five_min.count\t{v1}\t{t}\n" - "five_min.max\t{v2}\t{t}\n").format( - v1=1, v2=i, - t=1487970000 + (i * 300) - ) + expected_unmerged += ( + "five_min.count\t{v1}\t{t}\n" "five_min.max\t{v2}\t{t}\n" + ).format(v1=1, v2=i, t=1487970000 + (i * 300)) q(to_insert) - assert TSV(q('SELECT metric, value, timestamp FROM test.graphite' - ' ORDER BY (timestamp, metric)')) == TSV(expected_unmerged) + assert TSV( + q( + "SELECT metric, value, timestamp FROM test.graphite" + " ORDER BY (timestamp, metric)" + ) + ) == TSV(expected_unmerged) - q('OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL') - expected_merged = ''' + q("OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL") + expected_merged = """ five_min.count 48 1487970000 2017-02-25 1 five_min.count 48 1487984400 2017-02-25 1 five_min.count 48 1487998800 2017-02-25 1 @@ -353,13 +391,15 @@ def test_combined_rules(graphite_table): five_min.max 287 1488042000 2017-02-25 1 five_min.max 335 1488056400 2017-02-26 1 five_min.max 383 1488070800 2017-02-26 1 - ''' - assert TSV(q('SELECT * FROM test.graphite' - ' ORDER BY (metric, timestamp)')) == TSV(expected_merged) + """ + assert TSV(q("SELECT * FROM test.graphite" " ORDER BY (metric, timestamp)")) == TSV( + expected_merged + ) def test_combined_rules_with_default(graphite_table): - q(''' + q( + """ DROP TABLE IF EXISTS test.graphite; CREATE TABLE test.graphite (metric String, value Float64, timestamp UInt32, date Date, updated UInt32) @@ -367,10 +407,11 @@ CREATE TABLE test.graphite PARTITION BY toYYYYMM(date) ORDER BY (metric, timestamp) SETTINGS index_granularity=1; - ''') + """ + ) # 1487970000 ~ Sat 25 Feb 00:00:00 MSK 2017 - to_insert = 'INSERT INTO test.graphite VALUES ' - expected_unmerged = '' + to_insert = "INSERT INTO test.graphite VALUES " + expected_unmerged = "" for i in range(100): to_insert += "('top_level.count', {v}, {t}, toDate({t}), 1), ".format( v=1, t=1487970000 + (i * 60) @@ -378,18 +419,20 @@ CREATE TABLE test.graphite to_insert += "('top_level.max', {v}, {t}, toDate({t}), 1), ".format( v=i, t=1487970000 + (i * 60) ) - expected_unmerged += ("top_level.count\t{v1}\t{t}\n" - "top_level.max\t{v2}\t{t}\n").format( - v1=1, v2=i, - t=1487970000 + (i * 60) - ) + expected_unmerged += ( + "top_level.count\t{v1}\t{t}\n" "top_level.max\t{v2}\t{t}\n" + ).format(v1=1, v2=i, t=1487970000 + (i * 60)) q(to_insert) - assert TSV(q('SELECT metric, value, timestamp FROM test.graphite' - ' ORDER BY (timestamp, metric)')) == TSV(expected_unmerged) + assert TSV( + q( + "SELECT metric, value, timestamp FROM test.graphite" + " ORDER BY (timestamp, metric)" + ) + ) == TSV(expected_unmerged) - q('OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL') - expected_merged = ''' + q("OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL") + expected_merged = """ top_level.count 10 1487970000 2017-02-25 1 top_level.count 10 1487970600 2017-02-25 1 top_level.count 10 1487971200 2017-02-25 1 @@ -410,13 +453,15 @@ CREATE TABLE test.graphite top_level.max 79 1487974200 2017-02-25 1 top_level.max 89 1487974800 2017-02-25 1 top_level.max 99 1487975400 2017-02-25 1 - ''' - assert TSV(q('SELECT * FROM test.graphite' - ' ORDER BY (metric, timestamp)')) == TSV(expected_merged) + """ + assert TSV(q("SELECT * FROM test.graphite" " ORDER BY (metric, timestamp)")) == TSV( + 
expected_merged + ) def test_broken_partial_rollup(graphite_table): - q(''' + q( + """ DROP TABLE IF EXISTS test.graphite; CREATE TABLE test.graphite (metric String, value Float64, timestamp UInt32, date Date, updated UInt32) @@ -424,41 +469,46 @@ CREATE TABLE test.graphite PARTITION BY toYYYYMM(date) ORDER BY (metric, timestamp) SETTINGS index_granularity=1; - ''') - to_insert = '''\ + """ + ) + to_insert = """\ one_min.x1 100 1000000000 2001-09-09 1 zzzzzzzz 100 1000000001 2001-09-09 1 zzzzzzzz 200 1000000001 2001-09-09 2 -''' +""" - q('INSERT INTO test.graphite FORMAT TSV', to_insert) + q("INSERT INTO test.graphite FORMAT TSV", to_insert) - expected = '''\ + expected = """\ one_min.x1 100 1000000000 2001-09-09 1 zzzzzzzz 200 1000000001 2001-09-09 2 -''' +""" - result = q(''' + result = q( + """ OPTIMIZE TABLE test.graphite PARTITION 200109 FINAL; SELECT * FROM test.graphite; -''') +""" + ) assert TSV(result) == TSV(expected) def test_wrong_rollup_config(graphite_table): with pytest.raises(QueryRuntimeException) as exc: - q(''' + q( + """ CREATE TABLE test.graphite_not_created (metric String, value Float64, timestamp UInt32, date Date, updated UInt32) ENGINE = GraphiteMergeTree('graphite_rollup_wrong_age_precision') PARTITION BY toYYYYMM(date) ORDER BY (metric, timestamp) SETTINGS index_granularity=1; - ''') + """ + ) # The order of retentions is not guaranteed - assert ("age and precision should only grow up: " in str(exc.value)) - assert ("36000:600" in str(exc.value)) - assert ("72000:300" in str(exc.value)) + assert "age and precision should only grow up: " in str(exc.value) + assert "36000:600" in str(exc.value) + assert "72000:300" in str(exc.value) diff --git a/tests/integration/test_graphite_merge_tree_typed/test.py b/tests/integration/test_graphite_merge_tree_typed/test.py index e26fd0d2e77..5647489f64f 100644 --- a/tests/integration/test_graphite_merge_tree_typed/test.py +++ b/tests/integration/test_graphite_merge_tree_typed/test.py @@ -10,9 +10,11 @@ from helpers.test_tools import TSV from helpers.test_tools import csv_compare cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', - main_configs=['configs/graphite_rollup.xml'], - user_configs=["configs/users.xml"]) +instance = cluster.add_instance( + "instance", + main_configs=["configs/graphite_rollup.xml"], + user_configs=["configs/users.xml"], +) q = instance.query @@ -20,7 +22,7 @@ q = instance.query def started_cluster(): try: cluster.start() - q('CREATE DATABASE test') + q("CREATE DATABASE test") yield cluster @@ -30,7 +32,8 @@ def started_cluster(): @pytest.fixture def graphite_table(started_cluster): - q(''' + q( + """ DROP TABLE IF EXISTS test.graphite; CREATE TABLE test.graphite (metric String, value Float64, timestamp UInt32, date Date, updated UInt32) @@ -38,11 +41,12 @@ CREATE TABLE test.graphite PARTITION BY toYYYYMM(date) ORDER BY (metric, timestamp) SETTINGS index_granularity=8192; -''') +""" + ) yield - q('DROP TABLE test.graphite') + q("DROP TABLE test.graphite") def test_rollup_versions_plain(graphite_table): @@ -53,30 +57,36 @@ def test_rollup_versions_plain(graphite_table): # Insert rows with timestamps relative to the current time so that the # first retention clause is active. # Two parts are created. 
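# Editor's note (hedged, not part of the patch): this typed variant of the test
# exercises rule_type-scoped rollup patterns. Based on the "plain"/"tagged"/"all"
# rows expected from system.graphite_retentions further down, the metric forms
# used in this file map to rule types roughly as follows:
plain_metric = "one_min.x1"              # dotted name        -> "plain" rules
tagged_metric = "x1?retention=one_min"   # name?tag=value     -> "tagged" rules
mixed_metric = "ten_min.x1?env=staging"  # "all" rules apply to both forms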
- q(''' + q( + """ INSERT INTO test.graphite (metric, value, timestamp, date, updated) VALUES ('one_min.x1', 100, {timestamp}, '{date}', 1); INSERT INTO test.graphite (metric, value, timestamp, date, updated) VALUES ('one_min.x1', 200, {timestamp}, '{date}', 2); -'''.format(timestamp=timestamp, date=date)) +""".format( + timestamp=timestamp, date=date + ) + ) - expected1 = '''\ + expected1 = """\ one_min.x1 100 {timestamp} {date} 1 one_min.x1 200 {timestamp} {date} 2 -'''.format(timestamp=timestamp, date=date) +""".format( + timestamp=timestamp, date=date + ) - assert TSV( - q('SELECT * FROM test.graphite ORDER BY updated') - ) == TSV(expected1) + assert TSV(q("SELECT * FROM test.graphite ORDER BY updated")) == TSV(expected1) - q('OPTIMIZE TABLE test.graphite') + q("OPTIMIZE TABLE test.graphite") # After rollup only the row with max version is retained. - expected2 = '''\ + expected2 = """\ one_min.x1 200 {timestamp} {date} 2 -'''.format(timestamp=rounded_timestamp, date=date) +""".format( + timestamp=rounded_timestamp, date=date + ) - assert TSV(q('SELECT * FROM test.graphite')) == TSV(expected2) + assert TSV(q("SELECT * FROM test.graphite")) == TSV(expected2) def test_rollup_versions_tagged(graphite_table): @@ -87,30 +97,38 @@ def test_rollup_versions_tagged(graphite_table): # Insert rows with timestamps relative to the current time so that the # first retention clause is active. # Two parts are created. - q(''' + q( + """ INSERT INTO test.graphite (metric, value, timestamp, date, updated) VALUES ('x1?retention=one_min', 100, {timestamp}, '{date}', 1); INSERT INTO test.graphite (metric, value, timestamp, date, updated) VALUES ('x1?retention=one_min', 200, {timestamp}, '{date}', 2); -'''.format(timestamp=timestamp, date=date)) +""".format( + timestamp=timestamp, date=date + ) + ) - expected1 = '''\ + expected1 = """\ x1?retention=one_min 100 {timestamp} {date} 1 x1?retention=one_min 200 {timestamp} {date} 2 -'''.format(timestamp=timestamp, date=date) +""".format( + timestamp=timestamp, date=date + ) - result = q('SELECT * FROM test.graphite ORDER BY metric, updated') + result = q("SELECT * FROM test.graphite ORDER BY metric, updated") mismatch = csv_compare(result, expected1) assert len(mismatch) == 0, f"got\n{result}\nwant\n{expected1}\ndiff\n{mismatch}\n" - q('OPTIMIZE TABLE test.graphite') + q("OPTIMIZE TABLE test.graphite") # After rollup only the row with max version is retained. - expected2 = '''\ + expected2 = """\ x1?retention=one_min 200 {timestamp} {date} 2 -'''.format(timestamp=rounded_timestamp, date=date) +""".format( + timestamp=rounded_timestamp, date=date + ) - result = q('SELECT * FROM test.graphite ORDER BY metric, updated') + result = q("SELECT * FROM test.graphite ORDER BY metric, updated") mismatch = csv_compare(result, expected2) assert len(mismatch) == 0, f"got\n{result}\nwant\n{expected2}\ndiff\n{mismatch}\n" @@ -123,7 +141,8 @@ def test_rollup_versions_all(graphite_table): # Insert rows with timestamps relative to the current time so that the # first retention clause is active. # Two parts are created. 
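# Editor's sketch (hedged, not part of the patch): the `timestamp`,
# `rounded_timestamp` and `date` placeholders formatted into the queries above
# come from unchanged context that these hunks do not show; a definition
# consistent with the assertions (current time rounded down to the matching
# rule's precision, assumed 60 s here) would be:
import datetime
import time
timestamp = int(time.time())
rounded_timestamp = timestamp - timestamp % 60  # assumed rule precision
date = datetime.date.today().isoformat()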
- q(''' + q( + """ INSERT INTO test.graphite (metric, value, timestamp, date, updated) VALUES ('ten_min.x1', 100, {timestamp}, '{date}', 1); INSERT INTO test.graphite (metric, value, timestamp, date, updated) @@ -132,35 +151,43 @@ INSERT INTO test.graphite (metric, value, timestamp, date, updated) VALUES ('ten_min.x1?env=staging', 100, {timestamp}, '{date}', 1); INSERT INTO test.graphite (metric, value, timestamp, date, updated) VALUES ('ten_min.x1?env=staging', 200, {timestamp}, '{date}', 2); -'''.format(timestamp=timestamp, date=date)) +""".format( + timestamp=timestamp, date=date + ) + ) - expected1 = '''\ + expected1 = """\ ten_min.x1 100 {timestamp} {date} 1 ten_min.x1 200 {timestamp} {date} 2 ten_min.x1?env=staging 100 {timestamp} {date} 1 ten_min.x1?env=staging 200 {timestamp} {date} 2 -'''.format(timestamp=timestamp, date=date) +""".format( + timestamp=timestamp, date=date + ) - result = q('SELECT * FROM test.graphite ORDER BY metric, updated') + result = q("SELECT * FROM test.graphite ORDER BY metric, updated") mismatch = csv_compare(result, expected1) assert len(mismatch) == 0, f"got\n{result}\nwant\n{expected1}\ndiff\n{mismatch}\n" - q('OPTIMIZE TABLE test.graphite') + q("OPTIMIZE TABLE test.graphite") # After rollup only the row with max version is retained. - expected2 = '''\ + expected2 = """\ ten_min.x1 200 {timestamp} {date} 2 ten_min.x1?env=staging 200 {timestamp} {date} 2 -'''.format(timestamp=rounded_timestamp, date=date) +""".format( + timestamp=rounded_timestamp, date=date + ) - result = q('SELECT * FROM test.graphite ORDER BY metric, updated') + result = q("SELECT * FROM test.graphite ORDER BY metric, updated") mismatch = csv_compare(result, expected2) assert len(mismatch) == 0, f"got\n{result}\nwant\n{expected2}\ndiff\n{mismatch}\n" def test_rollup_aggregation_plain(graphite_table): # This query essentially emulates what rollup does. - result1 = q(''' + result1 = q( + """ SELECT avg(v), max(upd) FROM (SELECT timestamp, argMax(value, (updated, number)) AS v, @@ -174,16 +201,18 @@ FROM (SELECT timestamp, FROM system.numbers LIMIT 1000000) WHERE intDiv(timestamp, 600) * 600 = 1111444200 GROUP BY timestamp) -''') +""" + ) - expected1 = '''\ + expected1 = """\ 999634.9918367347 499999 -''' +""" assert TSV(result1) == TSV(expected1) # Timestamp 1111111111 is in sufficiently distant past # so that the last retention clause is active. - result2 = q(''' + result2 = q( + """ INSERT INTO test.graphite SELECT 'one_min.x' AS metric, toFloat64(number) AS value, @@ -195,18 +224,20 @@ INSERT INTO test.graphite OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL; SELECT * FROM test.graphite; -''') +""" + ) - expected2 = '''\ + expected2 = """\ one_min.x 999634.9918367347 1111444200 2017-02-02 499999 -''' +""" assert TSV(result2) == TSV(expected2) def test_rollup_aggregation_tagged(graphite_table): # This query essentially emulates what rollup does. - result1 = q(''' + result1 = q( + """ SELECT avg(v), max(upd) FROM (SELECT timestamp, argMax(value, (updated, number)) AS v, @@ -220,16 +251,18 @@ FROM (SELECT timestamp, FROM system.numbers LIMIT 1000000) WHERE intDiv(timestamp, 600) * 600 = 1111444200 GROUP BY timestamp) -''') +""" + ) - expected1 = '''\ + expected1 = """\ 999634.9918367347 499999 -''' +""" assert TSV(result1) == TSV(expected1) # Timestamp 1111111111 is in sufficiently distant past # so that the last retention clause is active. 
- result2 = q(''' + result2 = q( + """ INSERT INTO test.graphite SELECT 'x?retention=one_min' AS metric, toFloat64(number) AS value, @@ -241,17 +274,19 @@ INSERT INTO test.graphite OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL; SELECT * FROM test.graphite; -''') +""" + ) - expected2 = '''\ + expected2 = """\ x?retention=one_min 999634.9918367347 1111444200 2017-02-02 499999 -''' +""" assert TSV(result2) == TSV(expected2) def test_rollup_aggregation_2_plain(graphite_table): - result = q(''' + result = q( + """ INSERT INTO test.graphite SELECT 'one_min.x' AS metric, toFloat64(number) AS value, @@ -263,17 +298,19 @@ INSERT INTO test.graphite OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL; SELECT * FROM test.graphite; -''') +""" + ) - expected = '''\ + expected = """\ one_min.x 24 1111110600 2017-02-02 100 -''' +""" assert TSV(result) == TSV(expected) def test_rollup_aggregation_2_tagged(graphite_table): - result = q(''' + result = q( + """ INSERT INTO test.graphite SELECT 'x?retention=one_min' AS metric, toFloat64(number) AS value, @@ -285,17 +322,19 @@ INSERT INTO test.graphite OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL; SELECT * FROM test.graphite; -''') +""" + ) - expected = '''\ + expected = """\ x?retention=one_min 24 1111110600 2017-02-02 100 -''' +""" assert TSV(result) == TSV(expected) def test_multiple_paths_and_versions_plain(graphite_table): - result = q(''' + result = q( + """ INSERT INTO test.graphite SELECT 'one_min.x' AS metric, toFloat64(number) AS value, @@ -320,16 +359,18 @@ INSERT INTO test.graphite OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL; SELECT * FROM test.graphite; -''') +""" + ) - with open(p.join(p.dirname(__file__), - 'test_multiple_paths_and_versions.reference.plain') - ) as reference: + with open( + p.join(p.dirname(__file__), "test_multiple_paths_and_versions.reference.plain") + ) as reference: assert TSV(result) == TSV(reference) def test_multiple_paths_and_versions_tagged(graphite_table): - result = q(''' + result = q( + """ INSERT INTO test.graphite SELECT 'x?retention=one_min' AS metric, toFloat64(number) AS value, @@ -354,97 +395,102 @@ INSERT INTO test.graphite OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL; SELECT * FROM test.graphite; -''') +""" + ) - with open(p.join(p.dirname(__file__), - 'test_multiple_paths_and_versions.reference.tagged') - ) as reference: + with open( + p.join(p.dirname(__file__), "test_multiple_paths_and_versions.reference.tagged") + ) as reference: assert TSV(result) == TSV(reference) def test_multiple_output_blocks(graphite_table): MERGED_BLOCK_SIZE = 8192 - to_insert = '' - expected = '' + to_insert = "" + expected = "" for i in range(2 * MERGED_BLOCK_SIZE + 1): rolled_up_time = 1000000200 + 600 * i for j in range(3): cur_time = rolled_up_time + 100 * j - to_insert += 'one_min.x1 {} {} 2001-09-09 1\n'.format( - 10 * j, cur_time - ) - to_insert += 'one_min.x1 {} {} 2001-09-09 2\n'.format( + to_insert += "one_min.x1 {} {} 2001-09-09 1\n".format(10 * j, cur_time) + to_insert += "one_min.x1 {} {} 2001-09-09 2\n".format( 10 * (j + 1), cur_time ) - expected += 'one_min.x1 20 {} 2001-09-09 2\n'.format(rolled_up_time) + expected += "one_min.x1 20 {} 2001-09-09 2\n".format(rolled_up_time) - q('INSERT INTO test.graphite FORMAT TSV', to_insert) + q("INSERT INTO test.graphite FORMAT TSV", to_insert) - result = q(''' + result = q( + """ OPTIMIZE TABLE test.graphite PARTITION 200109 FINAL; SELECT * FROM test.graphite; -''') +""" + ) assert TSV(result) == TSV(expected) def 
test_paths_not_matching_any_pattern(graphite_table): - to_insert = '''\ + to_insert = """\ one_min.x1 100 1000000000 2001-09-09 1 zzzzzzzz 100 1000000001 2001-09-09 1 zzzzzzzz 200 1000000001 2001-09-09 2 -''' +""" - q('INSERT INTO test.graphite FORMAT TSV', to_insert) + q("INSERT INTO test.graphite FORMAT TSV", to_insert) - expected = '''\ + expected = """\ one_min.x1 100 999999600 2001-09-09 1 zzzzzzzz 200 1000000001 2001-09-09 2 -''' +""" - result = q(''' + result = q( + """ OPTIMIZE TABLE test.graphite PARTITION 200109 FINAL; SELECT * FROM test.graphite; -''') +""" + ) assert TSV(result) == TSV(expected) def test_rules_isolation(graphite_table): - to_insert = '''\ + to_insert = """\ one_min.x1 100 1000000000 2001-09-09 1 for_taggged 100 1000000001 2001-09-09 1 for_taggged 200 1000000001 2001-09-09 2 one_min?env=staging 100 1000000001 2001-09-09 1 one_min?env=staging 200 1000000001 2001-09-09 2 -''' +""" - q('INSERT INTO test.graphite FORMAT TSV', to_insert) + q("INSERT INTO test.graphite FORMAT TSV", to_insert) - expected = '''\ + expected = """\ for_taggged 200 1000000001 2001-09-09 2 one_min.x1 100 999999600 2001-09-09 1 one_min?env=staging 200 1000000001 2001-09-09 2 -''' +""" - result = q(''' + result = q( + """ OPTIMIZE TABLE test.graphite PARTITION 200109 FINAL; SELECT * FROM test.graphite; -''') +""" + ) - result = q('SELECT * FROM test.graphite ORDER BY metric, updated') + result = q("SELECT * FROM test.graphite ORDER BY metric, updated") mismatch = csv_compare(result, expected) assert len(mismatch) == 0, f"got\n{result}\nwant\n{expected}\ndiff\n{mismatch}\n" def test_system_graphite_retentions(graphite_table): - expected = ''' + expected = """ graphite_rollup plain \\\\.count$ sum 0 0 1 0 ['test'] ['graphite'] graphite_rollup plain \\\\.max$ max 0 0 2 0 ['test'] ['graphite'] graphite_rollup plain ^five_min\\\\. 31536000 14400 3 0 ['test'] ['graphite'] @@ -465,13 +511,14 @@ graphite_rollup tagged ^for_taggged avg 0 60 7 0 ['test'] ['graphite'] graphite_rollup all ^ten_min\\\\. sum 31536000 28800 8 0 ['test'] ['graphite'] graphite_rollup all ^ten_min\\\\. sum 5184000 7200 8 0 ['test'] ['graphite'] graphite_rollup all ^ten_min\\\\. 
sum 0 600 8 0 ['test'] ['graphite'] - ''' - result = q('SELECT * from system.graphite_retentions') + """ + result = q("SELECT * from system.graphite_retentions") mismatch = csv_compare(result, expected) assert len(mismatch) == 0, f"got\n{result}\nwant\n{expected}\ndiff\n{mismatch}\n" - q(''' + q( + """ DROP TABLE IF EXISTS test.graphite2; CREATE TABLE test.graphite2 (metric String, value Float64, timestamp UInt32, date Date, updated UInt32) @@ -479,8 +526,9 @@ CREATE TABLE test.graphite2 PARTITION BY toYYYYMM(date) ORDER BY (metric, timestamp) SETTINGS index_granularity=8192; - ''') - expected = ''' + """ + ) + expected = """ graphite_rollup ['test','test'] ['graphite','graphite2'] graphite_rollup ['test','test'] ['graphite','graphite2'] graphite_rollup ['test','test'] ['graphite','graphite2'] @@ -489,19 +537,22 @@ graphite_rollup ['test','test'] ['graphite','graphite2'] graphite_rollup ['test','test'] ['graphite','graphite2'] graphite_rollup ['test','test'] ['graphite','graphite2'] graphite_rollup ['test','test'] ['graphite','graphite2'] - ''' - result = q(''' + """ + result = q( + """ SELECT config_name, Tables.database, Tables.table FROM system.graphite_retentions - ''') + """ + ) assert csv_compare(result, expected), f"got\n{result}\nwant\n{expected}" def test_path_dangling_pointer(graphite_table): - q(''' + q( + """ DROP TABLE IF EXISTS test.graphite2; CREATE TABLE test.graphite2 (metric String, value Float64, timestamp UInt32, date Date, updated UInt32) @@ -509,37 +560,48 @@ CREATE TABLE test.graphite2 PARTITION BY toYYYYMM(date) ORDER BY (metric, timestamp) SETTINGS index_granularity=1; - ''') + """ + ) - path = 'abcd' * 4000000 # 16MB - q('INSERT INTO test.graphite2 FORMAT TSV', - "{}\t0.0\t0\t2018-01-01\t100\n".format(path)) - q('INSERT INTO test.graphite2 FORMAT TSV', - "{}\t0.0\t0\t2018-01-01\t101\n".format(path)) + path = "abcd" * 4000000 # 16MB + q( + "INSERT INTO test.graphite2 FORMAT TSV", + "{}\t0.0\t0\t2018-01-01\t100\n".format(path), + ) + q( + "INSERT INTO test.graphite2 FORMAT TSV", + "{}\t0.0\t0\t2018-01-01\t101\n".format(path), + ) for version in range(10): - q('INSERT INTO test.graphite2 FORMAT TSV', - "{}\t0.0\t0\t2018-01-01\t{}\n".format(path, version)) + q( + "INSERT INTO test.graphite2 FORMAT TSV", + "{}\t0.0\t0\t2018-01-01\t{}\n".format(path, version), + ) while True: - q('OPTIMIZE TABLE test.graphite2 PARTITION 201801 FINAL') - parts = int(q("SELECT count() FROM system.parts " - "WHERE active AND database='test' " - "AND table='graphite2'")) + q("OPTIMIZE TABLE test.graphite2 PARTITION 201801 FINAL") + parts = int( + q( + "SELECT count() FROM system.parts " + "WHERE active AND database='test' " + "AND table='graphite2'" + ) + ) if parts == 1: break - print(('Parts', parts)) + print(("Parts", parts)) - assert TSV( - q("SELECT value, timestamp, date, updated FROM test.graphite2") - ) == TSV("0\t0\t2018-01-01\t101\n") + assert TSV(q("SELECT value, timestamp, date, updated FROM test.graphite2")) == TSV( + "0\t0\t2018-01-01\t101\n" + ) - q('DROP TABLE test.graphite2') + q("DROP TABLE test.graphite2") def test_combined_rules(graphite_table): # 1487970000 ~ Sat 25 Feb 00:00:00 MSK 2017 - to_insert = 'INSERT INTO test.graphite VALUES ' - expected_unmerged = '' + to_insert = "INSERT INTO test.graphite VALUES " + expected_unmerged = "" for i in range(384): to_insert += "('five_min.count', {v}, {t}, toDate({t}), 1), ".format( v=1, t=1487970000 + (i * 300) @@ -547,18 +609,20 @@ def test_combined_rules(graphite_table): to_insert += "('five_min.max', {v}, {t}, toDate({t}), 
1), ".format( v=i, t=1487970000 + (i * 300) ) - expected_unmerged += ("five_min.count\t{v1}\t{t}\n" - "five_min.max\t{v2}\t{t}\n").format( - v1=1, v2=i, - t=1487970000 + (i * 300) - ) + expected_unmerged += ( + "five_min.count\t{v1}\t{t}\n" "five_min.max\t{v2}\t{t}\n" + ).format(v1=1, v2=i, t=1487970000 + (i * 300)) q(to_insert) - assert TSV(q('SELECT metric, value, timestamp FROM test.graphite' - ' ORDER BY (timestamp, metric)')) == TSV(expected_unmerged) + assert TSV( + q( + "SELECT metric, value, timestamp FROM test.graphite" + " ORDER BY (timestamp, metric)" + ) + ) == TSV(expected_unmerged) - q('OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL') - expected_merged = ''' + q("OPTIMIZE TABLE test.graphite PARTITION 201702 FINAL") + expected_merged = """ five_min.count 48 1487970000 2017-02-25 1 five_min.count 48 1487984400 2017-02-25 1 five_min.count 48 1487998800 2017-02-25 1 @@ -575,6 +639,7 @@ def test_combined_rules(graphite_table): five_min.max 287 1488042000 2017-02-25 1 five_min.max 335 1488056400 2017-02-26 1 five_min.max 383 1488070800 2017-02-26 1 - ''' - assert TSV(q('SELECT * FROM test.graphite' - ' ORDER BY (metric, timestamp)')) == TSV(expected_merged) + """ + assert TSV(q("SELECT * FROM test.graphite" " ORDER BY (metric, timestamp)")) == TSV( + expected_merged + ) diff --git a/tests/integration/test_groupBitmapAnd_on_distributed/test.py b/tests/integration/test_groupBitmapAnd_on_distributed/test.py index b0fb55b13ff..4dbc81236e7 100644 --- a/tests/integration/test_groupBitmapAnd_on_distributed/test.py +++ b/tests/integration/test_groupBitmapAnd_on_distributed/test.py @@ -1,16 +1,35 @@ import pytest from helpers.cluster import ClickHouseCluster + cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=["configs/clusters.xml"], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=["configs/clusters.xml"], with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=["configs/clusters.xml"], with_zookeeper=True) -node4 = cluster.add_instance('node4', main_configs=["configs/clusters.xml"], image='yandex/clickhouse-server', tag='21.5', with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/clusters.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/clusters.xml"], with_zookeeper=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/clusters.xml"], with_zookeeper=True +) +node4 = cluster.add_instance( + "node4", + main_configs=["configs/clusters.xml"], + image="yandex/clickhouse-server", + tag="21.5", + with_zookeeper=True, +) + def insert_data(node, table_name): - node.query("""INSERT INTO {} - VALUES (bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));""".format(table_name)) + node.query( + """INSERT INTO {} + VALUES (bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));""".format( + table_name + ) + ) + @pytest.fixture(scope="module") def start_cluster(): @@ -27,25 +46,36 @@ def test_groupBitmapAnd_on_distributed_table(start_cluster): cluster_name = "awesome_cluster" for node in (node1, node2): - node.query("""CREATE TABLE {} + node.query( + """CREATE TABLE {} ( z AggregateFunction(groupBitmap, UInt32) ) ENGINE = MergeTree() - ORDER BY tuple()""".format(local_table_name)) + ORDER BY tuple()""".format( + local_table_name + ) + ) - node.query("""CREATE TABLE {} + node.query( + """CREATE TABLE {} ( z AggregateFunction(groupBitmap, UInt32) ) - ENGINE = Distributed('{}', 'default', 
'{}')""".format(distributed_table_name, cluster_name, local_table_name)) + ENGINE = Distributed('{}', 'default', '{}')""".format( + distributed_table_name, cluster_name, local_table_name + ) + ) insert_data(node1, local_table_name) expected = "10" for node in (node1, node2): - result = node.query("select groupBitmapAnd(z) FROM {};".format(distributed_table_name)).strip() - assert(result == expected) + result = node.query( + "select groupBitmapAnd(z) FROM {};".format(distributed_table_name) + ).strip() + assert result == expected + def test_groupBitmapAnd_function_versioning(start_cluster): local_table_name = "bitmap_column_expr_versioning_test" @@ -53,30 +83,54 @@ def test_groupBitmapAnd_function_versioning(start_cluster): cluster_name = "test_version_cluster" for node in (node3, node4): - node.query("""CREATE TABLE {} + node.query( + """CREATE TABLE {} ( z AggregateFunction(groupBitmap, UInt32) ) ENGINE = MergeTree() - ORDER BY tuple()""".format(local_table_name)) + ORDER BY tuple()""".format( + local_table_name + ) + ) - node.query("""CREATE TABLE {} + node.query( + """CREATE TABLE {} ( z AggregateFunction(groupBitmap, UInt32) ) - ENGINE = Distributed('{}', 'default', '{}')""".format(distributed_table_name, cluster_name, local_table_name)) + ENGINE = Distributed('{}', 'default', '{}')""".format( + distributed_table_name, cluster_name, local_table_name + ) + ) - node.query("""INSERT INTO {} VALUES - (bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));""".format(local_table_name)) + node.query( + """INSERT INTO {} VALUES + (bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));""".format( + local_table_name + ) + ) expected = "10" - new_version_distributed_result = node3.query("select groupBitmapAnd(z) FROM {};".format(distributed_table_name)).strip() - old_version_distributed_result = node4.query("select groupBitmapAnd(z) FROM {};".format(distributed_table_name)).strip() - assert(new_version_distributed_result == expected) - assert(old_version_distributed_result == expected) + new_version_distributed_result = node3.query( + "select groupBitmapAnd(z) FROM {};".format(distributed_table_name) + ).strip() + old_version_distributed_result = node4.query( + "select groupBitmapAnd(z) FROM {};".format(distributed_table_name) + ).strip() + assert new_version_distributed_result == expected + assert old_version_distributed_result == expected - result_from_old_to_new_version = node3.query("select groupBitmapAnd(z) FROM remote('node4', default.{})".format(local_table_name)).strip() - assert(result_from_old_to_new_version == expected) + result_from_old_to_new_version = node3.query( + "select groupBitmapAnd(z) FROM remote('node4', default.{})".format( + local_table_name + ) + ).strip() + assert result_from_old_to_new_version == expected - result_from_new_to_old_version = node4.query("select groupBitmapAnd(z) FROM remote('node3', default.{})".format(local_table_name)).strip() - assert(result_from_new_to_old_version == expected) + result_from_new_to_old_version = node4.query( + "select groupBitmapAnd(z) FROM remote('node3', default.{})".format( + local_table_name + ) + ).strip() + assert result_from_new_to_old_version == expected diff --git a/tests/integration/test_grpc_protocol/test.py b/tests/integration/test_grpc_protocol/test.py index bd9a0cbe438..109561dce1f 100644 --- a/tests/integration/test_grpc_protocol/test.py +++ b/tests/integration/test_grpc_protocol/test.py @@ -12,17 +12,21 @@ import lz4.frame GRPC_PORT = 9100 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) 
-DEFAULT_ENCODING = 'utf-8' +DEFAULT_ENCODING = "utf-8" # Use grpcio-tools to generate *pb2.py files from *.proto. -proto_dir = os.path.join(SCRIPT_DIR, './protos') -gen_dir = os.path.join(SCRIPT_DIR, './_gen') +proto_dir = os.path.join(SCRIPT_DIR, "./protos") +gen_dir = os.path.join(SCRIPT_DIR, "./_gen") os.makedirs(gen_dir, exist_ok=True) run_and_check( - 'python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} \ - {proto_dir}/clickhouse_grpc.proto'.format(proto_dir=proto_dir, gen_dir=gen_dir), shell=True) + "python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} \ + {proto_dir}/clickhouse_grpc.proto".format( + proto_dir=proto_dir, gen_dir=gen_dir + ), + shell=True, +) sys.path.append(gen_dir) import clickhouse_grpc_pb2 @@ -31,13 +35,14 @@ import clickhouse_grpc_pb2_grpc # Utilities -config_dir = os.path.join(SCRIPT_DIR, './configs') +config_dir = os.path.join(SCRIPT_DIR, "./configs") cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/grpc_config.xml']) +node = cluster.add_instance("node", main_configs=["configs/grpc_config.xml"]) main_channel = None + def create_channel(): - node_ip_with_grpc_port = cluster.get_instance_ip('node') + ':' + str(GRPC_PORT) + node_ip_with_grpc_port = cluster.get_instance_ip("node") + ":" + str(GRPC_PORT) channel = grpc.insecure_channel(node_ip_with_grpc_port) grpc.channel_ready_future(channel).result(timeout=10) global main_channel @@ -45,31 +50,59 @@ def create_channel(): main_channel = channel return channel -def query_common(query_text, settings={}, input_data=[], input_data_delimiter='', output_format='TabSeparated', send_output_columns=False, - external_tables=[], user_name='', password='', query_id='123', session_id='', stream_output=False, channel=None): + +def query_common( + query_text, + settings={}, + input_data=[], + input_data_delimiter="", + output_format="TabSeparated", + send_output_columns=False, + external_tables=[], + user_name="", + password="", + query_id="123", + session_id="", + stream_output=False, + channel=None, +): if type(input_data) is not list: input_data = [input_data] if type(input_data_delimiter) is str: - input_data_delimiter=input_data_delimiter.encode(DEFAULT_ENCODING) + input_data_delimiter = input_data_delimiter.encode(DEFAULT_ENCODING) if not channel: channel = main_channel stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(channel) + def query_info(): - input_data_part = input_data.pop(0) if input_data else b'' + input_data_part = input_data.pop(0) if input_data else b"" if type(input_data_part) is str: input_data_part = input_data_part.encode(DEFAULT_ENCODING) - return clickhouse_grpc_pb2.QueryInfo(query=query_text, settings=settings, input_data=input_data_part, - input_data_delimiter=input_data_delimiter, output_format=output_format, - send_output_columns=send_output_columns, external_tables=external_tables, - user_name=user_name, password=password, query_id=query_id, - session_id=session_id, next_query_info=bool(input_data)) + return clickhouse_grpc_pb2.QueryInfo( + query=query_text, + settings=settings, + input_data=input_data_part, + input_data_delimiter=input_data_delimiter, + output_format=output_format, + send_output_columns=send_output_columns, + external_tables=external_tables, + user_name=user_name, + password=password, + query_id=query_id, + session_id=session_id, + next_query_info=bool(input_data), + ) + def send_query_info(): yield query_info() while input_data: input_data_part = 
input_data.pop(0) if type(input_data_part) is str: input_data_part = input_data_part.encode(DEFAULT_ENCODING) - yield clickhouse_grpc_pb2.QueryInfo(input_data=input_data_part, next_query_info=bool(input_data)) + yield clickhouse_grpc_pb2.QueryInfo( + input_data=input_data_part, next_query_info=bool(input_data) + ) + stream_input = len(input_data) > 1 if stream_input and stream_output: return list(stub.ExecuteQueryWithStreamIO(send_query_info())) @@ -80,58 +113,73 @@ def query_common(query_text, settings={}, input_data=[], input_data_delimiter='' else: return [stub.ExecuteQuery(query_info())] + def query_no_errors(*args, **kwargs): results = query_common(*args, **kwargs) - if results and results[-1].HasField('exception'): + if results and results[-1].HasField("exception"): raise Exception(results[-1].exception.display_text) return results + def query(*args, **kwargs): - output = b'' + output = b"" for result in query_no_errors(*args, **kwargs): output += result.output return output.decode(DEFAULT_ENCODING) + def query_and_get_error(*args, **kwargs): results = query_common(*args, **kwargs) - if not results or not results[-1].HasField('exception'): + if not results or not results[-1].HasField("exception"): raise Exception("Expected to be failed but succeeded!") return results[-1].exception + def query_and_get_totals(*args, **kwargs): - totals = b'' + totals = b"" for result in query_no_errors(*args, **kwargs): totals += result.totals return totals.decode(DEFAULT_ENCODING) + def query_and_get_extremes(*args, **kwargs): - extremes = b'' + extremes = b"" for result in query_no_errors(*args, **kwargs): extremes += result.extremes return extremes.decode(DEFAULT_ENCODING) + def query_and_get_logs(*args, **kwargs): logs = "" for result in query_no_errors(*args, **kwargs): for log_entry in result.logs: - #print(log_entry) + # print(log_entry) logs += log_entry.text + "\n" return logs + class QueryThread(Thread): - def __init__(self, query_text, expected_output, query_id, use_separate_channel=False): + def __init__( + self, query_text, expected_output, query_id, use_separate_channel=False + ): Thread.__init__(self) self.query_text = query_text self.expected_output = expected_output self.use_separate_channel = use_separate_channel self.query_id = query_id - + def run(self): if self.use_separate_channel: with create_channel() as channel: - assert query(self.query_text, query_id=self.query_id, channel=channel) == self.expected_output + assert ( + query(self.query_text, query_id=self.query_id, channel=channel) + == self.expected_output + ) else: - assert query(self.query_text, query_id=self.query_id) == self.expected_output + assert ( + query(self.query_text, query_id=self.query_id) == self.expected_output + ) + @pytest.fixture(scope="module", autouse=True) def start_cluster(): @@ -139,10 +187,11 @@ def start_cluster(): try: with create_channel() as channel: yield cluster - + finally: cluster.shutdown() + @pytest.fixture(autouse=True) def reset_after_test(): yield @@ -151,12 +200,15 @@ def reset_after_test(): # Actual tests + def test_select_one(): assert query("SELECT 1") == "1\n" + def test_ordinary_query(): assert query("SELECT count() FROM numbers(100)") == "100\n" + def test_insert_query(): query("CREATE TABLE t (a UInt8) ENGINE = Memory") query("INSERT INTO t VALUES (1),(2),(3)") @@ -165,97 +217,152 @@ def test_insert_query(): query("INSERT INTO t FORMAT TabSeparated", input_data="9\n10\n") assert query("SELECT a FROM t ORDER BY a") == "1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n" + def 
test_insert_query_streaming(): query("CREATE TABLE t (a UInt8) ENGINE = Memory") - query("INSERT INTO t VALUES", input_data=["(1),(2),(3),", "(5),(4),(6),", "(7),(8),(9)"]) + query( + "INSERT INTO t VALUES", + input_data=["(1),(2),(3),", "(5),(4),(6),", "(7),(8),(9)"], + ) assert query("SELECT a FROM t ORDER BY a") == "1\n2\n3\n4\n5\n6\n7\n8\n9\n" + def test_insert_query_delimiter(): query("CREATE TABLE t (a UInt8) ENGINE = Memory") - query("INSERT INTO t FORMAT CSV 1\n2", input_data=["3", "4\n5"], input_data_delimiter='\n') + query( + "INSERT INTO t FORMAT CSV 1\n2", + input_data=["3", "4\n5"], + input_data_delimiter="\n", + ) assert query("SELECT a FROM t ORDER BY a") == "1\n2\n3\n4\n5\n" query("DROP TABLE t") query("CREATE TABLE t (a UInt8) ENGINE = Memory") query("INSERT INTO t FORMAT CSV 1\n2", input_data=["3", "4\n5"]) assert query("SELECT a FROM t ORDER BY a") == "1\n5\n234\n" + def test_insert_default_column(): - query("CREATE TABLE t (a UInt8, b Int32 DEFAULT 100 - a, c String DEFAULT 'c') ENGINE = Memory") + query( + "CREATE TABLE t (a UInt8, b Int32 DEFAULT 100 - a, c String DEFAULT 'c') ENGINE = Memory" + ) query("INSERT INTO t (c, a) VALUES ('x',1),('y',2)") query("INSERT INTO t (a) FORMAT TabSeparated", input_data="3\n4\n") - assert query("SELECT * FROM t ORDER BY a") == "1\t99\tx\n" \ - "2\t98\ty\n" \ - "3\t97\tc\n" \ - "4\t96\tc\n" + assert ( + query("SELECT * FROM t ORDER BY a") == "1\t99\tx\n" + "2\t98\ty\n" + "3\t97\tc\n" + "4\t96\tc\n" + ) + def test_insert_splitted_row(): query("CREATE TABLE t (a UInt8) ENGINE = Memory") query("INSERT INTO t VALUES", input_data=["(1),(2),(", "3),(5),(4),(6)"]) assert query("SELECT a FROM t ORDER BY a") == "1\n2\n3\n4\n5\n6\n" + def test_output_format(): query("CREATE TABLE t (a UInt8) ENGINE = Memory") query("INSERT INTO t VALUES (1),(2),(3)") - assert query("SELECT a FROM t ORDER BY a FORMAT JSONEachRow") == '{"a":1}\n{"a":2}\n{"a":3}\n' - assert query("SELECT a FROM t ORDER BY a", output_format="JSONEachRow") == '{"a":1}\n{"a":2}\n{"a":3}\n' + assert ( + query("SELECT a FROM t ORDER BY a FORMAT JSONEachRow") + == '{"a":1}\n{"a":2}\n{"a":3}\n' + ) + assert ( + query("SELECT a FROM t ORDER BY a", output_format="JSONEachRow") + == '{"a":1}\n{"a":2}\n{"a":3}\n' + ) + def test_totals_and_extremes(): query("CREATE TABLE t (x UInt8, y UInt8) ENGINE = Memory") query("INSERT INTO t VALUES (1, 2), (2, 4), (3, 2), (3, 3), (3, 4)") - assert query("SELECT sum(x), y FROM t GROUP BY y WITH TOTALS") == "4\t2\n3\t3\n5\t4\n" - assert query_and_get_totals("SELECT sum(x), y FROM t GROUP BY y WITH TOTALS") == "12\t0\n" + assert ( + query("SELECT sum(x), y FROM t GROUP BY y WITH TOTALS") == "4\t2\n3\t3\n5\t4\n" + ) + assert ( + query_and_get_totals("SELECT sum(x), y FROM t GROUP BY y WITH TOTALS") + == "12\t0\n" + ) assert query("SELECT x, y FROM t") == "1\t2\n2\t4\n3\t2\n3\t3\n3\t4\n" - assert query_and_get_extremes("SELECT x, y FROM t", settings={"extremes": "1"}) == "1\t2\n3\t4\n" + assert ( + query_and_get_extremes("SELECT x, y FROM t", settings={"extremes": "1"}) + == "1\t2\n3\t4\n" + ) + def test_get_query_details(): - result = list(query_no_errors("CREATE TABLE t (a UInt8) ENGINE = Memory", query_id = '123'))[0] - assert result.query_id == '123' + result = list( + query_no_errors("CREATE TABLE t (a UInt8) ENGINE = Memory", query_id="123") + )[0] + assert result.query_id == "123" pytz.timezone(result.time_zone) - assert result.output_format == '' + assert result.output_format == "" assert len(result.output_columns) == 0 - assert result.output == b'' 
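# Editor's note (not part of the patch): the expected "1\n5\n234\n" in
# test_insert_query_delimiter above is not a typo -- when no
# input_data_delimiter is given, the query tail after "FORMAT CSV" and the
# streamed input_data parts are concatenated verbatim:
assumed_chunks = ["1\n2", "3", "4\n5"]         # query tail + the two input_data parts
assert "".join(assumed_chunks) == "1\n234\n5"  # rows 1, 234, 5 -> "1\n5\n234\n" after ORDER BY a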
+ assert result.output == b"" # - result = list(query_no_errors("SELECT 'a', 1", query_id = '', output_format = 'TabSeparated'))[0] + result = list( + query_no_errors("SELECT 'a', 1", query_id="", output_format="TabSeparated") + )[0] uuid.UUID(result.query_id) pytz.timezone(result.time_zone) - assert result.output_format == 'TabSeparated' + assert result.output_format == "TabSeparated" assert len(result.output_columns) == 0 - assert result.output == b'a\t1\n' + assert result.output == b"a\t1\n" # - result = list(query_no_errors("SELECT 'a' AS x, 1 FORMAT JSONEachRow", query_id = '', send_output_columns=True))[0] + result = list( + query_no_errors( + "SELECT 'a' AS x, 1 FORMAT JSONEachRow", + query_id="", + send_output_columns=True, + ) + )[0] uuid.UUID(result.query_id) pytz.timezone(result.time_zone) - assert result.output_format == 'JSONEachRow' - assert ([(col.name, col.type) for col in result.output_columns]) == [('x', 'String'), ('1', 'UInt8')] + assert result.output_format == "JSONEachRow" + assert ([(col.name, col.type) for col in result.output_columns]) == [ + ("x", "String"), + ("1", "UInt8"), + ] assert result.output == b'{"x":"a","1":1}\n' + def test_errors_handling(): e = query_and_get_error("") - #print(e) + # print(e) assert "Empty query" in e.display_text query("CREATE TABLE t (a UInt8) ENGINE = Memory") e = query_and_get_error("CREATE TABLE t (a UInt8) ENGINE = Memory") assert "Table default.t already exists" in e.display_text + def test_authentication(): query("CREATE USER OR REPLACE john IDENTIFIED BY 'qwe123'") - assert query("SELECT currentUser()", user_name="john", password="qwe123") == "john\n" + assert ( + query("SELECT currentUser()", user_name="john", password="qwe123") == "john\n" + ) query("DROP USER john") + def test_logs(): - logs = query_and_get_logs("SELECT 1", settings={'send_logs_level':'debug'}) + logs = query_and_get_logs("SELECT 1", settings={"send_logs_level": "debug"}) assert "SELECT 1" in logs assert "Read 1 rows" in logs assert "Peak memory usage" in logs + def test_progress(): - results = query_no_errors("SELECT number, sleep(0.31) FROM numbers(8) SETTINGS max_block_size=2, interactive_delay=100000", stream_output=True) + results = query_no_errors( + "SELECT number, sleep(0.31) FROM numbers(8) SETTINGS max_block_size=2, interactive_delay=100000", + stream_output=True, + ) for result in results: - result.time_zone = '' - result.query_id = '' - #print(results) - assert str(results) ==\ -"""[progress { + result.time_zone = "" + result.query_id = "" + # print(results) + assert ( + str(results) + == """[progress { read_rows: 2 read_bytes: 16 total_rows_to_read: 8 @@ -285,6 +392,8 @@ output_format: "TabSeparated" rows_before_limit: 8 } ]""" + ) + def test_session_settings(): session_a = "session A" @@ -293,8 +402,21 @@ def test_session_settings(): query("SET custom_y=2", session_id=session_a) query("SET custom_x=3", session_id=session_b) query("SET custom_y=4", session_id=session_b) - assert query("SELECT getSetting('custom_x'), getSetting('custom_y')", session_id=session_a) == "1\t2\n" - assert query("SELECT getSetting('custom_x'), getSetting('custom_y')", session_id=session_b) == "3\t4\n" + assert ( + query( + "SELECT getSetting('custom_x'), getSetting('custom_y')", + session_id=session_a, + ) + == "1\t2\n" + ) + assert ( + query( + "SELECT getSetting('custom_x'), getSetting('custom_y')", + session_id=session_b, + ) + == "3\t4\n" + ) + def test_session_temp_tables(): session_a = "session A" @@ -307,182 +429,322 @@ def test_session_temp_tables(): assert 
query("SELECT * FROM my_temp_table", session_id=session_b) == "20\n" assert query("SELECT * FROM my_temp_table", session_id=session_a) == "10\n" + def test_no_session(): e = query_and_get_error("SET custom_x=1") assert "There is no session" in e.display_text e = query_and_get_error("CREATE TEMPORARY TABLE my_temp_table(a Int8)") assert "There is no session" in e.display_text + def test_input_function(): query("CREATE TABLE t (a UInt8) ENGINE = Memory") - query("INSERT INTO t SELECT col1 * col2 FROM input('col1 UInt8, col2 UInt8') FORMAT CSV", input_data=["5,4\n", "8,11\n", "10,12\n"]) + query( + "INSERT INTO t SELECT col1 * col2 FROM input('col1 UInt8, col2 UInt8') FORMAT CSV", + input_data=["5,4\n", "8,11\n", "10,12\n"], + ) assert query("SELECT a FROM t ORDER BY a") == "20\n88\n120\n" - query("INSERT INTO t SELECT col1 * col2 FROM input('col1 UInt8, col2 UInt8') FORMAT CSV 11,13") + query( + "INSERT INTO t SELECT col1 * col2 FROM input('col1 UInt8, col2 UInt8') FORMAT CSV 11,13" + ) assert query("SELECT a FROM t ORDER BY a") == "20\n88\n120\n143\n" - query("INSERT INTO t SELECT col1 * col2 FROM input('col1 UInt8, col2 UInt8') FORMAT CSV 20,10\n", input_data="15,15\n") + query( + "INSERT INTO t SELECT col1 * col2 FROM input('col1 UInt8, col2 UInt8') FORMAT CSV 20,10\n", + input_data="15,15\n", + ) assert query("SELECT a FROM t ORDER BY a") == "20\n88\n120\n143\n200\n225\n" + def test_external_table(): - columns = [clickhouse_grpc_pb2.NameAndType(name='UserID', type='UInt64'), clickhouse_grpc_pb2.NameAndType(name='UserName', type='String')] - ext1 = clickhouse_grpc_pb2.ExternalTable(name='ext1', columns=columns, data=b'1\tAlex\n2\tBen\n3\tCarl\n', format='TabSeparated') - assert query("SELECT * FROM ext1 ORDER BY UserID", external_tables=[ext1]) == "1\tAlex\n"\ - "2\tBen\n"\ - "3\tCarl\n" - ext2 = clickhouse_grpc_pb2.ExternalTable(name='ext2', columns=columns, data=b'4,Daniel\n5,Ethan\n', format='CSV') - assert query("SELECT * FROM (SELECT * FROM ext1 UNION ALL SELECT * FROM ext2) ORDER BY UserID", external_tables=[ext1, ext2]) == "1\tAlex\n"\ - "2\tBen\n"\ - "3\tCarl\n"\ - "4\tDaniel\n"\ - "5\tEthan\n" - unnamed_columns = [clickhouse_grpc_pb2.NameAndType(type='UInt64'), clickhouse_grpc_pb2.NameAndType(type='String')] - unnamed_table = clickhouse_grpc_pb2.ExternalTable(columns=unnamed_columns, data=b'6\tGeorge\n7\tFred\n') - assert query("SELECT * FROM _data ORDER BY _2", external_tables=[unnamed_table]) == "7\tFred\n"\ - "6\tGeorge\n" + columns = [ + clickhouse_grpc_pb2.NameAndType(name="UserID", type="UInt64"), + clickhouse_grpc_pb2.NameAndType(name="UserName", type="String"), + ] + ext1 = clickhouse_grpc_pb2.ExternalTable( + name="ext1", + columns=columns, + data=b"1\tAlex\n2\tBen\n3\tCarl\n", + format="TabSeparated", + ) + assert ( + query("SELECT * FROM ext1 ORDER BY UserID", external_tables=[ext1]) + == "1\tAlex\n" + "2\tBen\n" + "3\tCarl\n" + ) + ext2 = clickhouse_grpc_pb2.ExternalTable( + name="ext2", columns=columns, data=b"4,Daniel\n5,Ethan\n", format="CSV" + ) + assert ( + query( + "SELECT * FROM (SELECT * FROM ext1 UNION ALL SELECT * FROM ext2) ORDER BY UserID", + external_tables=[ext1, ext2], + ) + == "1\tAlex\n" + "2\tBen\n" + "3\tCarl\n" + "4\tDaniel\n" + "5\tEthan\n" + ) + unnamed_columns = [ + clickhouse_grpc_pb2.NameAndType(type="UInt64"), + clickhouse_grpc_pb2.NameAndType(type="String"), + ] + unnamed_table = clickhouse_grpc_pb2.ExternalTable( + columns=unnamed_columns, data=b"6\tGeorge\n7\tFred\n" + ) + assert ( + query("SELECT * FROM _data ORDER BY _2", 
external_tables=[unnamed_table]) + == "7\tFred\n" + "6\tGeorge\n" + ) + def test_external_table_streaming(): - columns = [clickhouse_grpc_pb2.NameAndType(name='UserID', type='UInt64'), clickhouse_grpc_pb2.NameAndType(name='UserName', type='String')] + columns = [ + clickhouse_grpc_pb2.NameAndType(name="UserID", type="UInt64"), + clickhouse_grpc_pb2.NameAndType(name="UserName", type="String"), + ] + def send_query_info(): - yield clickhouse_grpc_pb2.QueryInfo(query="SELECT * FROM exts ORDER BY UserID", - external_tables=[clickhouse_grpc_pb2.ExternalTable(name='exts', columns=columns, data=b'1\tAlex\n2\tBen\n3\tCarl\n')], - next_query_info=True) - yield clickhouse_grpc_pb2.QueryInfo(external_tables=[clickhouse_grpc_pb2.ExternalTable(name='exts', data=b'4\tDaniel\n5\tEthan\n')]) + yield clickhouse_grpc_pb2.QueryInfo( + query="SELECT * FROM exts ORDER BY UserID", + external_tables=[ + clickhouse_grpc_pb2.ExternalTable( + name="exts", columns=columns, data=b"1\tAlex\n2\tBen\n3\tCarl\n" + ) + ], + next_query_info=True, + ) + yield clickhouse_grpc_pb2.QueryInfo( + external_tables=[ + clickhouse_grpc_pb2.ExternalTable( + name="exts", data=b"4\tDaniel\n5\tEthan\n" + ) + ] + ) + stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel) result = stub.ExecuteQueryWithStreamInput(send_query_info()) - assert result.output == b'1\tAlex\n'\ - b'2\tBen\n'\ - b'3\tCarl\n'\ - b'4\tDaniel\n'\ - b'5\tEthan\n' + assert ( + result.output == b"1\tAlex\n" + b"2\tBen\n" + b"3\tCarl\n" + b"4\tDaniel\n" + b"5\tEthan\n" + ) + def test_simultaneous_queries_same_channel(): - threads=[] + threads = [] try: for i in range(0, 100): - thread = QueryThread("SELECT sum(number) FROM numbers(10)", expected_output="45\n", query_id='sqA'+str(i)) + thread = QueryThread( + "SELECT sum(number) FROM numbers(10)", + expected_output="45\n", + query_id="sqA" + str(i), + ) threads.append(thread) thread.start() finally: for thread in threads: thread.join() + def test_simultaneous_queries_multiple_channels(): - threads=[] + threads = [] try: for i in range(0, 100): - thread = QueryThread("SELECT sum(number) FROM numbers(10)", expected_output="45\n", query_id='sqB'+str(i), use_separate_channel=True) + thread = QueryThread( + "SELECT sum(number) FROM numbers(10)", + expected_output="45\n", + query_id="sqB" + str(i), + use_separate_channel=True, + ) threads.append(thread) thread.start() finally: for thread in threads: thread.join() + def test_cancel_while_processing_input(): query("CREATE TABLE t (a UInt8) ENGINE = Memory") + def send_query_info(): - yield clickhouse_grpc_pb2.QueryInfo(query="INSERT INTO t FORMAT TabSeparated", input_data=b'1\n2\n3\n', next_query_info=True) - yield clickhouse_grpc_pb2.QueryInfo(input_data=b'4\n5\n6\n', next_query_info=True) + yield clickhouse_grpc_pb2.QueryInfo( + query="INSERT INTO t FORMAT TabSeparated", + input_data=b"1\n2\n3\n", + next_query_info=True, + ) + yield clickhouse_grpc_pb2.QueryInfo( + input_data=b"4\n5\n6\n", next_query_info=True + ) yield clickhouse_grpc_pb2.QueryInfo(cancel=True) + stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel) result = stub.ExecuteQueryWithStreamInput(send_query_info()) assert result.cancelled == True assert result.progress.written_rows == 6 assert query("SELECT a FROM t ORDER BY a") == "1\n2\n3\n4\n5\n6\n" + def test_cancel_while_generating_output(): def send_query_info(): - yield clickhouse_grpc_pb2.QueryInfo(query="SELECT number, sleep(0.2) FROM numbers(10) SETTINGS max_block_size=2") + yield clickhouse_grpc_pb2.QueryInfo( + query="SELECT number, 
sleep(0.2) FROM numbers(10) SETTINGS max_block_size=2" + ) time.sleep(0.5) yield clickhouse_grpc_pb2.QueryInfo(cancel=True) + stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel) results = list(stub.ExecuteQueryWithStreamIO(send_query_info())) assert len(results) >= 1 assert results[-1].cancelled == True - output = b'' + output = b"" for result in results: output += result.output - assert output == b'0\t0\n1\t0\n2\t0\n3\t0\n' + assert output == b"0\t0\n1\t0\n2\t0\n3\t0\n" + def test_compressed_output(): - query_info = clickhouse_grpc_pb2.QueryInfo(query="SELECT 0 FROM numbers(1000)", output_compression_type="lz4") + query_info = clickhouse_grpc_pb2.QueryInfo( + query="SELECT 0 FROM numbers(1000)", output_compression_type="lz4" + ) stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel) result = stub.ExecuteQuery(query_info) - assert lz4.frame.decompress(result.output) == (b'0\n')*1000 + assert lz4.frame.decompress(result.output) == (b"0\n") * 1000 + def test_compressed_output_streaming(): - query_info = clickhouse_grpc_pb2.QueryInfo(query="SELECT 0 FROM numbers(100000)", output_compression_type="lz4") + query_info = clickhouse_grpc_pb2.QueryInfo( + query="SELECT 0 FROM numbers(100000)", output_compression_type="lz4" + ) stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel) d_context = lz4.frame.create_decompression_context() - data = b'' + data = b"" for result in stub.ExecuteQueryWithStreamOutput(query_info): d1, _, _ = lz4.frame.decompress_chunk(d_context, result.output) data += d1 - assert data == (b'0\n')*100000 + assert data == (b"0\n") * 100000 + def test_compressed_output_gzip(): - query_info = clickhouse_grpc_pb2.QueryInfo(query="SELECT 0 FROM numbers(1000)", output_compression_type="gzip", output_compression_level=6) + query_info = clickhouse_grpc_pb2.QueryInfo( + query="SELECT 0 FROM numbers(1000)", + output_compression_type="gzip", + output_compression_level=6, + ) stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel) result = stub.ExecuteQuery(query_info) - assert gzip.decompress(result.output) == (b'0\n')*1000 + assert gzip.decompress(result.output) == (b"0\n") * 1000 + def test_compressed_totals_and_extremes(): query("CREATE TABLE t (x UInt8, y UInt8) ENGINE = Memory") query("INSERT INTO t VALUES (1, 2), (2, 4), (3, 2), (3, 3), (3, 4)") stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel) - query_info = clickhouse_grpc_pb2.QueryInfo(query="SELECT sum(x), y FROM t GROUP BY y WITH TOTALS", output_compression_type="lz4") + query_info = clickhouse_grpc_pb2.QueryInfo( + query="SELECT sum(x), y FROM t GROUP BY y WITH TOTALS", + output_compression_type="lz4", + ) result = stub.ExecuteQuery(query_info) - assert lz4.frame.decompress(result.totals) == b'12\t0\n' - query_info = clickhouse_grpc_pb2.QueryInfo(query="SELECT x, y FROM t", settings={"extremes": "1"}, output_compression_type="lz4") + assert lz4.frame.decompress(result.totals) == b"12\t0\n" + query_info = clickhouse_grpc_pb2.QueryInfo( + query="SELECT x, y FROM t", + settings={"extremes": "1"}, + output_compression_type="lz4", + ) result = stub.ExecuteQuery(query_info) - assert lz4.frame.decompress(result.extremes) == b'1\t2\n3\t4\n' + assert lz4.frame.decompress(result.extremes) == b"1\t2\n3\t4\n" + def test_compressed_insert_query_streaming(): query("CREATE TABLE t (a UInt8) ENGINE = Memory") - data = lz4.frame.compress(b'(1),(2),(3),(5),(4),(6),(7),(8),(9)') + data = lz4.frame.compress(b"(1),(2),(3),(5),(4),(6),(7),(8),(9)") sz1 = len(data) // 3 sz2 = len(data) // 3 d1 = data[:sz1] - d2 = 
data[sz1:sz1+sz2] - d3 = data[sz1+sz2:] + d2 = data[sz1 : sz1 + sz2] + d3 = data[sz1 + sz2 :] + def send_query_info(): - yield clickhouse_grpc_pb2.QueryInfo(query="INSERT INTO t VALUES", input_data=d1, input_compression_type="lz4", next_query_info=True) + yield clickhouse_grpc_pb2.QueryInfo( + query="INSERT INTO t VALUES", + input_data=d1, + input_compression_type="lz4", + next_query_info=True, + ) yield clickhouse_grpc_pb2.QueryInfo(input_data=d2, next_query_info=True) yield clickhouse_grpc_pb2.QueryInfo(input_data=d3) + stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel) stub.ExecuteQueryWithStreamInput(send_query_info()) assert query("SELECT a FROM t ORDER BY a") == "1\n2\n3\n4\n5\n6\n7\n8\n9\n" + def test_compressed_external_table(): - columns = [clickhouse_grpc_pb2.NameAndType(name='UserID', type='UInt64'), clickhouse_grpc_pb2.NameAndType(name='UserName', type='String')] - d1 = lz4.frame.compress(b'1\tAlex\n2\tBen\n3\tCarl\n') - d2 = gzip.compress(b'4,Daniel\n5,Ethan\n') - ext1 = clickhouse_grpc_pb2.ExternalTable(name='ext1', columns=columns, data=d1, format='TabSeparated', compression_type="lz4") - ext2 = clickhouse_grpc_pb2.ExternalTable(name='ext2', columns=columns, data=d2, format='CSV', compression_type="gzip") + columns = [ + clickhouse_grpc_pb2.NameAndType(name="UserID", type="UInt64"), + clickhouse_grpc_pb2.NameAndType(name="UserName", type="String"), + ] + d1 = lz4.frame.compress(b"1\tAlex\n2\tBen\n3\tCarl\n") + d2 = gzip.compress(b"4,Daniel\n5,Ethan\n") + ext1 = clickhouse_grpc_pb2.ExternalTable( + name="ext1", + columns=columns, + data=d1, + format="TabSeparated", + compression_type="lz4", + ) + ext2 = clickhouse_grpc_pb2.ExternalTable( + name="ext2", columns=columns, data=d2, format="CSV", compression_type="gzip" + ) stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel) - query_info = clickhouse_grpc_pb2.QueryInfo(query="SELECT * FROM (SELECT * FROM ext1 UNION ALL SELECT * FROM ext2) ORDER BY UserID", external_tables=[ext1, ext2]) + query_info = clickhouse_grpc_pb2.QueryInfo( + query="SELECT * FROM (SELECT * FROM ext1 UNION ALL SELECT * FROM ext2) ORDER BY UserID", + external_tables=[ext1, ext2], + ) result = stub.ExecuteQuery(query_info) - assert result.output == b"1\tAlex\n"\ - b"2\tBen\n"\ - b"3\tCarl\n"\ - b"4\tDaniel\n"\ - b"5\tEthan\n" + assert ( + result.output == b"1\tAlex\n" + b"2\tBen\n" + b"3\tCarl\n" + b"4\tDaniel\n" + b"5\tEthan\n" + ) + def test_transport_compression(): - query_info = clickhouse_grpc_pb2.QueryInfo(query="SELECT 0 FROM numbers(1000000)", transport_compression_type='gzip', transport_compression_level=3) + query_info = clickhouse_grpc_pb2.QueryInfo( + query="SELECT 0 FROM numbers(1000000)", + transport_compression_type="gzip", + transport_compression_level=3, + ) stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel) result = stub.ExecuteQuery(query_info) - assert result.output == (b'0\n')*1000000 + assert result.output == (b"0\n") * 1000000 + def test_opentelemetry_context_propagation(): trace_id = "80c190b5-9dc1-4eae-82b9-6c261438c817" parent_span_id = 123 trace_state = "some custom state" trace_id_hex = trace_id.replace("-", "") - parent_span_id_hex = f'{parent_span_id:0>16X}' - metadata = [("traceparent", f"00-{trace_id_hex}-{parent_span_id_hex}-01"), ("tracestate", trace_state)] + parent_span_id_hex = f"{parent_span_id:0>16X}" + metadata = [ + ("traceparent", f"00-{trace_id_hex}-{parent_span_id_hex}-01"), + ("tracestate", trace_state), + ] stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(main_channel) query_info = 
clickhouse_grpc_pb2.QueryInfo(query="SELECT 1") result = stub.ExecuteQuery(query_info, metadata=metadata) assert result.output == b"1\n" node.query("SYSTEM FLUSH LOGS") - assert node.query(f"SELECT attribute['db.statement'], attribute['clickhouse.tracestate'] FROM system.opentelemetry_span_log " - f"WHERE trace_id='{trace_id}' AND parent_span_id={parent_span_id}") == "SELECT 1\tsome custom state\n" + assert ( + node.query( + f"SELECT attribute['db.statement'], attribute['clickhouse.tracestate'] FROM system.opentelemetry_span_log " + f"WHERE trace_id='{trace_id}' AND parent_span_id={parent_span_id}" + ) + == "SELECT 1\tsome custom state\n" + ) diff --git a/tests/integration/test_grpc_protocol_ssl/test.py b/tests/integration/test_grpc_protocol_ssl/test.py index 1f21fbe5f8a..60c3ccd7a9d 100644 --- a/tests/integration/test_grpc_protocol_ssl/test.py +++ b/tests/integration/test_grpc_protocol_ssl/test.py @@ -5,19 +5,23 @@ import grpc from helpers.cluster import ClickHouseCluster, run_and_check GRPC_PORT = 9100 -NODE_IP = '10.5.172.77' # It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf). +NODE_IP = "10.5.172.77" # It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf). SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -DEFAULT_ENCODING = 'utf-8' +DEFAULT_ENCODING = "utf-8" # Use grpcio-tools to generate *pb2.py files from *.proto. -proto_dir = os.path.join(SCRIPT_DIR, './protos') -gen_dir = os.path.join(SCRIPT_DIR, './_gen') +proto_dir = os.path.join(SCRIPT_DIR, "./protos") +gen_dir = os.path.join(SCRIPT_DIR, "./_gen") os.makedirs(gen_dir, exist_ok=True) run_and_check( - 'python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} \ - {proto_dir}/clickhouse_grpc.proto'.format(proto_dir=proto_dir, gen_dir=gen_dir), shell=True) + "python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} \ + {proto_dir}/clickhouse_grpc.proto".format( + proto_dir=proto_dir, gen_dir=gen_dir + ), + shell=True, +) sys.path.append(gen_dir) import clickhouse_grpc_pb2 @@ -26,62 +30,80 @@ import clickhouse_grpc_pb2_grpc # Utilities -node_ip_with_grpc_port = NODE_IP + ':' + str(GRPC_PORT) -config_dir = os.path.join(SCRIPT_DIR, './configs') +node_ip_with_grpc_port = NODE_IP + ":" + str(GRPC_PORT) +config_dir = os.path.join(SCRIPT_DIR, "./configs") cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', ipv4_address=NODE_IP, main_configs=['configs/grpc_config.xml', 'configs/server-key.pem', 'configs/server-cert.pem', 'configs/ca-cert.pem']) +node = cluster.add_instance( + "node", + ipv4_address=NODE_IP, + main_configs=[ + "configs/grpc_config.xml", + "configs/server-key.pem", + "configs/server-cert.pem", + "configs/ca-cert.pem", + ], +) + def create_secure_channel(): - ca_cert = open(os.path.join(config_dir, 'ca-cert.pem'), 'rb').read() - client_key = open(os.path.join(config_dir, 'client-key.pem'), 'rb').read() - client_cert = open(os.path.join(config_dir, 'client-cert.pem'), 'rb').read() + ca_cert = open(os.path.join(config_dir, "ca-cert.pem"), "rb").read() + client_key = open(os.path.join(config_dir, "client-key.pem"), "rb").read() + client_cert = open(os.path.join(config_dir, "client-cert.pem"), "rb").read() credentials = grpc.ssl_channel_credentials(ca_cert, client_key, client_cert) channel = grpc.secure_channel(node_ip_with_grpc_port, credentials) grpc.channel_ready_future(channel).result(timeout=10) return channel + 
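# Side sketch, not part of this diff: the helper above pins NODE_IP because
# server-cert.pem only validates for that address. An alternative (shown here as an
# illustrative assumption, using only real grpc APIs already used in this file:
# ssl_channel_credentials, secure_channel, channel_ready_future) is to dial any
# reachable address and let the "grpc.ssl_target_name_override" channel option tell
# the TLS handshake which certificate name to verify against. The helper name,
# parameters, and certs_dir layout are hypothetical.
import os
import grpc

def create_secure_channel_with_name_override(target, cert_name, certs_dir):
    # certs_dir is assumed to hold the same ca-cert.pem / client-key.pem /
    # client-cert.pem files this test keeps in its configs/ directory.
    ca_cert = open(os.path.join(certs_dir, "ca-cert.pem"), "rb").read()
    client_key = open(os.path.join(certs_dir, "client-key.pem"), "rb").read()
    client_cert = open(os.path.join(certs_dir, "client-cert.pem"), "rb").read()
    credentials = grpc.ssl_channel_credentials(ca_cert, client_key, client_cert)
    channel = grpc.secure_channel(
        target,
        credentials,
        # Verify the server certificate against cert_name instead of `target`.
        options=[("grpc.ssl_target_name_override", cert_name)],
    )
    # Fail fast if the mutual-TLS handshake (or the override name) is wrong.
    grpc.channel_ready_future(channel).result(timeout=10)
    return channel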
def create_insecure_channel(): channel = grpc.insecure_channel(node_ip_with_grpc_port) grpc.channel_ready_future(channel).result(timeout=2) return channel + def create_secure_channel_with_wrong_client_certificate(): - ca_cert = open(os.path.join(config_dir, 'ca-cert.pem'), 'rb').read() - client_key = open(os.path.join(config_dir, 'wrong-client-key.pem'), 'rb').read() - client_cert = open(os.path.join(config_dir, 'wrong-client-cert.pem'), 'rb').read() + ca_cert = open(os.path.join(config_dir, "ca-cert.pem"), "rb").read() + client_key = open(os.path.join(config_dir, "wrong-client-key.pem"), "rb").read() + client_cert = open(os.path.join(config_dir, "wrong-client-cert.pem"), "rb").read() credentials = grpc.ssl_channel_credentials(ca_cert, client_key, client_cert) channel = grpc.secure_channel(node_ip_with_grpc_port, credentials) grpc.channel_ready_future(channel).result(timeout=2) return channel + def query(query_text, channel): query_info = clickhouse_grpc_pb2.QueryInfo(query=query_text) stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(channel) result = stub.ExecuteQuery(query_info) - if result and result.HasField('exception'): + if result and result.HasField("exception"): raise Exception(result.exception.display_text) return result.output.decode(DEFAULT_ENCODING) + @pytest.fixture(scope="module", autouse=True) def start_cluster(): cluster.start() try: yield cluster - + finally: cluster.shutdown() + # Actual tests + def test_secure_channel(): with create_secure_channel() as channel: assert query("SELECT 'ok'", channel) == "ok\n" + def test_insecure_channel(): with pytest.raises(grpc.FutureTimeoutError): with create_insecure_channel() as channel: query("SELECT 'ok'", channel) + def test_wrong_client_certificate(): with pytest.raises(grpc.FutureTimeoutError): with create_insecure_channel() as channel: diff --git a/tests/integration/test_hedged_requests/test.py b/tests/integration/test_hedged_requests/test.py index b137dadfca9..5d24b66cd02 100644 --- a/tests/integration/test_hedged_requests/test.py +++ b/tests/integration/test_hedged_requests/test.py @@ -11,34 +11,46 @@ from helpers.network import PartitionManager from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -NODES = {'node_' + str(i): None for i in (1, 2, 3)} +NODES = {"node_" + str(i): None for i in (1, 2, 3)} -NODES['node'] = None +NODES["node"] = None # Sleep time in milliseconds. 
sleep_time = 30000 + @pytest.fixture(scope="module") def started_cluster(): - NODES['node'] = cluster.add_instance( - 'node', stay_alive=True, main_configs=['configs/remote_servers.xml'], user_configs=['configs/users.xml']) + NODES["node"] = cluster.add_instance( + "node", + stay_alive=True, + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/users.xml"], + ) for name in NODES: - if name != 'node': - NODES[name] = cluster.add_instance(name, user_configs=['configs/users1.xml']) + if name != "node": + NODES[name] = cluster.add_instance( + name, user_configs=["configs/users1.xml"] + ) try: cluster.start() for node_id, node in list(NODES.items()): - node.query('''CREATE TABLE test_hedged (id UInt32, date Date) ENGINE = - MergeTree() ORDER BY id PARTITION BY toYYYYMM(date)''') + node.query( + """CREATE TABLE test_hedged (id UInt32, date Date) ENGINE = + MergeTree() ORDER BY id PARTITION BY toYYYYMM(date)""" + ) - node.query("INSERT INTO test_hedged select number, toDate(number) from numbers(100);") + node.query( + "INSERT INTO test_hedged select number, toDate(number) from numbers(100);" + ) - - NODES['node'].query('''CREATE TABLE distributed (id UInt32, date Date) ENGINE = - Distributed('test_cluster', 'default', 'test_hedged')''') + NODES["node"].query( + """CREATE TABLE distributed (id UInt32, date Date) ENGINE = + Distributed('test_cluster', 'default', 'test_hedged')""" + ) yield cluster @@ -46,24 +58,28 @@ def started_cluster(): cluster.shutdown() -config = ''' +config = """ {sleep_in_send_tables_status_ms} {sleep_in_send_data_ms} -''' +""" def check_query(expected_replica, receive_timeout=300): - NODES['node'].restart_clickhouse() - + NODES["node"].restart_clickhouse() + # Without hedged requests select query will last more than 30 seconds, # with hedged requests it will last just around 1-2 second start = time.time() - result = NODES['node'].query("SELECT hostName(), id FROM distributed ORDER BY id LIMIT 1 SETTINGS receive_timeout={}".format(receive_timeout)); + result = NODES["node"].query( + "SELECT hostName(), id FROM distributed ORDER BY id LIMIT 1 SETTINGS receive_timeout={}".format( + receive_timeout + ) + ) query_time = time.time() - start assert TSV(result) == TSV(expected_replica + "\t0") @@ -75,9 +91,16 @@ def check_query(expected_replica, receive_timeout=300): def check_settings(node_name, sleep_in_send_tables_status_ms, sleep_in_send_data_ms): attempts = 0 while attempts < 1000: - setting1 = NODES[node_name].http_query("SELECT value FROM system.settings WHERE name='sleep_in_send_tables_status_ms'") - setting2 = NODES[node_name].http_query("SELECT value FROM system.settings WHERE name='sleep_in_send_data_ms'") - if int(setting1) == sleep_in_send_tables_status_ms and int(setting2) == sleep_in_send_data_ms: + setting1 = NODES[node_name].http_query( + "SELECT value FROM system.settings WHERE name='sleep_in_send_tables_status_ms'" + ) + setting2 = NODES[node_name].http_query( + "SELECT value FROM system.settings WHERE name='sleep_in_send_data_ms'" + ) + if ( + int(setting1) == sleep_in_send_tables_status_ms + and int(setting2) == sleep_in_send_data_ms + ): return time.sleep(0.1) attempts += 1 @@ -86,31 +109,56 @@ def check_settings(node_name, sleep_in_send_tables_status_ms, sleep_in_send_data def check_changing_replica_events(expected_count): - result = NODES['node'].query("SELECT value FROM system.events WHERE event='HedgedRequestsChangeReplica'") + result = NODES["node"].query( + "SELECT value FROM system.events WHERE event='HedgedRequestsChangeReplica'" + ) 
# If server load is high we can see more than expected # replica change events, but never less than expected assert int(result) >= expected_count -def update_configs(node_1_sleep_in_send_tables_status=0, node_1_sleep_in_send_data=0, - node_2_sleep_in_send_tables_status=0, node_2_sleep_in_send_data=0, - node_3_sleep_in_send_tables_status=0, node_3_sleep_in_send_data=0): - NODES['node_1'].replace_config( - '/etc/clickhouse-server/users.d/users1.xml', - config.format(sleep_in_send_tables_status_ms=node_1_sleep_in_send_tables_status, sleep_in_send_data_ms=node_1_sleep_in_send_data)) +def update_configs( + node_1_sleep_in_send_tables_status=0, + node_1_sleep_in_send_data=0, + node_2_sleep_in_send_tables_status=0, + node_2_sleep_in_send_data=0, + node_3_sleep_in_send_tables_status=0, + node_3_sleep_in_send_data=0, +): + NODES["node_1"].replace_config( + "/etc/clickhouse-server/users.d/users1.xml", + config.format( + sleep_in_send_tables_status_ms=node_1_sleep_in_send_tables_status, + sleep_in_send_data_ms=node_1_sleep_in_send_data, + ), + ) - NODES['node_2'].replace_config( - '/etc/clickhouse-server/users.d/users1.xml', - config.format(sleep_in_send_tables_status_ms=node_2_sleep_in_send_tables_status, sleep_in_send_data_ms=node_2_sleep_in_send_data)) + NODES["node_2"].replace_config( + "/etc/clickhouse-server/users.d/users1.xml", + config.format( + sleep_in_send_tables_status_ms=node_2_sleep_in_send_tables_status, + sleep_in_send_data_ms=node_2_sleep_in_send_data, + ), + ) - NODES['node_3'].replace_config( - '/etc/clickhouse-server/users.d/users1.xml', - config.format(sleep_in_send_tables_status_ms=node_3_sleep_in_send_tables_status, sleep_in_send_data_ms=node_3_sleep_in_send_data)) + NODES["node_3"].replace_config( + "/etc/clickhouse-server/users.d/users1.xml", + config.format( + sleep_in_send_tables_status_ms=node_3_sleep_in_send_tables_status, + sleep_in_send_data_ms=node_3_sleep_in_send_data, + ), + ) - check_settings('node_1', node_1_sleep_in_send_tables_status, node_1_sleep_in_send_data) - check_settings('node_2', node_2_sleep_in_send_tables_status, node_2_sleep_in_send_data) - check_settings('node_3', node_3_sleep_in_send_tables_status, node_3_sleep_in_send_data) + check_settings( + "node_1", node_1_sleep_in_send_tables_status, node_1_sleep_in_send_data + ) + check_settings( + "node_2", node_2_sleep_in_send_tables_status, node_2_sleep_in_send_data + ) + check_settings( + "node_3", node_3_sleep_in_send_tables_status, node_3_sleep_in_send_data + ) def test_stuck_replica(started_cluster): @@ -121,16 +169,22 @@ def test_stuck_replica(started_cluster): check_query(expected_replica="node_2") check_changing_replica_events(1) - result = NODES['node'].query("SELECT slowdowns_count FROM system.clusters WHERE cluster='test_cluster' and host_name='node_1'") + result = NODES["node"].query( + "SELECT slowdowns_count FROM system.clusters WHERE cluster='test_cluster' and host_name='node_1'" + ) assert TSV(result) == TSV("1") - result = NODES['node'].query("SELECT hostName(), id FROM distributed ORDER BY id LIMIT 1"); + result = NODES["node"].query( + "SELECT hostName(), id FROM distributed ORDER BY id LIMIT 1" + ) assert TSV(result) == TSV("node_2\t0") # Check that we didn't choose node_1 first again and slowdowns_count didn't increase. 
- result = NODES['node'].query("SELECT slowdowns_count FROM system.clusters WHERE cluster='test_cluster' and host_name='node_1'") + result = NODES["node"].query( + "SELECT slowdowns_count FROM system.clusters WHERE cluster='test_cluster' and host_name='node_1'" + ) assert TSV(result) == TSV("1") @@ -141,12 +195,16 @@ def test_long_query(started_cluster): update_configs() # Restart to reset pool states. - NODES['node'].restart_clickhouse() + NODES["node"].restart_clickhouse() - result = NODES['node'].query("select hostName(), max(id + sleep(1.5)) from distributed settings max_block_size = 1, max_threads = 1;") + result = NODES["node"].query( + "select hostName(), max(id + sleep(1.5)) from distributed settings max_block_size = 1, max_threads = 1;" + ) assert TSV(result) == TSV("node_1\t99") - NODES['node'].query("INSERT INTO distributed select number, toDate(number) from numbers(100);") + NODES["node"].query( + "INSERT INTO distributed select number, toDate(number) from numbers(100);" + ) def test_send_table_status_sleep(started_cluster): @@ -156,7 +214,10 @@ def test_send_table_status_sleep(started_cluster): def test_send_table_status_sleep2(started_cluster): - update_configs(node_1_sleep_in_send_tables_status=sleep_time, node_2_sleep_in_send_tables_status=sleep_time) + update_configs( + node_1_sleep_in_send_tables_status=sleep_time, + node_2_sleep_in_send_tables_status=sleep_time, + ) check_query(expected_replica="node_3") check_changing_replica_events(2) @@ -168,36 +229,48 @@ def test_send_data(started_cluster): def test_send_data2(started_cluster): - update_configs(node_1_sleep_in_send_data=sleep_time, node_2_sleep_in_send_data=sleep_time) + update_configs( + node_1_sleep_in_send_data=sleep_time, node_2_sleep_in_send_data=sleep_time + ) check_query(expected_replica="node_3") check_changing_replica_events(2) def test_combination1(started_cluster): - update_configs(node_1_sleep_in_send_tables_status=sleep_time, node_2_sleep_in_send_data=sleep_time) + update_configs( + node_1_sleep_in_send_tables_status=sleep_time, + node_2_sleep_in_send_data=sleep_time, + ) check_query(expected_replica="node_3") check_changing_replica_events(2) def test_combination2(started_cluster): - update_configs(node_1_sleep_in_send_data=sleep_time, node_2_sleep_in_send_tables_status=sleep_time) + update_configs( + node_1_sleep_in_send_data=sleep_time, + node_2_sleep_in_send_tables_status=sleep_time, + ) check_query(expected_replica="node_3") check_changing_replica_events(2) def test_combination3(started_cluster): - update_configs(node_1_sleep_in_send_data=sleep_time, - node_2_sleep_in_send_tables_status=1000, - node_3_sleep_in_send_data=sleep_time) + update_configs( + node_1_sleep_in_send_data=sleep_time, + node_2_sleep_in_send_tables_status=1000, + node_3_sleep_in_send_data=sleep_time, + ) check_query(expected_replica="node_2") check_changing_replica_events(3) def test_combination4(started_cluster): - update_configs(node_1_sleep_in_send_tables_status=1000, - node_1_sleep_in_send_data=sleep_time, - node_2_sleep_in_send_tables_status=1000, - node_3_sleep_in_send_tables_status=1000) + update_configs( + node_1_sleep_in_send_tables_status=1000, + node_1_sleep_in_send_data=sleep_time, + node_2_sleep_in_send_tables_status=1000, + node_3_sleep_in_send_tables_status=1000, + ) check_query(expected_replica="node_2") check_changing_replica_events(4) @@ -205,9 +278,11 @@ def test_combination4(started_cluster): def test_receive_timeout1(started_cluster): # Check the situation when first two replicas get receive timeout # in 
establishing connection, but the third replica is ok. - update_configs(node_1_sleep_in_send_tables_status=3000, - node_2_sleep_in_send_tables_status=3000, - node_3_sleep_in_send_data=1000) + update_configs( + node_1_sleep_in_send_tables_status=3000, + node_2_sleep_in_send_tables_status=3000, + node_3_sleep_in_send_data=1000, + ) check_query(expected_replica="node_3", receive_timeout=2) check_changing_replica_events(2) @@ -216,9 +291,10 @@ def test_receive_timeout2(started_cluster): # Check the situation when first replica get receive timeout # in packet receiving but there are replicas in process of # connection establishing. - update_configs(node_1_sleep_in_send_data=4000, - node_2_sleep_in_send_tables_status=2000, - node_3_sleep_in_send_tables_status=2000) + update_configs( + node_1_sleep_in_send_data=4000, + node_2_sleep_in_send_tables_status=2000, + node_3_sleep_in_send_tables_status=2000, + ) check_query(expected_replica="node_2", receive_timeout=3) check_changing_replica_events(3) - diff --git a/tests/integration/test_hedged_requests_parallel/test.py b/tests/integration/test_hedged_requests_parallel/test.py index 3ea6cf80622..ff83e99e6dd 100644 --- a/tests/integration/test_hedged_requests_parallel/test.py +++ b/tests/integration/test_hedged_requests_parallel/test.py @@ -11,33 +11,46 @@ from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) -NODES = {'node_' + str(i): None for i in (1, 2, 3, 4)} -NODES['node'] = None +NODES = {"node_" + str(i): None for i in (1, 2, 3, 4)} +NODES["node"] = None # Cleep time in milliseconds. sleep_time = 30000 + @pytest.fixture(scope="module") def started_cluster(): cluster = ClickHouseCluster(__file__) - NODES['node'] = cluster.add_instance( - 'node', stay_alive=True, main_configs=['configs/remote_servers.xml'], user_configs=['configs/users.xml']) + NODES["node"] = cluster.add_instance( + "node", + stay_alive=True, + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/users.xml"], + ) for name in NODES: - if name != 'node': - NODES[name] = cluster.add_instance(name, user_configs=['configs/users1.xml']) - + if name != "node": + NODES[name] = cluster.add_instance( + name, user_configs=["configs/users1.xml"] + ) + try: cluster.start() for node_id, node in list(NODES.items()): - node.query('''CREATE TABLE test_hedged (id UInt32, date Date) ENGINE = - MergeTree() ORDER BY id PARTITION BY toYYYYMM(date)''') + node.query( + """CREATE TABLE test_hedged (id UInt32, date Date) ENGINE = + MergeTree() ORDER BY id PARTITION BY toYYYYMM(date)""" + ) - node.query("INSERT INTO test_hedged SELECT number, toDateTime(number) FROM numbers(100)") + node.query( + "INSERT INTO test_hedged SELECT number, toDateTime(number) FROM numbers(100)" + ) - NODES['node'].query('''CREATE TABLE distributed (id UInt32, date Date) ENGINE = - Distributed('test_cluster', 'default', 'test_hedged')''') + NODES["node"].query( + """CREATE TABLE distributed (id UInt32, date Date) ENGINE = + Distributed('test_cluster', 'default', 'test_hedged')""" + ) yield cluster @@ -45,14 +58,14 @@ def started_cluster(): cluster.shutdown() -config = ''' +config = """ {sleep_in_send_tables_status_ms} {sleep_in_send_data_ms} -''' +""" QUERY_1 = "SELECT count() FROM distributed" @@ -60,13 +73,13 @@ QUERY_2 = "SELECT * FROM distributed" def check_query(query=QUERY_1): - NODES['node'].restart_clickhouse() + NODES["node"].restart_clickhouse() # Without hedged requests select query will last more than 30 seconds, # with hedged requests it will last just around 1-2 second 
start = time.time() - NODES['node'].query(query); + NODES["node"].query(query) query_time = time.time() - start print("Query time:", query_time) @@ -76,9 +89,16 @@ def check_query(query=QUERY_1): def check_settings(node_name, sleep_in_send_tables_status_ms, sleep_in_send_data_ms): attempts = 0 while attempts < 1000: - setting1 = NODES[node_name].http_query("SELECT value FROM system.settings WHERE name='sleep_in_send_tables_status_ms'") - setting2 = NODES[node_name].http_query("SELECT value FROM system.settings WHERE name='sleep_in_send_data_ms'") - if int(setting1) == sleep_in_send_tables_status_ms and int(setting2) == sleep_in_send_data_ms: + setting1 = NODES[node_name].http_query( + "SELECT value FROM system.settings WHERE name='sleep_in_send_tables_status_ms'" + ) + setting2 = NODES[node_name].http_query( + "SELECT value FROM system.settings WHERE name='sleep_in_send_data_ms'" + ) + if ( + int(setting1) == sleep_in_send_tables_status_ms + and int(setting2) == sleep_in_send_data_ms + ): return time.sleep(0.1) attempts += 1 @@ -87,75 +107,116 @@ def check_settings(node_name, sleep_in_send_tables_status_ms, sleep_in_send_data def check_changing_replica_events(expected_count): - result = NODES['node'].query("SELECT value FROM system.events WHERE event='HedgedRequestsChangeReplica'") + result = NODES["node"].query( + "SELECT value FROM system.events WHERE event='HedgedRequestsChangeReplica'" + ) # If server load is high we can see more than expected # replica change events, but never less than expected assert int(result) >= expected_count -def update_configs(node_1_sleep_in_send_tables_status=0, node_1_sleep_in_send_data=0, - node_2_sleep_in_send_tables_status=0, node_2_sleep_in_send_data=0, - node_3_sleep_in_send_tables_status=0, node_3_sleep_in_send_data=0, - node_4_sleep_in_send_tables_status=0, node_4_sleep_in_send_data=0): - NODES['node_1'].replace_config( - '/etc/clickhouse-server/users.d/users1.xml', - config.format(sleep_in_send_tables_status_ms=node_1_sleep_in_send_tables_status, sleep_in_send_data_ms=node_1_sleep_in_send_data)) +def update_configs( + node_1_sleep_in_send_tables_status=0, + node_1_sleep_in_send_data=0, + node_2_sleep_in_send_tables_status=0, + node_2_sleep_in_send_data=0, + node_3_sleep_in_send_tables_status=0, + node_3_sleep_in_send_data=0, + node_4_sleep_in_send_tables_status=0, + node_4_sleep_in_send_data=0, +): + NODES["node_1"].replace_config( + "/etc/clickhouse-server/users.d/users1.xml", + config.format( + sleep_in_send_tables_status_ms=node_1_sleep_in_send_tables_status, + sleep_in_send_data_ms=node_1_sleep_in_send_data, + ), + ) - NODES['node_2'].replace_config( - '/etc/clickhouse-server/users.d/users1.xml', - config.format(sleep_in_send_tables_status_ms=node_2_sleep_in_send_tables_status, sleep_in_send_data_ms=node_2_sleep_in_send_data)) + NODES["node_2"].replace_config( + "/etc/clickhouse-server/users.d/users1.xml", + config.format( + sleep_in_send_tables_status_ms=node_2_sleep_in_send_tables_status, + sleep_in_send_data_ms=node_2_sleep_in_send_data, + ), + ) - NODES['node_3'].replace_config( - '/etc/clickhouse-server/users.d/users1.xml', - config.format(sleep_in_send_tables_status_ms=node_3_sleep_in_send_tables_status, sleep_in_send_data_ms=node_3_sleep_in_send_data)) + NODES["node_3"].replace_config( + "/etc/clickhouse-server/users.d/users1.xml", + config.format( + sleep_in_send_tables_status_ms=node_3_sleep_in_send_tables_status, + sleep_in_send_data_ms=node_3_sleep_in_send_data, + ), + ) - NODES['node_4'].replace_config( - 
'/etc/clickhouse-server/users.d/users1.xml', - config.format(sleep_in_send_tables_status_ms=node_4_sleep_in_send_tables_status, sleep_in_send_data_ms=node_4_sleep_in_send_data)) + NODES["node_4"].replace_config( + "/etc/clickhouse-server/users.d/users1.xml", + config.format( + sleep_in_send_tables_status_ms=node_4_sleep_in_send_tables_status, + sleep_in_send_data_ms=node_4_sleep_in_send_data, + ), + ) - check_settings('node_1', node_1_sleep_in_send_tables_status, node_1_sleep_in_send_data) - check_settings('node_2', node_2_sleep_in_send_tables_status, node_2_sleep_in_send_data) - check_settings('node_3', node_3_sleep_in_send_tables_status, node_3_sleep_in_send_data) - check_settings('node_4', node_4_sleep_in_send_tables_status, node_4_sleep_in_send_data) + check_settings( + "node_1", node_1_sleep_in_send_tables_status, node_1_sleep_in_send_data + ) + check_settings( + "node_2", node_2_sleep_in_send_tables_status, node_2_sleep_in_send_data + ) + check_settings( + "node_3", node_3_sleep_in_send_tables_status, node_3_sleep_in_send_data + ) + check_settings( + "node_4", node_4_sleep_in_send_tables_status, node_4_sleep_in_send_data + ) def test_send_table_status_sleep(started_cluster): - update_configs(node_1_sleep_in_send_tables_status=sleep_time, node_2_sleep_in_send_tables_status=sleep_time) + update_configs( + node_1_sleep_in_send_tables_status=sleep_time, + node_2_sleep_in_send_tables_status=sleep_time, + ) check_query() check_changing_replica_events(2) def test_send_data(started_cluster): - update_configs(node_1_sleep_in_send_data=sleep_time, node_2_sleep_in_send_data=sleep_time) + update_configs( + node_1_sleep_in_send_data=sleep_time, node_2_sleep_in_send_data=sleep_time + ) check_query() check_changing_replica_events(2) def test_combination1(started_cluster): - update_configs(node_1_sleep_in_send_tables_status=1000, - node_2_sleep_in_send_tables_status=1000, - node_3_sleep_in_send_data=sleep_time) + update_configs( + node_1_sleep_in_send_tables_status=1000, + node_2_sleep_in_send_tables_status=1000, + node_3_sleep_in_send_data=sleep_time, + ) check_query() check_changing_replica_events(3) def test_combination2(started_cluster): - update_configs(node_1_sleep_in_send_data=sleep_time, - node_2_sleep_in_send_tables_status=1000, - node_3_sleep_in_send_data=sleep_time, - node_4_sleep_in_send_tables_status=1000) + update_configs( + node_1_sleep_in_send_data=sleep_time, + node_2_sleep_in_send_tables_status=1000, + node_3_sleep_in_send_data=sleep_time, + node_4_sleep_in_send_tables_status=1000, + ) check_query() check_changing_replica_events(4) def test_query_with_no_data_to_sample(started_cluster): - update_configs(node_1_sleep_in_send_data=sleep_time, - node_2_sleep_in_send_data=sleep_time) + update_configs( + node_1_sleep_in_send_data=sleep_time, node_2_sleep_in_send_data=sleep_time + ) # When there is no way to sample data, the whole query will be performed by # the first replica and the second replica will just send EndOfStream, # so we will change only the first replica here. 
check_query(query=QUERY_2) check_changing_replica_events(1) - diff --git a/tests/integration/test_hive_query/test.py b/tests/integration/test_hive_query/test.py index 20b6a6cb8f2..9e9a20fa6d1 100644 --- a/tests/integration/test_hive_query/test.py +++ b/tests/integration/test_hive_query/test.py @@ -16,81 +16,101 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) def started_cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance('h0_0_0', main_configs=['configs/config.xml'], extra_configs=[ 'configs/hdfs-site.xml'], with_hive=True) - + cluster.add_instance( + "h0_0_0", + main_configs=["configs/config.xml"], + extra_configs=["configs/hdfs-site.xml"], + with_hive=True, + ) + logging.info("Starting cluster ...") cluster.start() yield cluster finally: cluster.shutdown() + def test_create_parquet_table(started_cluster): - logging.info('Start testing creating hive table ...') - node = started_cluster.instances['h0_0_0'] + logging.info("Start testing creating hive table ...") + node = started_cluster.instances["h0_0_0"] test_passed = False for i in range(10): node.query("set input_format_parquet_allow_missing_columns = true") - result = node.query(""" + result = node.query( + """ DROP TABLE IF EXISTS default.demo_parquet; CREATE TABLE default.demo_parquet (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo') PARTITION BY(day) - """) + """ + ) logging.info("create result {}".format(result)) - if result.strip() == '': - test_passed = True - break + if result.strip() == "": + test_passed = True + break time.sleep(60) assert test_passed + def test_create_parquet_table_1(started_cluster): - logging.info('Start testing creating hive table ...') - node = started_cluster.instances['h0_0_0'] + logging.info("Start testing creating hive table ...") + node = started_cluster.instances["h0_0_0"] for i in range(10): node.query("set input_format_parquet_allow_missing_columns = true") - result = node.query(""" + result = node.query( + """ DROP TABLE IF EXISTS default.demo_parquet_parts; CREATE TABLE default.demo_parquet_parts (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String), `hour` String) ENGINE = Hive('thrift://hivetest:9083', 'test', 'parquet_demo') PARTITION BY(day, hour); - """) + """ + ) logging.info("create result {}".format(result)) - if result.strip() == '': - test_passed = True - break + if result.strip() == "": + test_passed = True + break time.sleep(60) assert test_passed + def test_create_orc_table(started_cluster): - logging.info('Start testing creating hive table ...') - node = started_cluster.instances['h0_0_0'] + logging.info("Start testing creating hive table ...") + node = started_cluster.instances["h0_0_0"] test_passed = False for i in range(10): - result = node.query(""" + result = node.query( + """ DROP TABLE IF EXISTS default.demo_orc; CREATE TABLE default.demo_orc (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo_orc') PARTITION BY(day) - """) + """ + ) logging.info("create result {}".format(result)) - if result.strip() == '': - test_passed = True - break + if result.strip() == "": + test_passed = True + break time.sleep(60) - + assert test_passed + def test_create_text_table(started_cluster): - logging.info('Start testing creating hive table ...') - node = started_cluster.instances['h0_0_0'] - result = node.query(""" + logging.info("Start testing creating hive table ...") + node = 
started_cluster.instances["h0_0_0"] + result = node.query( + """ DROP TABLE IF EXISTS default.demo_text; CREATE TABLE default.demo_text (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo_text') PARTITION BY (tuple()) - """) + """ + ) logging.info("create result {}".format(result)) - - assert result.strip() == '' + + assert result.strip() == "" + def test_parquet_groupby(started_cluster): - logging.info('Start testing groupby ...') - node = started_cluster.instances['h0_0_0'] - result = node.query(""" + logging.info("Start testing groupby ...") + node = started_cluster.instances["h0_0_0"] + result = node.query( + """ SELECT day, count(*) FROM default.demo_parquet group by day order by day - """) + """ + ) expected_result = """2021-11-01 1 2021-11-05 2 2021-11-11 1 @@ -98,22 +118,29 @@ def test_parquet_groupby(started_cluster): """ assert result == expected_result + def test_parquet_in_filter(started_cluster): - logging.info('Start testing groupby ...') - node = started_cluster.instances['h0_0_0'] - result = node.query(""" + logging.info("Start testing groupby ...") + node = started_cluster.instances["h0_0_0"] + result = node.query( + """ SELECT count(*) FROM default.demo_parquet_parts where day = '2021-11-05' and hour in ('00') - """) + """ + ) expected_result = """2 """ logging.info("query result:{}".format(result)) assert result == expected_result + + def test_orc_groupby(started_cluster): - logging.info('Start testing groupby ...') - node = started_cluster.instances['h0_0_0'] - result = node.query(""" + logging.info("Start testing groupby ...") + node = started_cluster.instances["h0_0_0"] + result = node.query( + """ SELECT day, count(*) FROM default.demo_orc group by day order by day - """) + """ + ) expected_result = """2021-11-01 1 2021-11-05 2 2021-11-11 1 @@ -121,11 +148,28 @@ def test_orc_groupby(started_cluster): """ assert result == expected_result + +def test_hive_columns_prunning(started_cluster): + logging.info("Start testing groupby ...") + node = started_cluster.instances["h0_0_0"] + result = node.query( + """ + SELECT count(*) FROM default.demo_parquet_parts where day = '2021-11-05' + """ + ) + expected_result = """4 +""" + logging.info("query result:{}".format(result)) + assert result == expected_result + + def test_text_count(started_cluster): - node = started_cluster.instances['h0_0_0'] - result = node.query(""" + node = started_cluster.instances["h0_0_0"] + result = node.query( + """ SELECT day, count(*) FROM default.demo_orc group by day order by day SETTINGS format_csv_delimiter = '\x01' - """) + """ + ) expected_result = """2021-11-01 1 2021-11-05 2 2021-11-11 1 @@ -133,12 +177,15 @@ def test_text_count(started_cluster): """ assert result == expected_result + def test_parquet_groupby_with_cache(started_cluster): - logging.info('Start testing groupby ...') - node = started_cluster.instances['h0_0_0'] - result = node.query(""" + logging.info("Start testing groupby ...") + node = started_cluster.instances["h0_0_0"] + result = node.query( + """ SELECT day, count(*) FROM default.demo_parquet group by day order by day - """) + """ + ) expected_result = """2021-11-01 1 2021-11-05 2 2021-11-11 1 @@ -146,32 +193,42 @@ def test_parquet_groupby_with_cache(started_cluster): """ assert result == expected_result + def test_parquet_groupby_by_hive_function(started_cluster): - logging.info('Start testing groupby ...') - node = started_cluster.instances['h0_0_0'] - result = node.query(""" + 
logging.info("Start testing groupby ...") + node = started_cluster.instances["h0_0_0"] + result = node.query( + """ SELECT day, count(*) FROM hive('thrift://hivetest:9083', 'test', 'demo', '`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)', 'day') group by day order by day - """) + """ + ) expected_result = """2021-11-01 1 2021-11-05 2 2021-11-11 1 2021-11-16 2 """ assert result == expected_result - + + def test_cache_read_bytes(started_cluster): - node = started_cluster.instances['h0_0_0'] - result = node.query(""" + node = started_cluster.instances["h0_0_0"] + result = node.query( + """ CREATE TABLE IF NOT EXISTS default.demo_parquet_1 (`id` Nullable(String), `score` Nullable(Int32), `day` Nullable(String)) ENGINE = Hive('thrift://hivetest:9083', 'test', 'demo') PARTITION BY(day) - """) + """ + ) test_passed = False for i in range(10): - result = node.query(""" + result = node.query( + """ SELECT day, count(*) FROM default.demo_parquet_1 group by day order by day settings input_format_parquet_allow_missing_columns = true - """) + """ + ) node.query("system flush logs") - result = node.query("select sum(ProfileEvent_ExternalDataSourceLocalCacheReadBytes) from system.metric_log where ProfileEvent_ExternalDataSourceLocalCacheReadBytes > 0") - if result.strip() == '0': + result = node.query( + "select sum(ProfileEvent_ExternalDataSourceLocalCacheReadBytes) from system.metric_log where ProfileEvent_ExternalDataSourceLocalCacheReadBytes > 0" + ) + if result.strip() == "0": logging.info("ProfileEvent_ExternalDataSourceLocalCacheReadBytes == 0") time.sleep(10) continue diff --git a/tests/integration/test_host_ip_change/test.py b/tests/integration/test_host_ip_change/test.py index 7525914e803..604f2e5dc76 100644 --- a/tests/integration/test_host_ip_change/test.py +++ b/tests/integration/test_host_ip_change/test.py @@ -10,19 +10,29 @@ cluster = ClickHouseCluster(__file__) def _fill_nodes(nodes, table_name): for node in nodes: node.query( - ''' + """ CREATE DATABASE IF NOT EXISTS test; CREATE TABLE IF NOT EXISTS {0}(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{0}', '{1}') ORDER BY id PARTITION BY toYYYYMM(date); - '''.format(table_name, node.name) + """.format( + table_name, node.name + ) ) -node1 = cluster.add_instance('node1', main_configs=['configs/listen_host.xml'], with_zookeeper=True, - ipv6_address='2001:3984:3989::1:1111') -node2 = cluster.add_instance('node2', main_configs=['configs/listen_host.xml', 'configs/dns_update_long.xml'], - with_zookeeper=True, ipv6_address='2001:3984:3989::1:1112') +node1 = cluster.add_instance( + "node1", + main_configs=["configs/listen_host.xml"], + with_zookeeper=True, + ipv6_address="2001:3984:3989::1:1111", +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/listen_host.xml", "configs/dns_update_long.xml"], + with_zookeeper=True, + ipv6_address="2001:3984:3989::1:1112", +) @pytest.fixture(scope="module") @@ -30,7 +40,7 @@ def cluster_without_dns_cache_update(): try: cluster.start() - _fill_nodes([node1, node2], 'test_table_drop') + _fill_nodes([node1, node2], "test_table_drop") yield cluster @@ -46,7 +56,9 @@ def cluster_without_dns_cache_update(): # node2 has long dns_cache_update_period, so dns cache update wouldn't work def test_ip_change_drop_dns_cache(cluster_without_dns_cache_update): # First we check, that normal replication works - node1.query("INSERT INTO test_table_drop VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)") + node1.query( + "INSERT INTO 
test_table_drop VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)" + ) assert node1.query("SELECT count(*) from test_table_drop") == "3\n" assert_eq_with_retry(node2, "SELECT count(*) from test_table_drop", "3") @@ -54,7 +66,9 @@ def test_ip_change_drop_dns_cache(cluster_without_dns_cache_update): cluster.restart_instance_with_ip_change(node1, "2001:3984:3989::1:7777") # Put some data to source node1 - node1.query("INSERT INTO test_table_drop VALUES ('2018-10-01', 5), ('2018-10-02', 6), ('2018-10-03', 7)") + node1.query( + "INSERT INTO test_table_drop VALUES ('2018-10-01', 5), ('2018-10-02', 6), ('2018-10-03', 7)" + ) # Check that data is placed on node1 assert node1.query("SELECT count(*) from test_table_drop") == "6\n" @@ -73,11 +87,22 @@ def test_ip_change_drop_dns_cache(cluster_without_dns_cache_update): assert_eq_with_retry(node2, "SELECT count(*) from test_table_drop", "7") -node3 = cluster.add_instance('node3', main_configs=['configs/listen_host.xml'], - with_zookeeper=True, ipv6_address='2001:3984:3989::1:1113') -node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/listen_host.xml', - 'configs/dns_update_short.xml'], - with_zookeeper=True, ipv6_address='2001:3984:3989::1:1114') +node3 = cluster.add_instance( + "node3", + main_configs=["configs/listen_host.xml"], + with_zookeeper=True, + ipv6_address="2001:3984:3989::1:1113", +) +node4 = cluster.add_instance( + "node4", + main_configs=[ + "configs/remote_servers.xml", + "configs/listen_host.xml", + "configs/dns_update_short.xml", + ], + with_zookeeper=True, + ipv6_address="2001:3984:3989::1:1114", +) @pytest.fixture(scope="module") @@ -85,7 +110,7 @@ def cluster_with_dns_cache_update(): try: cluster.start() - _fill_nodes([node3, node4], 'test_table_update') + _fill_nodes([node3, node4], "test_table_update") yield cluster @@ -101,7 +126,9 @@ def cluster_with_dns_cache_update(): # node4 has short dns_cache_update_period, so testing update of dns cache def test_ip_change_update_dns_cache(cluster_with_dns_cache_update): # First we check, that normal replication works - node3.query("INSERT INTO test_table_update VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)") + node3.query( + "INSERT INTO test_table_update VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)" + ) assert node3.query("SELECT count(*) from test_table_update") == "3\n" assert_eq_with_retry(node4, "SELECT count(*) from test_table_update", "3") @@ -109,20 +136,26 @@ def test_ip_change_update_dns_cache(cluster_with_dns_cache_update): cluster.restart_instance_with_ip_change(node3, "2001:3984:3989::1:8888") # Put some data to source node3 - node3.query("INSERT INTO test_table_update VALUES ('2018-10-01', 5), ('2018-10-02', 6), ('2018-10-03', 7)") + node3.query( + "INSERT INTO test_table_update VALUES ('2018-10-01', 5), ('2018-10-02', 6), ('2018-10-03', 7)" + ) # Check that data is placed on node3 assert node3.query("SELECT count(*) from test_table_update") == "6\n" curl_result = node4.exec_in_container(["bash", "-c", "curl -s 'node3:8123'"]) - assert curl_result == 'Ok.\n' + assert curl_result == "Ok.\n" cat_resolv = node4.exec_in_container(["bash", "-c", "cat /etc/resolv.conf"]) print(("RESOLV {}".format(cat_resolv))) - assert_eq_with_retry(node4, "SELECT * FROM remote('node3', 'system', 'one')", "0", sleep_time=0.5) + assert_eq_with_retry( + node4, "SELECT * FROM remote('node3', 'system', 'one')", "0", sleep_time=0.5 + ) # Because of DNS cache update, ip of node3 would be updated - 
assert_eq_with_retry(node4, "SELECT count(*) from test_table_update", "6", sleep_time=3) + assert_eq_with_retry( + node4, "SELECT count(*) from test_table_update", "6", sleep_time=3 + ) # Just to be sure check one more time node3.query("INSERT INTO test_table_update VALUES ('2018-10-01', 8)") @@ -131,39 +164,55 @@ def test_ip_change_update_dns_cache(cluster_with_dns_cache_update): def set_hosts(node, hosts): - new_content = '\\n'.join(['127.0.0.1 localhost', '::1 localhost'] + hosts) - node.exec_in_container(['bash', '-c', 'echo -e "{}" > /etc/hosts'.format(new_content)], privileged=True, - user='root') + new_content = "\\n".join(["127.0.0.1 localhost", "::1 localhost"] + hosts) + node.exec_in_container( + ["bash", "-c", 'echo -e "{}" > /etc/hosts'.format(new_content)], + privileged=True, + user="root", + ) def test_dns_cache_update(cluster_with_dns_cache_update): - set_hosts(node4, ['127.255.255.255 lost_host']) + set_hosts(node4, ["127.255.255.255 lost_host"]) with pytest.raises(QueryRuntimeException): node4.query("SELECT * FROM remote('lost_host', 'system', 'one')") node4.query( - "CREATE TABLE distributed_lost_host (dummy UInt8) ENGINE = Distributed(lost_host_cluster, 'system', 'one')") + "CREATE TABLE distributed_lost_host (dummy UInt8) ENGINE = Distributed(lost_host_cluster, 'system', 'one')" + ) with pytest.raises(QueryRuntimeException): node4.query("SELECT * FROM distributed_lost_host") - set_hosts(node4, ['127.0.0.1 lost_host']) + set_hosts(node4, ["127.0.0.1 lost_host"]) # Wait a bit until dns cache will be updated - assert_eq_with_retry(node4, "SELECT * FROM remote('lost_host', 'system', 'one')", "0") + assert_eq_with_retry( + node4, "SELECT * FROM remote('lost_host', 'system', 'one')", "0" + ) assert_eq_with_retry(node4, "SELECT * FROM distributed_lost_host", "0") - assert TSV(node4.query( - "SELECT DISTINCT host_name, host_address FROM system.clusters WHERE cluster='lost_host_cluster'")) == TSV( - "lost_host\t127.0.0.1\n") + assert TSV( + node4.query( + "SELECT DISTINCT host_name, host_address FROM system.clusters WHERE cluster='lost_host_cluster'" + ) + ) == TSV("lost_host\t127.0.0.1\n") assert TSV(node4.query("SELECT hostName()")) == TSV("node4") # Check SYSTEM DROP DNS CACHE on node5 and background cache update on node6 -node5 = cluster.add_instance('node5', main_configs=['configs/listen_host.xml', 'configs/dns_update_long.xml'], - user_configs=['configs/users_with_hostname.xml'], ipv6_address='2001:3984:3989::1:1115') -node6 = cluster.add_instance('node6', main_configs=['configs/listen_host.xml', 'configs/dns_update_short.xml'], - user_configs=['configs/users_with_hostname.xml'], ipv6_address='2001:3984:3989::1:1116') +node5 = cluster.add_instance( + "node5", + main_configs=["configs/listen_host.xml", "configs/dns_update_long.xml"], + user_configs=["configs/users_with_hostname.xml"], + ipv6_address="2001:3984:3989::1:1115", +) +node6 = cluster.add_instance( + "node6", + main_configs=["configs/listen_host.xml", "configs/dns_update_short.xml"], + user_configs=["configs/users_with_hostname.xml"], + ipv6_address="2001:3984:3989::1:1116", +) @pytest.mark.parametrize("node", [node5, node6]) @@ -171,16 +220,39 @@ def test_user_access_ip_change(cluster_with_dns_cache_update, node): node_name = node.name node_num = node.name[-1] # getaddrinfo(...) 
may hang for a log time without this options - node.exec_in_container(['bash', '-c', 'echo -e "options timeout:1\noptions attempts:2" >> /etc/resolv.conf'], - privileged=True, user='root') + node.exec_in_container( + [ + "bash", + "-c", + 'echo -e "options timeout:1\noptions attempts:2" >> /etc/resolv.conf', + ], + privileged=True, + user="root", + ) - assert node3.query("SELECT * FROM remote('{}', 'system', 'one')".format(node_name)) == "0\n" - assert node4.query("SELECT * FROM remote('{}', 'system', 'one')".format(node_name)) == "0\n" + assert ( + node3.query("SELECT * FROM remote('{}', 'system', 'one')".format(node_name)) + == "0\n" + ) + assert ( + node4.query("SELECT * FROM remote('{}', 'system', 'one')".format(node_name)) + == "0\n" + ) - set_hosts(node, ['127.255.255.255 node3', '2001:3984:3989::1:88{}4 unknown_host'.format(node_num)]) + set_hosts( + node, + [ + "127.255.255.255 node3", + "2001:3984:3989::1:88{}4 unknown_host".format(node_num), + ], + ) - cluster.restart_instance_with_ip_change(node3, "2001:3984:3989::1:88{}3".format(node_num)) - cluster.restart_instance_with_ip_change(node4, "2001:3984:3989::1:88{}4".format(node_num)) + cluster.restart_instance_with_ip_change( + node3, "2001:3984:3989::1:88{}3".format(node_num) + ) + cluster.restart_instance_with_ip_change( + node4, "2001:3984:3989::1:88{}4".format(node_num) + ) with pytest.raises(QueryRuntimeException): node3.query("SELECT * FROM remote('{}', 'system', 'one')".format(node_name)) @@ -190,13 +262,26 @@ def test_user_access_ip_change(cluster_with_dns_cache_update, node): set_hosts(node, []) retry_count = 60 - if node_name == 'node5': + if node_name == "node5": # client is not allowed to connect, so execute it directly in container to send query from localhost - node.exec_in_container(['bash', '-c', 'clickhouse client -q "SYSTEM DROP DNS CACHE"'], privileged=True, - user='root') + node.exec_in_container( + ["bash", "-c", 'clickhouse client -q "SYSTEM DROP DNS CACHE"'], + privileged=True, + user="root", + ) retry_count = 1 - assert_eq_with_retry(node3, "SELECT * FROM remote('{}', 'system', 'one')".format(node_name), "0", - retry_count=retry_count, sleep_time=1) - assert_eq_with_retry(node4, "SELECT * FROM remote('{}', 'system', 'one')".format(node_name), "0", - retry_count=retry_count, sleep_time=1) + assert_eq_with_retry( + node3, + "SELECT * FROM remote('{}', 'system', 'one')".format(node_name), + "0", + retry_count=retry_count, + sleep_time=1, + ) + assert_eq_with_retry( + node4, + "SELECT * FROM remote('{}', 'system', 'one')".format(node_name), + "0", + retry_count=retry_count, + sleep_time=1, + ) diff --git a/tests/integration/test_http_and_readonly/test.py b/tests/integration/test_http_and_readonly/test.py index 9929e34c9d2..1ce3345bf80 100644 --- a/tests/integration/test_http_and_readonly/test.py +++ b/tests/integration/test_http_and_readonly/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance("instance") @pytest.fixture(scope="module", autouse=True) @@ -17,6 +17,12 @@ def setup_nodes(): def test_http_get_is_readonly(): assert "Cannot execute query in readonly mode" in instance.http_query_and_get_error( - "CREATE TABLE xxx (a Date) ENGINE = MergeTree(a, a, 256)") - assert "Cannot modify 'readonly' setting in readonly mode" in instance.http_query_and_get_error( - "CREATE TABLE xxx (a Date) ENGINE = MergeTree(a, a, 256)", params={"readonly": 0}) + "CREATE TABLE xxx (a 
Date) ENGINE = MergeTree(a, a, 256)" + ) + assert ( + "Cannot modify 'readonly' setting in readonly mode" + in instance.http_query_and_get_error( + "CREATE TABLE xxx (a Date) ENGINE = MergeTree(a, a, 256)", + params={"readonly": 0}, + ) + ) diff --git a/tests/integration/test_http_handlers_config/test.py b/tests/integration/test_http_handlers_config/test.py index 01872a1d0c3..e73324ada8f 100644 --- a/tests/integration/test_http_handlers_config/test.py +++ b/tests/integration/test_http_handlers_config/test.py @@ -16,192 +16,485 @@ class SimpleCluster: def add_instance(self, name, config_dir): script_path = os.path.dirname(os.path.realpath(__file__)) - return self.cluster.add_instance(name, main_configs=[os.path.join(script_path, config_dir, 'config.xml')]) + return self.cluster.add_instance( + name, main_configs=[os.path.join(script_path, config_dir, "config.xml")] + ) def test_dynamic_query_handler(): with contextlib.closing( - SimpleCluster(ClickHouseCluster(__file__), "dynamic_handler", "test_dynamic_handler")) as cluster: - test_query = urllib.parse.quote_plus('SELECT * FROM system.settings WHERE name = \'max_threads\'') + SimpleCluster( + ClickHouseCluster(__file__), "dynamic_handler", "test_dynamic_handler" + ) + ) as cluster: + test_query = urllib.parse.quote_plus( + "SELECT * FROM system.settings WHERE name = 'max_threads'" + ) - assert 404 == cluster.instance.http_request('?max_threads=1', method='GET', headers={'XXX': 'xxx'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "?max_threads=1", method="GET", headers={"XXX": "xxx"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_dynamic_handler_get?max_threads=1', method='POST', - headers={'XXX': 'xxx'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_dynamic_handler_get?max_threads=1", + method="POST", + headers={"XXX": "xxx"}, + ).status_code + ) - assert 404 == cluster.instance.http_request('test_dynamic_handler_get?max_threads=1', method='GET', - headers={'XXX': 'bad'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_dynamic_handler_get?max_threads=1", + method="GET", + headers={"XXX": "bad"}, + ).status_code + ) - assert 400 == cluster.instance.http_request('test_dynamic_handler_get?max_threads=1', method='GET', - headers={'XXX': 'xxx'}).status_code + assert ( + 400 + == cluster.instance.http_request( + "test_dynamic_handler_get?max_threads=1", + method="GET", + headers={"XXX": "xxx"}, + ).status_code + ) - assert 200 == cluster.instance.http_request( - 'test_dynamic_handler_get?max_threads=1&get_dynamic_handler_query=' + test_query, - method='GET', headers={'XXX': 'xxx'}).status_code + assert ( + 200 + == cluster.instance.http_request( + "test_dynamic_handler_get?max_threads=1&get_dynamic_handler_query=" + + test_query, + method="GET", + headers={"XXX": "xxx"}, + ).status_code + ) def test_predefined_query_handler(): with contextlib.closing( - SimpleCluster(ClickHouseCluster(__file__), "predefined_handler", "test_predefined_handler")) as cluster: - assert 404 == cluster.instance.http_request('?max_threads=1', method='GET', headers={'XXX': 'xxx'}).status_code + SimpleCluster( + ClickHouseCluster(__file__), "predefined_handler", "test_predefined_handler" + ) + ) as cluster: + assert ( + 404 + == cluster.instance.http_request( + "?max_threads=1", method="GET", headers={"XXX": "xxx"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_predefined_handler_get?max_threads=1', method='GET', - headers={'XXX': 
'bad'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_predefined_handler_get?max_threads=1", + method="GET", + headers={"XXX": "bad"}, + ).status_code + ) - assert 404 == cluster.instance.http_request('test_predefined_handler_get?max_threads=1', method='POST', - headers={'XXX': 'xxx'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_predefined_handler_get?max_threads=1", + method="POST", + headers={"XXX": "xxx"}, + ).status_code + ) - assert 500 == cluster.instance.http_request('test_predefined_handler_get?max_threads=1', method='GET', - headers={'XXX': 'xxx'}).status_code + assert ( + 500 + == cluster.instance.http_request( + "test_predefined_handler_get?max_threads=1", + method="GET", + headers={"XXX": "xxx"}, + ).status_code + ) - assert b'max_threads\t1\n' == cluster.instance.http_request( - 'test_predefined_handler_get?max_threads=1&setting_name=max_threads', method='GET', - headers={'XXX': 'xxx'}).content + assert ( + b"max_threads\t1\n" + == cluster.instance.http_request( + "test_predefined_handler_get?max_threads=1&setting_name=max_threads", + method="GET", + headers={"XXX": "xxx"}, + ).content + ) - assert b'max_final_threads\t1\nmax_threads\t1\n' == cluster.instance.http_request( - 'query_param_with_url/max_threads?max_threads=1&max_final_threads=1', - headers={'XXX': 'max_final_threads'}).content + assert ( + b"max_final_threads\t1\nmax_threads\t1\n" + == cluster.instance.http_request( + "query_param_with_url/max_threads?max_threads=1&max_final_threads=1", + headers={"XXX": "max_final_threads"}, + ).content + ) def test_fixed_static_handler(): with contextlib.closing( - SimpleCluster(ClickHouseCluster(__file__), "static_handler", "test_static_handler")) as cluster: - assert 404 == cluster.instance.http_request('', method='GET', headers={'XXX': 'xxx'}).status_code + SimpleCluster( + ClickHouseCluster(__file__), "static_handler", "test_static_handler" + ) + ) as cluster: + assert ( + 404 + == cluster.instance.http_request( + "", method="GET", headers={"XXX": "xxx"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_get_fixed_static_handler', method='GET', - headers={'XXX': 'bad'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_get_fixed_static_handler", method="GET", headers={"XXX": "bad"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_get_fixed_static_handler', method='POST', - headers={'XXX': 'xxx'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_get_fixed_static_handler", method="POST", headers={"XXX": "xxx"} + ).status_code + ) - assert 402 == cluster.instance.http_request('test_get_fixed_static_handler', method='GET', - headers={'XXX': 'xxx'}).status_code - assert 'text/html; charset=UTF-8' == \ - cluster.instance.http_request('test_get_fixed_static_handler', method='GET', - headers={'XXX': 'xxx'}).headers['Content-Type'] - assert b'Test get static handler and fix content' == cluster.instance.http_request( - 'test_get_fixed_static_handler', method='GET', headers={'XXX': 'xxx'}).content + assert ( + 402 + == cluster.instance.http_request( + "test_get_fixed_static_handler", method="GET", headers={"XXX": "xxx"} + ).status_code + ) + assert ( + "text/html; charset=UTF-8" + == cluster.instance.http_request( + "test_get_fixed_static_handler", method="GET", headers={"XXX": "xxx"} + ).headers["Content-Type"] + ) + assert ( + b"Test get static handler and fix content" + == cluster.instance.http_request( + "test_get_fixed_static_handler", 
method="GET", headers={"XXX": "xxx"} + ).content + ) def test_config_static_handler(): with contextlib.closing( - SimpleCluster(ClickHouseCluster(__file__), "static_handler", "test_static_handler")) as cluster: - assert 404 == cluster.instance.http_request('', method='GET', headers={'XXX': 'xxx'}).status_code + SimpleCluster( + ClickHouseCluster(__file__), "static_handler", "test_static_handler" + ) + ) as cluster: + assert ( + 404 + == cluster.instance.http_request( + "", method="GET", headers={"XXX": "xxx"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_get_config_static_handler', method='GET', - headers={'XXX': 'bad'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_get_config_static_handler", method="GET", headers={"XXX": "bad"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_get_config_static_handler', method='POST', - headers={'XXX': 'xxx'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_get_config_static_handler", method="POST", headers={"XXX": "xxx"} + ).status_code + ) # check default status code - assert 200 == cluster.instance.http_request('test_get_config_static_handler', method='GET', - headers={'XXX': 'xxx'}).status_code - assert 'text/plain; charset=UTF-8' == \ - cluster.instance.http_request('test_get_config_static_handler', method='GET', - headers={'XXX': 'xxx'}).headers['Content-Type'] - assert b'Test get static handler and config content' == cluster.instance.http_request( - 'test_get_config_static_handler', method='GET', headers={'XXX': 'xxx'}).content + assert ( + 200 + == cluster.instance.http_request( + "test_get_config_static_handler", method="GET", headers={"XXX": "xxx"} + ).status_code + ) + assert ( + "text/plain; charset=UTF-8" + == cluster.instance.http_request( + "test_get_config_static_handler", method="GET", headers={"XXX": "xxx"} + ).headers["Content-Type"] + ) + assert ( + b"Test get static handler and config content" + == cluster.instance.http_request( + "test_get_config_static_handler", method="GET", headers={"XXX": "xxx"} + ).content + ) def test_absolute_path_static_handler(): with contextlib.closing( - SimpleCluster(ClickHouseCluster(__file__), "static_handler", "test_static_handler")) as cluster: + SimpleCluster( + ClickHouseCluster(__file__), "static_handler", "test_static_handler" + ) + ) as cluster: cluster.instance.exec_in_container( - ['bash', '-c', - 'echo "Absolute Path File" > /var/lib/clickhouse/user_files/absolute_path_file.html'], - privileged=True, user='root') + [ + "bash", + "-c", + 'echo "Absolute Path File" > /var/lib/clickhouse/user_files/absolute_path_file.html', + ], + privileged=True, + user="root", + ) - assert 404 == cluster.instance.http_request('', method='GET', headers={'XXX': 'xxx'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "", method="GET", headers={"XXX": "xxx"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_get_absolute_path_static_handler', method='GET', - headers={'XXX': 'bad'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_get_absolute_path_static_handler", + method="GET", + headers={"XXX": "bad"}, + ).status_code + ) - assert 404 == cluster.instance.http_request('test_get_absolute_path_static_handler', method='POST', - headers={'XXX': 'xxx'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_get_absolute_path_static_handler", + method="POST", + headers={"XXX": "xxx"}, + ).status_code + ) # check default status code - 
assert 200 == cluster.instance.http_request('test_get_absolute_path_static_handler', method='GET', - headers={'XXX': 'xxx'}).status_code - assert 'text/html; charset=UTF-8' == \ - cluster.instance.http_request('test_get_absolute_path_static_handler', method='GET', - headers={'XXX': 'xxx'}).headers['Content-Type'] - assert b'Absolute Path File\n' == cluster.instance.http_request( - 'test_get_absolute_path_static_handler', method='GET', headers={'XXX': 'xxx'}).content + assert ( + 200 + == cluster.instance.http_request( + "test_get_absolute_path_static_handler", + method="GET", + headers={"XXX": "xxx"}, + ).status_code + ) + assert ( + "text/html; charset=UTF-8" + == cluster.instance.http_request( + "test_get_absolute_path_static_handler", + method="GET", + headers={"XXX": "xxx"}, + ).headers["Content-Type"] + ) + assert ( + b"Absolute Path File\n" + == cluster.instance.http_request( + "test_get_absolute_path_static_handler", + method="GET", + headers={"XXX": "xxx"}, + ).content + ) def test_relative_path_static_handler(): with contextlib.closing( - SimpleCluster(ClickHouseCluster(__file__), "static_handler", "test_static_handler")) as cluster: + SimpleCluster( + ClickHouseCluster(__file__), "static_handler", "test_static_handler" + ) + ) as cluster: cluster.instance.exec_in_container( - ['bash', '-c', - 'echo "Relative Path File" > /var/lib/clickhouse/user_files/relative_path_file.html'], - privileged=True, user='root') + [ + "bash", + "-c", + 'echo "Relative Path File" > /var/lib/clickhouse/user_files/relative_path_file.html', + ], + privileged=True, + user="root", + ) - assert 404 == cluster.instance.http_request('', method='GET', headers={'XXX': 'xxx'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "", method="GET", headers={"XXX": "xxx"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_get_relative_path_static_handler', method='GET', - headers={'XXX': 'bad'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_get_relative_path_static_handler", + method="GET", + headers={"XXX": "bad"}, + ).status_code + ) - assert 404 == cluster.instance.http_request('test_get_relative_path_static_handler', method='POST', - headers={'XXX': 'xxx'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_get_relative_path_static_handler", + method="POST", + headers={"XXX": "xxx"}, + ).status_code + ) # check default status code - assert 200 == cluster.instance.http_request('test_get_relative_path_static_handler', method='GET', - headers={'XXX': 'xxx'}).status_code - assert 'text/html; charset=UTF-8' == \ - cluster.instance.http_request('test_get_relative_path_static_handler', method='GET', - headers={'XXX': 'xxx'}).headers['Content-Type'] - assert b'Relative Path File\n' == cluster.instance.http_request( - 'test_get_relative_path_static_handler', method='GET', headers={'XXX': 'xxx'}).content + assert ( + 200 + == cluster.instance.http_request( + "test_get_relative_path_static_handler", + method="GET", + headers={"XXX": "xxx"}, + ).status_code + ) + assert ( + "text/html; charset=UTF-8" + == cluster.instance.http_request( + "test_get_relative_path_static_handler", + method="GET", + headers={"XXX": "xxx"}, + ).headers["Content-Type"] + ) + assert ( + b"Relative Path File\n" + == cluster.instance.http_request( + "test_get_relative_path_static_handler", + method="GET", + headers={"XXX": "xxx"}, + ).content + ) def test_defaults_http_handlers(): with contextlib.closing( - SimpleCluster(ClickHouseCluster(__file__), 
"defaults_handlers", "test_defaults_handlers")) as cluster: - assert 200 == cluster.instance.http_request('', method='GET').status_code - assert b'Default server response' == cluster.instance.http_request('', method='GET').content + SimpleCluster( + ClickHouseCluster(__file__), "defaults_handlers", "test_defaults_handlers" + ) + ) as cluster: + assert 200 == cluster.instance.http_request("", method="GET").status_code + assert ( + b"Default server response" + == cluster.instance.http_request("", method="GET").content + ) - assert 200 == cluster.instance.http_request('ping', method='GET').status_code - assert b'Ok.\n' == cluster.instance.http_request('ping', method='GET').content + assert 200 == cluster.instance.http_request("ping", method="GET").status_code + assert b"Ok.\n" == cluster.instance.http_request("ping", method="GET").content - assert 200 == cluster.instance.http_request('replicas_status', method='get').status_code - assert b'Ok.\n' == cluster.instance.http_request('replicas_status', method='get').content + assert ( + 200 + == cluster.instance.http_request( + "replicas_status", method="get" + ).status_code + ) + assert ( + b"Ok.\n" + == cluster.instance.http_request("replicas_status", method="get").content + ) - assert 200 == cluster.instance.http_request('replicas_status?verbose=1', method='get').status_code - assert b'' == cluster.instance.http_request('replicas_status?verbose=1', method='get').content + assert ( + 200 + == cluster.instance.http_request( + "replicas_status?verbose=1", method="get" + ).status_code + ) + assert ( + b"" + == cluster.instance.http_request( + "replicas_status?verbose=1", method="get" + ).content + ) - assert 200 == cluster.instance.http_request('?query=SELECT+1', method='GET').status_code - assert b'1\n' == cluster.instance.http_request('?query=SELECT+1', method='GET').content + assert ( + 200 + == cluster.instance.http_request( + "?query=SELECT+1", method="GET" + ).status_code + ) + assert ( + b"1\n" + == cluster.instance.http_request("?query=SELECT+1", method="GET").content + ) def test_prometheus_handler(): with contextlib.closing( - SimpleCluster(ClickHouseCluster(__file__), "prometheus_handler", "test_prometheus_handler")) as cluster: - assert 404 == cluster.instance.http_request('', method='GET', headers={'XXX': 'xxx'}).status_code + SimpleCluster( + ClickHouseCluster(__file__), "prometheus_handler", "test_prometheus_handler" + ) + ) as cluster: + assert ( + 404 + == cluster.instance.http_request( + "", method="GET", headers={"XXX": "xxx"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_prometheus', method='GET', headers={'XXX': 'bad'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_prometheus", method="GET", headers={"XXX": "bad"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_prometheus', method='POST', - headers={'XXX': 'xxx'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_prometheus", method="POST", headers={"XXX": "xxx"} + ).status_code + ) - assert 200 == cluster.instance.http_request('test_prometheus', method='GET', headers={'XXX': 'xxx'}).status_code - assert b'ClickHouseProfileEvents_Query' in cluster.instance.http_request('test_prometheus', method='GET', - headers={'XXX': 'xxx'}).content + assert ( + 200 + == cluster.instance.http_request( + "test_prometheus", method="GET", headers={"XXX": "xxx"} + ).status_code + ) + assert ( + b"ClickHouseProfileEvents_Query" + in cluster.instance.http_request( + "test_prometheus", method="GET", 
headers={"XXX": "xxx"} + ).content + ) def test_replicas_status_handler(): - with contextlib.closing(SimpleCluster(ClickHouseCluster(__file__), "replicas_status_handler", - "test_replicas_status_handler")) as cluster: - assert 404 == cluster.instance.http_request('', method='GET', headers={'XXX': 'xxx'}).status_code + with contextlib.closing( + SimpleCluster( + ClickHouseCluster(__file__), + "replicas_status_handler", + "test_replicas_status_handler", + ) + ) as cluster: + assert ( + 404 + == cluster.instance.http_request( + "", method="GET", headers={"XXX": "xxx"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_replicas_status', method='GET', - headers={'XXX': 'bad'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_replicas_status", method="GET", headers={"XXX": "bad"} + ).status_code + ) - assert 404 == cluster.instance.http_request('test_replicas_status', method='POST', - headers={'XXX': 'xxx'}).status_code + assert ( + 404 + == cluster.instance.http_request( + "test_replicas_status", method="POST", headers={"XXX": "xxx"} + ).status_code + ) - assert 200 == cluster.instance.http_request('test_replicas_status', method='GET', - headers={'XXX': 'xxx'}).status_code - assert b'Ok.\n' == cluster.instance.http_request('test_replicas_status', method='GET', - headers={'XXX': 'xxx'}).content + assert ( + 200 + == cluster.instance.http_request( + "test_replicas_status", method="GET", headers={"XXX": "xxx"} + ).status_code + ) + assert ( + b"Ok.\n" + == cluster.instance.http_request( + "test_replicas_status", method="GET", headers={"XXX": "xxx"} + ).content + ) diff --git a/tests/integration/test_https_replication/test.py b/tests/integration/test_https_replication/test.py index 1008ce07ad3..4cf9f19b870 100644 --- a/tests/integration/test_https_replication/test.py +++ b/tests/integration/test_https_replication/test.py @@ -15,21 +15,40 @@ Both ssl_conf.xml and no_ssl_conf.xml have the same port def _fill_nodes(nodes, shard): for node in nodes: node.query( - ''' + """ CREATE DATABASE test; CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}', date, id, 8192); - '''.format(shard=shard, replica=node.name)) + """.format( + shard=shard, replica=node.name + ) + ) cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', - main_configs=['configs/remote_servers.xml', 'configs/ssl_conf.xml', "configs/server.crt", - "configs/server.key", "configs/dhparam.pem"], with_zookeeper=True) -node2 = cluster.add_instance('node2', - main_configs=['configs/remote_servers.xml', 'configs/ssl_conf.xml', "configs/server.crt", - "configs/server.key", "configs/dhparam.pem"], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + main_configs=[ + "configs/remote_servers.xml", + "configs/ssl_conf.xml", + "configs/server.crt", + "configs/server.key", + "configs/dhparam.pem", + ], + with_zookeeper=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=[ + "configs/remote_servers.xml", + "configs/ssl_conf.xml", + "configs/server.crt", + "configs/server.key", + "configs/dhparam.pem", + ], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -48,13 +67,13 @@ def both_https_cluster(): def test_both_https(both_https_cluster): node1.query("insert into test_table values ('2017-06-16', 111, 0)") - assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", '111') - assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", '111') + 
assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", "111") + assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", "111") node2.query("insert into test_table values ('2017-06-17', 222, 1)") - assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", '111\n222') - assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", '111\n222') + assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", "111\n222") + assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", "111\n222") def test_replication_after_partition(both_https_cluster): @@ -80,12 +99,20 @@ def test_replication_after_partition(both_https_cluster): cres.wait() ires.wait() - assert_eq_with_retry(node1, "SELECT count() FROM test_table", '100') - assert_eq_with_retry(node2, "SELECT count() FROM test_table", '100') + assert_eq_with_retry(node1, "SELECT count() FROM test_table", "100") + assert_eq_with_retry(node2, "SELECT count() FROM test_table", "100") -node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml', 'configs/no_ssl_conf.xml'], with_zookeeper=True) -node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/no_ssl_conf.xml'], with_zookeeper=True) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/remote_servers.xml", "configs/no_ssl_conf.xml"], + with_zookeeper=True, +) +node4 = cluster.add_instance( + "node4", + main_configs=["configs/remote_servers.xml", "configs/no_ssl_conf.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -104,22 +131,31 @@ def both_http_cluster(): def test_both_http(both_http_cluster): node3.query("insert into test_table values ('2017-06-16', 111, 0)") - assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", '111') - assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '111') + assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", "111") + assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", "111") node4.query("insert into test_table values ('2017-06-17', 222, 1)") - assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", '111\n222') - assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '111\n222') + assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", "111\n222") + assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", "111\n222") -node5 = cluster.add_instance('node5', - main_configs=['configs/remote_servers.xml', 'configs/ssl_conf.xml', "configs/server.crt", - "configs/server.key", "configs/dhparam.pem"], - with_zookeeper=True) -node6 = cluster.add_instance('node6', - main_configs=['configs/remote_servers.xml', 'configs/no_ssl_conf.xml'], - with_zookeeper=True) +node5 = cluster.add_instance( + "node5", + main_configs=[ + "configs/remote_servers.xml", + "configs/ssl_conf.xml", + "configs/server.crt", + "configs/server.key", + "configs/dhparam.pem", + ], + with_zookeeper=True, +) +node6 = cluster.add_instance( + "node6", + main_configs=["configs/remote_servers.xml", "configs/no_ssl_conf.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -138,10 +174,10 @@ def mixed_protocol_cluster(): def test_mixed_protocol(mixed_protocol_cluster): node5.query("insert into test_table values ('2017-06-16', 111, 0)") - assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", '111') - assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", '') + assert_eq_with_retry(node5, "SELECT id FROM 
test_table order by id", "111") + assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", "") node6.query("insert into test_table values ('2017-06-17', 222, 1)") - assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", '111') - assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", '222') + assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", "111") + assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", "222") diff --git a/tests/integration/test_inherit_multiple_profiles/test.py b/tests/integration/test_inherit_multiple_profiles/test.py index 658ccc3f51b..46f2868fe36 100644 --- a/tests/integration/test_inherit_multiple_profiles/test.py +++ b/tests/integration/test_inherit_multiple_profiles/test.py @@ -5,8 +5,9 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', - user_configs=['configs/combined_profile.xml']) +instance = cluster.add_instance( + "instance", user_configs=["configs/combined_profile.xml"] +) q = instance.query @@ -22,52 +23,63 @@ def started_cluster(): def test_combined_profile(started_cluster): - settings = q(''' + settings = q( + """ SELECT name, value FROM system.settings WHERE name IN ('max_insert_block_size', 'max_network_bytes', 'max_query_size', 'max_parallel_replicas', 'readonly') AND changed ORDER BY name -''', user='test_combined_profile') +""", + user="test_combined_profile", + ) - expected1 = '''\ + expected1 = """\ max_insert_block_size 654321 max_network_bytes 1234567890 max_parallel_replicas 2 max_query_size 400000000 -readonly 2''' +readonly 2""" assert TSV(settings) == TSV(expected1) with pytest.raises(QueryRuntimeException) as exc: - q(''' + q( + """ SET max_insert_block_size = 1000; - ''', user='test_combined_profile') + """, + user="test_combined_profile", + ) - assert ("max_insert_block_size shouldn't be less than 654320." in - str(exc.value)) + assert "max_insert_block_size shouldn't be less than 654320." in str(exc.value) with pytest.raises(QueryRuntimeException) as exc: - q(''' + q( + """ SET max_network_bytes = 2000000000; - ''', user='test_combined_profile') + """, + user="test_combined_profile", + ) - assert ("max_network_bytes shouldn't be greater than 1234567891." in - str(exc.value)) + assert "max_network_bytes shouldn't be greater than 1234567891." in str(exc.value) with pytest.raises(QueryRuntimeException) as exc: - q(''' + q( + """ SET max_parallel_replicas = 1000; - ''', user='test_combined_profile') + """, + user="test_combined_profile", + ) - assert ('max_parallel_replicas should not be changed.' in - str(exc.value)) + assert "max_parallel_replicas should not be changed." in str(exc.value) with pytest.raises(QueryRuntimeException) as exc: - q(''' + q( + """ SET max_memory_usage = 1000; - ''', user='test_combined_profile') + """, + user="test_combined_profile", + ) - assert ("max_memory_usage shouldn't be less than 300000000." in - str(exc.value)) + assert "max_memory_usage shouldn't be less than 300000000." 
in str(exc.value) diff --git a/tests/integration/test_input_format_parallel_parsing_memory_tracking/test.py b/tests/integration/test_input_format_parallel_parsing_memory_tracking/test.py index 1c686c7982e..2e4824a5a4f 100644 --- a/tests/integration/test_input_format_parallel_parsing_memory_tracking/test.py +++ b/tests/integration/test_input_format_parallel_parsing_memory_tracking/test.py @@ -8,13 +8,16 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', main_configs=[ - 'configs/conf.xml', - 'configs/asynchronous_metrics_update_period_s.xml', -]) +instance = cluster.add_instance( + "instance", + main_configs=[ + "configs/conf.xml", + "configs/asynchronous_metrics_update_period_s.xml", + ], +) -@pytest.fixture(scope='module', autouse=True) +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -26,11 +29,27 @@ def start_cluster(): # max_memory_usage_for_user cannot be used, since the memory for user accounted # correctly, only total is not (it is set via conf.xml) def test_memory_tracking_total(): - instance.query('CREATE TABLE null (row String) ENGINE=Null') - instance.exec_in_container(['bash', '-c', - 'clickhouse local -q "SELECT arrayStringConcat(arrayMap(x->toString(cityHash64(x)), range(1000)), \' \') from numbers(10000)" > data.json']) + instance.query("CREATE TABLE null (row String) ENGINE=Null") + instance.exec_in_container( + [ + "bash", + "-c", + "clickhouse local -q \"SELECT arrayStringConcat(arrayMap(x->toString(cityHash64(x)), range(1000)), ' ') from numbers(10000)\" > data.json", + ] + ) for it in range(0, 20): # the problem can be triggered only via HTTP, # since clickhouse-client parses the data by itself. - assert instance.exec_in_container(['curl', '--silent', '--show-error', '--data-binary', '@data.json', - 'http://127.1:8123/?query=INSERT%20INTO%20null%20FORMAT%20TSV']) == '', f'Failed on {it} iteration' + assert ( + instance.exec_in_container( + [ + "curl", + "--silent", + "--show-error", + "--data-binary", + "@data.json", + "http://127.1:8123/?query=INSERT%20INTO%20null%20FORMAT%20TSV", + ] + ) + == "" + ), f"Failed on {it} iteration" diff --git a/tests/integration/test_insert_distributed_async_extra_dirs/test.py b/tests/integration/test_insert_distributed_async_extra_dirs/test.py index 8365fce298d..b4421ba9590 100644 --- a/tests/integration/test_insert_distributed_async_extra_dirs/test.py +++ b/tests/integration/test_insert_distributed_async_extra_dirs/test.py @@ -8,9 +8,12 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/remote_servers.xml'], stay_alive=True) +node = cluster.add_instance( + "node", main_configs=["configs/remote_servers.xml"], stay_alive=True +) -@pytest.fixture(scope='module', autouse=True) + +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -18,9 +21,11 @@ def start_cluster(): finally: cluster.shutdown() + def test_insert_distributed_async_send_success(): - node.query('CREATE TABLE data (key Int, value String) Engine=Null()') - node.query(""" + node.query("CREATE TABLE data (key Int, value String) Engine=Null()") + node.query( + """ CREATE TABLE dist AS data Engine=Distributed( test_cluster, @@ -28,16 +33,53 @@ def test_insert_distributed_async_send_success(): data, key ) - """) + """ + ) - node.exec_in_container(['bash', '-c', 'mkdir 
/var/lib/clickhouse/data/default/dist/shard10000_replica10000']) - node.exec_in_container(['bash', '-c', 'touch /var/lib/clickhouse/data/default/dist/shard10000_replica10000/1.bin']) + node.exec_in_container( + [ + "bash", + "-c", + "mkdir /var/lib/clickhouse/data/default/dist/shard10000_replica10000", + ] + ) + node.exec_in_container( + [ + "bash", + "-c", + "touch /var/lib/clickhouse/data/default/dist/shard10000_replica10000/1.bin", + ] + ) - node.exec_in_container(['bash', '-c', 'mkdir /var/lib/clickhouse/data/default/dist/shard1_replica10000']) - node.exec_in_container(['bash', '-c', 'touch /var/lib/clickhouse/data/default/dist/shard1_replica10000/1.bin']) + node.exec_in_container( + [ + "bash", + "-c", + "mkdir /var/lib/clickhouse/data/default/dist/shard1_replica10000", + ] + ) + node.exec_in_container( + [ + "bash", + "-c", + "touch /var/lib/clickhouse/data/default/dist/shard1_replica10000/1.bin", + ] + ) - node.exec_in_container(['bash', '-c', 'mkdir /var/lib/clickhouse/data/default/dist/shard10000_replica1']) - node.exec_in_container(['bash', '-c', 'touch /var/lib/clickhouse/data/default/dist/shard10000_replica1/1.bin']) + node.exec_in_container( + [ + "bash", + "-c", + "mkdir /var/lib/clickhouse/data/default/dist/shard10000_replica1", + ] + ) + node.exec_in_container( + [ + "bash", + "-c", + "touch /var/lib/clickhouse/data/default/dist/shard10000_replica1/1.bin", + ] + ) # will check that clickhouse-server is alive node.restart_clickhouse() diff --git a/tests/integration/test_insert_distributed_async_send/test.py b/tests/integration/test_insert_distributed_async_send/test.py index a9bf9801f4c..80b9081d3f2 100644 --- a/tests/integration/test_insert_distributed_async_send/test.py +++ b/tests/integration/test_insert_distributed_async_send/test.py @@ -13,34 +13,57 @@ from helpers.client import QueryRuntimeException cluster = ClickHouseCluster(__file__) # n1 -- distributed_directory_monitor_batch_inserts=1 -n1 = cluster.add_instance('n1', main_configs=['configs/remote_servers.xml'], user_configs=['configs/users.d/batch.xml']) +n1 = cluster.add_instance( + "n1", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/users.d/batch.xml"], +) # n2 -- distributed_directory_monitor_batch_inserts=0 -n2 = cluster.add_instance('n2', main_configs=['configs/remote_servers.xml'], user_configs=['configs/users.d/no_batch.xml']) +n2 = cluster.add_instance( + "n2", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/users.d/no_batch.xml"], +) # n3 -- distributed_directory_monitor_batch_inserts=1/distributed_directory_monitor_split_batch_on_failure=1 -n3 = cluster.add_instance('n3', main_configs=['configs/remote_servers_split.xml'], user_configs=[ - 'configs/users.d/batch.xml', - 'configs/users.d/split.xml', -]) +n3 = cluster.add_instance( + "n3", + main_configs=["configs/remote_servers_split.xml"], + user_configs=[ + "configs/users.d/batch.xml", + "configs/users.d/split.xml", + ], +) # n4 -- distributed_directory_monitor_batch_inserts=0/distributed_directory_monitor_split_batch_on_failure=1 -n4 = cluster.add_instance('n4', main_configs=['configs/remote_servers_split.xml'], user_configs=[ - 'configs/users.d/no_batch.xml', - 'configs/users.d/split.xml', -]) +n4 = cluster.add_instance( + "n4", + main_configs=["configs/remote_servers_split.xml"], + user_configs=[ + "configs/users.d/no_batch.xml", + "configs/users.d/split.xml", + ], +) -batch_params = pytest.mark.parametrize('batch', [ - (1), - (0), -]) +batch_params = pytest.mark.parametrize( + "batch", + [ + (1), + 
(0), + ], +) -batch_and_split_params = pytest.mark.parametrize('batch,split', [ - (1, 0), - (0, 0), - (1, 1), - (0, 1), -]) +batch_and_split_params = pytest.mark.parametrize( + "batch,split", + [ + (1, 0), + (0, 0), + (1, 1), + (0, 1), + ], +) -@pytest.fixture(scope='module', autouse=True) + +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -51,8 +74,11 @@ def start_cluster(): def create_tables(remote_cluster_name): for _, instance in list(cluster.instances.items()): - instance.query('CREATE TABLE data (key Int, value String) Engine=MergeTree() ORDER BY key') - instance.query(f""" + instance.query( + "CREATE TABLE data (key Int, value String) Engine=MergeTree() ORDER BY key" + ) + instance.query( + f""" CREATE TABLE dist AS data Engine=Distributed( {remote_cluster_name}, @@ -60,26 +86,33 @@ def create_tables(remote_cluster_name): data, key ) - """) + """ + ) # only via SYSTEM FLUSH DISTRIBUTED - instance.query('SYSTEM STOP DISTRIBUTED SENDS dist') + instance.query("SYSTEM STOP DISTRIBUTED SENDS dist") + def drop_tables(): for _, instance in list(cluster.instances.items()): - instance.query('DROP TABLE IF EXISTS data') - instance.query('DROP TABLE IF EXISTS dist') + instance.query("DROP TABLE IF EXISTS data") + instance.query("DROP TABLE IF EXISTS dist") + # return amount of bytes of the 2.bin for n2 shard def insert_data(node): - node.query('INSERT INTO dist SELECT number, randomPrintableASCII(100) FROM numbers(10000)', settings={ - # do not do direct INSERT, always via SYSTEM FLUSH DISTRIBUTED - 'prefer_localhost_replica': 0, - }) + node.query( + "INSERT INTO dist SELECT number, randomPrintableASCII(100) FROM numbers(10000)", + settings={ + # do not do direct INSERT, always via SYSTEM FLUSH DISTRIBUTED + "prefer_localhost_replica": 0, + }, + ) path = get_path_to_dist_batch() - size = int(node.exec_in_container(['bash', '-c', f'wc -c < {path}'])) - assert size > 1<<16 + size = int(node.exec_in_container(["bash", "-c", f"wc -c < {path}"])) + assert size > 1 << 16 return size + def get_node(batch, split=None): if split: if batch: @@ -89,56 +122,65 @@ def get_node(batch, split=None): return n1 return n2 + def bootstrap(batch, split=None): drop_tables() - create_tables('insert_distributed_async_send_cluster_two_replicas') + create_tables("insert_distributed_async_send_cluster_two_replicas") return insert_data(get_node(batch, split)) -def get_path_to_dist_batch(file='2.bin'): + +def get_path_to_dist_batch(file="2.bin"): # There are: # - /var/lib/clickhouse/data/default/dist/shard1_replica1/1.bin # - /var/lib/clickhouse/data/default/dist/shard1_replica2/2.bin # # @return the file for the n2 shard - return f'/var/lib/clickhouse/data/default/dist/shard1_replica2/{file}' + return f"/var/lib/clickhouse/data/default/dist/shard1_replica2/{file}" + def check_dist_after_corruption(truncate, batch, split=None): node = get_node(batch, split) if batch: # In batch mode errors are ignored - node.query('SYSTEM FLUSH DISTRIBUTED dist') + node.query("SYSTEM FLUSH DISTRIBUTED dist") else: if truncate: - with pytest.raises(QueryRuntimeException, match="Cannot read all data. Bytes read:"): - node.query('SYSTEM FLUSH DISTRIBUTED dist') + with pytest.raises( + QueryRuntimeException, match="Cannot read all data. Bytes read:" + ): + node.query("SYSTEM FLUSH DISTRIBUTED dist") else: - with pytest.raises(QueryRuntimeException, match="Checksum doesn't match: corrupted data. 
Reference:"): - node.query('SYSTEM FLUSH DISTRIBUTED dist') + with pytest.raises( + QueryRuntimeException, + match="Checksum doesn't match: corrupted data. Reference:", + ): + node.query("SYSTEM FLUSH DISTRIBUTED dist") # send pending files # (since we have two nodes and corrupt file for only one of them) - node.query('SYSTEM FLUSH DISTRIBUTED dist') + node.query("SYSTEM FLUSH DISTRIBUTED dist") # but there is broken file - broken = get_path_to_dist_batch('broken') - node.exec_in_container(['bash', '-c', f'ls {broken}/2.bin']) + broken = get_path_to_dist_batch("broken") + node.exec_in_container(["bash", "-c", f"ls {broken}/2.bin"]) if split: - assert int(n3.query('SELECT count() FROM data')) == 10000 - assert int(n4.query('SELECT count() FROM data')) == 0 + assert int(n3.query("SELECT count() FROM data")) == 10000 + assert int(n4.query("SELECT count() FROM data")) == 0 else: - assert int(n1.query('SELECT count() FROM data')) == 10000 - assert int(n2.query('SELECT count() FROM data')) == 0 + assert int(n1.query("SELECT count() FROM data")) == 10000 + assert int(n2.query("SELECT count() FROM data")) == 0 @batch_params def test_insert_distributed_async_send_success(batch): bootstrap(batch) node = get_node(batch) - node.query('SYSTEM FLUSH DISTRIBUTED dist') - assert int(n1.query('SELECT count() FROM data')) == 10000 - assert int(n2.query('SELECT count() FROM data')) == 10000 + node.query("SYSTEM FLUSH DISTRIBUTED dist") + assert int(n1.query("SELECT count() FROM data")) == 10000 + assert int(n2.query("SELECT count() FROM data")) == 10000 + @batch_and_split_params def test_insert_distributed_async_send_truncated_1(batch, split): @@ -148,10 +190,13 @@ def test_insert_distributed_async_send_truncated_1(batch, split): new_size = size - 10 # we cannot use truncate, due to hardlinks - node.exec_in_container(['bash', '-c', f'mv {path} /tmp/bin && head -c {new_size} /tmp/bin > {path}']) + node.exec_in_container( + ["bash", "-c", f"mv {path} /tmp/bin && head -c {new_size} /tmp/bin > {path}"] + ) check_dist_after_corruption(True, batch, split) + @batch_params def test_insert_distributed_async_send_truncated_2(batch): bootstrap(batch) @@ -159,10 +204,13 @@ def test_insert_distributed_async_send_truncated_2(batch): node = get_node(batch) # we cannot use truncate, due to hardlinks - node.exec_in_container(['bash', '-c', f'mv {path} /tmp/bin && head -c 10000 /tmp/bin > {path}']) + node.exec_in_container( + ["bash", "-c", f"mv {path} /tmp/bin && head -c 10000 /tmp/bin > {path}"] + ) check_dist_after_corruption(True, batch) + # The difference from the test_insert_distributed_async_send_corrupted_small # is that small corruption will be seen only on local node @batch_params @@ -174,10 +222,17 @@ def test_insert_distributed_async_send_corrupted_big(batch): from_original_size = size - 8192 zeros_size = 8192 - node.exec_in_container(['bash', '-c', f'mv {path} /tmp/bin && head -c {from_original_size} /tmp/bin > {path} && head -c {zeros_size} /dev/zero >> {path}']) + node.exec_in_container( + [ + "bash", + "-c", + f"mv {path} /tmp/bin && head -c {from_original_size} /tmp/bin > {path} && head -c {zeros_size} /dev/zero >> {path}", + ] + ) check_dist_after_corruption(False, batch) + @batch_params def test_insert_distributed_async_send_corrupted_small(batch): size = bootstrap(batch) @@ -186,10 +241,17 @@ def test_insert_distributed_async_send_corrupted_small(batch): from_original_size = size - 60 zeros_size = 60 - node.exec_in_container(['bash', '-c', f'mv {path} /tmp/bin && head -c {from_original_size} /tmp/bin > 
{path} && head -c {zeros_size} /dev/zero >> {path}']) + node.exec_in_container( + [ + "bash", + "-c", + f"mv {path} /tmp/bin && head -c {from_original_size} /tmp/bin > {path} && head -c {zeros_size} /dev/zero >> {path}", + ] + ) check_dist_after_corruption(False, batch) + @batch_params def test_insert_distributed_async_send_different_header(batch): """ @@ -198,47 +260,66 @@ def test_insert_distributed_async_send_different_header(batch): """ drop_tables() - create_tables('insert_distributed_async_send_cluster_two_shards') + create_tables("insert_distributed_async_send_cluster_two_shards") node = get_node(batch) - node.query("INSERT INTO dist VALUES (0, 'f')", settings={ - 'prefer_localhost_replica': 0, - }) - node.query('ALTER TABLE dist MODIFY COLUMN value UInt64') - node.query("INSERT INTO dist VALUES (2, 1)", settings={ - 'prefer_localhost_replica': 0, - }) + node.query( + "INSERT INTO dist VALUES (0, 'f')", + settings={ + "prefer_localhost_replica": 0, + }, + ) + node.query("ALTER TABLE dist MODIFY COLUMN value UInt64") + node.query( + "INSERT INTO dist VALUES (2, 1)", + settings={ + "prefer_localhost_replica": 0, + }, + ) - n1.query('ALTER TABLE data MODIFY COLUMN value UInt64', settings={ - 'mutations_sync': 1, - }) + n1.query( + "ALTER TABLE data MODIFY COLUMN value UInt64", + settings={ + "mutations_sync": 1, + }, + ) if batch: # but only one batch will be sent, and first is with UInt64 column, so # one rows inserted, and for string ('f') exception will be throw. - with pytest.raises(QueryRuntimeException, match=r"DB::Exception: Cannot parse string 'f' as UInt64: syntax error at begin of string"): - node.query('SYSTEM FLUSH DISTRIBUTED dist') - assert int(n1.query('SELECT count() FROM data')) == 1 + with pytest.raises( + QueryRuntimeException, + match=r"DB::Exception: Cannot parse string 'f' as UInt64: syntax error at begin of string", + ): + node.query("SYSTEM FLUSH DISTRIBUTED dist") + assert int(n1.query("SELECT count() FROM data")) == 1 # but once underlying column String, implicit conversion will do the # thing, and insert left batch. - n1.query(""" + n1.query( + """ DROP TABLE data SYNC; CREATE TABLE data (key Int, value String) Engine=MergeTree() ORDER BY key; - """) - node.query('SYSTEM FLUSH DISTRIBUTED dist') - assert int(n1.query('SELECT count() FROM data')) == 1 + """ + ) + node.query("SYSTEM FLUSH DISTRIBUTED dist") + assert int(n1.query("SELECT count() FROM data")) == 1 else: # first send with String ('f'), so zero rows will be inserted - with pytest.raises(QueryRuntimeException, match=r"DB::Exception: Cannot parse string 'f' as UInt64: syntax error at begin of string"): - node.query('SYSTEM FLUSH DISTRIBUTED dist') - assert int(n1.query('SELECT count() FROM data')) == 0 + with pytest.raises( + QueryRuntimeException, + match=r"DB::Exception: Cannot parse string 'f' as UInt64: syntax error at begin of string", + ): + node.query("SYSTEM FLUSH DISTRIBUTED dist") + assert int(n1.query("SELECT count() FROM data")) == 0 # but once underlying column String, implicit conversion will do the # thing, and insert 2 rows (mixed UInt64 and String). 
- n1.query(""" + n1.query( + """ DROP TABLE data SYNC; CREATE TABLE data (key Int, value String) Engine=MergeTree() ORDER BY key; - """) - node.query('SYSTEM FLUSH DISTRIBUTED dist') - assert int(n1.query('SELECT count() FROM data')) == 2 + """ + ) + node.query("SYSTEM FLUSH DISTRIBUTED dist") + assert int(n1.query("SELECT count() FROM data")) == 2 - assert int(n2.query('SELECT count() FROM data')) == 0 + assert int(n2.query("SELECT count() FROM data")) == 0 diff --git a/tests/integration/test_insert_distributed_load_balancing/test.py b/tests/integration/test_insert_distributed_load_balancing/test.py index 29cc953280f..5a17a6d5770 100644 --- a/tests/integration/test_insert_distributed_load_balancing/test.py +++ b/tests/integration/test_insert_distributed_load_balancing/test.py @@ -8,16 +8,19 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -n1 = cluster.add_instance('n1', main_configs=['configs/remote_servers.xml']) -n2 = cluster.add_instance('n2', main_configs=['configs/remote_servers.xml']) +n1 = cluster.add_instance("n1", main_configs=["configs/remote_servers.xml"]) +n2 = cluster.add_instance("n2", main_configs=["configs/remote_servers.xml"]) -params = pytest.mark.parametrize('cluster,q', [ - ('internal_replication', 0), - ('no_internal_replication', 1), -]) +params = pytest.mark.parametrize( + "cluster,q", + [ + ("internal_replication", 0), + ("no_internal_replication", 1), + ], +) -@pytest.fixture(scope='module', autouse=True) +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -27,13 +30,14 @@ def start_cluster(): def create_tables(cluster): - n1.query('DROP TABLE IF EXISTS data') - n2.query('DROP TABLE IF EXISTS data') - n1.query('DROP TABLE IF EXISTS dist') + n1.query("DROP TABLE IF EXISTS data") + n2.query("DROP TABLE IF EXISTS data") + n1.query("DROP TABLE IF EXISTS dist") - n1.query('CREATE TABLE data (key Int) Engine=Memory()') - n2.query('CREATE TABLE data (key Int) Engine=Memory()') - n1.query(""" + n1.query("CREATE TABLE data (key Int) Engine=Memory()") + n2.query("CREATE TABLE data (key Int) Engine=Memory()") + n1.query( + """ CREATE TABLE dist AS data Engine=Distributed( {cluster}, @@ -41,45 +45,53 @@ def create_tables(cluster): data, rand() ) - """.format(cluster=cluster)) + """.format( + cluster=cluster + ) + ) def insert_data(cluster, **settings): create_tables(cluster) - n1.query('INSERT INTO dist SELECT * FROM numbers(10)', settings=settings) - n1.query('SYSTEM FLUSH DISTRIBUTED dist') + n1.query("INSERT INTO dist SELECT * FROM numbers(10)", settings=settings) + n1.query("SYSTEM FLUSH DISTRIBUTED dist") @params def test_prefer_localhost_replica_1(cluster, q): insert_data(cluster) - assert int(n1.query('SELECT count() FROM data')) == 10 - assert int(n2.query('SELECT count() FROM data')) == 10 * q + assert int(n1.query("SELECT count() FROM data")) == 10 + assert int(n2.query("SELECT count() FROM data")) == 10 * q @params def test_prefer_localhost_replica_1_load_balancing_in_order(cluster, q): - insert_data(cluster, load_balancing='in_order') - assert int(n1.query('SELECT count() FROM data')) == 10 - assert int(n2.query('SELECT count() FROM data')) == 10 * q + insert_data(cluster, load_balancing="in_order") + assert int(n1.query("SELECT count() FROM data")) == 10 + assert int(n2.query("SELECT count() FROM data")) == 10 * q @params def test_prefer_localhost_replica_0_load_balancing_nearest_hostname(cluster, q): - insert_data(cluster, load_balancing='nearest_hostname', prefer_localhost_replica=0) 
- assert int(n1.query('SELECT count() FROM data')) == 10 - assert int(n2.query('SELECT count() FROM data')) == 10 * q + insert_data(cluster, load_balancing="nearest_hostname", prefer_localhost_replica=0) + assert int(n1.query("SELECT count() FROM data")) == 10 + assert int(n2.query("SELECT count() FROM data")) == 10 * q @params def test_prefer_localhost_replica_0_load_balancing_in_order(cluster, q): - insert_data(cluster, load_balancing='in_order', prefer_localhost_replica=0) - assert int(n1.query('SELECT count() FROM data')) == 10 * q - assert int(n2.query('SELECT count() FROM data')) == 10 + insert_data(cluster, load_balancing="in_order", prefer_localhost_replica=0) + assert int(n1.query("SELECT count() FROM data")) == 10 * q + assert int(n2.query("SELECT count() FROM data")) == 10 @params def test_prefer_localhost_replica_0_load_balancing_in_order_sync(cluster, q): - insert_data(cluster, load_balancing='in_order', prefer_localhost_replica=0, insert_distributed_sync=1) - assert int(n1.query('SELECT count() FROM data')) == 10 * q - assert int(n2.query('SELECT count() FROM data')) == 10 + insert_data( + cluster, + load_balancing="in_order", + prefer_localhost_replica=0, + insert_distributed_sync=1, + ) + assert int(n1.query("SELECT count() FROM data")) == 10 * q + assert int(n2.query("SELECT count() FROM data")) == 10 diff --git a/tests/integration/test_insert_into_distributed/test.py b/tests/integration/test_insert_into_distributed/test.py index e2af59903bd..b8d94d2a043 100644 --- a/tests/integration/test_insert_into_distributed/test.py +++ b/tests/integration/test_insert_into_distributed/test.py @@ -8,21 +8,35 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance_test_reconnect = cluster.add_instance('instance_test_reconnect', main_configs=['configs/remote_servers.xml']) +instance_test_reconnect = cluster.add_instance( + "instance_test_reconnect", main_configs=["configs/remote_servers.xml"] +) instance_test_inserts_batching = cluster.add_instance( - 'instance_test_inserts_batching', - main_configs=['configs/remote_servers.xml'], user_configs=['configs/enable_distributed_inserts_batching.xml']) -remote = cluster.add_instance('remote', main_configs=['configs/forbid_background_merges.xml']) + "instance_test_inserts_batching", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/enable_distributed_inserts_batching.xml"], +) +remote = cluster.add_instance( + "remote", main_configs=["configs/forbid_background_merges.xml"] +) instance_test_inserts_local_cluster = cluster.add_instance( - 'instance_test_inserts_local_cluster', - main_configs=['configs/remote_servers.xml']) + "instance_test_inserts_local_cluster", main_configs=["configs/remote_servers.xml"] +) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) -shard1 = cluster.add_instance('shard1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -shard2 = cluster.add_instance('shard2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +shard1 = cluster.add_instance( + "shard1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +shard2 = cluster.add_instance( + "shard2", 
main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -31,70 +45,107 @@ def started_cluster(): cluster.start() remote.query("CREATE TABLE local1 (x UInt32) ENGINE = Log") - instance_test_reconnect.query(''' + instance_test_reconnect.query( + """ CREATE TABLE distributed (x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local1') -''') +""" + ) - remote.query("CREATE TABLE local2 (d Date, x UInt32, s String) ENGINE = MergeTree(d, x, 8192)") - instance_test_inserts_batching.query(''' + remote.query( + "CREATE TABLE local2 (d Date, x UInt32, s String) ENGINE = MergeTree(d, x, 8192)" + ) + instance_test_inserts_batching.query( + """ CREATE TABLE distributed (d Date, x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local2') SETTINGS fsync_after_insert=1, fsync_directories=1 -''') +""" + ) instance_test_inserts_local_cluster.query( - "CREATE TABLE local (d Date, x UInt32) ENGINE = MergeTree(d, x, 8192)") - instance_test_inserts_local_cluster.query(''' + "CREATE TABLE local (d Date, x UInt32) ENGINE = MergeTree(d, x, 8192)" + ) + instance_test_inserts_local_cluster.query( + """ CREATE TABLE distributed_on_local (d Date, x UInt32) ENGINE = Distributed('test_local_cluster', 'default', 'local') -''') +""" + ) - node1.query(''' + node1.query( + """ CREATE TABLE replicated(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/replicated', 'node1', date, id, 8192) -''') - node2.query(''' +""" + ) + node2.query( + """ CREATE TABLE replicated(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/replicated', 'node2', date, id, 8192) -''') +""" + ) - node1.query(''' + node1.query( + """ CREATE TABLE distributed (date Date, id UInt32) ENGINE = Distributed('shard_with_local_replica', 'default', 'replicated') -''') +""" + ) - node2.query(''' + node2.query( + """ CREATE TABLE distributed (date Date, id UInt32) ENGINE = Distributed('shard_with_local_replica', 'default', 'replicated') -''') +""" + ) - shard1.query(''' -CREATE TABLE low_cardinality (d Date, x UInt32, s LowCardinality(String)) ENGINE = MergeTree(d, x, 8192)''') + shard1.query( + """ +CREATE TABLE low_cardinality (d Date, x UInt32, s LowCardinality(String)) ENGINE = MergeTree(d, x, 8192)""" + ) - shard2.query(''' -CREATE TABLE low_cardinality (d Date, x UInt32, s LowCardinality(String)) ENGINE = MergeTree(d, x, 8192)''') + shard2.query( + """ +CREATE TABLE low_cardinality (d Date, x UInt32, s LowCardinality(String)) ENGINE = MergeTree(d, x, 8192)""" + ) - shard1.query(''' -CREATE TABLE low_cardinality_all (d Date, x UInt32, s LowCardinality(String)) ENGINE = Distributed('shard_with_low_cardinality', 'default', 'low_cardinality', sipHash64(s))''') + shard1.query( + """ +CREATE TABLE low_cardinality_all (d Date, x UInt32, s LowCardinality(String)) ENGINE = Distributed('shard_with_low_cardinality', 'default', 'low_cardinality', sipHash64(s))""" + ) - node1.query(''' -CREATE TABLE table_function (n UInt8, s String) ENGINE = MergeTree() ORDER BY n''') + node1.query( + """ +CREATE TABLE table_function (n UInt8, s String) ENGINE = MergeTree() ORDER BY n""" + ) - node2.query(''' -CREATE TABLE table_function (n UInt8, s String) ENGINE = MergeTree() ORDER BY n''') + node2.query( + """ +CREATE TABLE table_function (n UInt8, s String) ENGINE = MergeTree() ORDER BY n""" + ) - node1.query(''' + node1.query( + """ CREATE TABLE distributed_one_replica_internal_replication (date Date, id UInt32) ENGINE = 
Distributed('shard_with_local_replica_internal_replication', 'default', 'single_replicated') -''') +""" + ) - node2.query(''' + node2.query( + """ CREATE TABLE distributed_one_replica_internal_replication (date Date, id UInt32) ENGINE = Distributed('shard_with_local_replica_internal_replication', 'default', 'single_replicated') -''') +""" + ) - node1.query(''' + node1.query( + """ CREATE TABLE distributed_one_replica_no_internal_replication (date Date, id UInt32) ENGINE = Distributed('shard_with_local_replica', 'default', 'single_replicated') -''') +""" + ) - node2.query(''' + node2.query( + """ CREATE TABLE distributed_one_replica_no_internal_replication (date Date, id UInt32) ENGINE = Distributed('shard_with_local_replica', 'default', 'single_replicated') -''') +""" + ) - node2.query(''' + node2.query( + """ CREATE TABLE single_replicated(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/single_replicated', 'node2', date, id, 8192) -''') +""" + ) yield cluster @@ -109,10 +160,12 @@ def test_reconnect(started_cluster): # Open a connection for insertion. instance.query("INSERT INTO distributed VALUES (1)") time.sleep(1) - assert remote.query("SELECT count(*) FROM local1").strip() == '1' + assert remote.query("SELECT count(*) FROM local1").strip() == "1" # Now break the connection. - pm.partition_instances(instance, remote, action='REJECT --reject-with tcp-reset') + pm.partition_instances( + instance, remote, action="REJECT --reject-with tcp-reset" + ) instance.query("INSERT INTO distributed VALUES (2)") time.sleep(1) @@ -123,7 +176,7 @@ def test_reconnect(started_cluster): instance.query("INSERT INTO distributed VALUES (3)") time.sleep(5) - assert remote.query("SELECT count(*) FROM local1").strip() == '3' + assert remote.query("SELECT count(*) FROM local1").strip() == "3" def test_inserts_batching(started_cluster): @@ -139,10 +192,14 @@ def test_inserts_batching(started_cluster): instance.query("INSERT INTO distributed(x, d) VALUES (2, '2000-01-01')") for i in range(3, 7): - instance.query("INSERT INTO distributed(d, x) VALUES ('2000-01-01', {})".format(i)) + instance.query( + "INSERT INTO distributed(d, x) VALUES ('2000-01-01', {})".format(i) + ) for i in range(7, 9): - instance.query("INSERT INTO distributed(x, d) VALUES ({}, '2000-01-01')".format(i)) + instance.query( + "INSERT INTO distributed(x, d) VALUES ({}, '2000-01-01')".format(i) + ) instance.query("INSERT INTO distributed(d, x) VALUES ('2000-01-01', 9)") @@ -150,12 +207,16 @@ def test_inserts_batching(started_cluster): instance.query("ALTER TABLE distributed ADD COLUMN s String") for i in range(10, 13): - instance.query("INSERT INTO distributed(d, x) VALUES ('2000-01-01', {})".format(i)) + instance.query( + "INSERT INTO distributed(d, x) VALUES ('2000-01-01', {})".format(i) + ) instance.query("SYSTEM FLUSH DISTRIBUTED distributed") time.sleep(1.0) - result = remote.query("SELECT _part, groupArray(x) FROM local2 GROUP BY _part ORDER BY _part") + result = remote.query( + "SELECT _part, groupArray(x) FROM local2 GROUP BY _part ORDER BY _part" + ) # Explanation: as merges are turned off on remote instance, active parts in local2 table correspond 1-to-1 # to inserted blocks. @@ -166,13 +227,13 @@ def test_inserts_batching(started_cluster): # 3. Full batch of inserts before ALTER. # 4. Full batch of inserts after ALTER (that have different block structure). # 5. What was left to insert with the column structure before ALTER. 
- expected = '''\ + expected = """\ 20000101_20000101_1_1_0\t[1] 20000101_20000101_2_2_0\t[2,3,4] 20000101_20000101_3_3_0\t[5,6,7] 20000101_20000101_4_4_0\t[10,11,12] 20000101_20000101_5_5_0\t[8,9] -''' +""" assert TSV(result) == TSV(expected) @@ -180,11 +241,13 @@ def test_inserts_local(started_cluster): instance = instance_test_inserts_local_cluster instance.query("INSERT INTO distributed_on_local VALUES ('2000-01-01', 1)") time.sleep(0.5) - assert instance.query("SELECT count(*) FROM local").strip() == '1' + assert instance.query("SELECT count(*) FROM local").strip() == "1" def test_inserts_single_replica_local_internal_replication(started_cluster): - with pytest.raises(QueryRuntimeException, match="Table default.single_replicated doesn't exist"): + with pytest.raises( + QueryRuntimeException, match="Table default.single_replicated doesn't exist" + ): node1.query( "INSERT INTO distributed_one_replica_internal_replication VALUES ('2000-01-01', 1)", settings={ @@ -194,7 +257,7 @@ def test_inserts_single_replica_local_internal_replication(started_cluster): "load_balancing": "first_or_random", }, ) - assert node2.query("SELECT count(*) FROM single_replicated").strip() == '0' + assert node2.query("SELECT count(*) FROM single_replicated").strip() == "0" def test_inserts_single_replica_internal_replication(started_cluster): @@ -208,14 +271,16 @@ def test_inserts_single_replica_internal_replication(started_cluster): "load_balancing": "first_or_random", }, ) - assert node2.query("SELECT count(*) FROM single_replicated").strip() == '1' + assert node2.query("SELECT count(*) FROM single_replicated").strip() == "1" finally: node2.query("TRUNCATE TABLE single_replicated") def test_inserts_single_replica_no_internal_replication(started_cluster): try: - with pytest.raises(QueryRuntimeException, match="Table default.single_replicated doesn't exist"): + with pytest.raises( + QueryRuntimeException, match="Table default.single_replicated doesn't exist" + ): node1.query( "INSERT INTO distributed_one_replica_no_internal_replication VALUES ('2000-01-01', 1)", settings={ @@ -223,7 +288,7 @@ def test_inserts_single_replica_no_internal_replication(started_cluster): "prefer_localhost_replica": "0", }, ) - assert node2.query("SELECT count(*) FROM single_replicated").strip() == '1' + assert node2.query("SELECT count(*) FROM single_replicated").strip() == "1" finally: node2.query("TRUNCATE TABLE single_replicated") @@ -235,22 +300,22 @@ def test_prefer_localhost_replica(started_cluster): node2.query("INSERT INTO distributed VALUES (toDate('2017-06-17'), 22)") time.sleep(1.0) - expected_distributed = '''\ + expected_distributed = """\ 2017-06-17\t11 2017-06-17\t22 -''' +""" - expected_from_node2 = '''\ + expected_from_node2 = """\ 2017-06-17\t11 2017-06-17\t22 2017-06-17\t44 -''' +""" - expected_from_node1 = '''\ + expected_from_node1 = """\ 2017-06-17\t11 2017-06-17\t22 2017-06-17\t33 -''' +""" assert TSV(node1.query(test_query)) == TSV(expected_distributed) assert TSV(node2.query(test_query)) == TSV(expected_distributed) @@ -270,19 +335,30 @@ def test_prefer_localhost_replica(started_cluster): assert TSV(node2.query(test_query)) == TSV(expected_from_node2) # Now query is sent to node1, as it higher in order - assert TSV(node2.query(test_query + " SETTINGS load_balancing='in_order', prefer_localhost_replica=0")) == TSV( - expected_from_node1) + assert TSV( + node2.query( + test_query + + " SETTINGS load_balancing='in_order', prefer_localhost_replica=0" + ) + ) == TSV(expected_from_node1) def 
test_inserts_low_cardinality(started_cluster): instance = shard1 - instance.query("INSERT INTO low_cardinality_all (d,x,s) VALUES ('2018-11-12',1,'123')") + instance.query( + "INSERT INTO low_cardinality_all (d,x,s) VALUES ('2018-11-12',1,'123')" + ) time.sleep(0.5) - assert instance.query("SELECT count(*) FROM low_cardinality_all").strip() == '1' + assert instance.query("SELECT count(*) FROM low_cardinality_all").strip() == "1" def test_table_function(started_cluster): node1.query( - "insert into table function cluster('shard_with_local_replica', 'default', 'table_function') select number, concat('str_', toString(number)) from numbers(100000)") - assert node1.query( - "select count() from cluster('shard_with_local_replica', 'default', 'table_function')").rstrip() == '100000' + "insert into table function cluster('shard_with_local_replica', 'default', 'table_function') select number, concat('str_', toString(number)) from numbers(100000)" + ) + assert ( + node1.query( + "select count() from cluster('shard_with_local_replica', 'default', 'table_function')" + ).rstrip() + == "100000" + ) diff --git a/tests/integration/test_insert_into_distributed_sync_async/test.py b/tests/integration/test_insert_into_distributed_sync_async/test.py index 1f479003b99..e0c454feee6 100755 --- a/tests/integration/test_insert_into_distributed_sync_async/test.py +++ b/tests/integration/test_insert_into_distributed_sync_async/test.py @@ -11,8 +11,8 @@ from helpers.client import QueryRuntimeException, QueryTimeoutExceedException cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml']) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml']) +node1 = cluster.add_instance("node1", main_configs=["configs/remote_servers.xml"]) +node2 = cluster.add_instance("node2", main_configs=["configs/remote_servers.xml"]) @pytest.fixture(scope="module") @@ -21,13 +21,17 @@ def started_cluster(): cluster.start() for node in (node1, node2): - node.query(''' + node.query( + """ CREATE TABLE local_table(date Date, val UInt64) ENGINE = MergeTree(date, (date, val), 8192); -''') +""" + ) - node1.query(''' + node1.query( + """ CREATE TABLE distributed_table(date Date, val UInt64) ENGINE = Distributed(test_cluster, default, local_table) -''') +""" + ) yield cluster @@ -36,41 +40,61 @@ CREATE TABLE distributed_table(date Date, val UInt64) ENGINE = Distributed(test_ def test_insertion_sync(started_cluster): - node1.query('''SET insert_distributed_sync = 1, insert_distributed_timeout = 0; - INSERT INTO distributed_table SELECT today() as date, number as val FROM system.numbers LIMIT 10000''') + node1.query( + """SET insert_distributed_sync = 1, insert_distributed_timeout = 0; + INSERT INTO distributed_table SELECT today() as date, number as val FROM system.numbers LIMIT 10000""" + ) - assert node2.query("SELECT count() FROM local_table").rstrip() == '10000' + assert node2.query("SELECT count() FROM local_table").rstrip() == "10000" - node1.query(''' + node1.query( + """ SET insert_distributed_sync = 1, insert_distributed_timeout = 1; - INSERT INTO distributed_table SELECT today() - 1 as date, number as val FROM system.numbers LIMIT 10000''') + INSERT INTO distributed_table SELECT today() - 1 as date, number as val FROM system.numbers LIMIT 10000""" + ) - assert node2.query("SELECT count() FROM local_table").rstrip() == '20000' + assert node2.query("SELECT count() FROM local_table").rstrip() == "20000" # Insert with explicitly specified columns. 
- node1.query(''' + node1.query( + """ SET insert_distributed_sync = 1, insert_distributed_timeout = 1; - INSERT INTO distributed_table(date, val) VALUES ('2000-01-01', 100500)''') + INSERT INTO distributed_table(date, val) VALUES ('2000-01-01', 100500)""" + ) # Insert with columns specified in different order. - node1.query(''' + node1.query( + """ SET insert_distributed_sync = 1, insert_distributed_timeout = 1; - INSERT INTO distributed_table(val, date) VALUES (100500, '2000-01-01')''') + INSERT INTO distributed_table(val, date) VALUES (100500, '2000-01-01')""" + ) # Insert with an incomplete list of columns. - node1.query(''' + node1.query( + """ SET insert_distributed_sync = 1, insert_distributed_timeout = 1; - INSERT INTO distributed_table(val) VALUES (100500)''') + INSERT INTO distributed_table(val) VALUES (100500)""" + ) - expected = TSV(''' + expected = TSV( + """ 1970-01-01 100500 2000-01-01 100500 -2000-01-01 100500''') - assert TSV(node2.query('SELECT date, val FROM local_table WHERE val = 100500 ORDER BY date')) == expected +2000-01-01 100500""" + ) + assert ( + TSV( + node2.query( + "SELECT date, val FROM local_table WHERE val = 100500 ORDER BY date" + ) + ) + == expected + ) node1.query("TRUNCATE TABLE local_table SYNC") node2.query("TRUNCATE TABLE local_table SYNC") + """ def test_insertion_sync_fails_on_error(started_cluster): with PartitionManager() as pm: @@ -84,41 +108,53 @@ def test_insertion_sync_fails_on_error(started_cluster): def test_insertion_sync_fails_with_timeout(started_cluster): with pytest.raises(QueryRuntimeException): - node1.query(''' + node1.query( + """ SET insert_distributed_sync = 1, insert_distributed_timeout = 1; - INSERT INTO distributed_table SELECT today() as date, number as val FROM system.numbers''') + INSERT INTO distributed_table SELECT today() as date, number as val FROM system.numbers""" + ) def test_insertion_without_sync_ignores_timeout(started_cluster): with pytest.raises(QueryTimeoutExceedException): - node1.query(''' + node1.query( + """ SET insert_distributed_sync = 0, insert_distributed_timeout = 1; - INSERT INTO distributed_table SELECT today() as date, number as val FROM system.numbers''', timeout=1.5) + INSERT INTO distributed_table SELECT today() as date, number as val FROM system.numbers""", + timeout=1.5, + ) def test_insertion_sync_with_disabled_timeout(started_cluster): with pytest.raises(QueryTimeoutExceedException): - node1.query(''' + node1.query( + """ SET insert_distributed_sync = 1, insert_distributed_timeout = 0; - INSERT INTO distributed_table SELECT today() as date, number as val FROM system.numbers''', timeout=1) + INSERT INTO distributed_table SELECT today() as date, number as val FROM system.numbers""", + timeout=1, + ) def test_async_inserts_into_local_shard(started_cluster): - node1.query('''CREATE TABLE shard_local (i Int64) ENGINE = Memory''') + node1.query("""CREATE TABLE shard_local (i Int64) ENGINE = Memory""") node1.query( - '''CREATE TABLE shard_distributed (i Int64) ENGINE = Distributed(local_shard_with_internal_replication, default, shard_local)''') - node1.query('''INSERT INTO shard_distributed VALUES (1)''', settings={"insert_distributed_sync": 0}) + """CREATE TABLE shard_distributed (i Int64) ENGINE = Distributed(local_shard_with_internal_replication, default, shard_local)""" + ) + node1.query( + """INSERT INTO shard_distributed VALUES (1)""", + settings={"insert_distributed_sync": 0}, + ) - assert TSV(node1.query('''SELECT count() FROM shard_distributed''')) == TSV("1\n") - node1.query('''DETACH 
TABLE shard_distributed''') - node1.query('''ATTACH TABLE shard_distributed''') - assert TSV(node1.query('''SELECT count() FROM shard_distributed''')) == TSV("1\n") + assert TSV(node1.query("""SELECT count() FROM shard_distributed""")) == TSV("1\n") + node1.query("""DETACH TABLE shard_distributed""") + node1.query("""ATTACH TABLE shard_distributed""") + assert TSV(node1.query("""SELECT count() FROM shard_distributed""")) == TSV("1\n") - node1.query('''DROP TABLE shard_distributed''') - node1.query('''DROP TABLE shard_local''') + node1.query("""DROP TABLE shard_distributed""") + node1.query("""DROP TABLE shard_local""") -if __name__ == '__main__': +if __name__ == "__main__": with contextmanager(started_cluster)() as cluster: for name, instance in list(cluster.instances.items()): print(name, instance.ip_address) diff --git a/tests/integration/test_insert_into_distributed_through_materialized_view/test.py b/tests/integration/test_insert_into_distributed_through_materialized_view/test.py index 32edb6829c8..7c2ce9f05f2 100644 --- a/tests/integration/test_insert_into_distributed_through_materialized_view/test.py +++ b/tests/integration/test_insert_into_distributed_through_materialized_view/test.py @@ -7,15 +7,21 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance_test_reconnect = cluster.add_instance('instance_test_reconnect', main_configs=['configs/remote_servers.xml']) +instance_test_reconnect = cluster.add_instance( + "instance_test_reconnect", main_configs=["configs/remote_servers.xml"] +) instance_test_inserts_batching = cluster.add_instance( - 'instance_test_inserts_batching', - main_configs=['configs/remote_servers.xml'], user_configs=['configs/enable_distributed_inserts_batching.xml']) -remote = cluster.add_instance('remote', main_configs=['configs/forbid_background_merges.xml']) + "instance_test_inserts_batching", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/enable_distributed_inserts_batching.xml"], +) +remote = cluster.add_instance( + "remote", main_configs=["configs/forbid_background_merges.xml"] +) instance_test_inserts_local_cluster = cluster.add_instance( - 'instance_test_inserts_local_cluster', - main_configs=['configs/remote_servers.xml']) + "instance_test_inserts_local_cluster", main_configs=["configs/remote_servers.xml"] +) @pytest.fixture(scope="module") @@ -25,29 +31,47 @@ def started_cluster(): remote.query("CREATE TABLE local1 (x UInt32) ENGINE = Log") - instance_test_reconnect.query(''' -CREATE TABLE distributed (x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local1') -''') - instance_test_reconnect.query("CREATE TABLE local1_source (x UInt32) ENGINE = Memory") instance_test_reconnect.query( - "CREATE MATERIALIZED VIEW local1_view to distributed AS SELECT x FROM local1_source") + """ +CREATE TABLE distributed (x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local1') +""" + ) + instance_test_reconnect.query( + "CREATE TABLE local1_source (x UInt32) ENGINE = Memory" + ) + instance_test_reconnect.query( + "CREATE MATERIALIZED VIEW local1_view to distributed AS SELECT x FROM local1_source" + ) - remote.query("CREATE TABLE local2 (d Date, x UInt32, s String) ENGINE = MergeTree(d, x, 8192)") - instance_test_inserts_batching.query(''' -CREATE TABLE distributed (d Date, x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local2') -''') - instance_test_inserts_batching.query("CREATE TABLE local2_source (d Date, x UInt32) ENGINE = Log") + remote.query( + "CREATE TABLE local2 (d Date, x 
UInt32, s String) ENGINE = MergeTree(d, x, 8192)" + ) instance_test_inserts_batching.query( - "CREATE MATERIALIZED VIEW local2_view to distributed AS SELECT d,x FROM local2_source") + """ +CREATE TABLE distributed (d Date, x UInt32) ENGINE = Distributed('test_cluster', 'default', 'local2') +""" + ) + instance_test_inserts_batching.query( + "CREATE TABLE local2_source (d Date, x UInt32) ENGINE = Log" + ) + instance_test_inserts_batching.query( + "CREATE MATERIALIZED VIEW local2_view to distributed AS SELECT d,x FROM local2_source" + ) - instance_test_inserts_local_cluster.query("CREATE TABLE local_source (d Date, x UInt32) ENGINE = Memory") instance_test_inserts_local_cluster.query( - "CREATE MATERIALIZED VIEW local_view to distributed_on_local AS SELECT d,x FROM local_source") + "CREATE TABLE local_source (d Date, x UInt32) ENGINE = Memory" + ) instance_test_inserts_local_cluster.query( - "CREATE TABLE local (d Date, x UInt32) ENGINE = MergeTree(d, x, 8192)") - instance_test_inserts_local_cluster.query(''' + "CREATE MATERIALIZED VIEW local_view to distributed_on_local AS SELECT d,x FROM local_source" + ) + instance_test_inserts_local_cluster.query( + "CREATE TABLE local (d Date, x UInt32) ENGINE = MergeTree(d, x, 8192)" + ) + instance_test_inserts_local_cluster.query( + """ CREATE TABLE distributed_on_local (d Date, x UInt32) ENGINE = Distributed('test_local_cluster', 'default', 'local') -''') +""" + ) yield cluster @@ -62,10 +86,12 @@ def test_reconnect(started_cluster): # Open a connection for insertion. instance.query("INSERT INTO local1_source VALUES (1)") time.sleep(1) - assert remote.query("SELECT count(*) FROM local1").strip() == '1' + assert remote.query("SELECT count(*) FROM local1").strip() == "1" # Now break the connection. - pm.partition_instances(instance, remote, action='REJECT --reject-with tcp-reset') + pm.partition_instances( + instance, remote, action="REJECT --reject-with tcp-reset" + ) instance.query("INSERT INTO local1_source VALUES (2)") time.sleep(1) @@ -77,7 +103,7 @@ def test_reconnect(started_cluster): instance.query("INSERT INTO local1_source VALUES (3)") time.sleep(1) - assert remote.query("SELECT count(*) FROM local1").strip() == '3' + assert remote.query("SELECT count(*) FROM local1").strip() == "3" @pytest.mark.skip(reason="Flapping test") @@ -94,10 +120,14 @@ def test_inserts_batching(started_cluster): instance.query("INSERT INTO local2_source(x, d) VALUES (2, '2000-01-01')") for i in range(3, 7): - instance.query("INSERT INTO local2_source(d, x) VALUES ('2000-01-01', {})".format(i)) + instance.query( + "INSERT INTO local2_source(d, x) VALUES ('2000-01-01', {})".format(i) + ) for i in range(7, 9): - instance.query("INSERT INTO local2_source(x, d) VALUES ({}, '2000-01-01')".format(i)) + instance.query( + "INSERT INTO local2_source(x, d) VALUES ({}, '2000-01-01')".format(i) + ) instance.query("INSERT INTO local2_source(d, x) VALUES ('2000-01-01', 9)") @@ -107,15 +137,23 @@ def test_inserts_batching(started_cluster): # Memory Engine doesn't support ALTER so we just DROP/CREATE everything instance.query("DROP TABLE local2_source") - instance.query("CREATE TABLE local2_source (d Date, x UInt32, s String) ENGINE = Memory") - instance.query("CREATE MATERIALIZED VIEW local2_view to distributed AS SELECT d,x,s FROM local2_source") + instance.query( + "CREATE TABLE local2_source (d Date, x UInt32, s String) ENGINE = Memory" + ) + instance.query( + "CREATE MATERIALIZED VIEW local2_view to distributed AS SELECT d,x,s FROM local2_source" + ) for i in range(10, 13): - 
instance.query("INSERT INTO local2_source(d, x) VALUES ('2000-01-01', {})".format(i)) + instance.query( + "INSERT INTO local2_source(d, x) VALUES ('2000-01-01', {})".format(i) + ) time.sleep(1.0) - result = remote.query("SELECT _part, groupArray(x) FROM local2 GROUP BY _part ORDER BY _part") + result = remote.query( + "SELECT _part, groupArray(x) FROM local2 GROUP BY _part ORDER BY _part" + ) # Explanation: as merges are turned off on remote instance, active parts in local2 table correspond 1-to-1 # to inserted blocks. @@ -126,13 +164,13 @@ def test_inserts_batching(started_cluster): # 3. Full batch of inserts regardless order of columns thanks to the view. # 4. Full batch of inserts after ALTER (that have different block structure). # 5. What was left to insert before ALTER. - expected = '''\ + expected = """\ 20000101_20000101_1_1_0 [1] 20000101_20000101_2_2_0 [2,3,4] 20000101_20000101_3_3_0 [5,6,7] 20000101_20000101_4_4_0 [10,11,12] 20000101_20000101_5_5_0 [8,9] -''' +""" assert TSV(result) == TSV(expected) @@ -140,4 +178,4 @@ def test_inserts_local(started_cluster): instance = instance_test_inserts_local_cluster instance.query("INSERT INTO local_source VALUES ('2000-01-01', 1)") time.sleep(0.5) - assert instance.query("SELECT count(*) FROM local").strip() == '1' + assert instance.query("SELECT count(*) FROM local").strip() == "1" diff --git a/tests/integration/test_jbod_balancer/test.py b/tests/integration/test_jbod_balancer/test.py index ef0308cc658..3807d6e1cea 100644 --- a/tests/integration/test_jbod_balancer/test.py +++ b/tests/integration/test_jbod_balancer/test.py @@ -14,7 +14,9 @@ cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", - main_configs=["configs/config.d/storage_configuration.xml",], + main_configs=[ + "configs/config.d/storage_configuration.xml", + ], with_zookeeper=True, stay_alive=True, tmpfs=["/jbod1:size=100M", "/jbod2:size=100M", "/jbod3:size=100M"], diff --git a/tests/integration/test_jbod_ha/test.py b/tests/integration/test_jbod_ha/test.py index 0a8631ff207..3dec61985b1 100644 --- a/tests/integration/test_jbod_ha/test.py +++ b/tests/integration/test_jbod_ha/test.py @@ -14,7 +14,9 @@ cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", - main_configs=["configs/config.d/storage_configuration.xml",], + main_configs=[ + "configs/config.d/storage_configuration.xml", + ], with_zookeeper=True, stay_alive=True, tmpfs=["/jbod1:size=100M", "/jbod2:size=100M", "/jbod3:size=100M"], diff --git a/tests/integration/test_jdbc_bridge/test.py b/tests/integration/test_jdbc_bridge/test.py index b5304c4cb10..0e41cc8c8b7 100644 --- a/tests/integration/test_jdbc_bridge/test.py +++ b/tests/integration/test_jdbc_bridge/test.py @@ -8,75 +8,108 @@ from helpers.test_tools import TSV from string import Template cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance("instance", main_configs=["configs/jdbc_bridge.xml"], with_jdbc_bridge=True) +instance = cluster.add_instance( + "instance", main_configs=["configs/jdbc_bridge.xml"], with_jdbc_bridge=True +) datasource = "self" records = 1000 + @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - instance.query(''' + instance.query( + """ CREATE DATABASE test; CREATE TABLE test.ClickHouseTable(Num UInt32, Str String, Desc Nullable(String)) engine = Memory; INSERT INTO test.ClickHouseTable(Num, Str) SELECT number, toString(number) FROM system.numbers LIMIT {}; - '''.format(records)) + """.format( + records + ) + ) while True: datasources = 
instance.query("select * from jdbc('', 'show datasources')") - if 'self' in datasources: - logging.debug(f"JDBC Driver self datasource initialized.\n{datasources}") + if "self" in datasources: + logging.debug( + f"JDBC Driver self datasource initialized.\n{datasources}" + ) break else: - logging.debug(f"Waiting JDBC Driver to initialize 'self' datasource.\n{datasources}") + logging.debug( + f"Waiting JDBC Driver to initialize 'self' datasource.\n{datasources}" + ) yield cluster finally: cluster.shutdown() + def test_jdbc_query(started_cluster): """Test simple query with inline schema and query parameters""" expected = "{}\t{}".format(datasource, records) - actual = instance.query(''' + actual = instance.query( + """ SELECT * FROM jdbc( '{}?datasource_column&fetch_size=1', 'rows UInt32', 'SELECT count(1) AS rows FROM test.ClickHouseTable' ) - '''.format(datasource)) - assert TSV(actual) == TSV(expected), "expecting {} but got {}".format(expected, actual) + """.format( + datasource + ) + ) + assert TSV(actual) == TSV(expected), "expecting {} but got {}".format( + expected, actual + ) + def test_jdbc_distributed_query(started_cluster): """Test distributed query involving both JDBC table function and ClickHouse table""" - actual = instance.query(''' + actual = instance.query( + """ SELECT a.Num + 1 FROM jdbc('{0}', 'SELECT * FROM test.ClickHouseTable') a INNER JOIN jdbc('{0}', 'num UInt32', 'SELECT {1} - 1 AS num') b on a.Num = b.num INNER JOIN test.ClickHouseTable c on b.num = c.Num - '''.format(datasource, records)) + """.format( + datasource, records + ) + ) assert int(actual) == records, "expecting {} but got {}".format(records, actual) + def test_jdbc_insert(started_cluster): """Test insert query using JDBC table function""" - instance.query('DROP TABLE IF EXISTS test.test_insert') - instance.query(''' + instance.query("DROP TABLE IF EXISTS test.test_insert") + instance.query( + """ CREATE TABLE test.test_insert ENGINE = Memory AS SELECT * FROM test.ClickHouseTable; SELECT * FROM jdbc('{0}?mutation', 'INSERT INTO test.test_insert VALUES({1}, ''{1}'', ''{1}'')'); - '''.format(datasource, records)) + """.format( + datasource, records + ) + ) expected = records actual = instance.query( - "SELECT Desc FROM jdbc('{}', 'SELECT * FROM test.test_insert WHERE Num = {}')".format(datasource, records)) + "SELECT Desc FROM jdbc('{}', 'SELECT * FROM test.test_insert WHERE Num = {}')".format( + datasource, records + ) + ) assert int(actual) == expected, "expecting {} but got {}".format(records, actual) + def test_jdbc_update(started_cluster): """Test update query using JDBC table function""" secrets = str(uuid.uuid1()) - instance.query('DROP TABLE IF EXISTS test.test_update') - instance.query(''' + instance.query("DROP TABLE IF EXISTS test.test_update") + instance.query( + """ CREATE TABLE test.test_update ENGINE = Memory AS SELECT * FROM test.ClickHouseTable; SELECT * @@ -84,18 +117,29 @@ def test_jdbc_update(started_cluster): '{}?mutation', 'SET mutations_sync = 1; ALTER TABLE test.test_update UPDATE Str=''{}'' WHERE Num = {} - 1;' ) - '''.format(datasource, secrets, records)) + """.format( + datasource, secrets, records + ) + ) - actual = instance.query(''' + actual = instance.query( + """ SELECT Str FROM jdbc('{}', 'SELECT * FROM test.test_update WHERE Num = {} - 1') - '''.format(datasource, records)) - assert TSV(actual) == TSV(secrets), "expecting {} but got {}".format(secrets, actual) + """.format( + datasource, records + ) + ) + assert TSV(actual) == TSV(secrets), "expecting {} but got 
{}".format( + secrets, actual + ) + def test_jdbc_delete(started_cluster): """Test delete query using JDBC table function""" - instance.query('DROP TABLE IF EXISTS test.test_delete') - instance.query(''' + instance.query("DROP TABLE IF EXISTS test.test_delete") + instance.query( + """ CREATE TABLE test.test_delete ENGINE = Memory AS SELECT * FROM test.ClickHouseTable; SELECT * @@ -103,20 +147,31 @@ def test_jdbc_delete(started_cluster): '{}?mutation', 'SET mutations_sync = 1; ALTER TABLE test.test_delete DELETE WHERE Num < {} - 1;' ) - '''.format(datasource, records)) + """.format( + datasource, records + ) + ) expected = records - 1 actual = instance.query( - "SELECT Str FROM jdbc('{}', 'SELECT * FROM test.test_delete')".format(datasource, records)) + "SELECT Str FROM jdbc('{}', 'SELECT * FROM test.test_delete')".format( + datasource, records + ) + ) assert int(actual) == expected, "expecting {} but got {}".format(expected, actual) + def test_jdbc_table_engine(started_cluster): """Test query against a JDBC table""" - instance.query('DROP TABLE IF EXISTS test.jdbc_table') - actual = instance.query(''' + instance.query("DROP TABLE IF EXISTS test.jdbc_table") + actual = instance.query( + """ CREATE TABLE test.jdbc_table(Str String) ENGINE = JDBC('{}', 'test', 'ClickHouseTable'); SELECT count(1) FROM test.jdbc_table; - '''.format(datasource)) + """.format( + datasource + ) + ) assert int(actual) == records, "expecting {} but got {}".format(records, actual) diff --git a/tests/integration/test_jemalloc_percpu_arena/test.py b/tests/integration/test_jemalloc_percpu_arena/test.py index 6a4522c1b76..80d8e2ae36a 100755 --- a/tests/integration/test_jemalloc_percpu_arena/test.py +++ b/tests/integration/test_jemalloc_percpu_arena/test.py @@ -13,42 +13,54 @@ CPU_ID = 4 def run_command_in_container(cmd, *args): # /clickhouse is mounted by integration tests runner - alternative_binary = os.getenv('CLICKHOUSE_BINARY', '/clickhouse') + alternative_binary = os.getenv("CLICKHOUSE_BINARY", "/clickhouse") if alternative_binary: - args+=( - '--volume', f'{alternative_binary}:/usr/bin/clickhouse', + args += ( + "--volume", + f"{alternative_binary}:/usr/bin/clickhouse", ) - return subprocess.check_output(['docker', 'run', '--rm', - *args, - 'ubuntu:20.04', - 'sh', '-c', cmd, - ]) + return subprocess.check_output( + [ + "docker", + "run", + "--rm", + *args, + "ubuntu:20.04", + "sh", + "-c", + cmd, + ] + ) def run_with_cpu_limit(cmd, *args): with NamedTemporaryFile() as online_cpu: # NOTE: this is not the number of CPUs, but specific CPU ID - online_cpu.write(f'{CPU_ID}'.encode()) + online_cpu.write(f"{CPU_ID}".encode()) online_cpu.flush() # replace /sys/devices/system/cpu/online to full _SC_NPROCESSORS_ONLN # like LXD/LXC from [1] does. 
# # [1]: https://github.com/ClickHouse/ClickHouse/issues/32806 - args+=( - '--volume', f'{online_cpu.name}:/sys/devices/system/cpu/online', + args += ( + "--volume", + f"{online_cpu.name}:/sys/devices/system/cpu/online", ) return run_command_in_container(cmd, *args) def skip_if_jemalloc_disabled(): - output = run_command_in_container("""clickhouse local -q " + output = run_command_in_container( + """clickhouse local -q " SELECT value FROM system.build_options WHERE name = 'USE_JEMALLOC'" - """).strip() - if output != b'ON' and output != b'1': - pytest.skip(f'Compiled w/o jemalloc (USE_JEMALLOC={output})') + """ + ).strip() + if output != b"ON" and output != b"1": + pytest.skip(f"Compiled w/o jemalloc (USE_JEMALLOC={output})") + # Ensure that clickhouse works even when number of online CPUs # (_SC_NPROCESSORS_ONLN) is smaller then available (_SC_NPROCESSORS_CONF). @@ -59,29 +71,37 @@ def test_jemalloc_percpu_arena(): assert multiprocessing.cpu_count() > CPU_ID - online_cpus = int(run_with_cpu_limit('getconf _NPROCESSORS_ONLN')) + online_cpus = int(run_with_cpu_limit("getconf _NPROCESSORS_ONLN")) assert online_cpus == 1, online_cpus - all_cpus = int(run_with_cpu_limit('getconf _NPROCESSORS_CONF')) + all_cpus = int(run_with_cpu_limit("getconf _NPROCESSORS_CONF")) assert all_cpus == multiprocessing.cpu_count(), all_cpus # implicitly disable percpu arena - result = run_with_cpu_limit('clickhouse local -q "select 1"', + result = run_with_cpu_limit( + 'clickhouse local -q "select 1"', # NOTE: explicitly disable, since it is enabled by default in debug build # (and even though debug builds are not in CI let's state this). - '--env', 'MALLOC_CONF=abort_conf:false') + "--env", + "MALLOC_CONF=abort_conf:false", + ) assert int(result) == int(1), result # should fail because of abort_conf:true with pytest.raises(subprocess.CalledProcessError): - run_with_cpu_limit('clickhouse local -q "select 1"', - '--env', 'MALLOC_CONF=abort_conf:true') + run_with_cpu_limit( + 'clickhouse local -q "select 1"', "--env", "MALLOC_CONF=abort_conf:true" + ) # should not fail even with abort_conf:true, due to explicit narenas # NOTE: abort:false to make it compatible with debug build - run_with_cpu_limit('clickhouse local -q "select 1"', - '--env', f'MALLOC_CONF=abort_conf:true,abort:false,narenas:{all_cpus}') + run_with_cpu_limit( + 'clickhouse local -q "select 1"', + "--env", + f"MALLOC_CONF=abort_conf:true,abort:false,narenas:{all_cpus}", + ) + # For manual run. 
-if __name__ == '__main__': +if __name__ == "__main__": test_jemalloc_percpu_arena() diff --git a/tests/integration/test_join_set_family_s3/test.py b/tests/integration/test_join_set_family_s3/test.py index 9454acf1541..b09d5735628 100644 --- a/tests/integration/test_join_set_family_s3/test.py +++ b/tests/integration/test_join_set_family_s3/test.py @@ -9,9 +9,12 @@ from helpers.cluster import ClickHouseCluster def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", - main_configs=["configs/minio.xml", "configs/ssl.xml"], - with_minio=True, stay_alive=True) + cluster.add_instance( + "node", + main_configs=["configs/minio.xml", "configs/ssl.xml"], + with_minio=True, + stay_alive=True, + ) logging.info("Starting cluster...") cluster.start() @@ -22,7 +25,7 @@ def cluster(): cluster.shutdown() -def assert_objects_count(cluster, objects_count, path='data/'): +def assert_objects_count(cluster, objects_count, path="data/"): minio = cluster.minio_client s3_objects = list(minio.list_objects(cluster.minio_bucket, path)) if objects_count != len(s3_objects): @@ -41,22 +44,42 @@ def test_set_s3(cluster): node.query("INSERT INTO TABLE testLocalSet VALUES (1)") node.query("INSERT INTO TABLE testS3Set VALUES (1)") - assert node.query("SELECT number in testLocalSet, number in testS3Set FROM system.numbers LIMIT 3") == "0\t0\n1\t1\n0\t0\n" + assert ( + node.query( + "SELECT number in testLocalSet, number in testS3Set FROM system.numbers LIMIT 3" + ) + == "0\t0\n1\t1\n0\t0\n" + ) assert_objects_count(cluster, 1) node.query("INSERT INTO TABLE testLocalSet VALUES (2)") node.query("INSERT INTO TABLE testS3Set VALUES (2)") - assert node.query("SELECT number in testLocalSet, number in testS3Set FROM system.numbers LIMIT 3") == "0\t0\n1\t1\n1\t1\n" + assert ( + node.query( + "SELECT number in testLocalSet, number in testS3Set FROM system.numbers LIMIT 3" + ) + == "0\t0\n1\t1\n1\t1\n" + ) assert_objects_count(cluster, 2) node.restart_clickhouse() - assert node.query("SELECT number in testLocalSet, number in testS3Set FROM system.numbers LIMIT 3") == "0\t0\n1\t1\n1\t1\n" + assert ( + node.query( + "SELECT number in testLocalSet, number in testS3Set FROM system.numbers LIMIT 3" + ) + == "0\t0\n1\t1\n1\t1\n" + ) node.query("TRUNCATE TABLE testLocalSet") node.query("TRUNCATE TABLE testS3Set") - assert node.query("SELECT number in testLocalSet, number in testS3Set FROM system.numbers LIMIT 3") == "0\t0\n0\t0\n0\t0\n" + assert ( + node.query( + "SELECT number in testLocalSet, number in testS3Set FROM system.numbers LIMIT 3" + ) + == "0\t0\n0\t0\n0\t0\n" + ) assert_objects_count(cluster, 0) node.query("DROP TABLE testLocalSet") @@ -66,28 +89,52 @@ def test_set_s3(cluster): def test_join_s3(cluster): node = cluster.instances["node"] - node.query("CREATE TABLE testLocalJoin(`id` UInt64, `val` String) ENGINE = Join(ANY, LEFT, id)") - node.query("CREATE TABLE testS3Join(`id` UInt64, `val` String) ENGINE = Join(ANY, LEFT, id) SETTINGS disk='s3'") + node.query( + "CREATE TABLE testLocalJoin(`id` UInt64, `val` String) ENGINE = Join(ANY, LEFT, id)" + ) + node.query( + "CREATE TABLE testS3Join(`id` UInt64, `val` String) ENGINE = Join(ANY, LEFT, id) SETTINGS disk='s3'" + ) node.query("INSERT INTO testLocalJoin VALUES (1, 'a')") node.query("INSERT INTO testS3Join VALUES (1, 'a')") - assert node.query("SELECT joinGet('testLocalJoin', 'val', number) as local, joinGet('testS3Join', 'val', number) as s3 FROM system.numbers LIMIT 3") == "\t\na\ta\n\t\n" + assert ( + node.query( + "SELECT 
joinGet('testLocalJoin', 'val', number) as local, joinGet('testS3Join', 'val', number) as s3 FROM system.numbers LIMIT 3" + ) + == "\t\na\ta\n\t\n" + ) assert_objects_count(cluster, 1) node.query("INSERT INTO testLocalJoin VALUES (2, 'b')") node.query("INSERT INTO testS3Join VALUES (2, 'b')") - assert node.query("SELECT joinGet('testLocalJoin', 'val', number) as local, joinGet('testS3Join', 'val', number) as s3 FROM system.numbers LIMIT 3") == "\t\na\ta\nb\tb\n" + assert ( + node.query( + "SELECT joinGet('testLocalJoin', 'val', number) as local, joinGet('testS3Join', 'val', number) as s3 FROM system.numbers LIMIT 3" + ) + == "\t\na\ta\nb\tb\n" + ) assert_objects_count(cluster, 2) node.restart_clickhouse() - assert node.query("SELECT joinGet('testLocalJoin', 'val', number) as local, joinGet('testS3Join', 'val', number) as s3 FROM system.numbers LIMIT 3") == "\t\na\ta\nb\tb\n" + assert ( + node.query( + "SELECT joinGet('testLocalJoin', 'val', number) as local, joinGet('testS3Join', 'val', number) as s3 FROM system.numbers LIMIT 3" + ) + == "\t\na\ta\nb\tb\n" + ) node.query("TRUNCATE TABLE testLocalJoin") node.query("TRUNCATE TABLE testS3Join") - assert node.query("SELECT joinGet('testLocalJoin', 'val', number) as local, joinGet('testS3Join', 'val', number) as s3 FROM system.numbers LIMIT 3") == "\t\n\t\n\t\n" + assert ( + node.query( + "SELECT joinGet('testLocalJoin', 'val', number) as local, joinGet('testS3Join', 'val', number) as s3 FROM system.numbers LIMIT 3" + ) + == "\t\n\t\n\t\n" + ) assert_objects_count(cluster, 0) node.query("DROP TABLE testLocalJoin") diff --git a/tests/integration/test_keeper_and_access_storage/test.py b/tests/integration/test_keeper_and_access_storage/test.py index 3a3c7535a85..ae6b0085094 100644 --- a/tests/integration/test_keeper_and_access_storage/test.py +++ b/tests/integration/test_keeper_and_access_storage/test.py @@ -6,7 +6,9 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/keeper.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/keeper.xml"], stay_alive=True +) # test that server is able to start @pytest.fixture(scope="module") @@ -17,5 +19,6 @@ def started_cluster(): finally: cluster.shutdown() + def test_create_replicated(started_cluster): assert node1.query("SELECT 1") == "1\n" diff --git a/tests/integration/test_keeper_auth/test.py b/tests/integration/test_keeper_auth/test.py index 6be78f95483..364d93dfc53 100644 --- a/tests/integration/test_keeper_auth/test.py +++ b/tests/integration/test_keeper_auth/test.py @@ -1,15 +1,26 @@ - import pytest from helpers.cluster import ClickHouseCluster from kazoo.client import KazooClient, KazooState from kazoo.security import ACL, make_digest_acl, make_acl -from kazoo.exceptions import AuthFailedError, InvalidACLError, NoAuthError, KazooException +from kazoo.exceptions import ( + AuthFailedError, + InvalidACLError, + NoAuthError, + KazooException, +) cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/keeper_config.xml'], with_zookeeper=True, use_keeper=False, stay_alive=True) +node = cluster.add_instance( + "node", + main_configs=["configs/keeper_config.xml"], + with_zookeeper=True, + use_keeper=False, + stay_alive=True, +) SUPERAUTH = "super:admin" + @pytest.fixture(scope="module") def started_cluster(): try: @@ -20,63 +31,106 @@ def started_cluster(): finally: cluster.shutdown() + def get_fake_zk(timeout=30.0): - _fake_zk_instance 
= KazooClient(hosts=cluster.get_instance_ip('node') + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip("node") + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def get_genuine_zk(): print("Zoo1", cluster.get_instance_ip("zoo1")) - return cluster.get_kazoo_client('zoo1') + return cluster.get_kazoo_client("zoo1") -@pytest.mark.parametrize( - ('get_zk'), - [ - get_genuine_zk, - get_fake_zk - ] -) +@pytest.mark.parametrize(("get_zk"), [get_genuine_zk, get_fake_zk]) def test_remove_acl(started_cluster, get_zk): auth_connection = get_zk() - auth_connection.add_auth('digest', 'user1:password1') + auth_connection.add_auth("digest", "user1:password1") # Consistent with zookeeper, accept generated digest - auth_connection.create("/test_remove_acl1", b"dataX", acl=[make_acl("digest", "user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=", read=True, write=False, create=False, delete=False, admin=False)]) - auth_connection.create("/test_remove_acl2", b"dataX", acl=[make_acl("digest", "user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=", read=True, write=True, create=False, delete=False, admin=False)]) - auth_connection.create("/test_remove_acl3", b"dataX", acl=[make_acl("digest", "user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=", all=True)]) + auth_connection.create( + "/test_remove_acl1", + b"dataX", + acl=[ + make_acl( + "digest", + "user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=", + read=True, + write=False, + create=False, + delete=False, + admin=False, + ) + ], + ) + auth_connection.create( + "/test_remove_acl2", + b"dataX", + acl=[ + make_acl( + "digest", + "user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=", + read=True, + write=True, + create=False, + delete=False, + admin=False, + ) + ], + ) + auth_connection.create( + "/test_remove_acl3", + b"dataX", + acl=[make_acl("digest", "user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=", all=True)], + ) auth_connection.delete("/test_remove_acl2") - auth_connection.create("/test_remove_acl4", b"dataX", acl=[make_acl("digest", "user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=", read=True, write=True, create=True, delete=False, admin=False)]) + auth_connection.create( + "/test_remove_acl4", + b"dataX", + acl=[ + make_acl( + "digest", + "user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=", + read=True, + write=True, + create=True, + delete=False, + admin=False, + ) + ], + ) acls, stat = auth_connection.get_acls("/test_remove_acl3") assert stat.aversion == 0 assert len(acls) == 1 for acl in acls: - assert acl.acl_list == ['ALL'] + assert acl.acl_list == ["ALL"] assert acl.perms == 31 -@pytest.mark.parametrize( - ('get_zk'), - [ - get_genuine_zk, - get_fake_zk - ] -) - +@pytest.mark.parametrize(("get_zk"), [get_genuine_zk, get_fake_zk]) def test_digest_auth_basic(started_cluster, get_zk): auth_connection = get_zk() - auth_connection.add_auth('digest', 'user1:password1') + auth_connection.add_auth("digest", "user1:password1") auth_connection.create("/test_no_acl", b"") - auth_connection.create("/test_all_acl", b"data", acl=[make_acl("auth", "", all=True)]) + auth_connection.create( + "/test_all_acl", b"data", acl=[make_acl("auth", "", all=True)] + ) # Consistent with zookeeper, accept generated digest - auth_connection.create("/test_all_digest_acl", b"dataX", acl=[make_acl("digest", "user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=", all=True)]) + auth_connection.create( + "/test_all_digest_acl", + b"dataX", + acl=[make_acl("digest", "user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=", all=True)], + ) assert auth_connection.get("/test_all_acl")[0] == b"data" assert auth_connection.get("/test_all_digest_acl")[0] == b"dataX" @@ 
-104,7 +158,7 @@ def test_digest_auth_basic(started_cluster, get_zk): no_auth_connection = get_zk() # wrong auth - no_auth_connection.add_auth('digest', 'user2:password2') + no_auth_connection.add_auth("digest", "user2:password2") with pytest.raises(NoAuthError): no_auth_connection.set("/test_all_acl", b"hello") @@ -122,7 +176,7 @@ def test_digest_auth_basic(started_cluster, get_zk): no_auth_connection.create("/some_allowed_node", b"data") # auth added, go on - no_auth_connection.add_auth('digest', 'user1:password1') + no_auth_connection.add_auth("digest", "user1:password1") for path in ["/test_no_acl", "/test_all_acl"]: no_auth_connection.set(path, b"auth_added") assert no_auth_connection.get(path)[0] == b"auth_added" @@ -131,62 +185,71 @@ def test_digest_auth_basic(started_cluster, get_zk): def test_super_auth(started_cluster): auth_connection = get_fake_zk() - auth_connection.add_auth('digest', 'user1:password1') + auth_connection.add_auth("digest", "user1:password1") auth_connection.create("/test_super_no_acl", b"") - auth_connection.create("/test_super_all_acl", b"data", acl=[make_acl("auth", "", all=True)]) + auth_connection.create( + "/test_super_all_acl", b"data", acl=[make_acl("auth", "", all=True)] + ) super_connection = get_fake_zk() - super_connection.add_auth('digest', 'super:admin') + super_connection.add_auth("digest", "super:admin") for path in ["/test_super_no_acl", "/test_super_all_acl"]: super_connection.set(path, b"value") assert super_connection.get(path)[0] == b"value" -@pytest.mark.parametrize( - ('get_zk'), - [ - get_genuine_zk, - get_fake_zk - ] -) +@pytest.mark.parametrize(("get_zk"), [get_genuine_zk, get_fake_zk]) def test_digest_auth_multiple(started_cluster, get_zk): auth_connection = get_zk() - auth_connection.add_auth('digest', 'user1:password1') - auth_connection.add_auth('digest', 'user2:password2') - auth_connection.add_auth('digest', 'user3:password3') + auth_connection.add_auth("digest", "user1:password1") + auth_connection.add_auth("digest", "user2:password2") + auth_connection.add_auth("digest", "user3:password3") - auth_connection.create("/test_multi_all_acl", b"data", acl=[make_acl("auth", "", all=True)]) + auth_connection.create( + "/test_multi_all_acl", b"data", acl=[make_acl("auth", "", all=True)] + ) one_auth_connection = get_zk() - one_auth_connection.add_auth('digest', 'user1:password1') + one_auth_connection.add_auth("digest", "user1:password1") one_auth_connection.set("/test_multi_all_acl", b"X") assert one_auth_connection.get("/test_multi_all_acl")[0] == b"X" other_auth_connection = get_zk() - other_auth_connection.add_auth('digest', 'user2:password2') + other_auth_connection.add_auth("digest", "user2:password2") other_auth_connection.set("/test_multi_all_acl", b"Y") assert other_auth_connection.get("/test_multi_all_acl")[0] == b"Y" -@pytest.mark.parametrize( - ('get_zk'), - [ - get_genuine_zk, - get_fake_zk - ] -) + +@pytest.mark.parametrize(("get_zk"), [get_genuine_zk, get_fake_zk]) def test_partial_auth(started_cluster, get_zk): auth_connection = get_zk() - auth_connection.add_auth('digest', 'user1:password1') + auth_connection.add_auth("digest", "user1:password1") - auth_connection.create("/test_partial_acl", b"data", acl=[make_acl("auth", "", read=False, write=True, create=True, delete=True, admin=True)]) + auth_connection.create( + "/test_partial_acl", + b"data", + acl=[ + make_acl( + "auth", "", read=False, write=True, create=True, delete=True, admin=True + ) + ], + ) auth_connection.set("/test_partial_acl", b"X") - 
auth_connection.create("/test_partial_acl/subnode", b"X", acl=[make_acl("auth", "", read=False, write=True, create=True, delete=True, admin=True)]) + auth_connection.create( + "/test_partial_acl/subnode", + b"X", + acl=[ + make_acl( + "auth", "", read=False, write=True, create=True, delete=True, admin=True + ) + ], + ) with pytest.raises(NoAuthError): auth_connection.get("/test_partial_acl") @@ -197,16 +260,40 @@ def test_partial_auth(started_cluster, get_zk): # exists works without read perm assert auth_connection.exists("/test_partial_acl") is not None - auth_connection.create("/test_partial_acl_create", b"data", acl=[make_acl("auth", "", read=True, write=True, create=False, delete=True, admin=True)]) + auth_connection.create( + "/test_partial_acl_create", + b"data", + acl=[ + make_acl( + "auth", "", read=True, write=True, create=False, delete=True, admin=True + ) + ], + ) with pytest.raises(NoAuthError): auth_connection.create("/test_partial_acl_create/subnode") - auth_connection.create("/test_partial_acl_set", b"data", acl=[make_acl("auth", "", read=True, write=False, create=True, delete=True, admin=True)]) + auth_connection.create( + "/test_partial_acl_set", + b"data", + acl=[ + make_acl( + "auth", "", read=True, write=False, create=True, delete=True, admin=True + ) + ], + ) with pytest.raises(NoAuthError): auth_connection.set("/test_partial_acl_set", b"X") # not allowed to delete child node - auth_connection.create("/test_partial_acl_delete", b"data", acl=[make_acl("auth", "", read=True, write=True, create=True, delete=False, admin=True)]) + auth_connection.create( + "/test_partial_acl_delete", + b"data", + acl=[ + make_acl( + "auth", "", read=True, write=True, create=True, delete=False, admin=True + ) + ], + ) auth_connection.create("/test_partial_acl_delete/subnode") with pytest.raises(NoAuthError): auth_connection.delete("/test_partial_acl_delete/subnode") @@ -216,85 +303,156 @@ def test_bad_auth(started_cluster): auth_connection = get_fake_zk() with pytest.raises(AuthFailedError): - auth_connection.add_auth('world', 'anyone') + auth_connection.add_auth("world", "anyone") auth_connection = get_fake_zk() with pytest.raises(AuthFailedError): print("Sending 1") - auth_connection.add_auth('adssagf', 'user1:password1') + auth_connection.add_auth("adssagf", "user1:password1") auth_connection = get_fake_zk() with pytest.raises(AuthFailedError): print("Sending 2") - auth_connection.add_auth('digest', '') + auth_connection.add_auth("digest", "") auth_connection = get_fake_zk() with pytest.raises(AuthFailedError): print("Sending 3") - auth_connection.add_auth('', 'user1:password1') + auth_connection.add_auth("", "user1:password1") auth_connection = get_fake_zk() with pytest.raises(AuthFailedError): print("Sending 4") - auth_connection.add_auth('digest', 'user1') + auth_connection.add_auth("digest", "user1") auth_connection = get_fake_zk() with pytest.raises(AuthFailedError): print("Sending 5") - auth_connection.add_auth('digest', 'user1:password:otherpassword') + auth_connection.add_auth("digest", "user1:password:otherpassword") auth_connection = get_fake_zk() with pytest.raises(AuthFailedError): print("Sending 6") - auth_connection.add_auth('auth', 'user1:password') + auth_connection.add_auth("auth", "user1:password") auth_connection = get_fake_zk() with pytest.raises(AuthFailedError): print("Sending 7") - auth_connection.add_auth('world', 'somebody') + auth_connection.add_auth("world", "somebody") auth_connection = get_fake_zk() with pytest.raises(InvalidACLError): print("Sending 8") - 
auth_connection.create("/test_bad_acl", b"data", acl=[make_acl("dasd", "", read=True, write=False, create=True, delete=True, admin=True)]) + auth_connection.create( + "/test_bad_acl", + b"data", + acl=[ + make_acl( + "dasd", + "", + read=True, + write=False, + create=True, + delete=True, + admin=True, + ) + ], + ) auth_connection = get_fake_zk() with pytest.raises(InvalidACLError): print("Sending 9") - auth_connection.create("/test_bad_acl", b"data", acl=[make_acl("digest", "", read=True, write=False, create=True, delete=True, admin=True)]) + auth_connection.create( + "/test_bad_acl", + b"data", + acl=[ + make_acl( + "digest", + "", + read=True, + write=False, + create=True, + delete=True, + admin=True, + ) + ], + ) auth_connection = get_fake_zk() with pytest.raises(InvalidACLError): print("Sending 10") - auth_connection.create("/test_bad_acl", b"data", acl=[make_acl("", "", read=True, write=False, create=True, delete=True, admin=True)]) + auth_connection.create( + "/test_bad_acl", + b"data", + acl=[ + make_acl( + "", "", read=True, write=False, create=True, delete=True, admin=True + ) + ], + ) auth_connection = get_fake_zk() with pytest.raises(InvalidACLError): print("Sending 11") - auth_connection.create("/test_bad_acl", b"data", acl=[make_acl("digest", "dsdasda", read=True, write=False, create=True, delete=True, admin=True)]) + auth_connection.create( + "/test_bad_acl", + b"data", + acl=[ + make_acl( + "digest", + "dsdasda", + read=True, + write=False, + create=True, + delete=True, + admin=True, + ) + ], + ) auth_connection = get_fake_zk() with pytest.raises(InvalidACLError): print("Sending 12") - auth_connection.create("/test_bad_acl", b"data", acl=[make_acl("digest", "dsad:DSAa:d", read=True, write=False, create=True, delete=True, admin=True)]) + auth_connection.create( + "/test_bad_acl", + b"data", + acl=[ + make_acl( + "digest", + "dsad:DSAa:d", + read=True, + write=False, + create=True, + delete=True, + admin=True, + ) + ], + ) + def test_auth_snapshot(started_cluster): connection = get_fake_zk() - connection.add_auth('digest', 'user1:password1') + connection.add_auth("digest", "user1:password1") - connection.create("/test_snapshot_acl", b"data", acl=[make_acl("auth", "", all=True)]) + connection.create( + "/test_snapshot_acl", b"data", acl=[make_acl("auth", "", all=True)] + ) connection1 = get_fake_zk() - connection1.add_auth('digest', 'user2:password2') + connection1.add_auth("digest", "user2:password2") - connection1.create("/test_snapshot_acl1", b"data", acl=[make_acl("auth", "", all=True)]) + connection1.create( + "/test_snapshot_acl1", b"data", acl=[make_acl("auth", "", all=True)] + ) connection2 = get_fake_zk() connection2.create("/test_snapshot_acl2", b"data") for i in range(100): - connection.create(f"/test_snapshot_acl/path{i}", b"data", acl=[make_acl("auth", "", all=True)]) + connection.create( + f"/test_snapshot_acl/path{i}", b"data", acl=[make_acl("auth", "", all=True)] + ) node.restart_clickhouse() @@ -303,7 +461,7 @@ def test_auth_snapshot(started_cluster): with pytest.raises(NoAuthError): connection.get("/test_snapshot_acl") - connection.add_auth('digest', 'user1:password1') + connection.add_auth("digest", "user1:password1") assert connection.get("/test_snapshot_acl")[0] == b"data" @@ -316,14 +474,13 @@ def test_auth_snapshot(started_cluster): assert connection.get(f"/test_snapshot_acl/path{i}")[0] == b"data" connection1 = get_fake_zk() - connection1.add_auth('digest', 'user2:password2') + connection1.add_auth("digest", "user2:password2") assert 
connection1.get("/test_snapshot_acl1")[0] == b"data" with pytest.raises(NoAuthError): connection1.get("/test_snapshot_acl") - connection2 = get_fake_zk() assert connection2.get("/test_snapshot_acl2")[0] == b"data" with pytest.raises(NoAuthError): @@ -333,45 +490,55 @@ def test_auth_snapshot(started_cluster): connection2.get("/test_snapshot_acl1") -@pytest.mark.parametrize( - ('get_zk'), - [ - get_genuine_zk, - get_fake_zk - ] -) +@pytest.mark.parametrize(("get_zk"), [get_genuine_zk, get_fake_zk]) def test_get_set_acl(started_cluster, get_zk): auth_connection = get_zk() - auth_connection.add_auth('digest', 'username1:secret1') - auth_connection.add_auth('digest', 'username2:secret2') + auth_connection.add_auth("digest", "username1:secret1") + auth_connection.add_auth("digest", "username2:secret2") - auth_connection.create("/test_set_get_acl", b"data", acl=[make_acl("auth", "", all=True)]) + auth_connection.create( + "/test_set_get_acl", b"data", acl=[make_acl("auth", "", all=True)] + ) acls, stat = auth_connection.get_acls("/test_set_get_acl") assert stat.aversion == 0 assert len(acls) == 2 for acl in acls: - assert acl.acl_list == ['ALL'] - assert acl.id.scheme == 'digest' + assert acl.acl_list == ["ALL"] + assert acl.id.scheme == "digest" assert acl.perms == 31 - assert acl.id.id in ('username1:eGncMdBgOfGS/TCojt51xWsWv/Y=', 'username2:qgSSumukVlhftkVycylbHNvxhFU=') - + assert acl.id.id in ( + "username1:eGncMdBgOfGS/TCojt51xWsWv/Y=", + "username2:qgSSumukVlhftkVycylbHNvxhFU=", + ) other_auth_connection = get_zk() - other_auth_connection.add_auth('digest', 'username1:secret1') - other_auth_connection.add_auth('digest', 'username3:secret3') - other_auth_connection.set_acls("/test_set_get_acl", acls=[make_acl("auth", "", read=True, write=False, create=True, delete=True, admin=True)]) + other_auth_connection.add_auth("digest", "username1:secret1") + other_auth_connection.add_auth("digest", "username3:secret3") + other_auth_connection.set_acls( + "/test_set_get_acl", + acls=[ + make_acl( + "auth", "", read=True, write=False, create=True, delete=True, admin=True + ) + ], + ) acls, stat = other_auth_connection.get_acls("/test_set_get_acl") assert stat.aversion == 1 assert len(acls) == 2 for acl in acls: - assert acl.acl_list == ['READ', 'CREATE', 'DELETE', 'ADMIN'] - assert acl.id.scheme == 'digest' + assert acl.acl_list == ["READ", "CREATE", "DELETE", "ADMIN"] + assert acl.id.scheme == "digest" assert acl.perms == 29 - assert acl.id.id in ('username1:eGncMdBgOfGS/TCojt51xWsWv/Y=', 'username3:CvWITOxxTwk+u6S5PoGlQ4hNoWI=') + assert acl.id.id in ( + "username1:eGncMdBgOfGS/TCojt51xWsWv/Y=", + "username3:CvWITOxxTwk+u6S5PoGlQ4hNoWI=", + ) with pytest.raises(KazooException): - other_auth_connection.set_acls("/test_set_get_acl", acls=[make_acl("auth", "", all=True)], version=0) + other_auth_connection.set_acls( + "/test_set_get_acl", acls=[make_acl("auth", "", all=True)], version=0 + ) diff --git a/tests/integration/test_keeper_back_to_back/test.py b/tests/integration/test_keeper_back_to_back/test.py index 6de9971012f..73fface02b4 100644 --- a/tests/integration/test_keeper_back_to_back/test.py +++ b/tests/integration/test_keeper_back_to_back/test.py @@ -7,16 +7,26 @@ import time from multiprocessing.dummy import Pool cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/enable_keeper.xml'], with_zookeeper=True, use_keeper=False) +node = cluster.add_instance( + "node", + main_configs=["configs/enable_keeper.xml"], + with_zookeeper=True, + use_keeper=False, 
+) from kazoo.client import KazooClient, KazooState, KeeperState + def get_genuine_zk(): print("Zoo1", cluster.get_instance_ip("zoo1")) - return cluster.get_kazoo_client('zoo1') + return cluster.get_kazoo_client("zoo1") + def get_fake_zk(): print("node", cluster.get_instance_ip("node")) - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181", timeout=30.0) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip("node") + ":9181", timeout=30.0 + ) + def reset_last_zxid_listener(state): print("Fake zk callback called for state", state) nonlocal _fake_zk_instance @@ -27,14 +37,17 @@ def get_fake_zk(): _fake_zk_instance.start() return _fake_zk_instance + def random_string(length): - return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length)) + return "".join(random.choices(string.ascii_lowercase + string.digits, k=length)) + def create_random_path(prefix="", depth=1): if depth == 0: return prefix return create_random_path(os.path.join(prefix, random_string(3)), depth - 1) + def stop_zk(zk): try: if zk: @@ -82,7 +95,9 @@ def test_sequential_nodes(started_cluster): genuine_zk.create("/test_sequential_nodes") fake_zk.create("/test_sequential_nodes") for i in range(1, 11): - genuine_zk.create("/test_sequential_nodes/" + ("a" * i) + "-", sequence=True) + genuine_zk.create( + "/test_sequential_nodes/" + ("a" * i) + "-", sequence=True + ) genuine_zk.create("/test_sequential_nodes/" + ("b" * i)) fake_zk.create("/test_sequential_nodes/" + ("a" * i) + "-", sequence=True) fake_zk.create("/test_sequential_nodes/" + ("b" * i)) @@ -115,7 +130,9 @@ def test_sequential_nodes(started_cluster): assert genuine_throw == True assert fake_throw == True - genuine_childs_1 = list(sorted(genuine_zk.get_children("/test_sequential_nodes_1"))) + genuine_childs_1 = list( + sorted(genuine_zk.get_children("/test_sequential_nodes_1")) + ) fake_childs_1 = list(sorted(fake_zk.get_children("/test_sequential_nodes_1"))) assert genuine_childs_1 == fake_childs_1 @@ -127,7 +144,9 @@ def test_sequential_nodes(started_cluster): genuine_zk.create("/test_sequential_nodes_2/node", sequence=True) fake_zk.create("/test_sequential_nodes_2/node", sequence=True) - genuine_childs_2 = list(sorted(genuine_zk.get_children("/test_sequential_nodes_2"))) + genuine_childs_2 = list( + sorted(genuine_zk.get_children("/test_sequential_nodes_2")) + ) fake_childs_2 = list(sorted(fake_zk.get_children("/test_sequential_nodes_2"))) assert genuine_childs_2 == fake_childs_2 finally: @@ -143,6 +162,7 @@ def assert_eq_stats(stat1, stat2): assert stat1.dataLength == stat2.dataLength assert stat1.numChildren == stat2.numChildren + def test_stats(started_cluster): try: genuine_zk = get_genuine_zk() @@ -162,10 +182,16 @@ def test_stats(started_cluster): fake_stats = fake_zk.exists("/test_stats_nodes") assert_eq_stats(genuine_stats, fake_stats) for i in range(1, 11): - print("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2)) - genuine_zk.delete("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2)) + print( + "/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2) + ) + genuine_zk.delete( + "/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2) + ) genuine_zk.delete("/test_stats_nodes/" + ("b" * i)) - fake_zk.delete("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2)) + fake_zk.delete( + "/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2) + ) fake_zk.delete("/test_stats_nodes/" + ("b" * i)) genuine_stats 
= genuine_zk.exists("/test_stats_nodes") @@ -186,6 +212,7 @@ def test_stats(started_cluster): for zk in [genuine_zk, fake_zk]: stop_zk(zk) + def test_watchers(started_cluster): try: genuine_zk = get_genuine_zk() @@ -200,6 +227,7 @@ def test_watchers(started_cluster): genuine_data_watch_data = event fake_data_watch_data = None + def fake_callback(event): print("Fake data watch called") nonlocal fake_data_watch_data @@ -218,17 +246,18 @@ def test_watchers(started_cluster): print("Fake data", fake_data_watch_data) assert genuine_data_watch_data == fake_data_watch_data - genuine_zk.create("/test_data_watches/child", b"a") fake_zk.create("/test_data_watches/child", b"a") genuine_children = None + def genuine_child_callback(event): print("Genuine child watch called") nonlocal genuine_children genuine_children = event fake_children = None + def fake_child_callback(event): print("Fake child watch called") nonlocal fake_children @@ -262,32 +291,40 @@ def test_watchers(started_cluster): assert genuine_children == fake_children genuine_children_delete = None + def genuine_child_delete_callback(event): print("Genuine child watch called") nonlocal genuine_children_delete genuine_children_delete = event fake_children_delete = None + def fake_child_delete_callback(event): print("Fake child watch called") nonlocal fake_children_delete fake_children_delete = event genuine_child_delete = None + def genuine_own_delete_callback(event): print("Genuine child watch called") nonlocal genuine_child_delete genuine_child_delete = event fake_child_delete = None + def fake_own_delete_callback(event): print("Fake child watch called") nonlocal fake_child_delete fake_child_delete = event - genuine_zk.get_children("/test_data_watches", watch=genuine_child_delete_callback) + genuine_zk.get_children( + "/test_data_watches", watch=genuine_child_delete_callback + ) fake_zk.get_children("/test_data_watches", watch=fake_child_delete_callback) - genuine_zk.get_children("/test_data_watches/child", watch=genuine_own_delete_callback) + genuine_zk.get_children( + "/test_data_watches/child", watch=genuine_own_delete_callback + ) fake_zk.get_children("/test_data_watches/child", watch=fake_own_delete_callback) print("Calling genuine child delete") @@ -309,49 +346,59 @@ def test_watchers(started_cluster): for zk in [genuine_zk, fake_zk]: stop_zk(zk) + def test_multitransactions(started_cluster): try: genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() for zk in [genuine_zk, fake_zk]: - zk.create('/test_multitransactions') + zk.create("/test_multitransactions") t = zk.transaction() - t.create('/test_multitransactions/freddy') - t.create('/test_multitransactions/fred', ephemeral=True) - t.create('/test_multitransactions/smith', sequence=True) + t.create("/test_multitransactions/freddy") + t.create("/test_multitransactions/fred", ephemeral=True) + t.create("/test_multitransactions/smith", sequence=True) results = t.commit() assert len(results) == 3 - assert results[0] == '/test_multitransactions/freddy' - assert results[2].startswith('/test_multitransactions/smith0') is True + assert results[0] == "/test_multitransactions/freddy" + assert results[2].startswith("/test_multitransactions/smith0") is True from kazoo.exceptions import RolledBackError, NoNodeError + for i, zk in enumerate([genuine_zk, fake_zk]): print("Processing ZK", i) t = zk.transaction() - t.create('/test_multitransactions/q') - t.delete('/test_multitransactions/a') - t.create('/test_multitransactions/x') + t.create("/test_multitransactions/q") + 
t.delete("/test_multitransactions/a") + t.create("/test_multitransactions/x") results = t.commit() print("Results", results) assert results[0].__class__ == RolledBackError assert results[1].__class__ == NoNodeError - assert zk.exists('/test_multitransactions/q') is None - assert zk.exists('/test_multitransactions/a') is None - assert zk.exists('/test_multitransactions/x') is None + assert zk.exists("/test_multitransactions/q") is None + assert zk.exists("/test_multitransactions/a") is None + assert zk.exists("/test_multitransactions/x") is None finally: for zk in [genuine_zk, fake_zk]: stop_zk(zk) + def exists(zk, path): result = zk.exists(path) return result is not None + def get(zk, path): result = zk.get(path) return result[0] + def get_children(zk, path): - return [elem for elem in list(sorted(zk.get_children(path))) if elem not in ('clickhouse', 'zookeeper')] + return [ + elem + for elem in list(sorted(zk.get_children(path))) + if elem not in ("clickhouse", "zookeeper") + ] + READ_REQUESTS = [ ("exists", exists), @@ -377,9 +424,8 @@ WRITE_REQUESTS = [ def delete(zk, path): zk.delete(path) -DELETE_REQUESTS = [ - ("delete", delete) -] + +DELETE_REQUESTS = [("delete", delete)] class Request(object): @@ -390,9 +436,10 @@ class Request(object): self.is_return = is_return def __str__(self): - arg_str = ', '.join([str(k) + "=" + str(v) for k, v in self.arguments.items()]) + arg_str = ", ".join([str(k) + "=" + str(v) for k, v in self.arguments.items()]) return "ZKRequest name {} with arguments {}".format(self.name, arg_str) + def generate_requests(prefix="/", iters=1): requests = [] existing_paths = [] @@ -404,18 +451,29 @@ def generate_requests(prefix="/", iters=1): path = create_random_path(path, 1) existing_paths.append(path) value = random_string(1000) - request = Request("create", {"path" : path, "value": value[0:10]}, lambda zk, path=path, value=value: create(zk, path, value), False) + request = Request( + "create", + {"path": path, "value": value[0:10]}, + lambda zk, path=path, value=value: create(zk, path, value), + False, + ) requests.append(request) for _ in range(100): path = random.choice(existing_paths) value = random_string(100) - request = Request("set", {"path": path, "value": value[0:10]}, lambda zk, path=path, value=value: set_data(zk, path, value), False) + request = Request( + "set", + {"path": path, "value": value[0:10]}, + lambda zk, path=path, value=value: set_data(zk, path, value), + False, + ) requests.append(request) for _ in range(100): path = random.choice(existing_paths) callback = random.choice(READ_REQUESTS) + def read_func1(zk, path=path, callback=callback): return callback[1](zk, path) @@ -424,13 +482,17 @@ def generate_requests(prefix="/", iters=1): for _ in range(30): path = random.choice(existing_paths) - request = Request("delete", {"path": path}, lambda zk, path=path: delete(zk, path), False) + request = Request( + "delete", {"path": path}, lambda zk, path=path: delete(zk, path), False + ) for _ in range(100): path = random.choice(existing_paths) callback = random.choice(READ_REQUESTS) + def read_func2(zk, path=path, callback=callback): return callback[1](zk, path) + request = Request(callback[0], {"path": path}, read_func2, True) requests.append(request) return requests @@ -463,15 +525,26 @@ def test_random_requests(started_cluster): print("Fake exception", str(ex)) fake_throw = True - assert fake_throw == genuine_throw, "Fake throw genuine not or vise versa request {}" + assert ( + fake_throw == genuine_throw + ), "Fake throw genuine not or vise versa 
request {}" assert fake_result == genuine_result, "Zookeeper results differ" - root_children_genuine = [elem for elem in list(sorted(genuine_zk.get_children("/test_random_requests"))) if elem not in ('clickhouse', 'zookeeper')] - root_children_fake = [elem for elem in list(sorted(fake_zk.get_children("/test_random_requests"))) if elem not in ('clickhouse', 'zookeeper')] + root_children_genuine = [ + elem + for elem in list(sorted(genuine_zk.get_children("/test_random_requests"))) + if elem not in ("clickhouse", "zookeeper") + ] + root_children_fake = [ + elem + for elem in list(sorted(fake_zk.get_children("/test_random_requests"))) + if elem not in ("clickhouse", "zookeeper") + ] assert root_children_fake == root_children_genuine finally: for zk in [genuine_zk, fake_zk]: stop_zk(zk) + def test_end_of_session(started_cluster): fake_zk1 = None @@ -484,20 +557,22 @@ def test_end_of_session(started_cluster): fake_zk1.start() fake_zk2 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk2.start() - genuine_zk1 = cluster.get_kazoo_client('zoo1') + genuine_zk1 = cluster.get_kazoo_client("zoo1") genuine_zk1.start() - genuine_zk2 = cluster.get_kazoo_client('zoo1') + genuine_zk2 = cluster.get_kazoo_client("zoo1") genuine_zk2.start() fake_zk1.create("/test_end_of_session") genuine_zk1.create("/test_end_of_session") fake_ephemeral_event = None + def fake_ephemeral_callback(event): print("Fake watch triggered") nonlocal fake_ephemeral_event fake_ephemeral_event = event genuine_ephemeral_event = None + def genuine_ephemeral_callback(event): print("Genuine watch triggered") nonlocal genuine_ephemeral_event @@ -509,8 +584,18 @@ def test_end_of_session(started_cluster): fake_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True) genuine_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True) - assert fake_zk2.exists("/test_end_of_session/ephemeral_node", watch=fake_ephemeral_callback) is not None - assert genuine_zk2.exists("/test_end_of_session/ephemeral_node", watch=genuine_ephemeral_callback) is not None + assert ( + fake_zk2.exists( + "/test_end_of_session/ephemeral_node", watch=fake_ephemeral_callback + ) + is not None + ) + assert ( + genuine_zk2.exists( + "/test_end_of_session/ephemeral_node", watch=genuine_ephemeral_callback + ) + is not None + ) print("Stopping genuine zk") genuine_zk1.stop() @@ -531,6 +616,7 @@ def test_end_of_session(started_cluster): for zk in [fake_zk1, fake_zk2, genuine_zk1, genuine_zk2]: stop_zk(zk) + def test_end_of_watches_session(started_cluster): fake_zk1 = None fake_zk2 = None @@ -544,6 +630,7 @@ def test_end_of_watches_session(started_cluster): fake_zk1.create("/test_end_of_watches_session") dummy_set = 0 + def dummy_callback(event): nonlocal dummy_set dummy_set += 1 @@ -551,22 +638,35 @@ def test_end_of_watches_session(started_cluster): for child_node in range(100): fake_zk1.create("/test_end_of_watches_session/" + str(child_node)) - fake_zk1.get_children("/test_end_of_watches_session/" + str(child_node), watch=dummy_callback) + fake_zk1.get_children( + "/test_end_of_watches_session/" + str(child_node), watch=dummy_callback + ) - fake_zk2.get_children("/test_end_of_watches_session/" + str(0), watch=dummy_callback) - fake_zk2.get_children("/test_end_of_watches_session/" + str(1), watch=dummy_callback) + fake_zk2.get_children( + "/test_end_of_watches_session/" + str(0), watch=dummy_callback + ) + fake_zk2.get_children( + "/test_end_of_watches_session/" + str(1), watch=dummy_callback + ) fake_zk1.stop() fake_zk1.close() for 
child_node in range(100): - fake_zk2.create("/test_end_of_watches_session/" + str(child_node) + "/" + str(child_node), b"somebytes") + fake_zk2.create( + "/test_end_of_watches_session/" + + str(child_node) + + "/" + + str(child_node), + b"somebytes", + ) assert dummy_set == 2 finally: for zk in [fake_zk1, fake_zk2]: stop_zk(zk) + def test_concurrent_watches(started_cluster): try: fake_zk = get_fake_zk() @@ -580,6 +680,7 @@ def test_concurrent_watches(started_cluster): existing_path = [] all_paths_created = [] watches_created = 0 + def create_path_and_watch(i): nonlocal watches_created nonlocal all_paths_created @@ -597,6 +698,7 @@ def test_concurrent_watches(started_cluster): existing_path.append(i) trigger_called = 0 + def trigger_watch(i): nonlocal trigger_called trigger_called += 1 diff --git a/tests/integration/test_keeper_four_word_command/test.py b/tests/integration/test_keeper_four_word_command/test.py index 31f9d8f24c1..caaf1d0c87a 100644 --- a/tests/integration/test_keeper_four_word_command/test.py +++ b/tests/integration/test_keeper_four_word_command/test.py @@ -13,12 +13,15 @@ import csv import re cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml'], - stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml'], - stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/enable_keeper3.xml'], - stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/enable_keeper1.xml"], stay_alive=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/enable_keeper2.xml"], stay_alive=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/enable_keeper3.xml"], stay_alive=True +) from kazoo.client import KazooClient, KazooState @@ -47,9 +50,9 @@ def clear_znodes(): zk = None try: zk = get_fake_zk(node3.name, timeout=30.0) - nodes = zk.get_children('/') - for node in [n for n in nodes if 'test_4lw_' in n]: - zk.delete('/' + node) + nodes = zk.get_children("/") + for node in [n for n in nodes if "test_4lw_" in n]: + zk.delete("/" + node) finally: destroy_zk_client(zk) @@ -77,7 +80,9 @@ def wait_nodes(): def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance @@ -99,14 +104,14 @@ def reset_node_stats(node_name=node1.name): client = None try: client = get_keeper_socket(node_name) - client.send(b'srst') + client.send(b"srst") client.recv(10) finally: if client is not None: client.close() -def send_4lw_cmd(node_name=node1.name, cmd='ruok'): +def send_4lw_cmd(node_name=node1.name, cmd="ruok"): client = None try: client = get_keeper_socket(node_name) @@ -123,7 +128,7 @@ def reset_conn_stats(node_name=node1.name): client = None try: client = get_keeper_socket(node_name) - client.send(b'crst') + client.send(b"crst") client.recv(10_000) finally: if client is not None: @@ -134,13 +139,15 @@ def test_cmd_ruok(started_cluster): client = None try: wait_nodes() - data = send_4lw_cmd(cmd='ruok') - assert data == 'imok' + data = send_4lw_cmd(cmd="ruok") + assert data == "imok" finally: close_keeper_socket(client) -def do_some_action(zk, create_cnt=0, get_cnt=0, set_cnt=0, ephemeral_cnt=0, watch_cnt=0, delete_cnt=0): +def do_some_action( + zk, create_cnt=0, get_cnt=0, set_cnt=0, 
ephemeral_cnt=0, watch_cnt=0, delete_cnt=0 +): assert create_cnt >= get_cnt assert create_cnt >= set_cnt assert create_cnt >= watch_cnt @@ -184,12 +191,20 @@ def test_cmd_mntr(started_cluster): reset_node_stats(node1.name) zk = get_fake_zk(node1.name, timeout=30.0) - do_some_action(zk, create_cnt=10, get_cnt=10, set_cnt=5, ephemeral_cnt=2, watch_cnt=2, delete_cnt=2) + do_some_action( + zk, + create_cnt=10, + get_cnt=10, + set_cnt=5, + ephemeral_cnt=2, + watch_cnt=2, + delete_cnt=2, + ) - data = send_4lw_cmd(cmd='mntr') + data = send_4lw_cmd(cmd="mntr") # print(data.decode()) - reader = csv.reader(data.split('\n'), delimiter='\t') + reader = csv.reader(data.split("\n"), delimiter="\t") result = {} for row in reader: @@ -205,7 +220,6 @@ def test_cmd_mntr(started_cluster): assert int(result["zk_min_latency"]) <= int(result["zk_avg_latency"]) assert int(result["zk_max_latency"]) >= int(result["zk_avg_latency"]) - assert int(result["zk_num_alive_connections"]) == 1 assert int(result["zk_outstanding_requests"]) == 0 @@ -239,14 +253,14 @@ def test_cmd_srst(started_cluster): wait_nodes() clear_znodes() - data = send_4lw_cmd(cmd='srst') + data = send_4lw_cmd(cmd="srst") assert data.strip() == "Server stats reset." - data = send_4lw_cmd(cmd='mntr') + data = send_4lw_cmd(cmd="mntr") assert len(data) != 0 # print(data) - reader = csv.reader(data.split('\n'), delimiter='\t') + reader = csv.reader(data.split("\n"), delimiter="\t") result = {} for row in reader: @@ -266,9 +280,9 @@ def test_cmd_conf(started_cluster): wait_nodes() clear_znodes() - data = send_4lw_cmd(cmd='conf') + data = send_4lw_cmd(cmd="conf") - reader = csv.reader(data.split('\n'), delimiter='=') + reader = csv.reader(data.split("\n"), delimiter="=") result = {} for row in reader: @@ -283,7 +297,10 @@ def test_cmd_conf(started_cluster): assert result["four_letter_word_allow_list"] == "*" assert result["log_storage_path"] == "/var/lib/clickhouse/coordination/log" - assert result["snapshot_storage_path"] == "/var/lib/clickhouse/coordination/snapshots" + assert ( + result["snapshot_storage_path"] + == "/var/lib/clickhouse/coordination/snapshots" + ) assert result["session_timeout_ms"] == "30000" assert result["min_session_timeout_ms"] == "10000" @@ -319,8 +336,8 @@ def test_cmd_conf(started_cluster): def test_cmd_isro(started_cluster): wait_nodes() - assert send_4lw_cmd(node1.name, 'isro') == 'rw' - assert send_4lw_cmd(node2.name, 'isro') == 'ro' + assert send_4lw_cmd(node1.name, "isro") == "rw" + assert send_4lw_cmd(node2.name, "isro") == "ro" def test_cmd_srvr(started_cluster): @@ -334,26 +351,26 @@ def test_cmd_srvr(started_cluster): zk = get_fake_zk(node1.name, timeout=30.0) do_some_action(zk, create_cnt=10) - data = send_4lw_cmd(cmd='srvr') + data = send_4lw_cmd(cmd="srvr") print("srvr output -------------------------------------") print(data) - reader = csv.reader(data.split('\n'), delimiter=':') + reader = csv.reader(data.split("\n"), delimiter=":") result = {} for row in reader: if len(row) != 0: result[row[0].strip()] = row[1].strip() - assert 'ClickHouse Keeper version' in result - assert 'Latency min/avg/max' in result - assert result['Received'] == '10' - assert result['Sent'] == '10' - assert int(result['Connections']) == 1 - assert int(result['Zxid']) > 14 - assert result['Mode'] == 'leader' - assert result['Node count'] == '11' + assert "ClickHouse Keeper version" in result + assert "Latency min/avg/max" in result + assert result["Received"] == "10" + assert result["Sent"] == "10" + assert int(result["Connections"]) == 1 + 
assert int(result["Zxid"]) > 14 + assert result["Mode"] == "leader" + assert result["Node count"] == "11" finally: destroy_zk_client(zk) @@ -370,45 +387,45 @@ def test_cmd_stat(started_cluster): zk = get_fake_zk(node1.name, timeout=30.0) do_some_action(zk, create_cnt=10) - data = send_4lw_cmd(cmd='stat') + data = send_4lw_cmd(cmd="stat") print("stat output -------------------------------------") print(data) # keeper statistics - stats = [n for n in data.split('\n') if '=' not in n] - reader = csv.reader(stats, delimiter=':') + stats = [n for n in data.split("\n") if "=" not in n] + reader = csv.reader(stats, delimiter=":") result = {} for row in reader: if len(row) != 0: result[row[0].strip()] = row[1].strip() - assert 'ClickHouse Keeper version' in result - assert 'Latency min/avg/max' in result - assert result['Received'] == '10' - assert result['Sent'] == '10' - assert int(result['Connections']) == 1 - assert int(result['Zxid']) > 14 - assert result['Mode'] == 'leader' - assert result['Node count'] == '11' + assert "ClickHouse Keeper version" in result + assert "Latency min/avg/max" in result + assert result["Received"] == "10" + assert result["Sent"] == "10" + assert int(result["Connections"]) == 1 + assert int(result["Zxid"]) > 14 + assert result["Mode"] == "leader" + assert result["Node count"] == "11" # filter connection statistics - cons = [n for n in data.split('\n') if '=' in n] + cons = [n for n in data.split("\n") if "=" in n] # filter connection created by 'cons' - cons = [n for n in cons if 'recved=0' not in n and len(n) > 0] + cons = [n for n in cons if "recved=0" not in n and len(n) > 0] assert len(cons) == 1 - conn_stat = re.match(r'(.*?)[:].*[(](.*?)[)].*', cons[0].strip(), re.S).group(2) + conn_stat = re.match(r"(.*?)[:].*[(](.*?)[)].*", cons[0].strip(), re.S).group(2) assert conn_stat is not None result = {} - for col in conn_stat.split(','): - col = col.strip().split('=') + for col in conn_stat.split(","): + col = col.strip().split("=") result[col[0]] = col[1] - assert result['recved'] == '10' - assert result['sent'] == '10' + assert result["recved"] == "10" + assert result["sent"] == "10" finally: destroy_zk_client(zk) @@ -424,36 +441,36 @@ def test_cmd_cons(started_cluster): zk = get_fake_zk(node1.name, timeout=30.0) do_some_action(zk, create_cnt=10) - data = send_4lw_cmd(cmd='cons') + data = send_4lw_cmd(cmd="cons") print("cons output -------------------------------------") print(data) # filter connection created by 'cons' - cons = [n for n in data.split('\n') if 'recved=0' not in n and len(n) > 0] + cons = [n for n in data.split("\n") if "recved=0" not in n and len(n) > 0] assert len(cons) == 1 - conn_stat = re.match(r'(.*?)[:].*[(](.*?)[)].*', cons[0].strip(), re.S).group(2) + conn_stat = re.match(r"(.*?)[:].*[(](.*?)[)].*", cons[0].strip(), re.S).group(2) assert conn_stat is not None result = {} - for col in conn_stat.split(','): - col = col.strip().split('=') + for col in conn_stat.split(","): + col = col.strip().split("=") result[col[0]] = col[1] - assert result['recved'] == '10' - assert result['sent'] == '10' - assert 'sid' in result - assert result['lop'] == 'Create' - assert 'est' in result - assert result['to'] == '30000' - assert result['lcxid'] == '0x000000000000000a' - assert 'lzxid' in result - assert 'lresp' in result - assert int(result['llat']) >= 0 - assert int(result['minlat']) >= 0 - assert int(result['avglat']) >= 0 - assert int(result['maxlat']) >= 0 + assert result["recved"] == "10" + assert result["sent"] == "10" + assert "sid" in result + 
assert result["lop"] == "Create" + assert "est" in result + assert result["to"] == "30000" + assert result["lcxid"] == "0x000000000000000a" + assert "lzxid" in result + assert "lresp" in result + assert int(result["llat"]) >= 0 + assert int(result["minlat"]) >= 0 + assert int(result["avglat"]) >= 0 + assert int(result["maxlat"]) >= 0 finally: destroy_zk_client(zk) @@ -469,43 +486,43 @@ def test_cmd_crst(started_cluster): zk = get_fake_zk(node1.name, timeout=30.0) do_some_action(zk, create_cnt=10) - data = send_4lw_cmd(cmd='crst') + data = send_4lw_cmd(cmd="crst") print("crst output -------------------------------------") print(data) - data = send_4lw_cmd(cmd='cons') + data = send_4lw_cmd(cmd="cons") print("cons output(after crst) -------------------------------------") print(data) # 2 connections, 1 for 'cons' command, 1 for zk - cons = [n for n in data.split('\n') if len(n) > 0] + cons = [n for n in data.split("\n") if len(n) > 0] assert len(cons) == 2 # connection for zk - zk_conn = [n for n in cons if not n.__contains__('sid=0xffffffffffffffff')][0] + zk_conn = [n for n in cons if not n.__contains__("sid=0xffffffffffffffff")][0] - conn_stat = re.match(r'(.*?)[:].*[(](.*?)[)].*', zk_conn.strip(), re.S).group(2) + conn_stat = re.match(r"(.*?)[:].*[(](.*?)[)].*", zk_conn.strip(), re.S).group(2) assert conn_stat is not None result = {} - for col in conn_stat.split(','): - col = col.strip().split('=') + for col in conn_stat.split(","): + col = col.strip().split("=") result[col[0]] = col[1] - assert result['recved'] == '0' - assert result['sent'] == '0' - assert 'sid' in result - assert result['lop'] == 'NA' - assert 'est' in result - assert result['to'] == '30000' - assert 'lcxid' not in result - assert result['lzxid'] == '0xffffffffffffffff' - assert result['lresp'] == '0' - assert int(result['llat']) == 0 - assert int(result['minlat']) == 0 - assert int(result['avglat']) == 0 - assert int(result['maxlat']) == 0 + assert result["recved"] == "0" + assert result["sent"] == "0" + assert "sid" in result + assert result["lop"] == "NA" + assert "est" in result + assert result["to"] == "30000" + assert "lcxid" not in result + assert result["lzxid"] == "0xffffffffffffffff" + assert result["lresp"] == "0" + assert int(result["llat"]) == 0 + assert int(result["minlat"]) == 0 + assert int(result["avglat"]) == 0 + assert int(result["maxlat"]) == 0 finally: destroy_zk_client(zk) @@ -521,18 +538,18 @@ def test_cmd_dump(started_cluster): zk = get_fake_zk(node1.name, timeout=30.0) do_some_action(zk, ephemeral_cnt=2) - data = send_4lw_cmd(cmd='dump') + data = send_4lw_cmd(cmd="dump") print("dump output -------------------------------------") print(data) - list_data = data.split('\n') + list_data = data.split("\n") - session_count = int(re.match(r'.*[(](.*?)[)].*', list_data[0], re.S).group(1)) + session_count = int(re.match(r".*[(](.*?)[)].*", list_data[0], re.S).group(1)) assert session_count == 1 - assert '\t' + '/test_4lw_ephemeral_node_0' in list_data - assert '\t' + '/test_4lw_ephemeral_node_1' in list_data + assert "\t" + "/test_4lw_ephemeral_node_0" in list_data + assert "\t" + "/test_4lw_ephemeral_node_1" in list_data finally: destroy_zk_client(zk) @@ -547,19 +564,23 @@ def test_cmd_wchs(started_cluster): zk = get_fake_zk(node1.name, timeout=30.0) do_some_action(zk, create_cnt=2, watch_cnt=2) - data = send_4lw_cmd(cmd='wchs') + data = send_4lw_cmd(cmd="wchs") print("wchs output -------------------------------------") print(data) - list_data = [n for n in data.split('\n') if len(n.strip()) > 0] + 
list_data = [n for n in data.split("\n") if len(n.strip()) > 0] # 37 connections watching 632141 paths # Total watches:632141 - matcher = re.match(r'([0-9].*) connections watching ([0-9].*) paths', list_data[0], re.S) + matcher = re.match( + r"([0-9].*) connections watching ([0-9].*) paths", list_data[0], re.S + ) conn_count = int(matcher.group(1)) watch_path_count = int(matcher.group(2)) - watch_count = int(re.match(r'Total watches:([0-9].*)', list_data[1], re.S).group(1)) + watch_count = int( + re.match(r"Total watches:([0-9].*)", list_data[1], re.S).group(1) + ) assert conn_count == 1 assert watch_path_count == 2 @@ -578,16 +599,16 @@ def test_cmd_wchc(started_cluster): zk = get_fake_zk(node1.name, timeout=30.0) do_some_action(zk, create_cnt=2, watch_cnt=2) - data = send_4lw_cmd(cmd='wchc') + data = send_4lw_cmd(cmd="wchc") print("wchc output -------------------------------------") print(data) - list_data = [n for n in data.split('\n') if len(n.strip()) > 0] + list_data = [n for n in data.split("\n") if len(n.strip()) > 0] assert len(list_data) == 3 - assert '\t' + '/test_4lw_normal_node_0' in list_data - assert '\t' + '/test_4lw_normal_node_1' in list_data + assert "\t" + "/test_4lw_normal_node_0" in list_data + assert "\t" + "/test_4lw_normal_node_1" in list_data finally: destroy_zk_client(zk) @@ -602,16 +623,15 @@ def test_cmd_wchp(started_cluster): zk = get_fake_zk(node1.name, timeout=30.0) do_some_action(zk, create_cnt=2, watch_cnt=2) - data = send_4lw_cmd(cmd='wchp') + data = send_4lw_cmd(cmd="wchp") print("wchp output -------------------------------------") print(data) - list_data = [n for n in data.split('\n') if len(n.strip()) > 0] + list_data = [n for n in data.split("\n") if len(n.strip()) > 0] assert len(list_data) == 4 - assert '/test_4lw_normal_node_0' in list_data - assert '/test_4lw_normal_node_1' in list_data + assert "/test_4lw_normal_node_0" in list_data + assert "/test_4lw_normal_node_1" in list_data finally: destroy_zk_client(zk) - diff --git a/tests/integration/test_keeper_four_word_command/test_allow_list.py b/tests/integration/test_keeper_four_word_command/test_allow_list.py index bdf19de5735..026bd1d59af 100644 --- a/tests/integration/test_keeper_four_word_command/test_allow_list.py +++ b/tests/integration/test_keeper_four_word_command/test_allow_list.py @@ -4,9 +4,19 @@ from helpers.cluster import ClickHouseCluster import time cluster = ClickHouseCluster(__file__, name="test_keeper_4lw_allow_list") -node1 = cluster.add_instance('node1', main_configs=['configs/keeper_config_with_allow_list.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/keeper_config_without_allow_list.xml'], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/keeper_config_with_allow_list_all.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/keeper_config_with_allow_list.xml"], stay_alive=True +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/keeper_config_without_allow_list.xml"], + stay_alive=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/keeper_config_with_allow_list_all.xml"], + stay_alive=True, +) from kazoo.client import KazooClient, KazooState @@ -62,7 +72,9 @@ def get_keeper_socket(nodename): def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + 
) _fake_zk_instance.start() return _fake_zk_instance @@ -73,7 +85,7 @@ def close_keeper_socket(cli): cli.close() -def send_cmd(node_name, command = "ruok"): +def send_cmd(node_name, command="ruok"): client = None try: wait_nodes() @@ -89,9 +101,9 @@ def test_allow_list(started_cluster): client = None try: wait_nodes() - assert send_cmd(node1.name) == 'imok' - assert send_cmd(node1.name, command = 'mntr') == '' - assert send_cmd(node2.name) == 'imok' - assert send_cmd(node3.name) == 'imok' + assert send_cmd(node1.name) == "imok" + assert send_cmd(node1.name, command="mntr") == "" + assert send_cmd(node2.name) == "imok" + assert send_cmd(node3.name) == "imok" finally: close_keeper_socket(client) diff --git a/tests/integration/test_keeper_incorrect_config/test.py b/tests/integration/test_keeper_incorrect_config/test.py index 4ab6b87d853..52c76c84e23 100644 --- a/tests/integration/test_keeper_incorrect_config/test.py +++ b/tests/integration/test_keeper_incorrect_config/test.py @@ -4,7 +4,10 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/enable_keeper1.xml"], stay_alive=True +) + @pytest.fixture(scope="module") def started_cluster(): @@ -102,18 +105,25 @@ NORMAL_CONFIG = """ """ + def test_duplicate_endpoint(started_cluster): node1.stop_clickhouse() - node1.replace_config("/etc/clickhouse-server/config.d/enable_keeper1.xml", DUPLICATE_ENDPOINT_CONFIG) + node1.replace_config( + "/etc/clickhouse-server/config.d/enable_keeper1.xml", DUPLICATE_ENDPOINT_CONFIG + ) with pytest.raises(Exception): node1.start_clickhouse(start_wait_sec=10) - node1.replace_config("/etc/clickhouse-server/config.d/enable_keeper1.xml", DUPLICATE_ID_CONFIG) + node1.replace_config( + "/etc/clickhouse-server/config.d/enable_keeper1.xml", DUPLICATE_ID_CONFIG + ) with pytest.raises(Exception): node1.start_clickhouse(start_wait_sec=10) - node1.replace_config("/etc/clickhouse-server/config.d/enable_keeper1.xml", NORMAL_CONFIG) + node1.replace_config( + "/etc/clickhouse-server/config.d/enable_keeper1.xml", NORMAL_CONFIG + ) node1.start_clickhouse() assert node1.query("SELECT 1") == "1\n" diff --git a/tests/integration/test_keeper_internal_secure/test.py b/tests/integration/test_keeper_internal_secure/test.py index b4bf62f9a37..2d45e95e4ff 100644 --- a/tests/integration/test_keeper_internal_secure/test.py +++ b/tests/integration/test_keeper_internal_secure/test.py @@ -8,12 +8,40 @@ import os import time cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_secure_keeper1.xml', 'configs/ssl_conf.xml', 'configs/server.crt', 'configs/server.key', 'configs/rootCA.pem']) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_secure_keeper2.xml', 'configs/ssl_conf.xml', 'configs/server.crt', 'configs/server.key', 'configs/rootCA.pem']) -node3 = cluster.add_instance('node3', main_configs=['configs/enable_secure_keeper3.xml', 'configs/ssl_conf.xml', 'configs/server.crt', 'configs/server.key', 'configs/rootCA.pem']) +node1 = cluster.add_instance( + "node1", + main_configs=[ + "configs/enable_secure_keeper1.xml", + "configs/ssl_conf.xml", + "configs/server.crt", + "configs/server.key", + "configs/rootCA.pem", + ], +) +node2 = cluster.add_instance( + "node2", + main_configs=[ + "configs/enable_secure_keeper2.xml", + "configs/ssl_conf.xml", + 
"configs/server.crt", + "configs/server.key", + "configs/rootCA.pem", + ], +) +node3 = cluster.add_instance( + "node3", + main_configs=[ + "configs/enable_secure_keeper3.xml", + "configs/ssl_conf.xml", + "configs/server.crt", + "configs/server.key", + "configs/rootCA.pem", + ], +) from kazoo.client import KazooClient, KazooState + @pytest.fixture(scope="module") def started_cluster(): try: @@ -24,11 +52,15 @@ def started_cluster(): finally: cluster.shutdown() + def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def test_secure_raft_works(started_cluster): try: node1_zk = get_fake_zk("node1") diff --git a/tests/integration/test_keeper_multinode_blocade_leader/test.py b/tests/integration/test_keeper_multinode_blocade_leader/test.py index 2101c2a973f..c2d4039e122 100644 --- a/tests/integration/test_keeper_multinode_blocade_leader/test.py +++ b/tests/integration/test_keeper_multinode_blocade_leader/test.py @@ -9,9 +9,21 @@ from helpers.network import PartitionManager from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml', 'configs/use_keeper.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml', 'configs/use_keeper.xml'], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/enable_keeper3.xml', 'configs/use_keeper.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/enable_keeper1.xml", "configs/use_keeper.xml"], + stay_alive=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/enable_keeper2.xml", "configs/use_keeper.xml"], + stay_alive=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/enable_keeper3.xml", "configs/use_keeper.xml"], + stay_alive=True, +) from kazoo.client import KazooClient, KazooState @@ -27,6 +39,7 @@ TODO find (or write) not so smart python client. TODO remove this when jepsen tests will be written. 
""" + @pytest.fixture(scope="module") def started_cluster(): try: @@ -37,8 +50,10 @@ def started_cluster(): finally: cluster.shutdown() + def smaller_exception(ex): - return '\n'.join(str(ex).split('\n')[0:2]) + return "\n".join(str(ex).split("\n")[0:2]) + def wait_node(node): for _ in range(100): @@ -59,13 +74,16 @@ def wait_node(node): else: raise Exception("Can't wait node", node.name, "to become ready") + def wait_nodes(): for node in [node1, node2, node3]: wait_node(node) def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance @@ -78,7 +96,11 @@ def test_blocade_leader(started_cluster): try: for i, node in enumerate([node1, node2, node3]): node.query("CREATE DATABASE IF NOT EXISTS ordinary ENGINE=Ordinary") - node.query("CREATE TABLE IF NOT EXISTS ordinary.t1 (value UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/t1', '{}') ORDER BY tuple()".format(i + 1)) + node.query( + "CREATE TABLE IF NOT EXISTS ordinary.t1 (value UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/t1', '{}') ORDER BY tuple()".format( + i + 1 + ) + ) break except Exception as ex: print("Got exception from node", smaller_exception(ex)) @@ -99,7 +121,9 @@ def test_blocade_leader(started_cluster): for i in range(100): try: - restart_replica_for_sure(node2, "ordinary.t1", "/clickhouse/t1/replicas/2") + restart_replica_for_sure( + node2, "ordinary.t1", "/clickhouse/t1/replicas/2" + ) node2.query("INSERT INTO ordinary.t1 SELECT rand() FROM numbers(100)") break except Exception as ex: @@ -111,12 +135,16 @@ def test_blocade_leader(started_cluster): time.sleep(0.5) else: for num, node in enumerate([node1, node2, node3]): - dump_zk(node, '/clickhouse/t1', '/clickhouse/t1/replicas/{}'.format(num + 1)) + dump_zk( + node, "/clickhouse/t1", "/clickhouse/t1/replicas/{}".format(num + 1) + ) assert False, "Cannot insert anything node2" for i in range(100): try: - restart_replica_for_sure(node3, "ordinary.t1", "/clickhouse/t1/replicas/3") + restart_replica_for_sure( + node3, "ordinary.t1", "/clickhouse/t1/replicas/3" + ) node3.query("INSERT INTO ordinary.t1 SELECT rand() FROM numbers(100)") break except Exception as ex: @@ -128,19 +156,26 @@ def test_blocade_leader(started_cluster): time.sleep(0.5) else: for num, node in enumerate([node1, node2, node3]): - dump_zk(node, '/clickhouse/t1', '/clickhouse/t1/replicas/{}'.format(num + 1)) + dump_zk( + node, "/clickhouse/t1", "/clickhouse/t1/replicas/{}".format(num + 1) + ) assert False, "Cannot insert anything node3" for n, node in enumerate([node1, node2, node3]): for i in range(100): try: - restart_replica_for_sure(node, "ordinary.t1", "/clickhouse/t1/replicas/{}".format(n + 1)) + restart_replica_for_sure( + node, "ordinary.t1", "/clickhouse/t1/replicas/{}".format(n + 1) + ) break except Exception as ex: try: node.query("ATTACH TABLE ordinary.t1") except Exception as attach_ex: - print("Got exception node{}".format(n + 1), smaller_exception(attach_ex)) + print( + "Got exception node{}".format(n + 1), + smaller_exception(attach_ex), + ) print("Got exception node{}".format(n + 1), smaller_exception(ex)) time.sleep(0.5) @@ -156,31 +191,42 @@ def test_blocade_leader(started_cluster): time.sleep(0.5) else: for num, node in enumerate([node1, node2, node3]): - dump_zk(node, '/clickhouse/t1', '/clickhouse/t1/replicas/{}'.format(num + 1)) + dump_zk( + 
node, "/clickhouse/t1", "/clickhouse/t1/replicas/{}".format(num + 1) + ) assert False, "Cannot insert anything node1" for n, node in enumerate([node1, node2, node3]): for i in range(100): try: - restart_replica_for_sure(node, "ordinary.t1", "/clickhouse/t1/replicas/{}".format(n + 1)) + restart_replica_for_sure( + node, "ordinary.t1", "/clickhouse/t1/replicas/{}".format(n + 1) + ) node.query("SYSTEM SYNC REPLICA ordinary.t1", timeout=10) break except Exception as ex: try: node.query("ATTACH TABLE ordinary.t1") except Exception as attach_ex: - print("Got exception node{}".format(n + 1), smaller_exception(attach_ex)) + print( + "Got exception node{}".format(n + 1), + smaller_exception(attach_ex), + ) print("Got exception node{}".format(n + 1), smaller_exception(ex)) time.sleep(0.5) else: for num, node in enumerate([node1, node2, node3]): - dump_zk(node, '/clickhouse/t1', '/clickhouse/t1/replicas/{}'.format(num + 1)) - assert False, "Cannot sync replica node{}".format(n+1) + dump_zk( + node, "/clickhouse/t1", "/clickhouse/t1/replicas/{}".format(num + 1) + ) + assert False, "Cannot sync replica node{}".format(n + 1) if node1.query("SELECT COUNT() FROM ordinary.t1") != "310\n": for num, node in enumerate([node1, node2, node3]): - dump_zk(node, '/clickhouse/t1', '/clickhouse/t1/replicas/{}'.format(num + 1)) + dump_zk( + node, "/clickhouse/t1", "/clickhouse/t1/replicas/{}".format(num + 1) + ) assert_eq_with_retry(node1, "SELECT COUNT() FROM ordinary.t1", "310") assert_eq_with_retry(node2, "SELECT COUNT() FROM ordinary.t1", "310") @@ -192,13 +238,38 @@ def dump_zk(node, zk_path, replica_path): print("Replicas") print(node.query("SELECT * FROM system.replicas FORMAT Vertical")) print("Replica 2 info") - print(node.query("SELECT * FROM system.zookeeper WHERE path = '{}' FORMAT Vertical".format(zk_path))) + print( + node.query( + "SELECT * FROM system.zookeeper WHERE path = '{}' FORMAT Vertical".format( + zk_path + ) + ) + ) print("Queue") - print(node.query("SELECT * FROM system.zookeeper WHERE path = '{}/queue' FORMAT Vertical".format(replica_path))) + print( + node.query( + "SELECT * FROM system.zookeeper WHERE path = '{}/queue' FORMAT Vertical".format( + replica_path + ) + ) + ) print("Log") - print(node.query("SELECT * FROM system.zookeeper WHERE path = '{}/log' FORMAT Vertical".format(zk_path))) + print( + node.query( + "SELECT * FROM system.zookeeper WHERE path = '{}/log' FORMAT Vertical".format( + zk_path + ) + ) + ) print("Parts") - print(node.query("SELECT name FROM system.zookeeper WHERE path = '{}/parts' FORMAT Vertical".format(replica_path))) + print( + node.query( + "SELECT name FROM system.zookeeper WHERE path = '{}/parts' FORMAT Vertical".format( + replica_path + ) + ) + ) + def restart_replica_for_sure(node, table_name, zk_replica_path): fake_zk = None @@ -218,7 +289,6 @@ def restart_replica_for_sure(node, table_name, zk_replica_path): fake_zk.close() - # in extremely rare case it can take more than 5 minutes in debug build with sanitizer @pytest.mark.timeout(600) def test_blocade_leader_twice(started_cluster): @@ -227,7 +297,11 @@ def test_blocade_leader_twice(started_cluster): try: for i, node in enumerate([node1, node2, node3]): node.query("CREATE DATABASE IF NOT EXISTS ordinary ENGINE=Ordinary") - node.query("CREATE TABLE IF NOT EXISTS ordinary.t2 (value UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/t2', '{}') ORDER BY tuple()".format(i + 1)) + node.query( + "CREATE TABLE IF NOT EXISTS ordinary.t2 (value UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/t2', '{}') ORDER BY 
tuple()".format( + i + 1 + ) + ) break except Exception as ex: print("Got exception from node", smaller_exception(ex)) @@ -248,7 +322,9 @@ def test_blocade_leader_twice(started_cluster): for i in range(100): try: - restart_replica_for_sure(node2, "ordinary.t2", "/clickhouse/t2/replicas/2") + restart_replica_for_sure( + node2, "ordinary.t2", "/clickhouse/t2/replicas/2" + ) node2.query("INSERT INTO ordinary.t2 SELECT rand() FROM numbers(100)") break except Exception as ex: @@ -260,12 +336,16 @@ def test_blocade_leader_twice(started_cluster): time.sleep(0.5) else: for num, node in enumerate([node1, node2, node3]): - dump_zk(node, '/clickhouse/t2', '/clickhouse/t2/replicas/{}'.format(num + 1)) + dump_zk( + node, "/clickhouse/t2", "/clickhouse/t2/replicas/{}".format(num + 1) + ) assert False, "Cannot reconnect for node2" for i in range(100): try: - restart_replica_for_sure(node3, "ordinary.t2", "/clickhouse/t2/replicas/3") + restart_replica_for_sure( + node3, "ordinary.t2", "/clickhouse/t2/replicas/3" + ) node3.query("SYSTEM SYNC REPLICA ordinary.t2", timeout=10) node3.query("INSERT INTO ordinary.t2 SELECT rand() FROM numbers(100)") break @@ -278,7 +358,9 @@ def test_blocade_leader_twice(started_cluster): time.sleep(0.5) else: for num, node in enumerate([node1, node2, node3]): - dump_zk(node, '/clickhouse/t2', '/clickhouse/t2/replicas/{}'.format(num + 1)) + dump_zk( + node, "/clickhouse/t2", "/clickhouse/t2/replicas/{}".format(num + 1) + ) assert False, "Cannot reconnect for node3" node2.query("SYSTEM SYNC REPLICA ordinary.t2", timeout=10) @@ -306,19 +388,26 @@ def test_blocade_leader_twice(started_cluster): for n, node in enumerate([node1, node2, node3]): for i in range(100): try: - restart_replica_for_sure(node, "ordinary.t2", "/clickhouse/t2/replicas/{}".format(n + 1)) + restart_replica_for_sure( + node, "ordinary.t2", "/clickhouse/t2/replicas/{}".format(n + 1) + ) break except Exception as ex: try: node.query("ATTACH TABLE ordinary.t2") except Exception as attach_ex: - print("Got exception node{}".format(n + 1), smaller_exception(attach_ex)) + print( + "Got exception node{}".format(n + 1), + smaller_exception(attach_ex), + ) print("Got exception node{}".format(n + 1), smaller_exception(ex)) time.sleep(0.5) else: for num, node in enumerate([node1, node2, node3]): - dump_zk(node, '/clickhouse/t2', '/clickhouse/t2/replicas/{}'.format(num + 1)) + dump_zk( + node, "/clickhouse/t2", "/clickhouse/t2/replicas/{}".format(num + 1) + ) assert False, "Cannot reconnect for node{}".format(n + 1) for n, node in enumerate([node1, node2, node3]): @@ -331,14 +420,18 @@ def test_blocade_leader_twice(started_cluster): time.sleep(0.5) else: for num, node in enumerate([node1, node2, node3]): - dump_zk(node, '/clickhouse/t2', '/clickhouse/t2/replicas/{}'.format(num + 1)) + dump_zk( + node, "/clickhouse/t2", "/clickhouse/t2/replicas/{}".format(num + 1) + ) assert False, "Cannot reconnect for node{}".format(n + 1) for i in range(100): all_done = True for n, node in enumerate([node1, node2, node3]): try: - restart_replica_for_sure(node, "ordinary.t2", "/clickhouse/t2/replicas/{}".format(n + 1)) + restart_replica_for_sure( + node, "ordinary.t2", "/clickhouse/t2/replicas/{}".format(n + 1) + ) node.query("SYSTEM SYNC REPLICA ordinary.t2", timeout=10) break except Exception as ex: @@ -346,7 +439,10 @@ def test_blocade_leader_twice(started_cluster): try: node.query("ATTACH TABLE ordinary.t2") except Exception as attach_ex: - print("Got exception node{}".format(n + 1), smaller_exception(attach_ex)) + print( + "Got 
exception node{}".format(n + 1), + smaller_exception(attach_ex), + ) print("Got exception node{}".format(n + 1), smaller_exception(ex)) time.sleep(0.5) @@ -355,13 +451,17 @@ def test_blocade_leader_twice(started_cluster): break else: for num, node in enumerate([node1, node2, node3]): - dump_zk(node, '/clickhouse/t2', '/clickhouse/t2/replicas/{}'.format(num + 1)) + dump_zk( + node, "/clickhouse/t2", "/clickhouse/t2/replicas/{}".format(num + 1) + ) assert False, "Cannot reconnect in i {} retries".format(i) assert_eq_with_retry(node1, "SELECT COUNT() FROM ordinary.t2", "510") if node2.query("SELECT COUNT() FROM ordinary.t2") != "510\n": for num, node in enumerate([node1, node2, node3]): - dump_zk(node, '/clickhouse/t2', '/clickhouse/t2/replicas/{}'.format(num + 1)) + dump_zk( + node, "/clickhouse/t2", "/clickhouse/t2/replicas/{}".format(num + 1) + ) assert_eq_with_retry(node2, "SELECT COUNT() FROM ordinary.t2", "510") assert_eq_with_retry(node3, "SELECT COUNT() FROM ordinary.t2", "510") diff --git a/tests/integration/test_keeper_multinode_simple/test.py b/tests/integration/test_keeper_multinode_simple/test.py index d7cd4dd927e..694600acc67 100644 --- a/tests/integration/test_keeper_multinode_simple/test.py +++ b/tests/integration/test_keeper_multinode_simple/test.py @@ -9,12 +9,25 @@ from helpers.network import PartitionManager from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml', 'configs/use_keeper.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml', 'configs/use_keeper.xml'], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/enable_keeper3.xml', 'configs/use_keeper.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/enable_keeper1.xml", "configs/use_keeper.xml"], + stay_alive=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/enable_keeper2.xml", "configs/use_keeper.xml"], + stay_alive=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/enable_keeper3.xml", "configs/use_keeper.xml"], + stay_alive=True, +) from kazoo.client import KazooClient, KazooState + @pytest.fixture(scope="module") def started_cluster(): try: @@ -25,8 +38,10 @@ def started_cluster(): finally: cluster.shutdown() + def smaller_exception(ex): - return '\n'.join(str(ex).split('\n')[0:2]) + return "\n".join(str(ex).split("\n")[0:2]) + def wait_node(node): for _ in range(100): @@ -47,16 +62,20 @@ def wait_node(node): else: raise Exception("Can't wait node", node.name, "to become ready") + def wait_nodes(): for node in [node1, node2, node3]: wait_node(node) def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def test_read_write_multinode(started_cluster): try: wait_nodes() @@ -111,6 +130,7 @@ def test_watch_on_follower(started_cluster): node3_zk.set("/test_data_watches", b"world") node1_data = None + def node1_callback(event): print("node1 data watch called") nonlocal node1_data @@ -119,6 +139,7 @@ def test_watch_on_follower(started_cluster): node1_zk.get("/test_data_watches", watch=node1_callback) node2_data = None + def node2_callback(event): print("node2 data watch called") nonlocal 
node2_data @@ -127,6 +148,7 @@ def test_watch_on_follower(started_cluster): node2_zk.get("/test_data_watches", watch=node2_callback) node3_data = None + def node3_callback(event): print("node3 data watch called") nonlocal node3_data @@ -169,7 +191,10 @@ def test_session_expiration(started_cluster): node3_zk.stop() node3_zk.close() for _ in range(100): - if node1_zk.exists("/test_ephemeral_node") is None and node2_zk.exists("/test_ephemeral_node") is None: + if ( + node1_zk.exists("/test_ephemeral_node") is None + and node2_zk.exists("/test_ephemeral_node") is None + ): break print("Node1 exists", node1_zk.exists("/test_ephemeral_node")) print("Node2 exists", node2_zk.exists("/test_ephemeral_node")) @@ -221,7 +246,11 @@ def test_follower_restart(started_cluster): def test_simple_replicated_table(started_cluster): wait_nodes() for i, node in enumerate([node1, node2, node3]): - node.query("CREATE TABLE t (value UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/t', '{}') ORDER BY tuple()".format(i + 1)) + node.query( + "CREATE TABLE t (value UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/t', '{}') ORDER BY tuple()".format( + i + 1 + ) + ) node2.query("INSERT INTO t SELECT number FROM numbers(10)") diff --git a/tests/integration/test_keeper_nodes_add/test.py b/tests/integration/test_keeper_nodes_add/test.py index ae4a996f6b3..ad7d7c21182 100644 --- a/tests/integration/test_keeper_nodes_add/test.py +++ b/tests/integration/test_keeper_nodes_add/test.py @@ -12,15 +12,19 @@ from helpers.test_tools import assert_eq_with_retry from kazoo.client import KazooClient, KazooState cluster = ClickHouseCluster(__file__) -CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'configs') +CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs") -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=[], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=[], stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/enable_keeper1.xml"], stay_alive=True +) +node2 = cluster.add_instance("node2", main_configs=[], stay_alive=True) +node3 = cluster.add_instance("node3", main_configs=[], stay_alive=True) def get_fake_zk(node, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(node.name) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(node.name) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance @@ -35,8 +39,9 @@ def started_cluster(): finally: cluster.shutdown() + def start(node): - node.start_clickhouse() + node.start_clickhouse() def test_nodes_add(started_cluster): @@ -47,9 +52,15 @@ def test_nodes_add(started_cluster): p = Pool(3) node2.stop_clickhouse() - node2.copy_file_to_container(os.path.join(CONFIG_DIR, "enable_keeper_two_nodes_2.xml"), "/etc/clickhouse-server/config.d/enable_keeper2.xml") + node2.copy_file_to_container( + os.path.join(CONFIG_DIR, "enable_keeper_two_nodes_2.xml"), + "/etc/clickhouse-server/config.d/enable_keeper2.xml", + ) waiter = p.apply_async(start, (node2,)) - node1.copy_file_to_container(os.path.join(CONFIG_DIR, "enable_keeper_two_nodes_1.xml"), "/etc/clickhouse-server/config.d/enable_keeper1.xml") + node1.copy_file_to_container( + os.path.join(CONFIG_DIR, "enable_keeper_two_nodes_1.xml"), + "/etc/clickhouse-server/config.d/enable_keeper1.xml", + ) node1.query("SYSTEM RELOAD CONFIG") waiter.wait() @@ 
-65,10 +76,19 @@ def test_nodes_add(started_cluster): node3.stop_clickhouse() - node3.copy_file_to_container(os.path.join(CONFIG_DIR, "enable_keeper_three_nodes_3.xml"), "/etc/clickhouse-server/config.d/enable_keeper3.xml") + node3.copy_file_to_container( + os.path.join(CONFIG_DIR, "enable_keeper_three_nodes_3.xml"), + "/etc/clickhouse-server/config.d/enable_keeper3.xml", + ) waiter = p.apply_async(start, (node3,)) - node2.copy_file_to_container(os.path.join(CONFIG_DIR, "enable_keeper_three_nodes_2.xml"), "/etc/clickhouse-server/config.d/enable_keeper2.xml") - node1.copy_file_to_container(os.path.join(CONFIG_DIR, "enable_keeper_three_nodes_1.xml"), "/etc/clickhouse-server/config.d/enable_keeper1.xml") + node2.copy_file_to_container( + os.path.join(CONFIG_DIR, "enable_keeper_three_nodes_2.xml"), + "/etc/clickhouse-server/config.d/enable_keeper2.xml", + ) + node1.copy_file_to_container( + os.path.join(CONFIG_DIR, "enable_keeper_three_nodes_1.xml"), + "/etc/clickhouse-server/config.d/enable_keeper1.xml", + ) node1.query("SYSTEM RELOAD CONFIG") node2.query("SYSTEM RELOAD CONFIG") diff --git a/tests/integration/test_keeper_nodes_move/test.py b/tests/integration/test_keeper_nodes_move/test.py index e3f1a161b07..9a571cd8ed6 100644 --- a/tests/integration/test_keeper_nodes_move/test.py +++ b/tests/integration/test_keeper_nodes_move/test.py @@ -15,12 +15,18 @@ from helpers.test_tools import assert_eq_with_retry from kazoo.client import KazooClient, KazooState cluster = ClickHouseCluster(__file__) -CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'configs') +CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs") -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml'], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/enable_keeper3.xml'], stay_alive=True) -node4 = cluster.add_instance('node4', stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/enable_keeper1.xml"], stay_alive=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/enable_keeper2.xml"], stay_alive=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/enable_keeper3.xml"], stay_alive=True +) +node4 = cluster.add_instance("node4", stay_alive=True) @pytest.fixture(scope="module") @@ -37,8 +43,11 @@ def started_cluster(): def start(node): node.start_clickhouse() + def get_fake_zk(node, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(node.name) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(node.name) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance @@ -60,11 +69,20 @@ def test_node_move(started_cluster): assert zk_conn3.exists("test_four_" + str(i)) is not None node4.stop_clickhouse() - node4.copy_file_to_container(os.path.join(CONFIG_DIR, "enable_keeper_node4_4.xml"), "/etc/clickhouse-server/config.d/enable_keeper4.xml") + node4.copy_file_to_container( + os.path.join(CONFIG_DIR, "enable_keeper_node4_4.xml"), + "/etc/clickhouse-server/config.d/enable_keeper4.xml", + ) p = Pool(3) waiter = p.apply_async(start, (node4,)) - node1.copy_file_to_container(os.path.join(CONFIG_DIR, "enable_keeper_node4_1.xml"), "/etc/clickhouse-server/config.d/enable_keeper1.xml") - node2.copy_file_to_container(os.path.join(CONFIG_DIR, "enable_keeper_node4_2.xml"), 
"/etc/clickhouse-server/config.d/enable_keeper2.xml") + node1.copy_file_to_container( + os.path.join(CONFIG_DIR, "enable_keeper_node4_1.xml"), + "/etc/clickhouse-server/config.d/enable_keeper1.xml", + ) + node2.copy_file_to_container( + os.path.join(CONFIG_DIR, "enable_keeper_node4_2.xml"), + "/etc/clickhouse-server/config.d/enable_keeper2.xml", + ) node1.query("SYSTEM RELOAD CONFIG") node2.query("SYSTEM RELOAD CONFIG") diff --git a/tests/integration/test_keeper_nodes_remove/test.py b/tests/integration/test_keeper_nodes_remove/test.py index 6df4ee1c497..13303d320eb 100644 --- a/tests/integration/test_keeper_nodes_remove/test.py +++ b/tests/integration/test_keeper_nodes_remove/test.py @@ -6,11 +6,18 @@ import os from kazoo.client import KazooClient, KazooState cluster = ClickHouseCluster(__file__) -CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'configs') +CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs") + +node1 = cluster.add_instance( + "node1", main_configs=["configs/enable_keeper1.xml"], stay_alive=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/enable_keeper2.xml"], stay_alive=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/enable_keeper3.xml"], stay_alive=True +) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml'], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/enable_keeper3.xml'], stay_alive=True) @pytest.fixture(scope="module") def started_cluster(): @@ -24,7 +31,9 @@ def started_cluster(): def get_fake_zk(node, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(node.name) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(node.name) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance @@ -45,8 +54,14 @@ def test_nodes_remove(started_cluster): assert zk_conn2.exists("test_two_" + str(i)) is not None assert zk_conn3.exists("test_two_" + str(i)) is not None - node2.copy_file_to_container(os.path.join(CONFIG_DIR, "enable_keeper_two_nodes_2.xml"), "/etc/clickhouse-server/config.d/enable_keeper2.xml") - node1.copy_file_to_container(os.path.join(CONFIG_DIR, "enable_keeper_two_nodes_1.xml"), "/etc/clickhouse-server/config.d/enable_keeper1.xml") + node2.copy_file_to_container( + os.path.join(CONFIG_DIR, "enable_keeper_two_nodes_2.xml"), + "/etc/clickhouse-server/config.d/enable_keeper2.xml", + ) + node1.copy_file_to_container( + os.path.join(CONFIG_DIR, "enable_keeper_two_nodes_1.xml"), + "/etc/clickhouse-server/config.d/enable_keeper1.xml", + ) node1.query("SYSTEM RELOAD CONFIG") node2.query("SYSTEM RELOAD CONFIG") @@ -70,7 +85,10 @@ def test_nodes_remove(started_cluster): node3.stop_clickhouse() - node1.copy_file_to_container(os.path.join(CONFIG_DIR, "enable_single_keeper1.xml"), "/etc/clickhouse-server/config.d/enable_keeper1.xml") + node1.copy_file_to_container( + os.path.join(CONFIG_DIR, "enable_single_keeper1.xml"), + "/etc/clickhouse-server/config.d/enable_keeper1.xml", + ) node1.query("SYSTEM RELOAD CONFIG") zk_conn = get_fake_zk(node1) diff --git a/tests/integration/test_keeper_persistent_log/test.py b/tests/integration/test_keeper_persistent_log/test.py index eec2d4cbbdc..377fa436a87 100644 --- a/tests/integration/test_keeper_persistent_log/test.py +++ b/tests/integration/test_keeper_persistent_log/test.py @@ 
-10,17 +10,23 @@ from kazoo.client import KazooClient, KazooState cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/enable_keeper.xml', 'configs/use_keeper.xml'], stay_alive=True) +node = cluster.add_instance( + "node", + main_configs=["configs/enable_keeper.xml", "configs/use_keeper.xml"], + stay_alive=True, +) def random_string(length): - return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length)) + return "".join(random.choices(string.ascii_lowercase + string.digits, k=length)) + def create_random_path(prefix="", depth=1): if depth == 0: return prefix return create_random_path(os.path.join(prefix, random_string(3)), depth - 1) + @pytest.fixture(scope="module") def started_cluster(): try: @@ -31,11 +37,15 @@ def started_cluster(): finally: cluster.shutdown() + def get_connection_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def test_state_after_restart(started_cluster): try: node_zk = None @@ -59,10 +69,18 @@ def test_state_after_restart(started_cluster): assert node_zk2.get("/test_state_after_restart")[0] == b"somevalue" for i in range(100): if i % 7 == 0: - assert node_zk2.exists("/test_state_after_restart/node" + str(i)) is None + assert ( + node_zk2.exists("/test_state_after_restart/node" + str(i)) is None + ) else: - assert len(node_zk2.get("/test_state_after_restart/node" + str(i))[0]) == 123 - assert node_zk2.get("/test_state_after_restart/node" + str(i))[0] == strs[i] + assert ( + len(node_zk2.get("/test_state_after_restart/node" + str(i))[0]) + == 123 + ) + assert ( + node_zk2.get("/test_state_after_restart/node" + str(i))[0] + == strs[i] + ) finally: try: if node_zk is not None: @@ -75,6 +93,7 @@ def test_state_after_restart(started_cluster): except: pass + def test_state_duplicate_restart(started_cluster): try: node_zk = None @@ -107,10 +126,19 @@ def test_state_duplicate_restart(started_cluster): assert node_zk3.get("/test_state_duplicated_restart")[0] == b"somevalue" for i in range(100): if i % 7 == 0: - assert node_zk3.exists("/test_state_duplicated_restart/node" + str(i)) is None + assert ( + node_zk3.exists("/test_state_duplicated_restart/node" + str(i)) + is None + ) else: - assert len(node_zk3.get("/test_state_duplicated_restart/node" + str(i))[0]) == 123 - assert node_zk3.get("/test_state_duplicated_restart/node" + str(i))[0] == strs[i] + assert ( + len(node_zk3.get("/test_state_duplicated_restart/node" + str(i))[0]) + == 123 + ) + assert ( + node_zk3.get("/test_state_duplicated_restart/node" + str(i))[0] + == strs[i] + ) finally: try: if node_zk is not None: @@ -129,7 +157,6 @@ def test_state_duplicate_restart(started_cluster): pass - # http://zookeeper-user.578899.n2.nabble.com/Why-are-ephemeral-nodes-written-to-disk-tp7583403p7583418.html def test_ephemeral_after_restart(started_cluster): try: @@ -141,7 +168,9 @@ def test_ephemeral_after_restart(started_cluster): strs = [] for i in range(100): strs.append(random_string(123).encode()) - node_zk.create("/test_ephemeral_after_restart/node" + str(i), strs[i], ephemeral=True) + node_zk.create( + "/test_ephemeral_after_restart/node" + str(i), strs[i], ephemeral=True + ) for i in range(100): if i % 7 == 0: @@ -154,10 +183,19 @@ def test_ephemeral_after_restart(started_cluster): assert 
node_zk2.get("/test_ephemeral_after_restart")[0] == b"somevalue" for i in range(100): if i % 7 == 0: - assert node_zk2.exists("/test_ephemeral_after_restart/node" + str(i)) is None + assert ( + node_zk2.exists("/test_ephemeral_after_restart/node" + str(i)) + is None + ) else: - assert len(node_zk2.get("/test_ephemeral_after_restart/node" + str(i))[0]) == 123 - assert node_zk2.get("/test_ephemeral_after_restart/node" + str(i))[0] == strs[i] + assert ( + len(node_zk2.get("/test_ephemeral_after_restart/node" + str(i))[0]) + == 123 + ) + assert ( + node_zk2.get("/test_ephemeral_after_restart/node" + str(i))[0] + == strs[i] + ) finally: try: if node_zk is not None: diff --git a/tests/integration/test_keeper_persistent_log_multinode/test.py b/tests/integration/test_keeper_persistent_log_multinode/test.py index 8c02f269a60..f15e772fd5f 100644 --- a/tests/integration/test_keeper_persistent_log_multinode/test.py +++ b/tests/integration/test_keeper_persistent_log_multinode/test.py @@ -7,12 +7,25 @@ import os import time cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml', 'configs/use_keeper.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml', 'configs/use_keeper.xml'], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/enable_keeper3.xml', 'configs/use_keeper.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/enable_keeper1.xml", "configs/use_keeper.xml"], + stay_alive=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/enable_keeper2.xml", "configs/use_keeper.xml"], + stay_alive=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/enable_keeper3.xml", "configs/use_keeper.xml"], + stay_alive=True, +) from kazoo.client import KazooClient, KazooState + @pytest.fixture(scope="module") def started_cluster(): try: @@ -23,11 +36,15 @@ def started_cluster(): finally: cluster.shutdown() + def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def stop_zk(zk): try: if zk: @@ -36,6 +53,7 @@ def stop_zk(zk): except: pass + def test_restart_multinode(started_cluster): try: node1_zk = node2_zk = node3_zk = None @@ -45,7 +63,10 @@ def test_restart_multinode(started_cluster): node3_zk = get_fake_zk("node3") for i in range(100): - node1_zk.create("/test_read_write_multinode_node" + str(i), ("somedata" + str(i)).encode()) + node1_zk.create( + "/test_read_write_multinode_node" + str(i), + ("somedata" + str(i)).encode(), + ) for i in range(100): if i % 10 == 0: @@ -56,11 +77,21 @@ def test_restart_multinode(started_cluster): for i in range(100): if i % 10 != 0: - assert node2_zk.get("/test_read_write_multinode_node" + str(i))[0] == ("somedata" + str(i)).encode() - assert node3_zk.get("/test_read_write_multinode_node" + str(i))[0] == ("somedata" + str(i)).encode() + assert ( + node2_zk.get("/test_read_write_multinode_node" + str(i))[0] + == ("somedata" + str(i)).encode() + ) + assert ( + node3_zk.get("/test_read_write_multinode_node" + str(i))[0] + == ("somedata" + str(i)).encode() + ) else: - assert node2_zk.exists("/test_read_write_multinode_node" + str(i)) is None - assert node3_zk.exists("/test_read_write_multinode_node" + str(i)) is 
None + assert ( + node2_zk.exists("/test_read_write_multinode_node" + str(i)) is None + ) + assert ( + node3_zk.exists("/test_read_write_multinode_node" + str(i)) is None + ) finally: for zk in [node1_zk, node2_zk, node3_zk]: @@ -76,13 +107,31 @@ def test_restart_multinode(started_cluster): node3_zk = get_fake_zk("node3") for i in range(100): if i % 10 != 0: - assert node1_zk.get("/test_read_write_multinode_node" + str(i))[0] == ("somedata" + str(i)).encode() - assert node2_zk.get("/test_read_write_multinode_node" + str(i))[0] == ("somedata" + str(i)).encode() - assert node3_zk.get("/test_read_write_multinode_node" + str(i))[0] == ("somedata" + str(i)).encode() + assert ( + node1_zk.get("/test_read_write_multinode_node" + str(i))[0] + == ("somedata" + str(i)).encode() + ) + assert ( + node2_zk.get("/test_read_write_multinode_node" + str(i))[0] + == ("somedata" + str(i)).encode() + ) + assert ( + node3_zk.get("/test_read_write_multinode_node" + str(i))[0] + == ("somedata" + str(i)).encode() + ) else: - assert node1_zk.exists("/test_read_write_multinode_node" + str(i)) is None - assert node2_zk.exists("/test_read_write_multinode_node" + str(i)) is None - assert node3_zk.exists("/test_read_write_multinode_node" + str(i)) is None + assert ( + node1_zk.exists("/test_read_write_multinode_node" + str(i)) + is None + ) + assert ( + node2_zk.exists("/test_read_write_multinode_node" + str(i)) + is None + ) + assert ( + node3_zk.exists("/test_read_write_multinode_node" + str(i)) + is None + ) break except Exception as ex: print("Got exception as ex", ex) diff --git a/tests/integration/test_keeper_restore_from_snapshot/test.py b/tests/integration/test_keeper_restore_from_snapshot/test.py index 7a0323d95b4..7270c84bdda 100644 --- a/tests/integration/test_keeper_restore_from_snapshot/test.py +++ b/tests/integration/test_keeper_restore_from_snapshot/test.py @@ -7,12 +7,19 @@ import os import time cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml'], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/enable_keeper3.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/enable_keeper1.xml"], stay_alive=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/enable_keeper2.xml"], stay_alive=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/enable_keeper3.xml"], stay_alive=True +) from kazoo.client import KazooClient, KazooState + @pytest.fixture(scope="module") def started_cluster(): try: @@ -23,11 +30,15 @@ def started_cluster(): finally: cluster.shutdown() + def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def stop_zk(zk): try: if zk: @@ -57,7 +68,10 @@ def test_recover_from_snapshot(started_cluster): # at least we will have 2 snapshots for i in range(435): - node1_zk.create("/test_snapshot_multinode_recover" + str(i), ("somedata" + str(i)).encode()) + node1_zk.create( + "/test_snapshot_multinode_recover" + str(i), + ("somedata" + str(i)).encode(), + ) for i in range(435): if i % 10 == 0: @@ -88,13 +102,28 @@ def test_recover_from_snapshot(started_cluster): for i in range(435): if i % 10 
!= 0: - assert node1_zk.get("/test_snapshot_multinode_recover" + str(i))[0] == ("somedata" + str(i)).encode() - assert node2_zk.get("/test_snapshot_multinode_recover" + str(i))[0] == ("somedata" + str(i)).encode() - assert node3_zk.get("/test_snapshot_multinode_recover" + str(i))[0] == ("somedata" + str(i)).encode() + assert ( + node1_zk.get("/test_snapshot_multinode_recover" + str(i))[0] + == ("somedata" + str(i)).encode() + ) + assert ( + node2_zk.get("/test_snapshot_multinode_recover" + str(i))[0] + == ("somedata" + str(i)).encode() + ) + assert ( + node3_zk.get("/test_snapshot_multinode_recover" + str(i))[0] + == ("somedata" + str(i)).encode() + ) else: - assert node1_zk.exists("/test_snapshot_multinode_recover" + str(i)) is None - assert node2_zk.exists("/test_snapshot_multinode_recover" + str(i)) is None - assert node3_zk.exists("/test_snapshot_multinode_recover" + str(i)) is None + assert ( + node1_zk.exists("/test_snapshot_multinode_recover" + str(i)) is None + ) + assert ( + node2_zk.exists("/test_snapshot_multinode_recover" + str(i)) is None + ) + assert ( + node3_zk.exists("/test_snapshot_multinode_recover" + str(i)) is None + ) finally: for zk in [node1_zk, node2_zk, node3_zk]: stop_zk(zk) diff --git a/tests/integration/test_keeper_secure_client/test.py b/tests/integration/test_keeper_secure_client/test.py index fe03ed8dcf8..55e00880da0 100644 --- a/tests/integration/test_keeper_secure_client/test.py +++ b/tests/integration/test_keeper_secure_client/test.py @@ -6,8 +6,25 @@ import os import time cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_secure_keeper.xml', 'configs/ssl_conf.xml', "configs/dhparam.pem", "configs/server.crt", "configs/server.key"]) -node2 = cluster.add_instance('node2', main_configs=['configs/use_secure_keeper.xml', 'configs/ssl_conf.xml', "configs/server.crt", "configs/server.key"]) +node1 = cluster.add_instance( + "node1", + main_configs=[ + "configs/enable_secure_keeper.xml", + "configs/ssl_conf.xml", + "configs/dhparam.pem", + "configs/server.crt", + "configs/server.key", + ], +) +node2 = cluster.add_instance( + "node2", + main_configs=[ + "configs/use_secure_keeper.xml", + "configs/ssl_conf.xml", + "configs/server.crt", + "configs/server.key", + ], +) @pytest.fixture(scope="module") diff --git a/tests/integration/test_keeper_session/test.py b/tests/integration/test_keeper_session/test.py index 6867f2bf5cd..30db4d9548c 100644 --- a/tests/integration/test_keeper_session/test.py +++ b/tests/integration/test_keeper_session/test.py @@ -5,21 +5,24 @@ import socket import struct from kazoo.client import KazooClient + # from kazoo.protocol.serialization import Connect, read_buffer, write_buffer cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/keeper_config.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/keeper_config.xml"], stay_alive=True +) -bool_struct = struct.Struct('B') -int_struct = struct.Struct('!i') -int_int_struct = struct.Struct('!ii') -int_int_long_struct = struct.Struct('!iiq') +bool_struct = struct.Struct("B") +int_struct = struct.Struct("!i") +int_int_struct = struct.Struct("!ii") +int_int_long_struct = struct.Struct("!iiq") -int_long_int_long_struct = struct.Struct('!iqiq') -long_struct = struct.Struct('!q') -multiheader_struct = struct.Struct('!iBi') -reply_header_struct = struct.Struct('!iqi') -stat_struct = struct.Struct('!qqqqiiiqiiq') +int_long_int_long_struct = struct.Struct("!iqiq") 
+long_struct = struct.Struct("!q") +multiheader_struct = struct.Struct("!iBi") +reply_header_struct = struct.Struct("!iqi") +stat_struct = struct.Struct("!qqqqiiiqiiq") @pytest.fixture(scope="module") @@ -63,7 +66,9 @@ def wait_nodes(): def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance @@ -96,7 +101,7 @@ def read_buffer(bytes, offset): else: index = offset offset += length - return bytes[index:index + length], offset + return bytes[index : index + length], offset def handshake(node_name=node1.name, session_timeout=1000, session_id=0): @@ -105,14 +110,18 @@ def handshake(node_name=node1.name, session_timeout=1000, session_id=0): client = get_keeper_socket(node_name) protocol_version = 0 last_zxid_seen = 0 - session_passwd = b'\x00' * 16 + session_passwd = b"\x00" * 16 read_only = 0 # Handshake serialize and deserialize code is from 'kazoo.protocol.serialization'. # serialize handshake req = bytearray() - req.extend(int_long_int_long_struct.pack(protocol_version, last_zxid_seen, session_timeout, session_id)) + req.extend( + int_long_int_long_struct.pack( + protocol_version, last_zxid_seen, session_timeout, session_id + ) + ) req.extend(write_buffer(session_passwd)) req.extend([1 if read_only else 0]) # add header @@ -127,7 +136,9 @@ def handshake(node_name=node1.name, session_timeout=1000, session_id=0): print("handshake response - len:", data.hex(), len(data)) # ignore header offset = 4 - proto_version, negotiated_timeout, session_id = int_int_long_struct.unpack_from(data, offset) + proto_version, negotiated_timeout, session_id = int_int_long_struct.unpack_from( + data, offset + ) offset += int_int_long_struct.size password, offset = read_buffer(data, offset) try: @@ -153,4 +164,4 @@ def test_session_timeout(started_cluster): assert negotiated_timeout == 8000 negotiated_timeout, _ = handshake(node1.name, session_timeout=20000, session_id=0) - assert negotiated_timeout == 10000 + assert negotiated_timeout == 10000 diff --git a/tests/integration/test_keeper_snapshot_small_distance/test.py b/tests/integration/test_keeper_snapshot_small_distance/test.py index 4acd76806b4..4351c5ac96f 100644 --- a/tests/integration/test_keeper_snapshot_small_distance/test.py +++ b/tests/integration/test_keeper_snapshot_small_distance/test.py @@ -10,42 +10,68 @@ import os import time cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/keeper_config1.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/keeper_config2.xml'], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/keeper_config3.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/keeper_config1.xml"], stay_alive=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/keeper_config2.xml"], stay_alive=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/keeper_config3.xml"], stay_alive=True +) + def start_zookeeper(node): - node1.exec_in_container(['bash', '-c', '/opt/zookeeper/bin/zkServer.sh start']) + node1.exec_in_container(["bash", "-c", "/opt/zookeeper/bin/zkServer.sh start"]) + def stop_zookeeper(node): - node.exec_in_container(['bash', '-c', '/opt/zookeeper/bin/zkServer.sh stop']) + node.exec_in_container(["bash", "-c", 
"/opt/zookeeper/bin/zkServer.sh stop"]) + def clear_zookeeper(node): - node.exec_in_container(['bash', '-c', 'rm -fr /zookeeper/*']) + node.exec_in_container(["bash", "-c", "rm -fr /zookeeper/*"]) + def restart_and_clear_zookeeper(node): stop_zookeeper(node) clear_zookeeper(node) start_zookeeper(node) + def clear_clickhouse_data(node): - node.exec_in_container(['bash', '-c', 'rm -fr /var/lib/clickhouse/coordination/logs/* /var/lib/clickhouse/coordination/snapshots/*']) + node.exec_in_container( + [ + "bash", + "-c", + "rm -fr /var/lib/clickhouse/coordination/logs/* /var/lib/clickhouse/coordination/snapshots/*", + ] + ) + def convert_zookeeper_data(node): - cmd = '/usr/bin/clickhouse keeper-converter --zookeeper-logs-dir /zookeeper/version-2/ --zookeeper-snapshots-dir /zookeeper/version-2/ --output-dir /var/lib/clickhouse/coordination/snapshots' - node.exec_in_container(['bash', '-c', cmd]) - return os.path.join('/var/lib/clickhouse/coordination/snapshots', node.exec_in_container(['bash', '-c', 'ls /var/lib/clickhouse/coordination/snapshots']).strip()) + cmd = "/usr/bin/clickhouse keeper-converter --zookeeper-logs-dir /zookeeper/version-2/ --zookeeper-snapshots-dir /zookeeper/version-2/ --output-dir /var/lib/clickhouse/coordination/snapshots" + node.exec_in_container(["bash", "-c", cmd]) + return os.path.join( + "/var/lib/clickhouse/coordination/snapshots", + node.exec_in_container( + ["bash", "-c", "ls /var/lib/clickhouse/coordination/snapshots"] + ).strip(), + ) + def stop_clickhouse(node): node.stop_clickhouse() + def start_clickhouse(node): node.start_clickhouse() + def copy_zookeeper_data(make_zk_snapshots, node): stop_zookeeper(node) - if make_zk_snapshots: # force zookeeper to create snapshot + if make_zk_snapshots: # force zookeeper to create snapshot start_zookeeper(node) stop_zookeeper(node) @@ -66,13 +92,19 @@ def started_cluster(): finally: cluster.shutdown() + def get_fake_zk(node, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(node.name) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(node.name) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def get_genuine_zk(node, timeout=30.0): - _genuine_zk_instance = KazooClient(hosts=cluster.get_instance_ip(node.name) + ":2181", timeout=timeout) + _genuine_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(node.name) + ":2181", timeout=timeout + ) _genuine_zk_instance.start() return _genuine_zk_instance @@ -99,7 +131,9 @@ def test_snapshot_and_load(started_cluster): print("Resulted path", resulted_path) for node in [node2, node3]: print("Copy snapshot from", node1.name, "to", node.name) - cluster.copy_file_from_container_to_container(node1, resulted_path, node, '/var/lib/clickhouse/coordination/snapshots') + cluster.copy_file_from_container_to_container( + node1, resulted_path, node, "/var/lib/clickhouse/coordination/snapshots" + ) print("Starting clickhouses") diff --git a/tests/integration/test_keeper_snapshots/test.py b/tests/integration/test_keeper_snapshots/test.py index 607e461d835..08f60e538a4 100644 --- a/tests/integration/test_keeper_snapshots/test.py +++ b/tests/integration/test_keeper_snapshots/test.py @@ -13,16 +13,24 @@ from kazoo.client import KazooClient, KazooState cluster = ClickHouseCluster(__file__) # clickhouse itself will use external zookeeper -node = cluster.add_instance('node', main_configs=['configs/enable_keeper.xml'], stay_alive=True, with_zookeeper=True) +node = cluster.add_instance( + 
"node", + main_configs=["configs/enable_keeper.xml"], + stay_alive=True, + with_zookeeper=True, +) + def random_string(length): - return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length)) + return "".join(random.choices(string.ascii_lowercase + string.digits, k=length)) + def create_random_path(prefix="", depth=1): if depth == 0: return prefix return create_random_path(os.path.join(prefix, random_string(3)), depth - 1) + @pytest.fixture(scope="module") def started_cluster(): try: @@ -33,11 +41,15 @@ def started_cluster(): finally: cluster.shutdown() + def get_connection_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def test_state_after_restart(started_cluster): try: node_zk = None @@ -57,7 +69,6 @@ def test_state_after_restart(started_cluster): else: existing_children.append("node" + str(i)) - node.restart_clickhouse(kill=True) node_zk2 = get_connection_zk("node") @@ -65,14 +76,18 @@ def test_state_after_restart(started_cluster): assert node_zk2.get("/test_state_after_restart")[0] == b"somevalue" for i in range(100): if i % 7 == 0: - assert node_zk2.exists("/test_state_after_restart/node" + str(i)) is None + assert ( + node_zk2.exists("/test_state_after_restart/node" + str(i)) is None + ) else: data, stat = node_zk2.get("/test_state_after_restart/node" + str(i)) assert len(data) == 123 assert data == strs[i] assert stat.ephemeralOwner == 0 - assert list(sorted(existing_children)) == list(sorted(node_zk2.get_children("/test_state_after_restart"))) + assert list(sorted(existing_children)) == list( + sorted(node_zk2.get_children("/test_state_after_restart")) + ) finally: try: if node_zk is not None: @@ -97,7 +112,9 @@ def test_ephemeral_after_restart(started_cluster): strs = [] for i in range(100): strs.append(random_string(123).encode()) - node_zk.create("/test_ephemeral_after_restart/node" + str(i), strs[i], ephemeral=True) + node_zk.create( + "/test_ephemeral_after_restart/node" + str(i), strs[i], ephemeral=True + ) existing_children = [] for i in range(100): @@ -113,13 +130,18 @@ def test_ephemeral_after_restart(started_cluster): assert node_zk2.get("/test_ephemeral_after_restart")[0] == b"somevalue" for i in range(100): if i % 7 == 0: - assert node_zk2.exists("/test_ephemeral_after_restart/node" + str(i)) is None + assert ( + node_zk2.exists("/test_ephemeral_after_restart/node" + str(i)) + is None + ) else: data, stat = node_zk2.get("/test_ephemeral_after_restart/node" + str(i)) assert len(data) == 123 assert data == strs[i] assert stat.ephemeralOwner == session_id - assert list(sorted(existing_children)) == list(sorted(node_zk2.get_children("/test_ephemeral_after_restart"))) + assert list(sorted(existing_children)) == list( + sorted(node_zk2.get_children("/test_ephemeral_after_restart")) + ) finally: try: if node_zk is not None: diff --git a/tests/integration/test_keeper_snapshots_multinode/test.py b/tests/integration/test_keeper_snapshots_multinode/test.py index de4ed3a1a8f..1461f35e6a4 100644 --- a/tests/integration/test_keeper_snapshots_multinode/test.py +++ b/tests/integration/test_keeper_snapshots_multinode/test.py @@ -7,12 +7,19 @@ import os import time cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml'], stay_alive=True) -node2 = 
cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml'], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/enable_keeper3.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/enable_keeper1.xml"], stay_alive=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/enable_keeper2.xml"], stay_alive=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/enable_keeper3.xml"], stay_alive=True +) from kazoo.client import KazooClient, KazooState + @pytest.fixture(scope="module") def started_cluster(): try: @@ -23,11 +30,15 @@ def started_cluster(): finally: cluster.shutdown() + def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def stop_zk(zk): try: if zk: @@ -36,6 +47,7 @@ def stop_zk(zk): except: pass + def test_restart_multinode(started_cluster): try: node1_zk = node2_zk = node3_zk = None @@ -45,7 +57,10 @@ def test_restart_multinode(started_cluster): node3_zk = get_fake_zk("node3") for i in range(100): - node1_zk.create("/test_read_write_multinode_node" + str(i), ("somedata" + str(i)).encode()) + node1_zk.create( + "/test_read_write_multinode_node" + str(i), + ("somedata" + str(i)).encode(), + ) for i in range(100): if i % 10 == 0: @@ -56,11 +71,21 @@ def test_restart_multinode(started_cluster): for i in range(100): if i % 10 != 0: - assert node2_zk.get("/test_read_write_multinode_node" + str(i))[0] == ("somedata" + str(i)).encode() - assert node3_zk.get("/test_read_write_multinode_node" + str(i))[0] == ("somedata" + str(i)).encode() + assert ( + node2_zk.get("/test_read_write_multinode_node" + str(i))[0] + == ("somedata" + str(i)).encode() + ) + assert ( + node3_zk.get("/test_read_write_multinode_node" + str(i))[0] + == ("somedata" + str(i)).encode() + ) else: - assert node2_zk.exists("/test_read_write_multinode_node" + str(i)) is None - assert node3_zk.exists("/test_read_write_multinode_node" + str(i)) is None + assert ( + node2_zk.exists("/test_read_write_multinode_node" + str(i)) is None + ) + assert ( + node3_zk.exists("/test_read_write_multinode_node" + str(i)) is None + ) finally: for zk in [node1_zk, node2_zk, node3_zk]: @@ -76,13 +101,31 @@ def test_restart_multinode(started_cluster): node3_zk = get_fake_zk("node3") for i in range(100): if i % 10 != 0: - assert node1_zk.get("/test_read_write_multinode_node" + str(i))[0] == ("somedata" + str(i)).encode() - assert node2_zk.get("/test_read_write_multinode_node" + str(i))[0] == ("somedata" + str(i)).encode() - assert node3_zk.get("/test_read_write_multinode_node" + str(i))[0] == ("somedata" + str(i)).encode() + assert ( + node1_zk.get("/test_read_write_multinode_node" + str(i))[0] + == ("somedata" + str(i)).encode() + ) + assert ( + node2_zk.get("/test_read_write_multinode_node" + str(i))[0] + == ("somedata" + str(i)).encode() + ) + assert ( + node3_zk.get("/test_read_write_multinode_node" + str(i))[0] + == ("somedata" + str(i)).encode() + ) else: - assert node1_zk.exists("/test_read_write_multinode_node" + str(i)) is None - assert node2_zk.exists("/test_read_write_multinode_node" + str(i)) is None - assert node3_zk.exists("/test_read_write_multinode_node" + str(i)) is None + assert ( + node1_zk.exists("/test_read_write_multinode_node" + str(i)) + is None + ) + assert ( + 
node2_zk.exists("/test_read_write_multinode_node" + str(i)) + is None + ) + assert ( + node3_zk.exists("/test_read_write_multinode_node" + str(i)) + is None + ) break except Exception as ex: print("Got exception as ex", ex) diff --git a/tests/integration/test_keeper_three_nodes_start/test.py b/tests/integration/test_keeper_three_nodes_start/test.py index 7828f21d0d7..f23ef5440c1 100644 --- a/tests/integration/test_keeper_three_nodes_start/test.py +++ b/tests/integration/test_keeper_three_nodes_start/test.py @@ -13,14 +13,22 @@ from helpers.test_tools import assert_eq_with_retry from kazoo.client import KazooClient, KazooState cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/enable_keeper1.xml"], stay_alive=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/enable_keeper2.xml"], stay_alive=True +) + def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def test_smoke(): try: cluster.start() diff --git a/tests/integration/test_keeper_three_nodes_two_alive/test.py b/tests/integration/test_keeper_three_nodes_two_alive/test.py index d79a185b367..11ff1d8cc08 100644 --- a/tests/integration/test_keeper_three_nodes_two_alive/test.py +++ b/tests/integration/test_keeper_three_nodes_two_alive/test.py @@ -11,13 +11,27 @@ from helpers.test_tools import assert_eq_with_retry from kazoo.client import KazooClient, KazooState cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml', 'configs/keeper_conf.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml', 'configs/keeper_conf.xml'], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/enable_keeper3.xml', 'configs/keeper_conf.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/enable_keeper1.xml", "configs/keeper_conf.xml"], + stay_alive=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/enable_keeper2.xml", "configs/keeper_conf.xml"], + stay_alive=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/enable_keeper3.xml", "configs/keeper_conf.xml"], + stay_alive=True, +) def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance @@ -32,8 +46,9 @@ def started_cluster(): finally: cluster.shutdown() + def start(node): - node.start_clickhouse() + node.start_clickhouse() def delete_with_retry(node_name, path): @@ -59,8 +74,12 @@ def test_start_offline(started_cluster): time.sleep(3) p.map(start, [node2, node3]) - assert node2.contains_in_log("Cannot connect to ZooKeeper (or Keeper) before internal Keeper start") - assert node3.contains_in_log("Cannot connect to ZooKeeper (or Keeper) before internal Keeper start") + assert node2.contains_in_log( + "Cannot connect to ZooKeeper (or 
Keeper) before internal Keeper start" + ) + assert node3.contains_in_log( + "Cannot connect to ZooKeeper (or Keeper) before internal Keeper start" + ) node2_zk = get_fake_zk("node2") node2_zk.create("/c", b"data") @@ -77,20 +96,40 @@ def test_start_non_existing(started_cluster): node2.stop_clickhouse() node3.stop_clickhouse() - node1.replace_in_config('/etc/clickhouse-server/config.d/enable_keeper1.xml', 'node3', 'non_existing_node') - node2.replace_in_config('/etc/clickhouse-server/config.d/enable_keeper2.xml', 'node3', 'non_existing_node') + node1.replace_in_config( + "/etc/clickhouse-server/config.d/enable_keeper1.xml", + "node3", + "non_existing_node", + ) + node2.replace_in_config( + "/etc/clickhouse-server/config.d/enable_keeper2.xml", + "node3", + "non_existing_node", + ) time.sleep(3) p.map(start, [node2, node1]) - assert node1.contains_in_log("Cannot connect to ZooKeeper (or Keeper) before internal Keeper start") - assert node2.contains_in_log("Cannot connect to ZooKeeper (or Keeper) before internal Keeper start") + assert node1.contains_in_log( + "Cannot connect to ZooKeeper (or Keeper) before internal Keeper start" + ) + assert node2.contains_in_log( + "Cannot connect to ZooKeeper (or Keeper) before internal Keeper start" + ) node2_zk = get_fake_zk("node2") node2_zk.create("/test_non_exising", b"data") finally: - node1.replace_in_config('/etc/clickhouse-server/config.d/enable_keeper1.xml', 'non_existing_node', 'node3') - node2.replace_in_config('/etc/clickhouse-server/config.d/enable_keeper2.xml', 'non_existing_node', 'node3') + node1.replace_in_config( + "/etc/clickhouse-server/config.d/enable_keeper1.xml", + "non_existing_node", + "node3", + ) + node2.replace_in_config( + "/etc/clickhouse-server/config.d/enable_keeper2.xml", + "non_existing_node", + "node3", + ) p.map(start, [node1, node2, node3]) delete_with_retry("node2", "/test_non_exising") @@ -101,5 +140,7 @@ def test_restart_third_node(started_cluster): node3.restart_clickhouse() - assert node3.contains_in_log("Connected to ZooKeeper (or Keeper) before internal Keeper start") + assert node3.contains_in_log( + "Connected to ZooKeeper (or Keeper) before internal Keeper start" + ) node1_zk.delete("/test_restart") diff --git a/tests/integration/test_keeper_two_nodes_cluster/test.py b/tests/integration/test_keeper_two_nodes_cluster/test.py index 4cafa1d17f4..8c0276f7d77 100644 --- a/tests/integration/test_keeper_two_nodes_cluster/test.py +++ b/tests/integration/test_keeper_two_nodes_cluster/test.py @@ -11,11 +11,20 @@ from helpers.network import PartitionManager from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml', 'configs/use_keeper.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml', 'configs/use_keeper.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/enable_keeper1.xml", "configs/use_keeper.xml"], + stay_alive=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/enable_keeper2.xml", "configs/use_keeper.xml"], + stay_alive=True, +) from kazoo.client import KazooClient, KazooState + @pytest.fixture(scope="module") def started_cluster(): try: @@ -26,8 +35,10 @@ def started_cluster(): finally: cluster.shutdown() + def smaller_exception(ex): - return '\n'.join(str(ex).split('\n')[0:2]) + return "\n".join(str(ex).split("\n")[0:2]) + def wait_node(node): for _ in range(100): @@ -48,16 +59,20 @@ 
def wait_node(node): else: raise Exception("Can't wait node", node.name, "to become ready") + def wait_nodes(): for node in [node1, node2]: wait_node(node) def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def test_read_write_two_nodes(started_cluster): try: wait_nodes() @@ -89,6 +104,7 @@ def test_read_write_two_nodes(started_cluster): except: pass + def test_read_write_two_nodes_with_blocade(started_cluster): try: wait_nodes() @@ -108,7 +124,6 @@ def test_read_write_two_nodes_with_blocade(started_cluster): with pytest.raises(Exception): node2_zk.create("/test_read_write_blocked_node2", b"somedata2") - print("Nodes unblocked") for i in range(10): try: @@ -118,7 +133,6 @@ def test_read_write_two_nodes_with_blocade(started_cluster): except: time.sleep(0.5) - for i in range(100): try: node1_zk.create("/test_after_block1", b"somedata12") diff --git a/tests/integration/test_keeper_znode_time/test.py b/tests/integration/test_keeper_znode_time/test.py index cbe89970d31..f50f03ac168 100644 --- a/tests/integration/test_keeper_znode_time/test.py +++ b/tests/integration/test_keeper_znode_time/test.py @@ -9,12 +9,25 @@ from helpers.network import PartitionManager from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml', 'configs/use_keeper.xml'], stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/enable_keeper2.xml', 'configs/use_keeper.xml'], stay_alive=True) -node3 = cluster.add_instance('node3', main_configs=['configs/enable_keeper3.xml', 'configs/use_keeper.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/enable_keeper1.xml", "configs/use_keeper.xml"], + stay_alive=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/enable_keeper2.xml", "configs/use_keeper.xml"], + stay_alive=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/enable_keeper3.xml", "configs/use_keeper.xml"], + stay_alive=True, +) from kazoo.client import KazooClient, KazooState + @pytest.fixture(scope="module") def started_cluster(): try: @@ -25,8 +38,10 @@ def started_cluster(): finally: cluster.shutdown() + def smaller_exception(ex): - return '\n'.join(str(ex).split('\n')[0:2]) + return "\n".join(str(ex).split("\n")[0:2]) + def wait_node(node): for _ in range(100): @@ -47,16 +62,20 @@ def wait_node(node): else: raise Exception("Can't wait node", node.name, "to become ready") + def wait_nodes(): for node in [node1, node2, node3]: wait_node(node) def get_fake_zk(nodename, timeout=30.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def assert_eq_stats(stat1, stat2): assert stat1.version == stat2.version assert stat1.cversion == stat2.cversion @@ -67,6 +86,7 @@ def assert_eq_stats(stat1, stat2): assert stat1.ctime == stat2.ctime assert stat1.mtime == stat2.mtime + def test_between_servers(started_cluster): try: wait_nodes() diff --git a/tests/integration/test_keeper_zookeeper_converter/test.py 
b/tests/integration/test_keeper_zookeeper_converter/test.py index 6829b4a9000..87975a57019 100644 --- a/tests/integration/test_keeper_zookeeper_converter/test.py +++ b/tests/integration/test_keeper_zookeeper_converter/test.py @@ -3,44 +3,68 @@ import pytest from helpers.cluster import ClickHouseCluster from kazoo.client import KazooClient, KazooState from kazoo.security import ACL, make_digest_acl, make_acl -from kazoo.exceptions import AuthFailedError, InvalidACLError, NoAuthError, KazooException +from kazoo.exceptions import ( + AuthFailedError, + InvalidACLError, + NoAuthError, + KazooException, +) import os cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/keeper_config.xml', 'configs/logs_conf.xml'], stay_alive=True) +node = cluster.add_instance( + "node", + main_configs=["configs/keeper_config.xml", "configs/logs_conf.xml"], + stay_alive=True, +) + def start_zookeeper(): - node.exec_in_container(['bash', '-c', '/opt/zookeeper/bin/zkServer.sh start']) + node.exec_in_container(["bash", "-c", "/opt/zookeeper/bin/zkServer.sh start"]) + def stop_zookeeper(): - node.exec_in_container(['bash', '-c', '/opt/zookeeper/bin/zkServer.sh stop']) + node.exec_in_container(["bash", "-c", "/opt/zookeeper/bin/zkServer.sh stop"]) + def clear_zookeeper(): - node.exec_in_container(['bash', '-c', 'rm -fr /zookeeper/*']) + node.exec_in_container(["bash", "-c", "rm -fr /zookeeper/*"]) + def restart_and_clear_zookeeper(): stop_zookeeper() clear_zookeeper() start_zookeeper() + def clear_clickhouse_data(): - node.exec_in_container(['bash', '-c', 'rm -fr /var/lib/clickhouse/coordination/logs/* /var/lib/clickhouse/coordination/snapshots/*']) + node.exec_in_container( + [ + "bash", + "-c", + "rm -fr /var/lib/clickhouse/coordination/logs/* /var/lib/clickhouse/coordination/snapshots/*", + ] + ) + def convert_zookeeper_data(): - cmd = '/usr/bin/clickhouse keeper-converter --zookeeper-logs-dir /zookeeper/version-2/ --zookeeper-snapshots-dir /zookeeper/version-2/ --output-dir /var/lib/clickhouse/coordination/snapshots' - node.exec_in_container(['bash', '-c', cmd]) + cmd = "/usr/bin/clickhouse keeper-converter --zookeeper-logs-dir /zookeeper/version-2/ --zookeeper-snapshots-dir /zookeeper/version-2/ --output-dir /var/lib/clickhouse/coordination/snapshots" + node.exec_in_container(["bash", "-c", cmd]) + def stop_clickhouse(): node.stop_clickhouse() + def start_clickhouse(): node.start_clickhouse() + def copy_zookeeper_data(make_zk_snapshots): stop_zookeeper() - if make_zk_snapshots: # force zookeeper to create snapshot + if make_zk_snapshots: # force zookeeper to create snapshot start_zookeeper() stop_zookeeper() @@ -50,6 +74,7 @@ def copy_zookeeper_data(make_zk_snapshots): start_zookeeper() start_clickhouse() + @pytest.fixture(scope="module") def started_cluster(): try: @@ -60,26 +85,97 @@ def started_cluster(): finally: cluster.shutdown() + def get_fake_zk(timeout=60.0): - _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip('node') + ":9181", timeout=timeout) + _fake_zk_instance = KazooClient( + hosts=cluster.get_instance_ip("node") + ":9181", timeout=timeout + ) _fake_zk_instance.start() return _fake_zk_instance + def get_genuine_zk(timeout=60.0): - _genuine_zk_instance = KazooClient(hosts=cluster.get_instance_ip('node') + ":2181", timeout=timeout) + _genuine_zk_instance = KazooClient( + hosts=cluster.get_instance_ip("node") + ":2181", timeout=timeout + ) _genuine_zk_instance.start() return _genuine_zk_instance + def compare_stats(stat1, stat2, path): - assert 
stat1.czxid == stat2.czxid, "path " + path + " cxzids not equal for stats: " + str(stat1.czxid) + " != " + str(stat2.zxid) - assert stat1.mzxid == stat2.mzxid, "path " + path + " mxzids not equal for stats: " + str(stat1.mzxid) + " != " + str(stat2.mzxid) - assert stat1.version == stat2.version, "path " + path + " versions not equal for stats: " + str(stat1.version) + " != " + str(stat2.version) - assert stat1.cversion == stat2.cversion, "path " + path + " cversions not equal for stats: " + str(stat1.cversion) + " != " + str(stat2.cversion) - assert stat1.aversion == stat2.aversion, "path " + path + " aversions not equal for stats: " + str(stat1.aversion) + " != " + str(stat2.aversion) - assert stat1.ephemeralOwner == stat2.ephemeralOwner,"path " + path + " ephemeralOwners not equal for stats: " + str(stat1.ephemeralOwner) + " != " + str(stat2.ephemeralOwner) - assert stat1.dataLength == stat2.dataLength , "path " + path + " ephemeralOwners not equal for stats: " + str(stat1.dataLength) + " != " + str(stat2.dataLength) - assert stat1.numChildren == stat2.numChildren, "path " + path + " numChildren not equal for stats: " + str(stat1.numChildren) + " != " + str(stat2.numChildren) - assert stat1.pzxid == stat2.pzxid, "path " + path + " pzxid not equal for stats: " + str(stat1.pzxid) + " != " + str(stat2.pzxid) + assert stat1.czxid == stat2.czxid, ( + "path " + + path + + " cxzids not equal for stats: " + + str(stat1.czxid) + + " != " + + str(stat2.zxid) + ) + assert stat1.mzxid == stat2.mzxid, ( + "path " + + path + + " mxzids not equal for stats: " + + str(stat1.mzxid) + + " != " + + str(stat2.mzxid) + ) + assert stat1.version == stat2.version, ( + "path " + + path + + " versions not equal for stats: " + + str(stat1.version) + + " != " + + str(stat2.version) + ) + assert stat1.cversion == stat2.cversion, ( + "path " + + path + + " cversions not equal for stats: " + + str(stat1.cversion) + + " != " + + str(stat2.cversion) + ) + assert stat1.aversion == stat2.aversion, ( + "path " + + path + + " aversions not equal for stats: " + + str(stat1.aversion) + + " != " + + str(stat2.aversion) + ) + assert stat1.ephemeralOwner == stat2.ephemeralOwner, ( + "path " + + path + + " ephemeralOwners not equal for stats: " + + str(stat1.ephemeralOwner) + + " != " + + str(stat2.ephemeralOwner) + ) + assert stat1.dataLength == stat2.dataLength, ( + "path " + + path + + " ephemeralOwners not equal for stats: " + + str(stat1.dataLength) + + " != " + + str(stat2.dataLength) + ) + assert stat1.numChildren == stat2.numChildren, ( + "path " + + path + + " numChildren not equal for stats: " + + str(stat1.numChildren) + + " != " + + str(stat2.numChildren) + ) + assert stat1.pzxid == stat2.pzxid, ( + "path " + + path + + " pzxid not equal for stats: " + + str(stat1.pzxid) + + " != " + + str(stat2.pzxid) + ) + def compare_states(zk1, zk2, path="/"): data1, stat1 = zk1.get(path) @@ -101,12 +197,8 @@ def compare_states(zk1, zk2, path="/"): print("Checking child", os.path.join(path, children)) compare_states(zk1, zk2, os.path.join(path, children)) -@pytest.mark.parametrize( - ('create_snapshots'), - [ - True, False - ] -) + +@pytest.mark.parametrize(("create_snapshots"), [True, False]) def test_smoke(started_cluster, create_snapshots): restart_and_clear_zookeeper() @@ -122,15 +214,12 @@ def test_smoke(started_cluster, create_snapshots): compare_states(genuine_connection, fake_connection) + def get_bytes(s): return s.encode() -@pytest.mark.parametrize( - ('create_snapshots'), - [ - True, False - ] -) + 
+@pytest.mark.parametrize(("create_snapshots"), [True, False]) def test_simple_crud_requests(started_cluster, create_snapshots): restart_and_clear_zookeeper() @@ -151,14 +240,19 @@ def test_simple_crud_requests(started_cluster, create_snapshots): genuine_connection.create(path, get_bytes("data" + str(i))) path = os.path.join(path, str(i)) - genuine_connection.create("/test_sequential", b"") for i in range(10): - genuine_connection.create("/test_sequential/" + "a" * i + "-", get_bytes("dataX" + str(i)), sequence=True) + genuine_connection.create( + "/test_sequential/" + "a" * i + "-", + get_bytes("dataX" + str(i)), + sequence=True, + ) genuine_connection.create("/test_ephemeral", b"") for i in range(10): - genuine_connection.create("/test_ephemeral/" + str(i), get_bytes("dataX" + str(i)), ephemeral=True) + genuine_connection.create( + "/test_ephemeral/" + str(i), get_bytes("dataX" + str(i)), ephemeral=True + ) copy_zookeeper_data(create_snapshots) @@ -168,54 +262,64 @@ def test_simple_crud_requests(started_cluster, create_snapshots): compare_states(genuine_connection, fake_connection) # especially ensure that counters are the same - genuine_connection.create("/test_sequential/" + "a" * 10 + "-", get_bytes("dataX" + str(i)), sequence=True) - fake_connection.create("/test_sequential/" + "a" * 10 + "-", get_bytes("dataX" + str(i)), sequence=True) + genuine_connection.create( + "/test_sequential/" + "a" * 10 + "-", get_bytes("dataX" + str(i)), sequence=True + ) + fake_connection.create( + "/test_sequential/" + "a" * 10 + "-", get_bytes("dataX" + str(i)), sequence=True + ) first_children = list(sorted(genuine_connection.get_children("/test_sequential"))) second_children = list(sorted(fake_connection.get_children("/test_sequential"))) assert first_children == second_children, "Childrens are not equal on path " + path -@pytest.mark.parametrize( - ('create_snapshots'), - [ - True, False - ] -) + +@pytest.mark.parametrize(("create_snapshots"), [True, False]) def test_multi_and_failed_requests(started_cluster, create_snapshots): restart_and_clear_zookeeper() genuine_connection = get_genuine_zk() - genuine_connection.create('/test_multitransactions') + genuine_connection.create("/test_multitransactions") for i in range(10): t = genuine_connection.transaction() - t.create('/test_multitransactions/freddy' + str(i), get_bytes('data' + str(i))) - t.create('/test_multitransactions/fred' + str(i), get_bytes('value' + str(i)), ephemeral=True) - t.create('/test_multitransactions/smith' + str(i), get_bytes('entity' + str(i)), sequence=True) - t.set_data('/test_multitransactions', get_bytes("somedata" + str(i))) + t.create("/test_multitransactions/freddy" + str(i), get_bytes("data" + str(i))) + t.create( + "/test_multitransactions/fred" + str(i), + get_bytes("value" + str(i)), + ephemeral=True, + ) + t.create( + "/test_multitransactions/smith" + str(i), + get_bytes("entity" + str(i)), + sequence=True, + ) + t.set_data("/test_multitransactions", get_bytes("somedata" + str(i))) t.commit() with pytest.raises(Exception): - genuine_connection.set('/test_multitransactions/freddy0', get_bytes('mustfail' + str(i)), version=1) + genuine_connection.set( + "/test_multitransactions/freddy0", get_bytes("mustfail" + str(i)), version=1 + ) t = genuine_connection.transaction() - t.create('/test_bad_transaction', get_bytes('data' + str(1))) - t.check('/test_multitransactions', version=32) - t.create('/test_bad_transaction1', get_bytes('data' + str(2))) + t.create("/test_bad_transaction", get_bytes("data" + str(1))) + 
t.check("/test_multitransactions", version=32) + t.create("/test_bad_transaction1", get_bytes("data" + str(2))) # should fail t.commit() - assert genuine_connection.exists('/test_bad_transaction') is None - assert genuine_connection.exists('/test_bad_transaction1') is None + assert genuine_connection.exists("/test_bad_transaction") is None + assert genuine_connection.exists("/test_bad_transaction1") is None t = genuine_connection.transaction() - t.create('/test_bad_transaction2', get_bytes('data' + str(1))) - t.delete('/test_multitransactions/freddy0', version=5) + t.create("/test_bad_transaction2", get_bytes("data" + str(1))) + t.delete("/test_multitransactions/freddy0", version=5) # should fail t.commit() - assert genuine_connection.exists('/test_bad_transaction2') is None - assert genuine_connection.exists('/test_multitransactions/freddy0') is not None + assert genuine_connection.exists("/test_bad_transaction2") is None + assert genuine_connection.exists("/test_multitransactions/freddy0") is not None copy_zookeeper_data(create_snapshots) @@ -224,35 +328,40 @@ def test_multi_and_failed_requests(started_cluster, create_snapshots): compare_states(genuine_connection, fake_connection) -@pytest.mark.parametrize( - ('create_snapshots'), - [ - True, False - ] -) + +@pytest.mark.parametrize(("create_snapshots"), [True, False]) def test_acls(started_cluster, create_snapshots): restart_and_clear_zookeeper() genuine_connection = get_genuine_zk() - genuine_connection.add_auth('digest', 'user1:password1') - genuine_connection.add_auth('digest', 'user2:password2') - genuine_connection.add_auth('digest', 'user3:password3') + genuine_connection.add_auth("digest", "user1:password1") + genuine_connection.add_auth("digest", "user2:password2") + genuine_connection.add_auth("digest", "user3:password3") - genuine_connection.create("/test_multi_all_acl", b"data", acl=[make_acl("auth", "", all=True)]) + genuine_connection.create( + "/test_multi_all_acl", b"data", acl=[make_acl("auth", "", all=True)] + ) other_connection = get_genuine_zk() - other_connection.add_auth('digest', 'user1:password1') + other_connection.add_auth("digest", "user1:password1") other_connection.set("/test_multi_all_acl", b"X") assert other_connection.get("/test_multi_all_acl")[0] == b"X" yet_other_auth_connection = get_genuine_zk() - yet_other_auth_connection.add_auth('digest', 'user2:password2') + yet_other_auth_connection.add_auth("digest", "user2:password2") yet_other_auth_connection.set("/test_multi_all_acl", b"Y") - genuine_connection.add_auth('digest', 'user3:password3') + genuine_connection.add_auth("digest", "user3:password3") # just to check that we are able to deserialize it - genuine_connection.set_acls("/test_multi_all_acl", acls=[make_acl("auth", "", read=True, write=False, create=True, delete=True, admin=True)]) + genuine_connection.set_acls( + "/test_multi_all_acl", + acls=[ + make_acl( + "auth", "", read=True, write=False, create=True, delete=True, admin=True + ) + ], + ) no_auth_connection = get_genuine_zk() @@ -262,14 +371,14 @@ def test_acls(started_cluster, create_snapshots): copy_zookeeper_data(create_snapshots) genuine_connection = get_genuine_zk() - genuine_connection.add_auth('digest', 'user1:password1') - genuine_connection.add_auth('digest', 'user2:password2') - genuine_connection.add_auth('digest', 'user3:password3') + genuine_connection.add_auth("digest", "user1:password1") + genuine_connection.add_auth("digest", "user2:password2") + genuine_connection.add_auth("digest", "user3:password3") fake_connection = 
get_fake_zk() - fake_connection.add_auth('digest', 'user1:password1') - fake_connection.add_auth('digest', 'user2:password2') - fake_connection.add_auth('digest', 'user3:password3') + fake_connection.add_auth("digest", "user1:password1") + fake_connection.add_auth("digest", "user2:password2") + fake_connection.add_auth("digest", "user3:password3") compare_states(genuine_connection, fake_connection) @@ -278,7 +387,11 @@ def test_acls(started_cluster, create_snapshots): assert stat.aversion == 1 assert len(acls) == 3 for acl in acls: - assert acl.acl_list == ['READ', 'CREATE', 'DELETE', 'ADMIN'] - assert acl.id.scheme == 'digest' + assert acl.acl_list == ["READ", "CREATE", "DELETE", "ADMIN"] + assert acl.id.scheme == "digest" assert acl.perms == 29 - assert acl.id.id in ('user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=', 'user2:lo/iTtNMP+gEZlpUNaCqLYO3i5U=', 'user3:wr5Y0kEs9nFX3bKrTMKxrlcFeWo=') + assert acl.id.id in ( + "user1:XDkd2dsEuhc9ImU3q8pa8UOdtpI=", + "user2:lo/iTtNMP+gEZlpUNaCqLYO3i5U=", + "user3:wr5Y0kEs9nFX3bKrTMKxrlcFeWo=", + ) diff --git a/tests/integration/test_library_bridge/test.py b/tests/integration/test_library_bridge/test.py index 12a967ebaa4..6e2c2ec0597 100644 --- a/tests/integration/test_library_bridge/test.py +++ b/tests/integration/test_library_bridge/test.py @@ -8,13 +8,18 @@ from helpers.cluster import ClickHouseCluster, run_and_check cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', - dictionaries=['configs/dictionaries/dict1.xml'], main_configs=['configs/config.d/config.xml'], stay_alive=True) +instance = cluster.add_instance( + "instance", + dictionaries=["configs/dictionaries/dict1.xml"], + main_configs=["configs/config.d/config.xml"], + stay_alive=True, +) def create_dict_simple(): - instance.query('DROP DICTIONARY IF EXISTS lib_dict_c') - instance.query(''' + instance.query("DROP DICTIONARY IF EXISTS lib_dict_c") + instance.query( + """ CREATE DICTIONARY lib_dict_c (key UInt64, value1 UInt64, value2 UInt64, value3 UInt64) PRIMARY KEY key SOURCE(library(PATH '/etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.so')) LAYOUT(CACHE( @@ -24,30 +29,52 @@ def create_dict_simple(): READ_BUFFER_SIZE 1048576 MAX_STORED_KEYS 1048576)) LIFETIME(2) ; - ''') + """ + ) @pytest.fixture(scope="module") def ch_cluster(): try: cluster.start() - instance.query('CREATE DATABASE test') - container_lib_path = '/etc/clickhouse-server/config.d/dictionarites_lib/dict_lib.cpp' + instance.query("CREATE DATABASE test") + container_lib_path = ( + "/etc/clickhouse-server/config.d/dictionarites_lib/dict_lib.cpp" + ) - instance.copy_file_to_container(os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs/dict_lib.cpp"), - "/etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.cpp") + instance.copy_file_to_container( + os.path.join( + os.path.dirname(os.path.realpath(__file__)), "configs/dict_lib.cpp" + ), + "/etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.cpp", + ) instance.query("SYSTEM RELOAD CONFIG") instance.exec_in_container( - ['bash', '-c', - '/usr/bin/g++ -shared -o /etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.so -fPIC /etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.cpp'], - user='root') + [ + "bash", + "-c", + "/usr/bin/g++ -shared -o /etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.so -fPIC /etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.cpp", + ], + user="root", + ) instance.exec_in_container( - ['bash', '-c', - '/usr/bin/g++ -shared -o /dict_lib_copy.so -fPIC 
/etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.cpp'], user='root') - instance.exec_in_container(['bash', '-c', 'ln -s /dict_lib_copy.so /etc/clickhouse-server/config.d/dictionaries_lib/dict_lib_symlink.so']) + [ + "bash", + "-c", + "/usr/bin/g++ -shared -o /dict_lib_copy.so -fPIC /etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.cpp", + ], + user="root", + ) + instance.exec_in_container( + [ + "bash", + "-c", + "ln -s /dict_lib_copy.so /etc/clickhouse-server/config.d/dictionaries_lib/dict_lib_symlink.so", + ] + ) yield cluster @@ -64,8 +91,9 @@ def test_load_all(ch_cluster): if instance.is_built_with_memory_sanitizer(): pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") - instance.query('DROP DICTIONARY IF EXISTS lib_dict') - instance.query(''' + instance.query("DROP DICTIONARY IF EXISTS lib_dict") + instance.query( + """ CREATE DICTIONARY lib_dict (key UInt64, value1 UInt64, value2 UInt64, value3 UInt64) PRIMARY KEY key SOURCE(library( @@ -73,41 +101,45 @@ def test_load_all(ch_cluster): SETTINGS (test_type test_simple))) LAYOUT(HASHED()) LIFETIME (MIN 0 MAX 10) - ''') + """ + ) - result = instance.query('SELECT * FROM lib_dict ORDER BY key') + result = instance.query("SELECT * FROM lib_dict ORDER BY key") expected = ( -"0\t10\t20\t30\n" + -"1\t11\t21\t31\n" + -"2\t12\t22\t32\n" + -"3\t13\t23\t33\n" + -"4\t14\t24\t34\n" + -"5\t15\t25\t35\n" + -"6\t16\t26\t36\n" + -"7\t17\t27\t37\n" + -"8\t18\t28\t38\n" + -"9\t19\t29\t39\n" -) - instance.query('SYSTEM RELOAD DICTIONARY dict1') - instance.query('DROP DICTIONARY lib_dict') - assert(result == expected) + "0\t10\t20\t30\n" + + "1\t11\t21\t31\n" + + "2\t12\t22\t32\n" + + "3\t13\t23\t33\n" + + "4\t14\t24\t34\n" + + "5\t15\t25\t35\n" + + "6\t16\t26\t36\n" + + "7\t17\t27\t37\n" + + "8\t18\t28\t38\n" + + "9\t19\t29\t39\n" + ) + instance.query("SYSTEM RELOAD DICTIONARY dict1") + instance.query("DROP DICTIONARY lib_dict") + assert result == expected - instance.query(""" + instance.query( + """ CREATE TABLE IF NOT EXISTS `dict1_table` ( key UInt64, value1 UInt64, value2 UInt64, value3 UInt64 ) ENGINE = Dictionary(dict1) - """) + """ + ) - result = instance.query('SELECT * FROM dict1_table ORDER BY key') - assert(result == expected) + result = instance.query("SELECT * FROM dict1_table ORDER BY key") + assert result == expected def test_load_ids(ch_cluster): if instance.is_built_with_memory_sanitizer(): pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") - instance.query('DROP DICTIONARY IF EXISTS lib_dict_c') - instance.query(''' + instance.query("DROP DICTIONARY IF EXISTS lib_dict_c") + instance.query( + """ CREATE DICTIONARY lib_dict_c (key UInt64, value1 UInt64, value2 UInt64, value3 UInt64) PRIMARY KEY key SOURCE(library(PATH '/etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.so')) LAYOUT(CACHE( @@ -117,37 +149,46 @@ def test_load_ids(ch_cluster): READ_BUFFER_SIZE 1048576 MAX_STORED_KEYS 1048576)) LIFETIME(2) ; - ''') + """ + ) - result = instance.query('''select dictGet(lib_dict_c, 'value1', toUInt64(0));''') - assert(result.strip() == '100') + result = instance.query("""select dictGet(lib_dict_c, 'value1', toUInt64(0));""") + assert result.strip() == "100" # Just check bridge is ok with a large vector of random ids - instance.query('''select number, dictGet(lib_dict_c, 'value1', toUInt64(rand())) from numbers(1000);''') + instance.query( + """select number, dictGet(lib_dict_c, 'value1', toUInt64(rand())) from numbers(1000);""" + ) - result = instance.query('''select 
dictGet(lib_dict_c, 'value1', toUInt64(1));''') - assert(result.strip() == '101') - instance.query('DROP DICTIONARY lib_dict_c') + result = instance.query("""select dictGet(lib_dict_c, 'value1', toUInt64(1));""") + assert result.strip() == "101" + instance.query("DROP DICTIONARY lib_dict_c") def test_load_keys(ch_cluster): if instance.is_built_with_memory_sanitizer(): pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") - instance.query('DROP DICTIONARY IF EXISTS lib_dict_ckc') - instance.query(''' + instance.query("DROP DICTIONARY IF EXISTS lib_dict_ckc") + instance.query( + """ CREATE DICTIONARY lib_dict_ckc (key UInt64, value1 UInt64, value2 UInt64, value3 UInt64) PRIMARY KEY key SOURCE(library(PATH '/etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.so')) LAYOUT(COMPLEX_KEY_CACHE( SIZE_IN_CELLS 10000000)) LIFETIME(2); - ''') + """ + ) - result = instance.query('''select dictGet(lib_dict_ckc, 'value1', tuple(toUInt64(0)));''') - assert(result.strip() == '100') - result = instance.query('''select dictGet(lib_dict_ckc, 'value2', tuple(toUInt64(0)));''') - assert(result.strip() == '200') - instance.query('DROP DICTIONARY lib_dict_ckc') + result = instance.query( + """select dictGet(lib_dict_ckc, 'value1', tuple(toUInt64(0)));""" + ) + assert result.strip() == "100" + result = instance.query( + """select dictGet(lib_dict_ckc, 'value2', tuple(toUInt64(0)));""" + ) + assert result.strip() == "200" + instance.query("DROP DICTIONARY lib_dict_ckc") def test_load_all_many_rows(ch_cluster): @@ -155,9 +196,10 @@ def test_load_all_many_rows(ch_cluster): pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") num_rows = [1000, 10000, 100000, 1000000] - instance.query('DROP DICTIONARY IF EXISTS lib_dict') + instance.query("DROP DICTIONARY IF EXISTS lib_dict") for num in num_rows: - instance.query(''' + instance.query( + """ CREATE DICTIONARY lib_dict (key UInt64, value1 UInt64, value2 UInt64, value3 UInt64) PRIMARY KEY key SOURCE(library( @@ -165,28 +207,35 @@ def test_load_all_many_rows(ch_cluster): SETTINGS (num_rows {} test_type test_many_rows))) LAYOUT(HASHED()) LIFETIME (MIN 0 MAX 10) - '''.format(num)) + """.format( + num + ) + ) - result = instance.query('SELECT * FROM lib_dict ORDER BY key') - expected = instance.query('SELECT number, number, number, number FROM numbers({})'.format(num)) - instance.query('DROP DICTIONARY lib_dict') - assert(result == expected) + result = instance.query("SELECT * FROM lib_dict ORDER BY key") + expected = instance.query( + "SELECT number, number, number, number FROM numbers({})".format(num) + ) + instance.query("DROP DICTIONARY lib_dict") + assert result == expected def test_null_values(ch_cluster): if instance.is_built_with_memory_sanitizer(): pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") - instance.query('SYSTEM RELOAD DICTIONARY dict2') - instance.query(""" + instance.query("SYSTEM RELOAD DICTIONARY dict2") + instance.query( + """ CREATE TABLE IF NOT EXISTS `dict2_table` ( key UInt64, value1 UInt64, value2 UInt64, value3 UInt64 ) ENGINE = Dictionary(dict2) - """) + """ + ) - result = instance.query('SELECT * FROM dict2_table ORDER BY key') + result = instance.query("SELECT * FROM dict2_table ORDER BY key") expected = "0\t12\t12\t12\n" - assert(result == expected) + assert result == expected def test_recover_after_bridge_crash(ch_cluster): @@ -195,21 +244,25 @@ def test_recover_after_bridge_crash(ch_cluster): create_dict_simple() - result = instance.query('''select 
dictGet(lib_dict_c, 'value1', toUInt64(0));''') - assert(result.strip() == '100') - result = instance.query('''select dictGet(lib_dict_c, 'value1', toUInt64(1));''') - assert(result.strip() == '101') + result = instance.query("""select dictGet(lib_dict_c, 'value1', toUInt64(0));""") + assert result.strip() == "100" + result = instance.query("""select dictGet(lib_dict_c, 'value1', toUInt64(1));""") + assert result.strip() == "101" - instance.exec_in_container(['bash', '-c', 'kill -9 `pidof clickhouse-library-bridge`'], user='root') - instance.query('SYSTEM RELOAD DICTIONARY lib_dict_c') + instance.exec_in_container( + ["bash", "-c", "kill -9 `pidof clickhouse-library-bridge`"], user="root" + ) + instance.query("SYSTEM RELOAD DICTIONARY lib_dict_c") - result = instance.query('''select dictGet(lib_dict_c, 'value1', toUInt64(0));''') - assert(result.strip() == '100') - result = instance.query('''select dictGet(lib_dict_c, 'value1', toUInt64(1));''') - assert(result.strip() == '101') + result = instance.query("""select dictGet(lib_dict_c, 'value1', toUInt64(0));""") + assert result.strip() == "100" + result = instance.query("""select dictGet(lib_dict_c, 'value1', toUInt64(1));""") + assert result.strip() == "101" - instance.exec_in_container(['bash', '-c', 'kill -9 `pidof clickhouse-library-bridge`'], user='root') - instance.query('DROP DICTIONARY lib_dict_c') + instance.exec_in_container( + ["bash", "-c", "kill -9 `pidof clickhouse-library-bridge`"], user="root" + ) + instance.query("DROP DICTIONARY lib_dict_c") def test_server_restart_bridge_might_be_stil_alive(ch_cluster): @@ -218,32 +271,36 @@ def test_server_restart_bridge_might_be_stil_alive(ch_cluster): create_dict_simple() - result = instance.query('''select dictGet(lib_dict_c, 'value1', toUInt64(1));''') - assert(result.strip() == '101') + result = instance.query("""select dictGet(lib_dict_c, 'value1', toUInt64(1));""") + assert result.strip() == "101" instance.restart_clickhouse() - result = instance.query('''select dictGet(lib_dict_c, 'value1', toUInt64(1));''') - assert(result.strip() == '101') + result = instance.query("""select dictGet(lib_dict_c, 'value1', toUInt64(1));""") + assert result.strip() == "101" - instance.exec_in_container(['bash', '-c', 'kill -9 `pidof clickhouse-library-bridge`'], user='root') + instance.exec_in_container( + ["bash", "-c", "kill -9 `pidof clickhouse-library-bridge`"], user="root" + ) instance.restart_clickhouse() - result = instance.query('''select dictGet(lib_dict_c, 'value1', toUInt64(1));''') - assert(result.strip() == '101') + result = instance.query("""select dictGet(lib_dict_c, 'value1', toUInt64(1));""") + assert result.strip() == "101" - instance.query('DROP DICTIONARY lib_dict_c') + instance.query("DROP DICTIONARY lib_dict_c") def test_bridge_dies_with_parent(ch_cluster): if instance.is_built_with_memory_sanitizer(): pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") if instance.is_built_with_address_sanitizer(): - pytest.skip("Leak sanitizer falsely reports about a leak of 16 bytes in clickhouse-odbc-bridge") + pytest.skip( + "Leak sanitizer falsely reports about a leak of 16 bytes in clickhouse-odbc-bridge" + ) create_dict_simple() - result = instance.query('''select dictGet(lib_dict_c, 'value1', toUInt64(1));''') - assert(result.strip() == '101') + result = instance.query("""select dictGet(lib_dict_c, 'value1', toUInt64(1));""") + assert result.strip() == "101" clickhouse_pid = instance.get_process_pid("clickhouse server") bridge_pid = 
instance.get_process_pid("library-bridge") @@ -252,7 +309,9 @@ def test_bridge_dies_with_parent(ch_cluster): while clickhouse_pid is not None: try: - instance.exec_in_container(["kill", str(clickhouse_pid)], privileged=True, user='root') + instance.exec_in_container( + ["kill", str(clickhouse_pid)], privileged=True, user="root" + ) except: pass clickhouse_pid = instance.get_process_pid("clickhouse server") @@ -265,22 +324,26 @@ def test_bridge_dies_with_parent(ch_cluster): break if bridge_pid: - out = instance.exec_in_container(["gdb", "-p", str(bridge_pid), "--ex", "thread apply all bt", "--ex", "q"], - privileged=True, user='root') + out = instance.exec_in_container( + ["gdb", "-p", str(bridge_pid), "--ex", "thread apply all bt", "--ex", "q"], + privileged=True, + user="root", + ) logging.debug(f"Bridge is running, gdb output:\n{out}") assert clickhouse_pid is None assert bridge_pid is None instance.start_clickhouse(20) - instance.query('DROP DICTIONARY lib_dict_c') + instance.query("DROP DICTIONARY lib_dict_c") def test_path_validation(ch_cluster): if instance.is_built_with_memory_sanitizer(): pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") - instance.query('DROP DICTIONARY IF EXISTS lib_dict_c') - instance.query(''' + instance.query("DROP DICTIONARY IF EXISTS lib_dict_c") + instance.query( + """ CREATE DICTIONARY lib_dict_c (key UInt64, value1 UInt64, value2 UInt64, value3 UInt64) PRIMARY KEY key SOURCE(library(PATH '/etc/clickhouse-server/config.d/dictionaries_lib/dict_lib_symlink.so')) LAYOUT(CACHE( @@ -290,13 +353,15 @@ def test_path_validation(ch_cluster): READ_BUFFER_SIZE 1048576 MAX_STORED_KEYS 1048576)) LIFETIME(2) ; - ''') + """ + ) - result = instance.query('''select dictGet(lib_dict_c, 'value1', toUInt64(1));''') - assert(result.strip() == '101') + result = instance.query("""select dictGet(lib_dict_c, 'value1', toUInt64(1));""") + assert result.strip() == "101" - instance.query('DROP DICTIONARY IF EXISTS lib_dict_c') - instance.query(''' + instance.query("DROP DICTIONARY IF EXISTS lib_dict_c") + instance.query( + """ CREATE DICTIONARY lib_dict_c (key UInt64, value1 UInt64, value2 UInt64, value3 UInt64) PRIMARY KEY key SOURCE(library(PATH '/etc/clickhouse-server/config.d/dictionaries_lib/../../../../dict_lib_copy.so')) LAYOUT(CACHE( @@ -306,12 +371,18 @@ def test_path_validation(ch_cluster): READ_BUFFER_SIZE 1048576 MAX_STORED_KEYS 1048576)) LIFETIME(2) ; - ''') - result = instance.query_and_get_error('''select dictGet(lib_dict_c, 'value1', toUInt64(1));''') - assert('DB::Exception: File path /etc/clickhouse-server/config.d/dictionaries_lib/../../../../dict_lib_copy.so is not inside /etc/clickhouse-server/config.d/dictionaries_lib' in result) + """ + ) + result = instance.query_and_get_error( + """select dictGet(lib_dict_c, 'value1', toUInt64(1));""" + ) + assert ( + "DB::Exception: File path /etc/clickhouse-server/config.d/dictionaries_lib/../../../../dict_lib_copy.so is not inside /etc/clickhouse-server/config.d/dictionaries_lib" + in result + ) -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_limited_replicated_fetches/test.py b/tests/integration/test_limited_replicated_fetches/test.py index 7b0c7aed15d..e3271100b74 100644 --- a/tests/integration/test_limited_replicated_fetches/test.py +++ b/tests/integration/test_limited_replicated_fetches/test.py @@ -10,11 +10,16 @@ import os cluster = ClickHouseCluster(__file__) 
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -node1 = cluster.add_instance('node1', user_configs=['configs/custom_settings.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', user_configs=['configs/custom_settings.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", user_configs=["configs/custom_settings.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", user_configs=["configs/custom_settings.xml"], with_zookeeper=True +) MAX_THREADS_FOR_FETCH = 3 + @pytest.fixture(scope="module") def started_cluster(): try: @@ -27,32 +32,72 @@ def started_cluster(): def get_random_string(length): - return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length)) + return "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(length) + ) def test_limited_fetches(started_cluster): """ - Test checks that that we utilize all available threads for fetches + Test checks that that we utilize all available threads for fetches """ - node1.query("CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '1') ORDER BY tuple() PARTITION BY key") - node2.query("CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '2') ORDER BY tuple() PARTITION BY key") + node1.query( + "CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '1') ORDER BY tuple() PARTITION BY key" + ) + node2.query( + "CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '2') ORDER BY tuple() PARTITION BY key" + ) with PartitionManager() as pm: node2.query("SYSTEM STOP FETCHES t") - node1.query("INSERT INTO t SELECT 1, '{}' FROM numbers(5000)".format(get_random_string(104857))) - node1.query("INSERT INTO t SELECT 2, '{}' FROM numbers(5000)".format(get_random_string(104857))) - node1.query("INSERT INTO t SELECT 3, '{}' FROM numbers(5000)".format(get_random_string(104857))) - node1.query("INSERT INTO t SELECT 4, '{}' FROM numbers(5000)".format(get_random_string(104857))) - node1.query("INSERT INTO t SELECT 5, '{}' FROM numbers(5000)".format(get_random_string(104857))) - node1.query("INSERT INTO t SELECT 6, '{}' FROM numbers(5000)".format(get_random_string(104857))) + node1.query( + "INSERT INTO t SELECT 1, '{}' FROM numbers(5000)".format( + get_random_string(104857) + ) + ) + node1.query( + "INSERT INTO t SELECT 2, '{}' FROM numbers(5000)".format( + get_random_string(104857) + ) + ) + node1.query( + "INSERT INTO t SELECT 3, '{}' FROM numbers(5000)".format( + get_random_string(104857) + ) + ) + node1.query( + "INSERT INTO t SELECT 4, '{}' FROM numbers(5000)".format( + get_random_string(104857) + ) + ) + node1.query( + "INSERT INTO t SELECT 5, '{}' FROM numbers(5000)".format( + get_random_string(104857) + ) + ) + node1.query( + "INSERT INTO t SELECT 6, '{}' FROM numbers(5000)".format( + get_random_string(104857) + ) + ) pm.add_network_delay(node1, 80) node2.query("SYSTEM START FETCHES t") fetches_result = [] background_fetches_metric = [] fetched_parts = set([]) for _ in range(1000): - result = node2.query("SELECT result_part_name FROM system.replicated_fetches").strip().split() - background_fetches_metric.append(int(node2.query("select value from system.metrics where metric = 'BackgroundFetchesPoolTask'").strip())) + result = ( + node2.query("SELECT result_part_name FROM system.replicated_fetches") + .strip() + .split() + ) + background_fetches_metric.append( + int( + node2.query( + "select 
value from system.metrics where metric = 'BackgroundFetchesPoolTask'" + ).strip() + ) + ) if not result: if len(fetched_parts) == 6: break @@ -67,10 +112,16 @@ def test_limited_fetches(started_cluster): for concurrently_fetching_parts in fetches_result: if len(concurrently_fetching_parts) > MAX_THREADS_FOR_FETCH: - assert False, "Found more than {} concurrently fetching parts: {}".format(MAX_THREADS_FOR_FETCH, ', '.join(concurrently_fetching_parts)) + assert False, "Found more than {} concurrently fetching parts: {}".format( + MAX_THREADS_FOR_FETCH, ", ".join(concurrently_fetching_parts) + ) - assert max([len(parts) for parts in fetches_result]) == 3, "Strange, but we don't utilize max concurrent threads for fetches" - assert(max(background_fetches_metric)) == 3, "Just checking metric consistent with table" + assert ( + max([len(parts) for parts in fetches_result]) == 3 + ), "Strange, but we don't utilize max concurrent threads for fetches" + assert ( + max(background_fetches_metric) + ) == 3, "Just checking metric consistent with table" node1.query("DROP TABLE IF EXISTS t SYNC") node2.query("DROP TABLE IF EXISTS t SYNC") diff --git a/tests/integration/test_log_family_hdfs/test.py b/tests/integration/test_log_family_hdfs/test.py index 7bb9cdfeaf5..e8afe364ec4 100644 --- a/tests/integration/test_log_family_hdfs/test.py +++ b/tests/integration/test_log_family_hdfs/test.py @@ -11,26 +11,27 @@ from pyhdfs import HdfsClient def started_cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", - main_configs=["configs/storage_conf.xml"], - with_hdfs=True) + cluster.add_instance( + "node", main_configs=["configs/storage_conf.xml"], with_hdfs=True + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") fs = HdfsClient(hosts=cluster.hdfs_ip) - fs.mkdirs('/clickhouse') + fs.mkdirs("/clickhouse") yield cluster finally: cluster.shutdown() -def assert_objects_count(started_cluster, objects_count, path='data/'): +def assert_objects_count(started_cluster, objects_count, path="data/"): fs = HdfsClient(hosts=started_cluster.hdfs_ip) - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert objects_count == len(hdfs_objects) + # TinyLog: files: id.bin, sizes.json # INSERT overwrites 1 file (`sizes.json`) and appends 1 file (`id.bin`), so # files_overhead=1, files_overhead_per_insert=1 @@ -44,26 +45,41 @@ def assert_objects_count(started_cluster, objects_count, path='data/'): # files_overhead=1, files_overhead_per_insert=2 @pytest.mark.parametrize( "log_engine,files_overhead,files_overhead_per_insert", - [("TinyLog", 1, 1), ("Log", 1, 2), ("StripeLog", 1, 2)]) -def test_log_family_hdfs(started_cluster, log_engine, files_overhead, files_overhead_per_insert): + [("TinyLog", 1, 1), ("Log", 1, 2), ("StripeLog", 1, 2)], +) +def test_log_family_hdfs( + started_cluster, log_engine, files_overhead, files_overhead_per_insert +): node = started_cluster.instances["node"] - node.query("CREATE TABLE hdfs_test (id UInt64) ENGINE={} SETTINGS disk = 'hdfs'".format(log_engine)) + node.query( + "CREATE TABLE hdfs_test (id UInt64) ENGINE={} SETTINGS disk = 'hdfs'".format( + log_engine + ) + ) node.query("INSERT INTO hdfs_test SELECT number FROM numbers(5)") assert node.query("SELECT * FROM hdfs_test") == "0\n1\n2\n3\n4\n" assert_objects_count(started_cluster, files_overhead_per_insert + files_overhead) node.query("INSERT INTO hdfs_test SELECT number + 5 FROM numbers(3)") - assert node.query("SELECT * FROM hdfs_test order by id") == 
"0\n1\n2\n3\n4\n5\n6\n7\n" - assert_objects_count(started_cluster, files_overhead_per_insert * 2 + files_overhead) + assert ( + node.query("SELECT * FROM hdfs_test order by id") == "0\n1\n2\n3\n4\n5\n6\n7\n" + ) + assert_objects_count( + started_cluster, files_overhead_per_insert * 2 + files_overhead + ) node.query("INSERT INTO hdfs_test SELECT number + 8 FROM numbers(1)") - assert node.query("SELECT * FROM hdfs_test order by id") == "0\n1\n2\n3\n4\n5\n6\n7\n8\n" - assert_objects_count(started_cluster, files_overhead_per_insert * 3 + files_overhead) + assert ( + node.query("SELECT * FROM hdfs_test order by id") + == "0\n1\n2\n3\n4\n5\n6\n7\n8\n" + ) + assert_objects_count( + started_cluster, files_overhead_per_insert * 3 + files_overhead + ) node.query("TRUNCATE TABLE hdfs_test") assert_objects_count(started_cluster, 0) node.query("DROP TABLE hdfs_test") - diff --git a/tests/integration/test_log_family_s3/test.py b/tests/integration/test_log_family_s3/test.py index 8531edd635f..234b079ba00 100644 --- a/tests/integration/test_log_family_s3/test.py +++ b/tests/integration/test_log_family_s3/test.py @@ -9,9 +9,11 @@ from helpers.cluster import ClickHouseCluster def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", - main_configs=["configs/minio.xml", "configs/ssl.xml"], - with_minio=True) + cluster.add_instance( + "node", + main_configs=["configs/minio.xml", "configs/ssl.xml"], + with_minio=True, + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -21,7 +23,7 @@ def cluster(): cluster.shutdown() -def assert_objects_count(cluster, objects_count, path='data/'): +def assert_objects_count(cluster, objects_count, path="data/"): minio = cluster.minio_client s3_objects = list(minio.list_objects(cluster.minio_bucket, path)) if objects_count != len(s3_objects): @@ -30,6 +32,7 @@ def assert_objects_count(cluster, objects_count, path='data/'): logging.info("Existing S3 object: %s", str(object_meta)) assert objects_count == len(s3_objects) + # TinyLog: files: id.bin, sizes.json # INSERT overwrites 1 file (`sizes.json`) and appends 1 file (`id.bin`), so # files_overhead=1, files_overhead_per_insert=1 @@ -43,11 +46,16 @@ def assert_objects_count(cluster, objects_count, path='data/'): # files_overhead=1, files_overhead_per_insert=2 @pytest.mark.parametrize( "log_engine,files_overhead,files_overhead_per_insert", - [("TinyLog", 1, 1), ("Log", 1, 2), ("StripeLog", 1, 2)]) + [("TinyLog", 1, 1), ("Log", 1, 2), ("StripeLog", 1, 2)], +) def test_log_family_s3(cluster, log_engine, files_overhead, files_overhead_per_insert): node = cluster.instances["node"] - node.query("CREATE TABLE s3_test (id UInt64) ENGINE={} SETTINGS disk = 's3'".format(log_engine)) + node.query( + "CREATE TABLE s3_test (id UInt64) ENGINE={} SETTINGS disk = 's3'".format( + log_engine + ) + ) node.query("INSERT INTO s3_test SELECT number FROM numbers(5)") assert node.query("SELECT * FROM s3_test") == "0\n1\n2\n3\n4\n" @@ -58,7 +66,9 @@ def test_log_family_s3(cluster, log_engine, files_overhead, files_overhead_per_i assert_objects_count(cluster, files_overhead_per_insert * 2 + files_overhead) node.query("INSERT INTO s3_test SELECT number + 8 FROM numbers(1)") - assert node.query("SELECT * FROM s3_test order by id") == "0\n1\n2\n3\n4\n5\n6\n7\n8\n" + assert ( + node.query("SELECT * FROM s3_test order by id") == "0\n1\n2\n3\n4\n5\n6\n7\n8\n" + ) assert_objects_count(cluster, files_overhead_per_insert * 3 + files_overhead) node.query("TRUNCATE TABLE s3_test") diff --git 
a/tests/integration/test_log_levels_update/test.py b/tests/integration/test_log_levels_update/test.py index f631677a400..86719390f33 100644 --- a/tests/integration/test_log_levels_update/test.py +++ b/tests/integration/test_log_levels_update/test.py @@ -4,14 +4,14 @@ import re from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__, name="log_quries_probability") -node = cluster.add_instance('node', with_zookeeper=False) +node = cluster.add_instance("node", with_zookeeper=False) -config = ''' +config = """ information /var/log/clickhouse-server/clickhouse-server.log -''' +""" @pytest.fixture(scope="module") @@ -25,7 +25,10 @@ def start_cluster(): def get_log(node): - return node.exec_in_container(["bash", "-c", "cat /var/log/clickhouse-server/clickhouse-server.log"]) + return node.exec_in_container( + ["bash", "-c", "cat /var/log/clickhouse-server/clickhouse-server.log"] + ) + def test_log_levels_update(start_cluster): # Make sure that there are enough log messages for the test @@ -37,14 +40,13 @@ def test_log_levels_update(start_cluster): node.replace_config("/etc/clickhouse-server/config.d/log.xml", config) node.query("SYSTEM RELOAD CONFIG;") - node.exec_in_container(["bash", "-c", "> /var/log/clickhouse-server/clickhouse-server.log"]) - + node.exec_in_container( + ["bash", "-c", "> /var/log/clickhouse-server/clickhouse-server.log"] + ) + for i in range(5): node.query("SELECT 1") log = get_log(node) assert len(log) > 0 assert not re.search("(|)", log) - - - diff --git a/tests/integration/test_log_lz4_streaming/test.py b/tests/integration/test_log_lz4_streaming/test.py index 75b46a378c5..05c0c809b5a 100644 --- a/tests/integration/test_log_lz4_streaming/test.py +++ b/tests/integration/test_log_lz4_streaming/test.py @@ -5,7 +5,8 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/logs.xml'], stay_alive=True) +node = cluster.add_instance("node", main_configs=["configs/logs.xml"], stay_alive=True) + @pytest.fixture(scope="module") def started_cluster(): @@ -20,10 +21,26 @@ def started_cluster(): def check_log_file(): assert node.path_exists("/var/log/clickhouse-server/clickhouse-server.log.lz4") - lz4_output = node.exec_in_container(["bash", "-c", "lz4 -t /var/log/clickhouse-server/clickhouse-server.log.lz4 2>&1"], user='root') - assert lz4_output.count('Error') == 0, lz4_output + lz4_output = node.exec_in_container( + [ + "bash", + "-c", + "lz4 -t /var/log/clickhouse-server/clickhouse-server.log.lz4 2>&1", + ], + user="root", + ) + assert lz4_output.count("Error") == 0, lz4_output - compressed_size = int(node.exec_in_container(["bash", "-c", "du -b /var/log/clickhouse-server/clickhouse-server.log.lz4 | awk {' print $1 '}"], user='root')) + compressed_size = int( + node.exec_in_container( + [ + "bash", + "-c", + "du -b /var/log/clickhouse-server/clickhouse-server.log.lz4 | awk {' print $1 '}", + ], + user="root", + ) + ) uncompressed_size = int(lz4_output.split()[3]) assert 0 < compressed_size < uncompressed_size, lz4_output diff --git a/tests/integration/test_log_query_probability/test.py b/tests/integration/test_log_query_probability/test.py index d1e19974e75..d13ecc276cb 100644 --- a/tests/integration/test_log_query_probability/test.py +++ b/tests/integration/test_log_query_probability/test.py @@ -3,8 +3,8 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__, name="log_quries_probability") -node1 = 
cluster.add_instance('node1', with_zookeeper=False) -node2 = cluster.add_instance('node2', with_zookeeper=False) +node1 = cluster.add_instance("node1", with_zookeeper=False) +node2 = cluster.add_instance("node2", with_zookeeper=False) @pytest.fixture(scope="module") @@ -19,26 +19,48 @@ def start_cluster(): def test_log_quries_probability_one(start_cluster): for i in range(100): - node1.query("SELECT 12345", settings={"log_queries_probability":0.5}) + node1.query("SELECT 12345", settings={"log_queries_probability": 0.5}) node1.query("SYSTEM FLUSH LOGS") - assert node1.query("SELECT count() < (2 * 100) FROM system.query_log WHERE query LIKE '%12345%' AND query NOT LIKE '%system.query_log%'") == "1\n" - assert node1.query("SELECT count() > 0 FROM system.query_log WHERE query LIKE '%12345%' AND query NOT LIKE '%system.query_log%'") == "1\n" - assert node1.query("SELECT count() % 2 FROM system.query_log WHERE query LIKE '%12345%' AND query NOT LIKE '%system.query_log%'") == "0\n" + assert ( + node1.query( + "SELECT count() < (2 * 100) FROM system.query_log WHERE query LIKE '%12345%' AND query NOT LIKE '%system.query_log%'" + ) + == "1\n" + ) + assert ( + node1.query( + "SELECT count() > 0 FROM system.query_log WHERE query LIKE '%12345%' AND query NOT LIKE '%system.query_log%'" + ) + == "1\n" + ) + assert ( + node1.query( + "SELECT count() % 2 FROM system.query_log WHERE query LIKE '%12345%' AND query NOT LIKE '%system.query_log%'" + ) + == "0\n" + ) node1.query("TRUNCATE TABLE system.query_log") def test_log_quries_probability_two(start_cluster): for i in range(100): - node1.query("SELECT 12345 FROM remote('node2', system, one)", settings={"log_queries_probability":0.5}) + node1.query( + "SELECT 12345 FROM remote('node2', system, one)", + settings={"log_queries_probability": 0.5}, + ) node1.query("SYSTEM FLUSH LOGS") node2.query("SYSTEM FLUSH LOGS") - ans1 = node1.query("SELECT count() FROM system.query_log WHERE query LIKE '%12345%' AND query NOT LIKE '%system.query_log%'") - ans2 = node2.query("SELECT count() FROM system.query_log WHERE query LIKE '%12345%' AND query NOT LIKE '%system.query_log%'") + ans1 = node1.query( + "SELECT count() FROM system.query_log WHERE query LIKE '%12345%' AND query NOT LIKE '%system.query_log%'" + ) + ans2 = node2.query( + "SELECT count() FROM system.query_log WHERE query LIKE '%12345%' AND query NOT LIKE '%system.query_log%'" + ) assert ans1 == ans2 diff --git a/tests/integration/test_logs_level/test.py b/tests/integration/test_logs_level/test.py index 9aa3f7ffd9a..7262861944f 100644 --- a/tests/integration/test_logs_level/test.py +++ b/tests/integration/test_logs_level/test.py @@ -3,7 +3,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/config_information.xml']) +node = cluster.add_instance("node", main_configs=["configs/config_information.xml"]) @pytest.fixture(scope="module") @@ -16,5 +16,7 @@ def start_cluster(): def test_check_client_logs_level(start_cluster): - logs = node.query_and_get_answer_with_error("SELECT 1", settings={"send_logs_level": 'trace'})[1] - assert logs.count('Trace') != 0 + logs = node.query_and_get_answer_with_error( + "SELECT 1", settings={"send_logs_level": "trace"} + )[1] + assert logs.count("Trace") != 0 diff --git a/tests/integration/test_lost_part/test.py b/tests/integration/test_lost_part/test.py index 7b2d54a5ea4..405888c552b 100644 --- a/tests/integration/test_lost_part/test.py +++ 
b/tests/integration/test_lost_part/test.py @@ -10,8 +10,9 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) +node2 = cluster.add_instance("node2", with_zookeeper=True) + @pytest.fixture(scope="module") def start_cluster(): @@ -25,16 +26,24 @@ def start_cluster(): def remove_part_from_disk(node, table, part_name): part_path = node.query( - "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format(table, part_name)).strip() + "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format( + table, part_name + ) + ).strip() if not part_path: raise Exception("Part " + part_name + "doesn't exist") - node.exec_in_container(['bash', '-c', 'rm -r {p}/*'.format(p=part_path)], privileged=True) + node.exec_in_container( + ["bash", "-c", "rm -r {p}/*".format(p=part_path)], privileged=True + ) def test_lost_part_same_replica(start_cluster): for node in [node1, node2]: node.query( - "CREATE TABLE mt0 (id UInt64, date Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/t', '{}') ORDER BY tuple() PARTITION BY date".format(node.name)) + "CREATE TABLE mt0 (id UInt64, date Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/t', '{}') ORDER BY tuple() PARTITION BY date".format( + node.name + ) + ) node1.query("SYSTEM STOP MERGES mt0") node2.query("SYSTEM STOP REPLICATION QUEUES") @@ -43,7 +52,9 @@ def test_lost_part_same_replica(start_cluster): node1.query("INSERT INTO mt0 VALUES ({}, toDate('2020-10-01'))".format(i)) for i in range(20): - parts_to_merge = node1.query("SELECT parts_to_merge FROM system.replication_queue") + parts_to_merge = node1.query( + "SELECT parts_to_merge FROM system.replication_queue" + ) if parts_to_merge: parts_list = list(sorted(ast.literal_eval(parts_to_merge))) print("Got parts list", parts_list) @@ -55,7 +66,7 @@ def test_lost_part_same_replica(start_cluster): victim_part_from_the_middle = random.choice(parts_list[1:-1]) print("Will corrupt part", victim_part_from_the_middle) - remove_part_from_disk(node1, 'mt0', victim_part_from_the_middle) + remove_part_from_disk(node1, "mt0", victim_part_from_the_middle) node1.query("DETACH TABLE mt0") @@ -69,9 +80,15 @@ def test_lost_part_same_replica(start_cluster): break time.sleep(1) else: - assert False, "Still have something in replication queue:\n" + node1.query("SELECT count() FROM system.replication_queue FORMAT Vertical") + assert False, "Still have something in replication queue:\n" + node1.query( + "SELECT count() FROM system.replication_queue FORMAT Vertical" + ) - assert node1.contains_in_log("Created empty part"), "Seems like empty part {} is not created or log message changed".format(victim_part_from_the_middle) + assert node1.contains_in_log( + "Created empty part" + ), "Seems like empty part {} is not created or log message changed".format( + victim_part_from_the_middle + ) assert node1.query("SELECT COUNT() FROM mt0") == "4\n" @@ -80,10 +97,14 @@ def test_lost_part_same_replica(start_cluster): assert_eq_with_retry(node2, "SELECT COUNT() FROM mt0", "4") assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue", "0") + def test_lost_part_other_replica(start_cluster): for node in [node1, node2]: node.query( - "CREATE TABLE mt1 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t1', '{}') ORDER BY tuple()".format(node.name)) + "CREATE TABLE mt1 (id UInt64) 
ENGINE ReplicatedMergeTree('/clickhouse/tables/t1', '{}') ORDER BY tuple()".format( + node.name + ) + ) node1.query("SYSTEM STOP MERGES mt1") node2.query("SYSTEM STOP REPLICATION QUEUES") @@ -92,7 +113,9 @@ def test_lost_part_other_replica(start_cluster): node1.query("INSERT INTO mt1 VALUES ({})".format(i)) for i in range(20): - parts_to_merge = node1.query("SELECT parts_to_merge FROM system.replication_queue") + parts_to_merge = node1.query( + "SELECT parts_to_merge FROM system.replication_queue" + ) if parts_to_merge: parts_list = list(sorted(ast.literal_eval(parts_to_merge))) print("Got parts list", parts_list) @@ -104,7 +127,7 @@ def test_lost_part_other_replica(start_cluster): victim_part_from_the_middle = random.choice(parts_list[1:-1]) print("Will corrupt part", victim_part_from_the_middle) - remove_part_from_disk(node1, 'mt1', victim_part_from_the_middle) + remove_part_from_disk(node1, "mt1", victim_part_from_the_middle) # other way to detect broken parts node1.query("CHECK TABLE mt1") @@ -117,9 +140,15 @@ def test_lost_part_other_replica(start_cluster): break time.sleep(1) else: - assert False, "Still have something in replication queue:\n" + node2.query("SELECT * FROM system.replication_queue FORMAT Vertical") + assert False, "Still have something in replication queue:\n" + node2.query( + "SELECT * FROM system.replication_queue FORMAT Vertical" + ) - assert node1.contains_in_log("Created empty part"), "Seems like empty part {} is not created or log message changed".format(victim_part_from_the_middle) + assert node1.contains_in_log( + "Created empty part" + ), "Seems like empty part {} is not created or log message changed".format( + victim_part_from_the_middle + ) assert_eq_with_retry(node2, "SELECT COUNT() FROM mt1", "4") assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue", "0") @@ -129,10 +158,14 @@ def test_lost_part_other_replica(start_cluster): assert_eq_with_retry(node1, "SELECT COUNT() FROM mt1", "4") assert_eq_with_retry(node1, "SELECT COUNT() FROM system.replication_queue", "0") + def test_lost_part_mutation(start_cluster): for node in [node1, node2]: node.query( - "CREATE TABLE mt2 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t2', '{}') ORDER BY tuple()".format(node.name)) + "CREATE TABLE mt2 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t2', '{}') ORDER BY tuple()".format( + node.name + ) + ) node1.query("SYSTEM STOP MERGES mt2") node2.query("SYSTEM STOP REPLICATION QUEUES") @@ -140,7 +173,9 @@ def test_lost_part_mutation(start_cluster): for i in range(2): node1.query("INSERT INTO mt2 VALUES ({})".format(i)) - node1.query("ALTER TABLE mt2 UPDATE id = 777 WHERE 1", settings={"mutations_sync": "0"}) + node1.query( + "ALTER TABLE mt2 UPDATE id = 777 WHERE 1", settings={"mutations_sync": "0"} + ) for i in range(20): parts_to_mutate = node1.query("SELECT count() FROM system.replication_queue") @@ -149,7 +184,7 @@ def test_lost_part_mutation(start_cluster): break time.sleep(1) - remove_part_from_disk(node1, 'mt2', 'all_1_1_0') + remove_part_from_disk(node1, "mt2", "all_1_1_0") # other way to detect broken parts node1.query("CHECK TABLE mt2") @@ -162,7 +197,9 @@ def test_lost_part_mutation(start_cluster): break time.sleep(1) else: - assert False, "Still have something in replication queue:\n" + node1.query("SELECT * FROM system.replication_queue FORMAT Vertical") + assert False, "Still have something in replication queue:\n" + node1.query( + "SELECT * FROM system.replication_queue FORMAT Vertical" + ) 
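The hunks in this file repeatedly reformat the same wait-and-verify idiom: poll system.replication_queue a bounded number of times, break once it drains, and rely on the loop's else branch to fail the test with the queue contents. A compact sketch of that idiom, assuming a node object exposing the query() helper used above (the 20-retry bound and 1-second sleep mirror the tests):

import time


def wait_replication_queue_empty(node, retries=20, delay=1):
    # The else branch runs only if the loop never hit break, i.e. the queue
    # still has entries after all retries.
    for _ in range(retries):
        if int(node.query("SELECT count() FROM system.replication_queue")) == 0:
            break
        time.sleep(delay)
    else:
        assert False, "Still have something in replication queue:\n" + node.query(
            "SELECT * FROM system.replication_queue FORMAT Vertical"
        )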
assert_eq_with_retry(node1, "SELECT COUNT() FROM mt2", "1") assert_eq_with_retry(node1, "SELECT SUM(id) FROM mt2", "777") @@ -179,7 +216,8 @@ def test_lost_last_part(start_cluster): for node in [node1, node2]: node.query( "CREATE TABLE mt3 (id UInt64, p String) ENGINE ReplicatedMergeTree('/clickhouse/tables/t3', '{}') " - "ORDER BY tuple() PARTITION BY p".format(node.name)) + "ORDER BY tuple() PARTITION BY p".format(node.name) + ) node1.query("SYSTEM STOP MERGES mt3") node2.query("SYSTEM STOP REPLICATION QUEUES") @@ -188,10 +226,12 @@ def test_lost_last_part(start_cluster): node1.query("INSERT INTO mt3 VALUES ({}, 'x')".format(i)) # actually not important - node1.query("ALTER TABLE mt3 UPDATE id = 777 WHERE 1", settings={"mutations_sync": "0"}) + node1.query( + "ALTER TABLE mt3 UPDATE id = 777 WHERE 1", settings={"mutations_sync": "0"} + ) partition_id = node1.query("select partitionId('x')").strip() - remove_part_from_disk(node1, 'mt3', '{}_0_0_0'.format(partition_id)) + remove_part_from_disk(node1, "mt3", "{}_0_0_0".format(partition_id)) # other way to detect broken parts node1.query("CHECK TABLE mt3") @@ -200,8 +240,12 @@ def test_lost_last_part(start_cluster): for i in range(10): result = node1.query("SELECT count() FROM system.replication_queue") - assert int(result) <= 1, "Have a lot of entries in queue {}".format(node1.query("SELECT * FROM system.replication_queue FORMAT Vertical")) - if node1.contains_in_log("Cannot create empty part") and node1.contains_in_log("DROP/DETACH PARTITION"): + assert int(result) <= 1, "Have a lot of entries in queue {}".format( + node1.query("SELECT * FROM system.replication_queue FORMAT Vertical") + ) + if node1.contains_in_log("Cannot create empty part") and node1.contains_in_log( + "DROP/DETACH PARTITION" + ): break time.sleep(1) else: diff --git a/tests/integration/test_lost_part_during_startup/test.py b/tests/integration/test_lost_part_during_startup/test.py index f9d24682354..b110a17704b 100644 --- a/tests/integration/test_lost_part_during_startup/test.py +++ b/tests/integration/test_lost_part_during_startup/test.py @@ -6,8 +6,8 @@ from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True, stay_alive=True) -node2 = cluster.add_instance('node2', with_zookeeper=True, stay_alive=True) +node1 = cluster.add_instance("node1", with_zookeeper=True, stay_alive=True) +node2 = cluster.add_instance("node2", with_zookeeper=True, stay_alive=True) @pytest.fixture(scope="module") @@ -22,17 +22,25 @@ def start_cluster(): finally: cluster.shutdown() + def remove_part_from_disk(node, table, part_name): part_path = node.query( - "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format(table, part_name)).strip() + "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format( + table, part_name + ) + ).strip() if not part_path: raise Exception("Part " + part_name + "doesn't exist") - node.exec_in_container(['bash', '-c', 'rm -r {p}/*'.format(p=part_path)], privileged=True) + node.exec_in_container( + ["bash", "-c", "rm -r {p}/*".format(p=part_path)], privileged=True + ) def test_lost_part_during_startup(start_cluster): for i, node in enumerate([node1, node2]): - node.query(f"CREATE TABLE test_lost (value UInt64) Engine = ReplicatedMergeTree('/clickhouse/test_lost', '{i + 1}') ORDER BY tuple()") + node.query( + f"CREATE TABLE test_lost (value UInt64) Engine = ReplicatedMergeTree('/clickhouse/test_lost', '{i + 
1}') ORDER BY tuple()" + ) for i in range(4): node2.query(f"INSERT INTO test_lost VALUES({i})") @@ -40,9 +48,14 @@ def test_lost_part_during_startup(start_cluster): node2.query("OPTIMIZE TABLE test_lost FINAL") node1.query("SYSTEM SYNC REPLICA test_lost") - assert node2.query("SELECT sum(value) FROM test_lost") == str(sum(i for i in range(4))) + '\n' - assert node1.query("SELECT sum(value) FROM test_lost") == str(sum(i for i in range(4))) + '\n' - + assert ( + node2.query("SELECT sum(value) FROM test_lost") + == str(sum(i for i in range(4))) + "\n" + ) + assert ( + node1.query("SELECT sum(value) FROM test_lost") + == str(sum(i for i in range(4))) + "\n" + ) remove_part_from_disk(node2, "test_lost", "all_0_3_1") remove_part_from_disk(node2, "test_lost", "all_1_1_0") @@ -71,5 +84,11 @@ def test_lost_part_during_startup(start_cluster): node2.query("SYSTEM SYNC REPLICA test_lost") node1.query("SYSTEM SYNC REPLICA test_lost") - assert node2.query("SELECT sum(value) FROM test_lost") == str(sum(i for i in range(4)) + sum(i for i in range(7, 13))) + '\n' - assert node1.query("SELECT sum(value) FROM test_lost") == str(sum(i for i in range(4)) + sum(i for i in range(7, 13))) + '\n' + assert ( + node2.query("SELECT sum(value) FROM test_lost") + == str(sum(i for i in range(4)) + sum(i for i in range(7, 13))) + "\n" + ) + assert ( + node1.query("SELECT sum(value) FROM test_lost") + == str(sum(i for i in range(4)) + sum(i for i in range(7, 13))) + "\n" + ) diff --git a/tests/integration/test_match_process_uid_against_data_owner/test.py b/tests/integration/test_match_process_uid_against_data_owner/test.py index cf8a4bc711b..bbcee941833 100644 --- a/tests/integration/test_match_process_uid_against_data_owner/test.py +++ b/tests/integration/test_match_process_uid_against_data_owner/test.py @@ -5,10 +5,11 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True) -other_user_id = pwd.getpwnam('nobody').pw_uid +node = cluster.add_instance("node", stay_alive=True) +other_user_id = pwd.getpwnam("nobody").pw_uid current_user_id = os.getuid() + @pytest.fixture(scope="module", autouse=True) def started_cluster(): try: @@ -25,14 +26,22 @@ def started_cluster(): def test_different_user(started_cluster): with pytest.raises(Exception): node.stop_clickhouse() - node.exec_in_container(["bash", "-c", f"chown {other_user_id} /var/lib/clickhouse"], privileged=True) + node.exec_in_container( + ["bash", "-c", f"chown {other_user_id} /var/lib/clickhouse"], + privileged=True, + ) node.start_clickhouse(start_wait_sec=3) log = node.grep_in_log("Effective") expected_message = "Effective user of the process \(.*\) does not match the owner of the data \(.*\)\. Run under 'sudo -u .*'\." 
if re.search(expected_message, log) is None: pytest.fail( - 'Expected the server to fail with a message "{}", but the last message is "{}"'.format(expected_message, log)) - node.exec_in_container(["bash", "-c", f"chown {current_user_id} /var/lib/clickhouse"], privileged=True) + 'Expected the server to fail with a message "{}", but the last message is "{}"'.format( + expected_message, log + ) + ) + node.exec_in_container( + ["bash", "-c", f"chown {current_user_id} /var/lib/clickhouse"], privileged=True + ) node.start_clickhouse() node.rotate_logs() diff --git a/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py b/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py index 48d577a9250..227c872c111 100644 --- a/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py +++ b/tests/integration/test_materialized_mysql_database/materialize_with_ddl.py @@ -12,8 +12,9 @@ import threading from multiprocessing.dummy import Pool from helpers.test_tools import assert_eq_with_retry + def check_query(clickhouse_node, query, result_set, retry_count=10, interval_seconds=3): - lastest_result = '' + lastest_result = "" for i in range(retry_count): try: @@ -28,7 +29,9 @@ def check_query(clickhouse_node, query, result_set, retry_count=10, interval_sec time.sleep(interval_seconds) else: result_got = clickhouse_node.query(query) - assert result_got == result_set, f"Got result {result_got}, while expected result {result_set}" + assert ( + result_got == result_set + ), f"Got result {result_got}, while expected result {result_set}" def dml_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name): @@ -37,49 +40,67 @@ def dml_with_materialized_mysql_database(clickhouse_node, mysql_node, service_na mysql_node.query("CREATE DATABASE test_database_dml DEFAULT CHARACTER SET 'utf8'") # existed before the mapping was created - mysql_node.query("CREATE TABLE test_database_dml.test_table_1 (" - "`key` INT NOT NULL PRIMARY KEY, " - "unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, " - "unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, " - "unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, " - "unsigned_int INT UNSIGNED, _int INT, " - "unsigned_integer INTEGER UNSIGNED, _integer INTEGER, " - "unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, " - "/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */" - "unsigned_float FLOAT UNSIGNED, _float FLOAT, " - "unsigned_double DOUBLE UNSIGNED, _double DOUBLE, " - "_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), " - "/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */" - "_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;") + mysql_node.query( + "CREATE TABLE test_database_dml.test_table_1 (" + "`key` INT NOT NULL PRIMARY KEY, " + "unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, " + "unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, " + "unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, " + "unsigned_int INT UNSIGNED, _int INT, " + "unsigned_integer INTEGER UNSIGNED, _integer INTEGER, " + "unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, " + "/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */" + "unsigned_float FLOAT UNSIGNED, _float FLOAT, " + "unsigned_double DOUBLE UNSIGNED, _double DOUBLE, " + "_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), " 
+ "/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */" + "_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;" + ) # it already has some data - mysql_node.query(""" + mysql_node.query( + """ INSERT INTO test_database_dml.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary', '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true); - """) + """ + ) clickhouse_node.query( "CREATE DATABASE test_database_dml ENGINE = MaterializeMySQL('{}:3306', 'test_database_dml', 'root', 'clickhouse')".format( - service_name)) + service_name + ) + ) assert "test_database_dml" in clickhouse_node.query("SHOW DATABASES") - check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", - "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t" - "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n") + check_query( + clickhouse_node, + "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", + "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t" + "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n", + ) - mysql_node.query(""" + mysql_node.query( + """ INSERT INTO test_database_dml.test_table_1 VALUES(2, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary', '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', false); - """) + """ + ) - check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", - "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t" - "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t" - "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t2020-01-01 00:00:00\t0\n") + check_query( + clickhouse_node, + "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", + "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t" + "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t" + "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t2020-01-01 00:00:00\t0\n", + ) - mysql_node.query("UPDATE test_database_dml.test_table_1 SET unsigned_tiny_int = 2 WHERE `key` = 1") + mysql_node.query( + "UPDATE test_database_dml.test_table_1 SET unsigned_tiny_int = 2 WHERE `key` = 1" + ) - check_query(clickhouse_node, """ + check_query( + clickhouse_node, + """ SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int, small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, @@ -88,31 +109,46 @@ def dml_with_materialized_mysql_database(clickhouse_node, mysql_node, service_na """, "1\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t" "2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t" - "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n") + "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n", + ) # update primary key - mysql_node.query("UPDATE test_database_dml.test_table_1 SET `key` = 3 WHERE `unsigned_tiny_int` = 2") + 
mysql_node.query( + "UPDATE test_database_dml.test_table_1 SET `key` = 3 WHERE `unsigned_tiny_int` = 2" + ) - check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int," - " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, " - " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, " - " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ " - " _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", - "2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t" - "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n3\t2\t-1\t2\t-2\t3\t-3\t" - "4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t1\n") + check_query( + clickhouse_node, + "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int," + " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, " + " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, " + " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ " + " _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", + "2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t" + "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n3\t2\t-1\t2\t-2\t3\t-3\t" + "4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t1\n", + ) - mysql_node.query('DELETE FROM test_database_dml.test_table_1 WHERE `key` = 2') - check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int," - " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, " - " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, " - " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ " - " _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", - "3\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t" - "2020-01-01 00:00:00\t1\n") + mysql_node.query("DELETE FROM test_database_dml.test_table_1 WHERE `key` = 2") + check_query( + clickhouse_node, + "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int," + " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, " + " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, " + " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ " + " _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", + "3\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t" + "2020-01-01 00:00:00\t1\n", + ) - mysql_node.query('DELETE FROM test_database_dml.test_table_1 WHERE `unsigned_tiny_int` = 2') - check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", "") + mysql_node.query( + "DELETE FROM test_database_dml.test_table_1 WHERE `unsigned_tiny_int` = 2" + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", + "", + ) clickhouse_node.query("DROP DATABASE test_database_dml") mysql_node.query("DROP DATABASE test_database_dml") @@ -124,109 +160,232 @@ def 
materialized_mysql_database_with_views(clickhouse_node, mysql_node, service_ mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") # existed before the mapping was created - mysql_node.query("CREATE TABLE test_database.test_table_1 (" - "`key` INT NOT NULL PRIMARY KEY, " - "unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, " - "unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, " - "unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, " - "unsigned_int INT UNSIGNED, _int INT, " - "unsigned_integer INTEGER UNSIGNED, _integer INTEGER, " - "unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, " - "/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */" - "unsigned_float FLOAT UNSIGNED, _float FLOAT, " - "unsigned_double DOUBLE UNSIGNED, _double DOUBLE, " - "_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), " - "/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */" - "_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;") + mysql_node.query( + "CREATE TABLE test_database.test_table_1 (" + "`key` INT NOT NULL PRIMARY KEY, " + "unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, " + "unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, " + "unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, " + "unsigned_int INT UNSIGNED, _int INT, " + "unsigned_integer INTEGER UNSIGNED, _integer INTEGER, " + "unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, " + "/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */" + "unsigned_float FLOAT UNSIGNED, _float FLOAT, " + "unsigned_double DOUBLE UNSIGNED, _double DOUBLE, " + "_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), " + "/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */" + "_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;" + ) - mysql_node.query("CREATE VIEW test_database.test_table_1_view AS SELECT SUM(tiny_int) FROM test_database.test_table_1 GROUP BY _date;") + mysql_node.query( + "CREATE VIEW test_database.test_table_1_view AS SELECT SUM(tiny_int) FROM test_database.test_table_1 GROUP BY _date;" + ) # it already has some data - mysql_node.query(""" + mysql_node.query( + """ INSERT INTO test_database.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary', '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true); - """) + """ + ) clickhouse_node.query( "CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format( - service_name)) + service_name + ) + ) assert "test_database" in clickhouse_node.query("SHOW DATABASES") - check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\n") + check_query( + clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\n" + ) clickhouse_node.query("DROP DATABASE test_database") mysql_node.query("DROP DATABASE test_database") -def materialized_mysql_database_with_datetime_and_decimal(clickhouse_node, mysql_node, service_name): +def materialized_mysql_database_with_datetime_and_decimal( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS test_database_dt") clickhouse_node.query("DROP DATABASE IF EXISTS test_database_dt") mysql_node.query("CREATE DATABASE test_database_dt DEFAULT CHARACTER SET 
'utf8'") - mysql_node.query("CREATE TABLE test_database_dt.test_table_1 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;") - mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")") - mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)") - mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")") - mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)") + mysql_node.query( + "CREATE TABLE test_database_dt.test_table_1 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;" + ) + mysql_node.query( + "INSERT INTO test_database_dt.test_table_1 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + + ("9" * 35) + + "." + + ("9" * 30) + + ")" + ) + mysql_node.query( + "INSERT INTO test_database_dt.test_table_1 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + + ("0" * 29) + + "1)" + ) + mysql_node.query( + "INSERT INTO test_database_dt.test_table_1 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + + ("9" * 35) + + "." + + ("9" * 30) + + ")" + ) + mysql_node.query( + "INSERT INTO test_database_dt.test_table_1 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + + ("0" * 29) + + "1)" + ) - clickhouse_node.query("CREATE DATABASE test_database_dt ENGINE = MaterializedMySQL('{}:3306', 'test_database_dt', 'root', 'clickhouse')".format(service_name)) + clickhouse_node.query( + "CREATE DATABASE test_database_dt ENGINE = MaterializedMySQL('{}:3306', 'test_database_dt', 'root', 'clickhouse')".format( + service_name + ) + ) assert "test_database_dt" in clickhouse_node.query("SHOW DATABASES") - check_query(clickhouse_node, "SELECT * FROM test_database_dt.test_table_1 ORDER BY key FORMAT TSV", - "1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n" - "2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n" - "3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n" - "4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n") + check_query( + clickhouse_node, + "SELECT * FROM test_database_dt.test_table_1 ORDER BY key FORMAT TSV", + "1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + + ("9" * 35) + + "." + + ("9" * 30) + + "\n" + "2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + + ("0" * 29) + + "1\n" + "3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + + ("9" * 35) + + "." + + ("9" * 30) + + "\n" + "4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + + ("0" * 29) + + "1\n", + ) - mysql_node.query("CREATE TABLE test_database_dt.test_table_2 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;") - mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." 
+ ('9' * 30) + ")") - mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)") - mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")") - mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)") + mysql_node.query( + "CREATE TABLE test_database_dt.test_table_2 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;" + ) + mysql_node.query( + "INSERT INTO test_database_dt.test_table_2 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + + ("9" * 35) + + "." + + ("9" * 30) + + ")" + ) + mysql_node.query( + "INSERT INTO test_database_dt.test_table_2 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + + ("0" * 29) + + "1)" + ) + mysql_node.query( + "INSERT INTO test_database_dt.test_table_2 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + + ("9" * 35) + + "." + + ("9" * 30) + + ")" + ) + mysql_node.query( + "INSERT INTO test_database_dt.test_table_2 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + + ("0" * 29) + + "1)" + ) - check_query(clickhouse_node, "SELECT * FROM test_database_dt.test_table_2 ORDER BY key FORMAT TSV", - "1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n" - "2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n" - "3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n" - "4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n") + check_query( + clickhouse_node, + "SELECT * FROM test_database_dt.test_table_2 ORDER BY key FORMAT TSV", + "1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + + ("9" * 35) + + "." + + ("9" * 30) + + "\n" + "2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + + ("0" * 29) + + "1\n" + "3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + + ("9" * 35) + + "." + + ("9" * 30) + + "\n" + "4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." 
+ + ("0" * 29) + + "1\n", + ) clickhouse_node.query("DROP DATABASE test_database_dt") mysql_node.query("DROP DATABASE test_database_dt") -def drop_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name): +def drop_table_with_materialized_mysql_database( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS test_database_drop") clickhouse_node.query("DROP DATABASE IF EXISTS test_database_drop") mysql_node.query("CREATE DATABASE test_database_drop DEFAULT CHARACTER SET 'utf8'") - mysql_node.query("CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;") + mysql_node.query( + "CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;" + ) mysql_node.query("DROP TABLE test_database_drop.test_table_1;") - mysql_node.query("CREATE TABLE test_database_drop.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;") + mysql_node.query( + "CREATE TABLE test_database_drop.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;" + ) mysql_node.query("TRUNCATE TABLE test_database_drop.test_table_2;") # create mapping clickhouse_node.query( "CREATE DATABASE test_database_drop ENGINE = MaterializedMySQL('{}:3306', 'test_database_drop', 'root', 'clickhouse')".format( - service_name)) + service_name + ) + ) assert "test_database_drop" in clickhouse_node.query("SHOW DATABASES") - check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", "") + check_query( + clickhouse_node, + "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", + "", + ) - mysql_node.query("INSERT INTO test_database_drop.test_table_2 VALUES(1), (2), (3), (4), (5), (6)") - mysql_node.query("CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_drop FORMAT TSV", "test_table_1\ntest_table_2\n") - check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", - "1\n2\n3\n4\n5\n6\n") + mysql_node.query( + "INSERT INTO test_database_drop.test_table_2 VALUES(1), (2), (3), (4), (5), (6)" + ) + mysql_node.query( + "CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;" + ) + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_drop FORMAT TSV", + "test_table_1\ntest_table_2\n", + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", + "1\n2\n3\n4\n5\n6\n", + ) mysql_node.query("DROP TABLE test_database_drop.test_table_1;") mysql_node.query("TRUNCATE TABLE test_database_drop.test_table_2;") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_drop FORMAT TSV", "test_table_2\n") - check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", "") + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_drop FORMAT TSV", + "test_table_2\n", + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", + "", + ) clickhouse_node.query("DROP DATABASE test_database_drop") mysql_node.query("DROP DATABASE test_database_drop") -def create_table_like_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name): +def create_table_like_with_materialize_mysql_database( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS create_like") mysql_node.query("DROP 
DATABASE IF EXISTS create_like2") clickhouse_node.query("DROP DATABASE IF EXISTS create_like") @@ -237,7 +396,8 @@ def create_table_like_with_materialize_mysql_database(clickhouse_node, mysql_nod mysql_node.query("CREATE TABLE create_like2.t1 LIKE create_like.t1") clickhouse_node.query( - f"CREATE DATABASE create_like ENGINE = MaterializeMySQL('{service_name}:3306', 'create_like', 'root', 'clickhouse')") + f"CREATE DATABASE create_like ENGINE = MaterializeMySQL('{service_name}:3306', 'create_like', 'root', 'clickhouse')" + ) mysql_node.query("CREATE TABLE create_like.t2 LIKE create_like.t1") check_query(clickhouse_node, "SHOW TABLES FROM create_like", "t1\nt2\n") @@ -253,204 +413,388 @@ def create_table_like_with_materialize_mysql_database(clickhouse_node, mysql_nod mysql_node.query("DROP DATABASE create_like2") -def create_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name): +def create_table_with_materialized_mysql_database( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS test_database_create") clickhouse_node.query("DROP DATABASE IF EXISTS test_database_create") - mysql_node.query("CREATE DATABASE test_database_create DEFAULT CHARACTER SET 'utf8'") + mysql_node.query( + "CREATE DATABASE test_database_create DEFAULT CHARACTER SET 'utf8'" + ) # existed before the mapping was created - mysql_node.query("CREATE TABLE test_database_create.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;") + mysql_node.query( + "CREATE TABLE test_database_create.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;" + ) # it already has some data - mysql_node.query("INSERT INTO test_database_create.test_table_1 VALUES(1), (2), (3), (5), (6), (7);") + mysql_node.query( + "INSERT INTO test_database_create.test_table_1 VALUES(1), (2), (3), (5), (6), (7);" + ) # create mapping clickhouse_node.query( "CREATE DATABASE test_database_create ENGINE = MaterializedMySQL('{}:3306', 'test_database_create', 'root', 'clickhouse')".format( - service_name)) + service_name + ) + ) # Check for pre-existing status assert "test_database_create" in clickhouse_node.query("SHOW DATABASES") - check_query(clickhouse_node, "SELECT * FROM test_database_create.test_table_1 ORDER BY id FORMAT TSV", - "1\n2\n3\n5\n6\n7\n") + check_query( + clickhouse_node, + "SELECT * FROM test_database_create.test_table_1 ORDER BY id FORMAT TSV", + "1\n2\n3\n5\n6\n7\n", + ) - mysql_node.query("CREATE TABLE test_database_create.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;") - mysql_node.query("INSERT INTO test_database_create.test_table_2 VALUES(1), (2), (3), (4), (5), (6);") - check_query(clickhouse_node, "SELECT * FROM test_database_create.test_table_2 ORDER BY id FORMAT TSV", - "1\n2\n3\n4\n5\n6\n") + mysql_node.query( + "CREATE TABLE test_database_create.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;" + ) + mysql_node.query( + "INSERT INTO test_database_create.test_table_2 VALUES(1), (2), (3), (4), (5), (6);" + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_create.test_table_2 ORDER BY id FORMAT TSV", + "1\n2\n3\n4\n5\n6\n", + ) clickhouse_node.query("DROP DATABASE test_database_create") mysql_node.query("DROP DATABASE test_database_create") -def rename_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name): +def rename_table_with_materialized_mysql_database( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS test_database_rename") 
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_rename") - mysql_node.query("CREATE DATABASE test_database_rename DEFAULT CHARACTER SET 'utf8'") - mysql_node.query("CREATE TABLE test_database_rename.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;") + mysql_node.query( + "CREATE DATABASE test_database_rename DEFAULT CHARACTER SET 'utf8'" + ) + mysql_node.query( + "CREATE TABLE test_database_rename.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;" + ) - mysql_node.query("RENAME TABLE test_database_rename.test_table_1 TO test_database_rename.test_table_2") + mysql_node.query( + "RENAME TABLE test_database_rename.test_table_1 TO test_database_rename.test_table_2" + ) # create mapping clickhouse_node.query( "CREATE DATABASE test_database_rename ENGINE = MaterializedMySQL('{}:3306', 'test_database_rename', 'root', 'clickhouse')".format( - service_name)) + service_name + ) + ) assert "test_database_rename" in clickhouse_node.query("SHOW DATABASES") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename FORMAT TSV", "test_table_2\n") - mysql_node.query("RENAME TABLE test_database_rename.test_table_2 TO test_database_rename.test_table_1") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename FORMAT TSV", "test_table_1\n") + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_rename FORMAT TSV", + "test_table_2\n", + ) + mysql_node.query( + "RENAME TABLE test_database_rename.test_table_2 TO test_database_rename.test_table_1" + ) + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_rename FORMAT TSV", + "test_table_1\n", + ) clickhouse_node.query("DROP DATABASE test_database_rename") mysql_node.query("DROP DATABASE test_database_rename") -def alter_add_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name): +def alter_add_column_with_materialized_mysql_database( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS test_database_add") clickhouse_node.query("DROP DATABASE IF EXISTS test_database_add") mysql_node.query("CREATE DATABASE test_database_add DEFAULT CHARACTER SET 'utf8'") - mysql_node.query("CREATE TABLE test_database_add.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;") + mysql_node.query( + "CREATE TABLE test_database_add.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;" + ) - mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_1 INT NOT NULL") - mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_2 INT NOT NULL FIRST") - mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1") - mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + ( - "0" if service_name == "mysql57" else "(id)")) + mysql_node.query( + "ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_1 INT NOT NULL" + ) + mysql_node.query( + "ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_2 INT NOT NULL FIRST" + ) + mysql_node.query( + "ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1" + ) + mysql_node.query( + "ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + + ("0" if service_name == "mysql57" else "(id)") + ) # create mapping clickhouse_node.query( "CREATE DATABASE test_database_add ENGINE = MaterializedMySQL('{}:3306', 'test_database_add', 'root', 
'clickhouse')".format( - service_name)) + service_name + ) + ) assert "test_database_add" in clickhouse_node.query("SHOW DATABASES") - check_query(clickhouse_node, "DESC test_database_add.test_table_1 FORMAT TSV", - "add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") - mysql_node.query("CREATE TABLE test_database_add.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_add FORMAT TSV", "test_table_1\ntest_table_2\n") - check_query(clickhouse_node, "DESC test_database_add.test_table_2 FORMAT TSV", - "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + check_query( + clickhouse_node, + "DESC test_database_add.test_table_1 FORMAT TSV", + "add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) mysql_node.query( - "ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_1 INT NOT NULL, ADD COLUMN add_column_2 INT NOT NULL FIRST") + "CREATE TABLE test_database_add.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;" + ) + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_add FORMAT TSV", + "test_table_1\ntest_table_2\n", + ) + check_query( + clickhouse_node, + "DESC test_database_add.test_table_2 FORMAT TSV", + "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) mysql_node.query( - "ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1, ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + ( - "0" if service_name == "mysql57" else "(id)")) + "ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_1 INT NOT NULL, ADD COLUMN add_column_2 INT NOT NULL FIRST" + ) + mysql_node.query( + "ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1, ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + + ("0" if service_name == "mysql57" else "(id)") + ) default_expression = "DEFAULT\t0" if service_name == "mysql57" else "DEFAULT\tid" - check_query(clickhouse_node, "DESC test_database_add.test_table_2 FORMAT TSV", - "add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t" + default_expression + "\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + check_query( + clickhouse_node, + "DESC test_database_add.test_table_2 FORMAT TSV", + "add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t" + + default_expression + + "\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) - mysql_node.query("INSERT INTO test_database_add.test_table_2 VALUES(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)") - check_query(clickhouse_node, "SELECT * FROM test_database_add.test_table_2 ORDER BY id FORMAT TSV", - "1\t2\t3\t4\t5\n6\t7\t8\t9\t10\n") + mysql_node.query( + "INSERT INTO test_database_add.test_table_2 VALUES(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)" + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_add.test_table_2 ORDER BY id FORMAT TSV", + 
"1\t2\t3\t4\t5\n6\t7\t8\t9\t10\n", + ) clickhouse_node.query("DROP DATABASE test_database_add") mysql_node.query("DROP DATABASE test_database_add") -def alter_drop_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name): +def alter_drop_column_with_materialized_mysql_database( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_drop") clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_drop") - mysql_node.query("CREATE DATABASE test_database_alter_drop DEFAULT CHARACTER SET 'utf8'") mysql_node.query( - "CREATE TABLE test_database_alter_drop.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;") + "CREATE DATABASE test_database_alter_drop DEFAULT CHARACTER SET 'utf8'" + ) + mysql_node.query( + "CREATE TABLE test_database_alter_drop.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;" + ) - mysql_node.query("ALTER TABLE test_database_alter_drop.test_table_1 DROP COLUMN drop_column") + mysql_node.query( + "ALTER TABLE test_database_alter_drop.test_table_1 DROP COLUMN drop_column" + ) # create mapping clickhouse_node.query( "CREATE DATABASE test_database_alter_drop ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_drop', 'root', 'clickhouse')".format( - service_name)) + service_name + ) + ) assert "test_database_alter_drop" in clickhouse_node.query("SHOW DATABASES") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", "test_table_1\n") - check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_1 FORMAT TSV", - "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", + "test_table_1\n", + ) + check_query( + clickhouse_node, + "DESC test_database_alter_drop.test_table_1 FORMAT TSV", + "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) mysql_node.query( - "CREATE TABLE test_database_alter_drop.test_table_2 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", "test_table_1\ntest_table_2\n") - check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_2 FORMAT TSV", - "id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") - mysql_node.query("ALTER TABLE test_database_alter_drop.test_table_2 DROP COLUMN drop_column") - check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_2 FORMAT TSV", - "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + "CREATE TABLE test_database_alter_drop.test_table_2 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;" + ) + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", + "test_table_1\ntest_table_2\n", + ) + check_query( + clickhouse_node, + "DESC test_database_alter_drop.test_table_2 FORMAT TSV", + "id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) + mysql_node.query( + "ALTER TABLE test_database_alter_drop.test_table_2 DROP COLUMN drop_column" + ) + check_query( + clickhouse_node, + "DESC test_database_alter_drop.test_table_2 FORMAT TSV", + 
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) - mysql_node.query("INSERT INTO test_database_alter_drop.test_table_2 VALUES(1), (2), (3), (4), (5)") - check_query(clickhouse_node, "SELECT * FROM test_database_alter_drop.test_table_2 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n") + mysql_node.query( + "INSERT INTO test_database_alter_drop.test_table_2 VALUES(1), (2), (3), (4), (5)" + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_alter_drop.test_table_2 ORDER BY id FORMAT TSV", + "1\n2\n3\n4\n5\n", + ) clickhouse_node.query("DROP DATABASE test_database_alter_drop") mysql_node.query("DROP DATABASE test_database_alter_drop") -def alter_rename_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name): +def alter_rename_column_with_materialized_mysql_database( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_rename") clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_rename") - mysql_node.query("CREATE DATABASE test_database_alter_rename DEFAULT CHARACTER SET 'utf8'") + mysql_node.query( + "CREATE DATABASE test_database_alter_rename DEFAULT CHARACTER SET 'utf8'" + ) # maybe should test rename primary key? mysql_node.query( - "CREATE TABLE test_database_alter_rename.test_table_1 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;") + "CREATE TABLE test_database_alter_rename.test_table_1 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;" + ) - mysql_node.query("ALTER TABLE test_database_alter_rename.test_table_1 RENAME COLUMN rename_column TO new_column_name") + mysql_node.query( + "ALTER TABLE test_database_alter_rename.test_table_1 RENAME COLUMN rename_column TO new_column_name" + ) # create mapping clickhouse_node.query( "CREATE DATABASE test_database_alter_rename ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_rename', 'root', 'clickhouse')".format( - service_name)) + service_name + ) + ) assert "test_database_alter_rename" in clickhouse_node.query("SHOW DATABASES") - check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_1 FORMAT TSV", - "id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + check_query( + clickhouse_node, + "DESC test_database_alter_rename.test_table_1 FORMAT TSV", + "id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) mysql_node.query( - "CREATE TABLE test_database_alter_rename.test_table_2 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;") - check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_2 FORMAT TSV", - "id\tInt32\t\t\t\t\t\nrename_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") - mysql_node.query("ALTER TABLE test_database_alter_rename.test_table_2 RENAME COLUMN rename_column TO new_column_name") - check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_2 FORMAT TSV", - "id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + "CREATE TABLE test_database_alter_rename.test_table_2 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;" + ) + check_query( + clickhouse_node, + "DESC test_database_alter_rename.test_table_2 FORMAT TSV", 
+ "id\tInt32\t\t\t\t\t\nrename_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) + mysql_node.query( + "ALTER TABLE test_database_alter_rename.test_table_2 RENAME COLUMN rename_column TO new_column_name" + ) + check_query( + clickhouse_node, + "DESC test_database_alter_rename.test_table_2 FORMAT TSV", + "id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) - mysql_node.query("INSERT INTO test_database_alter_rename.test_table_2 VALUES(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)") - check_query(clickhouse_node, "SELECT * FROM test_database_alter_rename.test_table_2 ORDER BY id FORMAT TSV", - "1\t2\n3\t4\n5\t6\n7\t8\n9\t10\n") + mysql_node.query( + "INSERT INTO test_database_alter_rename.test_table_2 VALUES(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)" + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_alter_rename.test_table_2 ORDER BY id FORMAT TSV", + "1\t2\n3\t4\n5\t6\n7\t8\n9\t10\n", + ) clickhouse_node.query("DROP DATABASE test_database_alter_rename") mysql_node.query("DROP DATABASE test_database_alter_rename") -def alter_modify_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name): +def alter_modify_column_with_materialized_mysql_database( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_modify") clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_modify") - mysql_node.query("CREATE DATABASE test_database_alter_modify DEFAULT CHARACTER SET 'utf8'") + mysql_node.query( + "CREATE DATABASE test_database_alter_modify DEFAULT CHARACTER SET 'utf8'" + ) # maybe should test rename primary key? mysql_node.query( - "CREATE TABLE test_database_alter_modify.test_table_1 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;") + "CREATE TABLE test_database_alter_modify.test_table_1 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;" + ) - mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_1 MODIFY COLUMN modify_column INT") + mysql_node.query( + "ALTER TABLE test_database_alter_modify.test_table_1 MODIFY COLUMN modify_column INT" + ) # create mapping clickhouse_node.query( "CREATE DATABASE test_database_alter_modify ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_modify', 'root', 'clickhouse')".format( - service_name)) + service_name + ) + ) assert "test_database_alter_modify" in clickhouse_node.query("SHOW DATABASES") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", "test_table_1\n") - check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_1 FORMAT TSV", - "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", + "test_table_1\n", + ) + check_query( + clickhouse_node, + "DESC test_database_alter_modify.test_table_1 FORMAT TSV", + "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) mysql_node.query( - "CREATE TABLE test_database_alter_modify.test_table_2 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", 
"test_table_1\ntest_table_2\n") - check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV", - "id\tInt32\t\t\t\t\t\nmodify_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") - mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT") - check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV", - "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") - mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT FIRST") - check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV", - "modify_column\tNullable(Int32)\t\t\t\t\t\nid\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") - mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT AFTER id") - check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV", - "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + "CREATE TABLE test_database_alter_modify.test_table_2 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;" + ) + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", + "test_table_1\ntest_table_2\n", + ) + check_query( + clickhouse_node, + "DESC test_database_alter_modify.test_table_2 FORMAT TSV", + "id\tInt32\t\t\t\t\t\nmodify_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) + mysql_node.query( + "ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT" + ) + check_query( + clickhouse_node, + "DESC test_database_alter_modify.test_table_2 FORMAT TSV", + "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) + mysql_node.query( + "ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT FIRST" + ) + check_query( + clickhouse_node, + "DESC test_database_alter_modify.test_table_2 FORMAT TSV", + "modify_column\tNullable(Int32)\t\t\t\t\t\nid\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) + mysql_node.query( + "ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT AFTER id" + ) + check_query( + clickhouse_node, + "DESC test_database_alter_modify.test_table_2 FORMAT TSV", + "id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) - mysql_node.query("INSERT INTO test_database_alter_modify.test_table_2 VALUES(1, 2), (3, NULL)") - check_query(clickhouse_node, "SELECT * FROM test_database_alter_modify.test_table_2 ORDER BY id FORMAT TSV", "1\t2\n3\t\\N\n") + mysql_node.query( + "INSERT INTO test_database_alter_modify.test_table_2 VALUES(1, 2), (3, NULL)" + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_alter_modify.test_table_2 ORDER BY id FORMAT TSV", + "1\t2\n3\t\\N\n", + ) clickhouse_node.query("DROP DATABASE test_database_alter_modify") mysql_node.query("DROP DATABASE test_database_alter_modify") @@ -460,38 +804,76 @@ def 
alter_modify_column_with_materialized_mysql_database(clickhouse_node, mysql_ # def test_mysql_alter_change_column_for_materialized_mysql_database(started_cluster): # pass -def alter_rename_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name): + +def alter_rename_table_with_materialized_mysql_database( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS test_database_rename_table") clickhouse_node.query("DROP DATABASE IF EXISTS test_database_rename_table") - mysql_node.query("CREATE DATABASE test_database_rename_table DEFAULT CHARACTER SET 'utf8'") mysql_node.query( - "CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;") + "CREATE DATABASE test_database_rename_table DEFAULT CHARACTER SET 'utf8'" + ) + mysql_node.query( + "CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;" + ) mysql_node.query( - "ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_3") + "ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_3" + ) # create mapping clickhouse_node.query( "CREATE DATABASE test_database_rename_table ENGINE = MaterializedMySQL('{}:3306', 'test_database_rename_table', 'root', 'clickhouse')".format( - service_name)) + service_name + ) + ) assert "test_database_rename_table" in clickhouse_node.query("SHOW DATABASES") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_3\n") - check_query(clickhouse_node, "DESC test_database_rename_table.test_table_3 FORMAT TSV", - "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_rename_table FORMAT TSV", + "test_table_3\n", + ) + check_query( + clickhouse_node, + "DESC test_database_rename_table.test_table_3 FORMAT TSV", + "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) mysql_node.query( - "CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_1\ntest_table_3\n") - check_query(clickhouse_node, "DESC test_database_rename_table.test_table_1 FORMAT TSV", - "id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + "CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;" + ) + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_rename_table FORMAT TSV", + "test_table_1\ntest_table_3\n", + ) + check_query( + clickhouse_node, + "DESC test_database_rename_table.test_table_1 FORMAT TSV", + "id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) mysql_node.query( - "ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_4") - check_query(clickhouse_node, "SHOW TABLES FROM 
test_database_rename_table FORMAT TSV", "test_table_3\ntest_table_4\n") - check_query(clickhouse_node, "DESC test_database_rename_table.test_table_4 FORMAT TSV", - "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + "ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_4" + ) + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_rename_table FORMAT TSV", + "test_table_3\ntest_table_4\n", + ) + check_query( + clickhouse_node, + "DESC test_database_rename_table.test_table_4 FORMAT TSV", + "id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) - mysql_node.query("INSERT INTO test_database_rename_table.test_table_4 VALUES(1), (2), (3), (4), (5)") - check_query(clickhouse_node, "SELECT * FROM test_database_rename_table.test_table_4 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n") + mysql_node.query( + "INSERT INTO test_database_rename_table.test_table_4 VALUES(1), (2), (3), (4), (5)" + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_rename_table.test_table_4 ORDER BY id FORMAT TSV", + "1\n2\n3\n4\n5\n", + ) clickhouse_node.query("DROP DATABASE test_database_rename_table") mysql_node.query("DROP DATABASE test_database_rename_table") @@ -503,12 +885,16 @@ def query_event_with_empty_transaction(clickhouse_node, mysql_node, service_name mysql_node.query("CREATE DATABASE test_database_event") mysql_node.query("RESET MASTER") - mysql_node.query("CREATE TABLE test_database_event.t1(a INT NOT NULL PRIMARY KEY, b VARCHAR(255) DEFAULT 'BEGIN')") + mysql_node.query( + "CREATE TABLE test_database_event.t1(a INT NOT NULL PRIMARY KEY, b VARCHAR(255) DEFAULT 'BEGIN')" + ) mysql_node.query("INSERT INTO test_database_event.t1(a) VALUES(1)") clickhouse_node.query( "CREATE DATABASE test_database_event ENGINE = MaterializedMySQL('{}:3306', 'test_database_event', 'root', 'clickhouse')".format( - service_name)) + service_name + ) + ) # Reject one empty GTID QUERY event with 'BEGIN' and 'COMMIT' mysql_cursor = mysql_node.alloc_connection().cursor(pymysql.cursors.DictCursor) @@ -526,8 +912,14 @@ def query_event_with_empty_transaction(clickhouse_node, mysql_node, service_name mysql_node.query("INSERT INTO test_database_event.t1(a) VALUES(2)") mysql_node.query("/* start */ commit /* end */") - check_query(clickhouse_node, "SHOW TABLES FROM test_database_event FORMAT TSV", "t1\n") - check_query(clickhouse_node, "SELECT * FROM test_database_event.t1 ORDER BY a FORMAT TSV", "1\tBEGIN\n2\tBEGIN\n") + check_query( + clickhouse_node, "SHOW TABLES FROM test_database_event FORMAT TSV", "t1\n" + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_event.t1 ORDER BY a FORMAT TSV", + "1\tBEGIN\n2\tBEGIN\n", + ) clickhouse_node.query("DROP DATABASE test_database_event") mysql_node.query("DROP DATABASE test_database_event") @@ -538,7 +930,10 @@ def select_without_columns(clickhouse_node, mysql_node, service_name): mysql_node.query("CREATE DATABASE db") mysql_node.query("CREATE TABLE db.t (a INT PRIMARY KEY, b INT)") clickhouse_node.query( - "CREATE DATABASE db ENGINE = MaterializedMySQL('{}:3306', 'db', 'root', 'clickhouse') SETTINGS max_flush_data_time = 100000".format(service_name)) + "CREATE DATABASE db ENGINE = MaterializedMySQL('{}:3306', 'db', 'root', 'clickhouse') SETTINGS max_flush_data_time = 100000".format( + service_name + ) + ) check_query(clickhouse_node, 
"SHOW TABLES FROM db FORMAT TSV", "t\n") clickhouse_node.query("SYSTEM STOP MERGES db.t") clickhouse_node.query("CREATE VIEW v AS SELECT * FROM db.t") @@ -547,26 +942,51 @@ def select_without_columns(clickhouse_node, mysql_node, service_name): # We need to execute a DDL for flush data buffer mysql_node.query("CREATE TABLE db.temporary(a INT PRIMARY KEY, b INT)") - optimize_on_insert = clickhouse_node.query("SELECT value FROM system.settings WHERE name='optimize_on_insert'").strip() + optimize_on_insert = clickhouse_node.query( + "SELECT value FROM system.settings WHERE name='optimize_on_insert'" + ).strip() if optimize_on_insert == "0": res = ["3\n", "2\n", "2\n"] else: res = ["2\n", "2\n", "1\n"] - check_query(clickhouse_node, "SELECT count((_sign, _version)) FROM db.t FORMAT TSV", res[0]) + check_query( + clickhouse_node, "SELECT count((_sign, _version)) FROM db.t FORMAT TSV", res[0] + ) assert clickhouse_node.query("SELECT count(_sign) FROM db.t FORMAT TSV") == res[1] - assert_eq_with_retry(clickhouse_node, "SELECT count(_version) FROM db.t", res[2].strip(), sleep_time=2, retry_count=3) + assert_eq_with_retry( + clickhouse_node, + "SELECT count(_version) FROM db.t", + res[2].strip(), + sleep_time=2, + retry_count=3, + ) assert clickhouse_node.query("SELECT count() FROM db.t FORMAT TSV") == "1\n" assert clickhouse_node.query("SELECT count(*) FROM db.t FORMAT TSV") == "1\n" - assert clickhouse_node.query("SELECT count() FROM (SELECT * FROM db.t) FORMAT TSV") == "1\n" + assert ( + clickhouse_node.query("SELECT count() FROM (SELECT * FROM db.t) FORMAT TSV") + == "1\n" + ) assert clickhouse_node.query("SELECT count() FROM v FORMAT TSV") == "1\n" - assert clickhouse_node.query("SELECT count() FROM merge('db', 't') FORMAT TSV") == "1\n" - assert clickhouse_node.query("SELECT count() FROM remote('localhost', 'db', 't') FORMAT TSV") == "1\n" + assert ( + clickhouse_node.query("SELECT count() FROM merge('db', 't') FORMAT TSV") + == "1\n" + ) + assert ( + clickhouse_node.query( + "SELECT count() FROM remote('localhost', 'db', 't') FORMAT TSV" + ) + == "1\n" + ) assert clickhouse_node.query("SELECT _part FROM db.t FORMAT TSV") == "0_1_1_0\n" - assert clickhouse_node.query("SELECT _part FROM remote('localhost', 'db', 't') FORMAT TSV") == "0_1_1_0\n" - + assert ( + clickhouse_node.query( + "SELECT _part FROM remote('localhost', 'db', 't') FORMAT TSV" + ) + == "0_1_1_0\n" + ) clickhouse_node.query("DROP VIEW v") clickhouse_node.query("DROP DATABASE db") @@ -576,45 +996,79 @@ def select_without_columns(clickhouse_node, mysql_node, service_name): def insert_with_modify_binlog_checksum(clickhouse_node, mysql_node, service_name): mysql_node.query("CREATE DATABASE test_checksum") mysql_node.query("CREATE TABLE test_checksum.t (a INT PRIMARY KEY, b varchar(200))") - clickhouse_node.query("CREATE DATABASE test_checksum ENGINE = MaterializedMySQL('{}:3306', 'test_checksum', 'root', 'clickhouse')".format(service_name)) + clickhouse_node.query( + "CREATE DATABASE test_checksum ENGINE = MaterializedMySQL('{}:3306', 'test_checksum', 'root', 'clickhouse')".format( + service_name + ) + ) check_query(clickhouse_node, "SHOW TABLES FROM test_checksum FORMAT TSV", "t\n") mysql_node.query("INSERT INTO test_checksum.t VALUES(1, '1111')") - check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n") + check_query( + clickhouse_node, + "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", + "1\t1111\n", + ) mysql_node.query("SET GLOBAL binlog_checksum=NONE") mysql_node.query("INSERT 
INTO test_checksum.t VALUES(2, '2222')") - check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n") + check_query( + clickhouse_node, + "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", + "1\t1111\n2\t2222\n", + ) mysql_node.query("SET GLOBAL binlog_checksum=CRC32") mysql_node.query("INSERT INTO test_checksum.t VALUES(3, '3333')") - check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n3\t3333\n") + check_query( + clickhouse_node, + "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", + "1\t1111\n2\t2222\n3\t3333\n", + ) clickhouse_node.query("DROP DATABASE test_checksum") mysql_node.query("DROP DATABASE test_checksum") -def err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name): +def err_sync_user_privs_with_materialized_mysql_database( + clickhouse_node, mysql_node, service_name +): clickhouse_node.query("DROP DATABASE IF EXISTS priv_err_db") mysql_node.query("DROP DATABASE IF EXISTS priv_err_db") mysql_node.query("CREATE DATABASE priv_err_db DEFAULT CHARACTER SET 'utf8'") - mysql_node.query("CREATE TABLE priv_err_db.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;") + mysql_node.query( + "CREATE TABLE priv_err_db.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;" + ) mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(1);") mysql_node.create_min_priv_user("test", "123") mysql_node.result("SHOW GRANTS FOR 'test'@'%';") clickhouse_node.query( "CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format( - service_name)) + service_name + ) + ) - check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "1\n", 30, 5) + check_query( + clickhouse_node, + "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", + "1\n", + 30, + 5, + ) mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(2);") - check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "2\n") + check_query( + clickhouse_node, + "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", + "2\n", + ) clickhouse_node.query("DROP DATABASE priv_err_db;") mysql_node.query("REVOKE REPLICATION SLAVE ON *.* FROM 'test'@'%'") clickhouse_node.query( "CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format( - service_name)) + service_name + ) + ) assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES") assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db") clickhouse_node.query("DROP DATABASE priv_err_db") @@ -622,7 +1076,9 @@ def err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, mysql_ mysql_node.query("REVOKE REPLICATION CLIENT, RELOAD ON *.* FROM 'test'@'%'") clickhouse_node.query( "CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format( - service_name)) + service_name + ) + ) assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES") assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db") clickhouse_node.query_with_retry("DETACH DATABASE priv_err_db") @@ -633,7 +1089,7 @@ def err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, mysql_ with pytest.raises(QueryRuntimeException) as exception: clickhouse_node.query("ATTACH DATABASE priv_err_db") - assert 'MySQL SYNC USER ACCESS ERR:' in str(exception.value) + assert "MySQL SYNC USER ACCESS ERR:" in 
str(exception.value) assert "priv_err_db" not in clickhouse_node.query("SHOW DATABASES") mysql_node.query("GRANT SELECT ON priv_err_db.* TO 'test'@'%'") @@ -646,52 +1102,101 @@ def err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, mysql_ mysql_node.query("DROP USER 'test'@'%'") -def restore_instance_mysql_connections(clickhouse_node, pm, action='REJECT'): +def restore_instance_mysql_connections(clickhouse_node, pm, action="REJECT"): pm._check_instance(clickhouse_node) - pm._delete_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': action}) - pm._delete_rule({'destination': clickhouse_node.ip_address, 'source_port': 3306, 'action': action}) + pm._delete_rule( + { + "source": clickhouse_node.ip_address, + "destination_port": 3306, + "action": action, + } + ) + pm._delete_rule( + { + "destination": clickhouse_node.ip_address, + "source_port": 3306, + "action": action, + } + ) time.sleep(5) -def drop_instance_mysql_connections(clickhouse_node, pm, action='REJECT'): + +def drop_instance_mysql_connections(clickhouse_node, pm, action="REJECT"): pm._check_instance(clickhouse_node) - pm._add_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': action}) - pm._add_rule({'destination': clickhouse_node.ip_address, 'source_port': 3306, 'action': action}) + pm._add_rule( + { + "source": clickhouse_node.ip_address, + "destination_port": 3306, + "action": action, + } + ) + pm._add_rule( + { + "destination": clickhouse_node.ip_address, + "source_port": 3306, + "action": action, + } + ) time.sleep(5) + def network_partition_test(clickhouse_node, mysql_node, service_name): clickhouse_node.query("DROP DATABASE IF EXISTS test_database_network") clickhouse_node.query("DROP DATABASE IF EXISTS test") mysql_node.query("DROP DATABASE IF EXISTS test_database_network") mysql_node.query("DROP DATABASE IF EXISTS test") mysql_node.query("CREATE DATABASE test_database_network;") - mysql_node.query("CREATE TABLE test_database_network.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;") + mysql_node.query( + "CREATE TABLE test_database_network.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) mysql_node.query("CREATE DATABASE test;") clickhouse_node.query( - "CREATE DATABASE test_database_network ENGINE = MaterializedMySQL('{}:3306', 'test_database_network', 'root', 'clickhouse')".format(service_name)) - check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table", '') + "CREATE DATABASE test_database_network ENGINE = MaterializedMySQL('{}:3306', 'test_database_network', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table", "") with PartitionManager() as pm: drop_instance_mysql_connections(clickhouse_node, pm) - mysql_node.query('INSERT INTO test_database_network.test_table VALUES(1)') - check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table", '') + mysql_node.query("INSERT INTO test_database_network.test_table VALUES(1)") + check_query( + clickhouse_node, "SELECT * FROM test_database_network.test_table", "" + ) with pytest.raises(QueryRuntimeException) as exception: clickhouse_node.query( - "CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name)) + "CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format( + service_name + ) + ) assert "Can't connect to MySQL server" in 
str(exception.value) restore_instance_mysql_connections(clickhouse_node, pm) - check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table FORMAT TSV", '1\n') + check_query( + clickhouse_node, + "SELECT * FROM test_database_network.test_table FORMAT TSV", + "1\n", + ) clickhouse_node.query( - "CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name)) - check_query(clickhouse_node, "SHOW TABLES FROM test_database_network FORMAT TSV", "test_table\n") + "CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query( + clickhouse_node, + "SHOW TABLES FROM test_database_network FORMAT TSV", + "test_table\n", + ) - mysql_node.query("CREATE TABLE test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;") + mysql_node.query( + "CREATE TABLE test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) check_query(clickhouse_node, "SHOW TABLES FROM test FORMAT TSV", "test\n") clickhouse_node.query("DROP DATABASE test_database_network") @@ -706,28 +1211,53 @@ def mysql_kill_sync_thread_restore_test(clickhouse_node, mysql_node, service_nam mysql_node.query("DROP DATABASE IF EXISTS test_database;") mysql_node.query("CREATE DATABASE test_database;") - mysql_node.query("CREATE TABLE test_database.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;") + mysql_node.query( + "CREATE TABLE test_database.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) mysql_node.query("INSERT INTO test_database.test_table VALUES (1)") mysql_node.query("DROP DATABASE IF EXISTS test_database_auto;") mysql_node.query("CREATE DATABASE test_database_auto;") - mysql_node.query("CREATE TABLE test_database_auto.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;") + mysql_node.query( + "CREATE TABLE test_database_auto.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (11)") - clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format(service_name)) - clickhouse_node.query("CREATE DATABASE test_database_auto ENGINE = MaterializedMySQL('{}:3306', 'test_database_auto', 'root', 'clickhouse')".format(service_name)) - - check_query(clickhouse_node, "SELECT * FROM test_database.test_table FORMAT TSV", '1\n') - check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table FORMAT TSV", '11\n') + clickhouse_node.query( + "CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format( + service_name + ) + ) + clickhouse_node.query( + "CREATE DATABASE test_database_auto ENGINE = MaterializedMySQL('{}:3306', 'test_database_auto', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query( + clickhouse_node, "SELECT * FROM test_database.test_table FORMAT TSV", "1\n" + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_auto.test_table FORMAT TSV", + "11\n", + ) # When ClickHouse has dumped all the historical data, we can query it in ClickHouse, # but that does not mean the sync thread is already connected to MySQL. # So after ClickHouse can query the data, insert some rows into MySQL and use them to re-check that the sync succeeded. 
mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (22)") mysql_node.query("INSERT INTO test_database.test_table VALUES (2)") - check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n') - check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n') + check_query( + clickhouse_node, + "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", + "1\n2\n", + ) + check_query( + clickhouse_node, + "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", + "11\n22\n", + ) get_sync_id_query = "SELECT id FROM information_schema.processlist WHERE state LIKE '% has sent all binlog to % waiting for more updates%';" result = mysql_node.query_and_get_data(get_sync_id_query) @@ -746,13 +1276,25 @@ def mysql_kill_sync_thread_restore_test(clickhouse_node, mysql_node, service_nam clickhouse_node.query_with_retry("DETACH DATABASE test_database") clickhouse_node.query("ATTACH DATABASE test_database") - check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n') + check_query( + clickhouse_node, + "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", + "1\n2\n", + ) mysql_node.query("INSERT INTO test_database.test_table VALUES (3)") - check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n3\n') + check_query( + clickhouse_node, + "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", + "1\n2\n3\n", + ) mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (33)") - check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n33\n') + check_query( + clickhouse_node, + "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", + "11\n22\n33\n", + ) clickhouse_node.query("DROP DATABASE test_database") clickhouse_node.query("DROP DATABASE test_database_auto") @@ -764,14 +1306,25 @@ def mysql_killed_while_insert(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS kill_mysql_while_insert") clickhouse_node.query("DROP DATABASE IF EXISTS kill_mysql_while_insert") mysql_node.query("CREATE DATABASE kill_mysql_while_insert") - mysql_node.query("CREATE TABLE kill_mysql_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;") - clickhouse_node.query("CREATE DATABASE kill_mysql_while_insert ENGINE = MaterializedMySQL('{}:3306', 'kill_mysql_while_insert', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format(service_name)) - check_query(clickhouse_node, "SHOW TABLES FROM kill_mysql_while_insert FORMAT TSV", 'test\n') + mysql_node.query( + "CREATE TABLE kill_mysql_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) + clickhouse_node.query( + "CREATE DATABASE kill_mysql_while_insert ENGINE = MaterializedMySQL('{}:3306', 'kill_mysql_while_insert', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format( + service_name + ) + ) + check_query( + clickhouse_node, "SHOW TABLES FROM kill_mysql_while_insert FORMAT TSV", "test\n" + ) try: + def insert(num): for i in range(num): - query = "INSERT INTO kill_mysql_while_insert.test VALUES({v});".format( v = i + 1 ) + query = "INSERT INTO kill_mysql_while_insert.test VALUES({v});".format( + v=i + 1 + ) mysql_node.query(query) t = threading.Thread(target=insert, args=(10000,)) @@ -788,10 +1341,14 @@ def mysql_killed_while_insert(clickhouse_node, 
mysql_node, service_name): clickhouse_node.query_with_retry("DETACH DATABASE kill_mysql_while_insert") clickhouse_node.query("ATTACH DATABASE kill_mysql_while_insert") - result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_mysql_while_insert.test") + result = mysql_node.query_and_get_data( + "SELECT COUNT(1) FROM kill_mysql_while_insert.test" + ) for row in result: - res = str(row[0]) + '\n' - check_query(clickhouse_node, "SELECT count() FROM kill_mysql_while_insert.test", res) + res = str(row[0]) + "\n" + check_query( + clickhouse_node, "SELECT count() FROM kill_mysql_while_insert.test", res + ) mysql_node.query("DROP DATABASE kill_mysql_while_insert") clickhouse_node.query("DROP DATABASE kill_mysql_while_insert") @@ -800,13 +1357,25 @@ def mysql_killed_while_insert(clickhouse_node, mysql_node, service_name): def clickhouse_killed_while_insert(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS kill_clickhouse_while_insert") mysql_node.query("CREATE DATABASE kill_clickhouse_while_insert") - mysql_node.query("CREATE TABLE kill_clickhouse_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;") - clickhouse_node.query("CREATE DATABASE kill_clickhouse_while_insert ENGINE = MaterializedMySQL('{}:3306', 'kill_clickhouse_while_insert', 'root', 'clickhouse')".format(service_name)) - check_query(clickhouse_node, "SHOW TABLES FROM kill_clickhouse_while_insert FORMAT TSV", 'test\n') + mysql_node.query( + "CREATE TABLE kill_clickhouse_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) + clickhouse_node.query( + "CREATE DATABASE kill_clickhouse_while_insert ENGINE = MaterializedMySQL('{}:3306', 'kill_clickhouse_while_insert', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query( + clickhouse_node, + "SHOW TABLES FROM kill_clickhouse_while_insert FORMAT TSV", + "test\n", + ) def insert(num): for i in range(num): - query = "INSERT INTO kill_clickhouse_while_insert.test VALUES({v});".format( v = i + 1 ) + query = "INSERT INTO kill_clickhouse_while_insert.test VALUES({v});".format( + v=i + 1 + ) mysql_node.query(query) t = threading.Thread(target=insert, args=(1000,)) @@ -816,76 +1385,154 @@ def clickhouse_killed_while_insert(clickhouse_node, mysql_node, service_name): clickhouse_node.restart_clickhouse(20, kill=True) t.join() - result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_clickhouse_while_insert.test") + result = mysql_node.query_and_get_data( + "SELECT COUNT(1) FROM kill_clickhouse_while_insert.test" + ) for row in result: - res = str(row[0]) + '\n' - check_query(clickhouse_node, "SELECT count() FROM kill_clickhouse_while_insert.test FORMAT TSV", res) + res = str(row[0]) + "\n" + check_query( + clickhouse_node, + "SELECT count() FROM kill_clickhouse_while_insert.test FORMAT TSV", + res, + ) mysql_node.query("DROP DATABASE kill_clickhouse_while_insert") clickhouse_node.query("DROP DATABASE kill_clickhouse_while_insert") + def utf8mb4_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS utf8mb4_test") clickhouse_node.query("DROP DATABASE IF EXISTS utf8mb4_test") mysql_node.query("CREATE DATABASE utf8mb4_test") - mysql_node.query("CREATE TABLE utf8mb4_test.test (id INT(11) NOT NULL PRIMARY KEY, name VARCHAR(255)) ENGINE=InnoDB DEFAULT CHARACTER SET utf8mb4") + mysql_node.query( + "CREATE TABLE utf8mb4_test.test (id INT(11) NOT NULL PRIMARY KEY, name VARCHAR(255)) ENGINE=InnoDB DEFAULT CHARACTER SET utf8mb4" + ) 
mysql_node.query("INSERT INTO utf8mb4_test.test VALUES(1, '🦄'),(2, '\u2601')") - clickhouse_node.query("CREATE DATABASE utf8mb4_test ENGINE = MaterializedMySQL('{}:3306', 'utf8mb4_test', 'root', 'clickhouse')".format(service_name)) + clickhouse_node.query( + "CREATE DATABASE utf8mb4_test ENGINE = MaterializedMySQL('{}:3306', 'utf8mb4_test', 'root', 'clickhouse')".format( + service_name + ) + ) check_query(clickhouse_node, "SHOW TABLES FROM utf8mb4_test FORMAT TSV", "test\n") - check_query(clickhouse_node, "SELECT id, name FROM utf8mb4_test.test ORDER BY id", "1\t\U0001F984\n2\t\u2601\n") + check_query( + clickhouse_node, + "SELECT id, name FROM utf8mb4_test.test ORDER BY id", + "1\t\U0001F984\n2\t\u2601\n", + ) + def system_parts_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS system_parts_test") clickhouse_node.query("DROP DATABASE IF EXISTS system_parts_test") mysql_node.query("CREATE DATABASE system_parts_test") - mysql_node.query("CREATE TABLE system_parts_test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;") + mysql_node.query( + "CREATE TABLE system_parts_test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) mysql_node.query("INSERT INTO system_parts_test.test VALUES(1),(2),(3)") + def check_active_parts(num): - check_query(clickhouse_node, "SELECT count() FROM system.parts WHERE database = 'system_parts_test' AND table = 'test' AND active = 1", "{}\n".format(num)) - clickhouse_node.query("CREATE DATABASE system_parts_test ENGINE = MaterializedMySQL('{}:3306', 'system_parts_test', 'root', 'clickhouse')".format(service_name)) + check_query( + clickhouse_node, + "SELECT count() FROM system.parts WHERE database = 'system_parts_test' AND table = 'test' AND active = 1", + "{}\n".format(num), + ) + + clickhouse_node.query( + "CREATE DATABASE system_parts_test ENGINE = MaterializedMySQL('{}:3306', 'system_parts_test', 'root', 'clickhouse')".format( + service_name + ) + ) check_active_parts(1) mysql_node.query("INSERT INTO system_parts_test.test VALUES(4),(5),(6)") check_active_parts(2) clickhouse_node.query("OPTIMIZE TABLE system_parts_test.test") check_active_parts(1) + def multi_table_update_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS multi_table_update") clickhouse_node.query("DROP DATABASE IF EXISTS multi_table_update") mysql_node.query("CREATE DATABASE multi_table_update") - mysql_node.query("CREATE TABLE multi_table_update.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))") - mysql_node.query("CREATE TABLE multi_table_update.b (id INT(11) NOT NULL PRIMARY KEY, othervalue VARCHAR(255))") + mysql_node.query( + "CREATE TABLE multi_table_update.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))" + ) + mysql_node.query( + "CREATE TABLE multi_table_update.b (id INT(11) NOT NULL PRIMARY KEY, othervalue VARCHAR(255))" + ) mysql_node.query("INSERT INTO multi_table_update.a VALUES(1, 'foo')") mysql_node.query("INSERT INTO multi_table_update.b VALUES(1, 'bar')") - clickhouse_node.query("CREATE DATABASE multi_table_update ENGINE = MaterializedMySQL('{}:3306', 'multi_table_update', 'root', 'clickhouse')".format(service_name)) + clickhouse_node.query( + "CREATE DATABASE multi_table_update ENGINE = MaterializedMySQL('{}:3306', 'multi_table_update', 'root', 'clickhouse')".format( + service_name + ) + ) check_query(clickhouse_node, "SHOW TABLES FROM multi_table_update", "a\nb\n") - mysql_node.query("UPDATE multi_table_update.a, multi_table_update.b SET 
value='baz', othervalue='quux' where a.id=b.id") + mysql_node.query( + "UPDATE multi_table_update.a, multi_table_update.b SET value='baz', othervalue='quux' where a.id=b.id" + ) check_query(clickhouse_node, "SELECT * FROM multi_table_update.a", "1\tbaz\n") check_query(clickhouse_node, "SELECT * FROM multi_table_update.b", "1\tquux\n") + def system_tables_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS system_tables_test") clickhouse_node.query("DROP DATABASE IF EXISTS system_tables_test") mysql_node.query("CREATE DATABASE system_tables_test") - mysql_node.query("CREATE TABLE system_tables_test.test (id int NOT NULL PRIMARY KEY) ENGINE=InnoDB") - clickhouse_node.query("CREATE DATABASE system_tables_test ENGINE = MaterializedMySQL('{}:3306', 'system_tables_test', 'root', 'clickhouse')".format(service_name)) - check_query(clickhouse_node, "SELECT partition_key, sorting_key, primary_key FROM system.tables WHERE database = 'system_tables_test' AND name = 'test'", "intDiv(id, 4294967)\tid\tid\n") + mysql_node.query( + "CREATE TABLE system_tables_test.test (id int NOT NULL PRIMARY KEY) ENGINE=InnoDB" + ) + clickhouse_node.query( + "CREATE DATABASE system_tables_test ENGINE = MaterializedMySQL('{}:3306', 'system_tables_test', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query( + clickhouse_node, + "SELECT partition_key, sorting_key, primary_key FROM system.tables WHERE database = 'system_tables_test' AND name = 'test'", + "intDiv(id, 4294967)\tid\tid\n", + ) + def materialize_with_column_comments_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS materialize_with_column_comments_test") - clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_column_comments_test") + clickhouse_node.query( + "DROP DATABASE IF EXISTS materialize_with_column_comments_test" + ) mysql_node.query("CREATE DATABASE materialize_with_column_comments_test") - mysql_node.query("CREATE TABLE materialize_with_column_comments_test.test (id int NOT NULL PRIMARY KEY, value VARCHAR(255) COMMENT 'test comment') ENGINE=InnoDB") - clickhouse_node.query("CREATE DATABASE materialize_with_column_comments_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_column_comments_test', 'root', 'clickhouse')".format(service_name)) - check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\ttest comment\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") - mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test MODIFY value VARCHAR(255) COMMENT 'comment test'") - check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") - mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test ADD value2 int COMMENT 'test comment 2'") - check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\nvalue2\tNullable(Int32)\t\t\ttest comment 2\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + mysql_node.query( + "CREATE TABLE materialize_with_column_comments_test.test (id int NOT NULL PRIMARY KEY, value VARCHAR(255) COMMENT 'test comment') ENGINE=InnoDB" + ) + clickhouse_node.query( + "CREATE 
DATABASE materialize_with_column_comments_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_column_comments_test', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query( + clickhouse_node, + "DESCRIBE TABLE materialize_with_column_comments_test.test", + "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\ttest comment\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) + mysql_node.query( + "ALTER TABLE materialize_with_column_comments_test.test MODIFY value VARCHAR(255) COMMENT 'comment test'" + ) + check_query( + clickhouse_node, + "DESCRIBE TABLE materialize_with_column_comments_test.test", + "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) + mysql_node.query( + "ALTER TABLE materialize_with_column_comments_test.test ADD value2 int COMMENT 'test comment 2'" + ) + check_query( + clickhouse_node, + "DESCRIBE TABLE materialize_with_column_comments_test.test", + "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\nvalue2\tNullable(Int32)\t\t\ttest comment 2\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) clickhouse_node.query("DROP DATABASE materialize_with_column_comments_test") mysql_node.query("DROP DATABASE materialize_with_column_comments_test") + def materialize_with_enum8_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test") clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test") @@ -894,20 +1541,49 @@ def materialize_with_enum8_test(clickhouse_node, mysql_node, service_name): enum8_values = "" enum8_values_with_backslash = "" for i in range(1, enum8_values_count): - enum8_values += '\'' + str(i) + "\', " - enum8_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", " - enum8_values += '\'' + str(enum8_values_count) + '\'' - enum8_values_with_backslash += "\\\'" + str(enum8_values_count) +"\\\' = " + str(enum8_values_count) - mysql_node.query("CREATE TABLE materialize_with_enum8_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB") - mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (1, '1'),(2, '2')") - clickhouse_node.query("CREATE DATABASE materialize_with_enum8_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_enum8_test', 'root', 'clickhouse')".format(service_name)) - check_query(clickhouse_node, "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", "1\n2\n") - mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (3, '127')") - check_query(clickhouse_node, "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", "1\n2\n127\n") - check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum8_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + enum8_values += "'" + str(i) + "', " + enum8_values_with_backslash += "\\'" + str(i) + "\\' = " + str(i) + ", " + enum8_values += "'" + str(enum8_values_count) + "'" + enum8_values_with_backslash += ( + "\\'" + str(enum8_values_count) + "\\' = " + str(enum8_values_count) + ) + mysql_node.query( + "CREATE TABLE materialize_with_enum8_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + + enum8_values + + ")) ENGINE=InnoDB" + ) + mysql_node.query( + "INSERT INTO 
materialize_with_enum8_test.test (id, value) VALUES (1, '1'),(2, '2')" + ) + clickhouse_node.query( + "CREATE DATABASE materialize_with_enum8_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_enum8_test', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query( + clickhouse_node, + "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", + "1\n2\n", + ) + mysql_node.query( + "INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (3, '127')" + ) + check_query( + clickhouse_node, + "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", + "1\n2\n127\n", + ) + check_query( + clickhouse_node, + "DESCRIBE TABLE materialize_with_enum8_test.test", + "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + + enum8_values_with_backslash + + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) clickhouse_node.query("DROP DATABASE materialize_with_enum8_test") mysql_node.query("DROP DATABASE materialize_with_enum8_test") + def materialize_with_enum16_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test") clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test") @@ -916,20 +1592,49 @@ def materialize_with_enum16_test(clickhouse_node, mysql_node, service_name): enum16_values = "" enum16_values_with_backslash = "" for i in range(1, enum16_values_count): - enum16_values += '\'' + str(i) + "\', " - enum16_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", " - enum16_values += '\'' + str(enum16_values_count) + '\'' - enum16_values_with_backslash += "\\\'" + str(enum16_values_count) +"\\\' = " + str(enum16_values_count) - mysql_node.query("CREATE TABLE materialize_with_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum16_values + ")) ENGINE=InnoDB") - mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')") - clickhouse_node.query("CREATE DATABASE materialize_with_enum16_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_enum16_test', 'root', 'clickhouse')".format(service_name)) - check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n") - mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (3, '500')") - check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n500\n") - check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + enum16_values += "'" + str(i) + "', " + enum16_values_with_backslash += "\\'" + str(i) + "\\' = " + str(i) + ", " + enum16_values += "'" + str(enum16_values_count) + "'" + enum16_values_with_backslash += ( + "\\'" + str(enum16_values_count) + "\\' = " + str(enum16_values_count) + ) + mysql_node.query( + "CREATE TABLE materialize_with_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + + enum16_values + + ")) ENGINE=InnoDB" + ) + mysql_node.query( + "INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')" + ) + clickhouse_node.query( + "CREATE DATABASE materialize_with_enum16_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_enum16_test', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query( + clickhouse_node, + "SELECT value FROM 
materialize_with_enum16_test.test ORDER BY id", + "1\n2\n", + ) + mysql_node.query( + "INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (3, '500')" + ) + check_query( + clickhouse_node, + "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", + "1\n2\n500\n", + ) + check_query( + clickhouse_node, + "DESCRIBE TABLE materialize_with_enum16_test.test", + "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + + enum16_values_with_backslash + + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) clickhouse_node.query("DROP DATABASE materialize_with_enum16_test") mysql_node.query("DROP DATABASE materialize_with_enum16_test") + def alter_enum8_to_enum16_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test") clickhouse_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test") @@ -939,106 +1644,219 @@ def alter_enum8_to_enum16_test(clickhouse_node, mysql_node, service_name): enum8_values = "" enum8_values_with_backslash = "" for i in range(1, enum8_values_count): - enum8_values += '\'' + str(i) + "\', " - enum8_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", " - enum8_values += '\'' + str(enum8_values_count) + '\'' - enum8_values_with_backslash += "\\\'" + str(enum8_values_count) +"\\\' = " + str(enum8_values_count) - mysql_node.query("CREATE TABLE alter_enum8_to_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB") - mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')") - clickhouse_node.query("CREATE DATABASE alter_enum8_to_enum16_test ENGINE = MaterializedMySQL('{}:3306', 'alter_enum8_to_enum16_test', 'root', 'clickhouse')".format(service_name)) - mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (3, '75')") - check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n") - check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") + enum8_values += "'" + str(i) + "', " + enum8_values_with_backslash += "\\'" + str(i) + "\\' = " + str(i) + ", " + enum8_values += "'" + str(enum8_values_count) + "'" + enum8_values_with_backslash += ( + "\\'" + str(enum8_values_count) + "\\' = " + str(enum8_values_count) + ) + mysql_node.query( + "CREATE TABLE alter_enum8_to_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + + enum8_values + + ")) ENGINE=InnoDB" + ) + mysql_node.query( + "INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')" + ) + clickhouse_node.query( + "CREATE DATABASE alter_enum8_to_enum16_test ENGINE = MaterializedMySQL('{}:3306', 'alter_enum8_to_enum16_test', 'root', 'clickhouse')".format( + service_name + ) + ) + mysql_node.query( + "INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (3, '75')" + ) + check_query( + clickhouse_node, + "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", + "1\n2\n75\n", + ) + check_query( + clickhouse_node, + "DESCRIBE TABLE alter_enum8_to_enum16_test.test", + "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + + enum8_values_with_backslash + + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) enum16_values_count = 600 enum16_values = "" 
enum16_values_with_backslash = "" for i in range(1, enum16_values_count): - enum16_values += '\'' + str(i) + "\', " - enum16_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", " - enum16_values += '\'' + str(enum16_values_count) + '\'' - enum16_values_with_backslash += "\\\'" + str(enum16_values_count) +"\\\' = " + str(enum16_values_count) - mysql_node.query("ALTER TABLE alter_enum8_to_enum16_test.test MODIFY COLUMN value ENUM(" + enum16_values + ")") - check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n") - mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (4, '500')") - check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n500\n") + enum16_values += "'" + str(i) + "', " + enum16_values_with_backslash += "\\'" + str(i) + "\\' = " + str(i) + ", " + enum16_values += "'" + str(enum16_values_count) + "'" + enum16_values_with_backslash += ( + "\\'" + str(enum16_values_count) + "\\' = " + str(enum16_values_count) + ) + mysql_node.query( + "ALTER TABLE alter_enum8_to_enum16_test.test MODIFY COLUMN value ENUM(" + + enum16_values + + ")" + ) + check_query( + clickhouse_node, + "DESCRIBE TABLE alter_enum8_to_enum16_test.test", + "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + + enum16_values_with_backslash + + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n", + ) + mysql_node.query( + "INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (4, '500')" + ) + check_query( + clickhouse_node, + "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", + "1\n2\n75\n500\n", + ) clickhouse_node.query("DROP DATABASE alter_enum8_to_enum16_test") mysql_node.query("DROP DATABASE alter_enum8_to_enum16_test") + def move_to_prewhere_and_column_filtering(clickhouse_node, mysql_node, service_name): clickhouse_node.query("DROP DATABASE IF EXISTS cond_on_key_col") mysql_node.query("DROP DATABASE IF EXISTS cond_on_key_col") mysql_node.query("CREATE DATABASE cond_on_key_col") - clickhouse_node.query("CREATE DATABASE cond_on_key_col ENGINE = MaterializedMySQL('{}:3306', 'cond_on_key_col', 'root', 'clickhouse')".format(service_name)) - mysql_node.query("create table cond_on_key_col.products (id int primary key, product_id int not null, catalog_id int not null, brand_id int not null, name text)") - mysql_node.query("insert into cond_on_key_col.products (id, name, catalog_id, brand_id, product_id) values (915, 'ertyui', 5287, 15837, 0), (990, 'wer', 1053, 24390, 1), (781, 'qwerty', 1041, 1176, 2);") - mysql_node.query("create table cond_on_key_col.test (id int(11) NOT NULL AUTO_INCREMENT, a int(11) DEFAULT NULL, b int(11) DEFAULT NULL, PRIMARY KEY (id)) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8mb4;") + clickhouse_node.query( + "CREATE DATABASE cond_on_key_col ENGINE = MaterializedMySQL('{}:3306', 'cond_on_key_col', 'root', 'clickhouse')".format( + service_name + ) + ) + mysql_node.query( + "create table cond_on_key_col.products (id int primary key, product_id int not null, catalog_id int not null, brand_id int not null, name text)" + ) + mysql_node.query( + "insert into cond_on_key_col.products (id, name, catalog_id, brand_id, product_id) values (915, 'ertyui', 5287, 15837, 0), (990, 'wer', 1053, 24390, 1), (781, 'qwerty', 1041, 1176, 2);" + ) + 
mysql_node.query( + "create table cond_on_key_col.test (id int(11) NOT NULL AUTO_INCREMENT, a int(11) DEFAULT NULL, b int(11) DEFAULT NULL, PRIMARY KEY (id)) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8mb4;" + ) mysql_node.query("insert into cond_on_key_col.test values (42, 123, 1);") - mysql_node.query("CREATE TABLE cond_on_key_col.balance_change_record (id bigint(20) NOT NULL AUTO_INCREMENT, type tinyint(4) DEFAULT NULL, value decimal(10,4) DEFAULT NULL, time timestamp NULL DEFAULT NULL, " - "initiative_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, passivity_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, " - "person_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, tenant_code varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, " - "created_time timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', updated_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, " - "value_snapshot decimal(10,4) DEFAULT NULL, PRIMARY KEY (id), KEY balance_change_record_initiative_id (person_id) USING BTREE, " - "KEY type (type) USING BTREE, KEY balance_change_record_type (time) USING BTREE, KEY initiative_id (initiative_id) USING BTREE, " - "KEY balance_change_record_tenant_code (passivity_id) USING BTREE, KEY tenant_code (tenant_code) USING BTREE) ENGINE=InnoDB AUTO_INCREMENT=1691049 DEFAULT CHARSET=utf8") - mysql_node.query("insert into cond_on_key_col.balance_change_record values (123, 1, 3.14, null, 'qwe', 'asd', 'zxc', 'rty', null, null, 2.7);") - mysql_node.query("CREATE TABLE cond_on_key_col.test1 (id int(11) NOT NULL AUTO_INCREMENT, c1 varchar(32) NOT NULL, c2 varchar(32), PRIMARY KEY (id)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4") - mysql_node.query("insert into cond_on_key_col.test1(c1,c2) values ('a','b'), ('c', null);") - check_query(clickhouse_node, "SELECT DISTINCT P.id, P.name, P.catalog_id FROM cond_on_key_col.products P WHERE P.name ILIKE '%e%' and P.catalog_id=5287", '915\tertyui\t5287\n') - check_query(clickhouse_node, "select count(a) from cond_on_key_col.test where b = 1;", "1\n") - check_query(clickhouse_node, "select id from cond_on_key_col.balance_change_record where type=1;", "123\n") - check_query(clickhouse_node, "select count(c1) from cond_on_key_col.test1 where c2='b';", "1\n") + mysql_node.query( + "CREATE TABLE cond_on_key_col.balance_change_record (id bigint(20) NOT NULL AUTO_INCREMENT, type tinyint(4) DEFAULT NULL, value decimal(10,4) DEFAULT NULL, time timestamp NULL DEFAULT NULL, " + "initiative_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, passivity_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, " + "person_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, tenant_code varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, " + "created_time timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', updated_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, " + "value_snapshot decimal(10,4) DEFAULT NULL, PRIMARY KEY (id), KEY balance_change_record_initiative_id (person_id) USING BTREE, " + "KEY type (type) USING BTREE, KEY balance_change_record_type (time) USING BTREE, KEY initiative_id (initiative_id) USING BTREE, " + "KEY balance_change_record_tenant_code (passivity_id) USING BTREE, KEY tenant_code (tenant_code) USING BTREE) ENGINE=InnoDB AUTO_INCREMENT=1691049 DEFAULT CHARSET=utf8" + ) + mysql_node.query( + "insert into 
cond_on_key_col.balance_change_record values (123, 1, 3.14, null, 'qwe', 'asd', 'zxc', 'rty', null, null, 2.7);" + ) + mysql_node.query( + "CREATE TABLE cond_on_key_col.test1 (id int(11) NOT NULL AUTO_INCREMENT, c1 varchar(32) NOT NULL, c2 varchar(32), PRIMARY KEY (id)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4" + ) + mysql_node.query( + "insert into cond_on_key_col.test1(c1,c2) values ('a','b'), ('c', null);" + ) + check_query( + clickhouse_node, + "SELECT DISTINCT P.id, P.name, P.catalog_id FROM cond_on_key_col.products P WHERE P.name ILIKE '%e%' and P.catalog_id=5287", + "915\tertyui\t5287\n", + ) + check_query( + clickhouse_node, "select count(a) from cond_on_key_col.test where b = 1;", "1\n" + ) + check_query( + clickhouse_node, + "select id from cond_on_key_col.balance_change_record where type=1;", + "123\n", + ) + check_query( + clickhouse_node, + "select count(c1) from cond_on_key_col.test1 where c2='b';", + "1\n", + ) clickhouse_node.query("DROP DATABASE cond_on_key_col") mysql_node.query("DROP DATABASE cond_on_key_col") + def mysql_settings_test(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS test_database") clickhouse_node.query("DROP DATABASE IF EXISTS test_database") mysql_node.query("CREATE DATABASE test_database") - mysql_node.query("CREATE TABLE test_database.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))") + mysql_node.query( + "CREATE TABLE test_database.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))" + ) mysql_node.query("INSERT INTO test_database.a VALUES(1, 'foo')") mysql_node.query("INSERT INTO test_database.a VALUES(2, 'bar')") - clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(service_name)) - check_query(clickhouse_node, "SELECT COUNT() FROM test_database.a FORMAT TSV", "2\n") + clickhouse_node.query( + "CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query( + clickhouse_node, "SELECT COUNT() FROM test_database.a FORMAT TSV", "2\n" + ) - assert clickhouse_node.query("SELECT COUNT(DISTINCT blockNumber()) FROM test_database.a FORMAT TSV") == "2\n" + assert ( + clickhouse_node.query( + "SELECT COUNT(DISTINCT blockNumber()) FROM test_database.a FORMAT TSV" + ) + == "2\n" + ) clickhouse_node.query("DROP DATABASE test_database") mysql_node.query("DROP DATABASE test_database") + def materialized_mysql_large_transaction(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS largetransaction") clickhouse_node.query("DROP DATABASE IF EXISTS largetransaction") mysql_node.query("CREATE DATABASE largetransaction") - mysql_node.query("CREATE TABLE largetransaction.test_table (" - "`key` INT NOT NULL PRIMARY KEY AUTO_INCREMENT, " - "`value` INT NOT NULL) ENGINE = InnoDB;") + mysql_node.query( + "CREATE TABLE largetransaction.test_table (" + "`key` INT NOT NULL PRIMARY KEY AUTO_INCREMENT, " + "`value` INT NOT NULL) ENGINE = InnoDB;" + ) num_rows = 200000 rows_per_insert = 5000 values = ",".join(["(1)" for _ in range(rows_per_insert)]) - for i in range(num_rows//rows_per_insert): - mysql_node.query(f"INSERT INTO largetransaction.test_table (`value`) VALUES {values};") + for i in range(num_rows // rows_per_insert): + mysql_node.query( + f"INSERT INTO largetransaction.test_table (`value`) VALUES {values};" + ) - - clickhouse_node.query("CREATE DATABASE largetransaction ENGINE = MaterializedMySQL('{}:3306', 
'largetransaction', 'root', 'clickhouse')".format(service_name)) - check_query(clickhouse_node, "SELECT COUNT() FROM largetransaction.test_table", f"{num_rows}\n") + clickhouse_node.query( + "CREATE DATABASE largetransaction ENGINE = MaterializedMySQL('{}:3306', 'largetransaction', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query( + clickhouse_node, + "SELECT COUNT() FROM largetransaction.test_table", + f"{num_rows}\n", + ) mysql_node.query("UPDATE largetransaction.test_table SET value = 2;") # Attempt to restart clickhouse after it has started processing # the transaction, but before it has completed it. - while int(clickhouse_node.query("SELECT COUNT() FROM largetransaction.test_table WHERE value = 2")) == 0: + while ( + int( + clickhouse_node.query( + "SELECT COUNT() FROM largetransaction.test_table WHERE value = 2" + ) + ) + == 0 + ): time.sleep(0.2) clickhouse_node.restart_clickhouse() - check_query(clickhouse_node, "SELECT COUNT() FROM largetransaction.test_table WHERE value = 2", f"{num_rows}\n") + check_query( + clickhouse_node, + "SELECT COUNT() FROM largetransaction.test_table WHERE value = 2", + f"{num_rows}\n", + ) clickhouse_node.query("DROP DATABASE largetransaction") mysql_node.query("DROP DATABASE largetransaction") + def table_table(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS table_test") clickhouse_node.query("DROP DATABASE IF EXISTS table_test") @@ -1048,78 +1866,112 @@ def table_table(clickhouse_node, mysql_node, service_name): mysql_node.query("CREATE TABLE table_test.table (id INT UNSIGNED PRIMARY KEY)") mysql_node.query("INSERT INTO table_test.table VALUES (0),(1),(2),(3),(4)") - clickhouse_node.query("CREATE DATABASE table_test ENGINE=MaterializeMySQL('{}:3306', 'table_test', 'root', 'clickhouse')".format(service_name)) + clickhouse_node.query( + "CREATE DATABASE table_test ENGINE=MaterializeMySQL('{}:3306', 'table_test', 'root', 'clickhouse')".format( + service_name + ) + ) check_query(clickhouse_node, "SELECT COUNT(*) FROM table_test.table", "5\n") mysql_node.query("DROP DATABASE table_test") clickhouse_node.query("DROP DATABASE table_test") + def table_overrides(clickhouse_node, mysql_node, service_name): mysql_node.query("DROP DATABASE IF EXISTS table_overrides") clickhouse_node.query("DROP DATABASE IF EXISTS table_overrides") mysql_node.query("CREATE DATABASE table_overrides") - mysql_node.query("CREATE TABLE table_overrides.t1 (sensor_id INT UNSIGNED, timestamp DATETIME, temperature FLOAT, PRIMARY KEY(timestamp, sensor_id))") + mysql_node.query( + "CREATE TABLE table_overrides.t1 (sensor_id INT UNSIGNED, timestamp DATETIME, temperature FLOAT, PRIMARY KEY(timestamp, sensor_id))" + ) for id in range(10): mysql_node.query("BEGIN") for day in range(100): - mysql_node.query(f"INSERT INTO table_overrides.t1 VALUES({id}, TIMESTAMP('2021-01-01') + INTERVAL {day} DAY, (RAND()*20)+20)") + mysql_node.query( + f"INSERT INTO table_overrides.t1 VALUES({id}, TIMESTAMP('2021-01-01') + INTERVAL {day} DAY, (RAND()*20)+20)" + ) mysql_node.query("COMMIT") - clickhouse_node.query(f""" + clickhouse_node.query( + f""" CREATE DATABASE table_overrides ENGINE=MaterializeMySQL('{service_name}:3306', 'table_overrides', 'root', 'clickhouse') TABLE OVERRIDE t1 (COLUMNS (sensor_id UInt64, temp_f Nullable(Float32) ALIAS if(isNull(temperature), NULL, (temperature * 9 / 5) + 32))) - """) + """ + ) check_query( clickhouse_node, "SELECT type FROM system.columns WHERE database = 'table_overrides' AND table = 't1' AND name = 
'sensor_id'", - "UInt64\n") + "UInt64\n", + ) check_query( clickhouse_node, "SELECT type, default_kind FROM system.columns WHERE database = 'table_overrides' AND table = 't1' AND name = 'temp_f'", - "Nullable(Float32)\tALIAS\n") + "Nullable(Float32)\tALIAS\n", + ) check_query(clickhouse_node, "SELECT count() FROM table_overrides.t1", "1000\n") - mysql_node.query("INSERT INTO table_overrides.t1 VALUES(1001, '2021-10-01 00:00:00', 42.0)") + mysql_node.query( + "INSERT INTO table_overrides.t1 VALUES(1001, '2021-10-01 00:00:00', 42.0)" + ) check_query(clickhouse_node, "SELECT count() FROM table_overrides.t1", "1001\n") explain_with_table_func = f"EXPLAIN TABLE OVERRIDE mysql('{service_name}:3306', 'table_overrides', 't1', 'root', 'clickhouse')" - for what in ['ORDER BY', 'PRIMARY KEY', 'SAMPLE BY', 'PARTITION BY', 'TTL']: + for what in ["ORDER BY", "PRIMARY KEY", "SAMPLE BY", "PARTITION BY", "TTL"]: with pytest.raises(QueryRuntimeException) as exc: clickhouse_node.query(f"{explain_with_table_func} {what} temperature") - assert f'{what} override refers to nullable column `temperature`' in \ - str(exc.value) - assert f"{what} uses columns: `temperature` Nullable(Float32)" in \ - clickhouse_node.query(f"{explain_with_table_func} {what} assumeNotNull(temperature)") + assert f"{what} override refers to nullable column `temperature`" in str( + exc.value + ) + assert ( + f"{what} uses columns: `temperature` Nullable(Float32)" + in clickhouse_node.query( + f"{explain_with_table_func} {what} assumeNotNull(temperature)" + ) + ) for testcase in [ - ('COLUMNS (temperature Nullable(Float32) MATERIALIZED 1.0)', - 'column `temperature`: modifying default specifier is not allowed'), - ('COLUMNS (sensor_id UInt64 ALIAS 42)', - 'column `sensor_id`: modifying default specifier is not allowed') + ( + "COLUMNS (temperature Nullable(Float32) MATERIALIZED 1.0)", + "column `temperature`: modifying default specifier is not allowed", + ), + ( + "COLUMNS (sensor_id UInt64 ALIAS 42)", + "column `sensor_id`: modifying default specifier is not allowed", + ), ]: with pytest.raises(QueryRuntimeException) as exc: clickhouse_node.query(f"{explain_with_table_func} {testcase[0]}") assert testcase[1] in str(exc.value) for testcase in [ - ('COLUMNS (temperature Nullable(Float64))', - 'Modified columns: `temperature` Nullable(Float32) -> Nullable(Float64)'), - ('COLUMNS (temp_f Nullable(Float32) ALIAS if(temperature IS NULL, NULL, (temperature * 9.0 / 5.0) + 32),\ - temp_k Nullable(Float32) ALIAS if(temperature IS NULL, NULL, temperature + 273.15))', - 'Added columns: `temp_f` Nullable(Float32), `temp_k` Nullable(Float32)') + ( + "COLUMNS (temperature Nullable(Float64))", + "Modified columns: `temperature` Nullable(Float32) -> Nullable(Float64)", + ), + ( + "COLUMNS (temp_f Nullable(Float32) ALIAS if(temperature IS NULL, NULL, (temperature * 9.0 / 5.0) + 32),\ + temp_k Nullable(Float32) ALIAS if(temperature IS NULL, NULL, temperature + 273.15))", + "Added columns: `temp_f` Nullable(Float32), `temp_k` Nullable(Float32)", + ), ]: assert testcase[1] in clickhouse_node.query( - f"{explain_with_table_func} {testcase[0]}") + f"{explain_with_table_func} {testcase[0]}" + ) clickhouse_node.query("DROP DATABASE IF EXISTS table_overrides") mysql_node.query("DROP DATABASE IF EXISTS table_overrides") -def materialized_database_support_all_kinds_of_mysql_datatype(clickhouse_node, mysql_node, service_name): +def materialized_database_support_all_kinds_of_mysql_datatype( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP 
DATABASE IF EXISTS test_database_datatype") clickhouse_node.query("DROP DATABASE IF EXISTS test_database_datatype") - mysql_node.query("CREATE DATABASE test_database_datatype DEFAULT CHARACTER SET 'utf8'") - mysql_node.query(""" + mysql_node.query( + "CREATE DATABASE test_database_datatype DEFAULT CHARACTER SET 'utf8'" + ) + mysql_node.query( + """ CREATE TABLE test_database_datatype.t1 ( `v1` int(10) unsigned AUTO_INCREMENT, `v2` TINYINT, @@ -1156,88 +2008,146 @@ def materialized_database_support_all_kinds_of_mysql_datatype(clickhouse_node, m `v32` ENUM('RED','GREEN','BLUE'), PRIMARY KEY (`v1`) ) ENGINE=InnoDB; - """) + """ + ) - mysql_node.query(""" + mysql_node.query( + """ INSERT INTO test_database_datatype.t1 (v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v28, v29, v30, v31, v32) values (1, 11, 9223372036854775807, -1, 1, 11, 18446744073709551615, -1.1, 1.1, -1.111, 1.111, 1.1111, '2021-10-06', 'text', 'varchar', 'BLOB', '2021-10-06 18:32:57', '2021-10-06 18:32:57.482786', '2021-10-06 18:32:57', '2021-10-06 18:32:57.482786', '2021', '838:59:59', '838:59:59.000000', ST_GeometryFromText('point(0.0 0.0)'), b'1010', 'a', 11, 'varbinary', 'binary', 'RED'); - """) + """ + ) clickhouse_node.query( "CREATE DATABASE test_database_datatype ENGINE = MaterializeMySQL('{}:3306', 'test_database_datatype', 'root', 'clickhouse')".format( - service_name)) + service_name + ) + ) - check_query(clickhouse_node, "SELECT name FROM system.tables WHERE database = 'test_database_datatype'", "t1\n") + check_query( + clickhouse_node, + "SELECT name FROM system.tables WHERE database = 'test_database_datatype'", + "t1\n", + ) # full synchronization check - check_query(clickhouse_node, "SELECT v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, hex(v25), v26, v28, v29, v30, v32 FROM test_database_datatype.t1 FORMAT TSV", - "1\t1\t11\t9223372036854775807\t-1\t1\t11\t18446744073709551615\t-1.1\t1.1\t-1.111\t1.111\t1.1111\t2021-10-06\ttext\tvarchar\tBLOB\t2021-10-06 18:32:57\t2021-10-06 18:32:57.482786\t2021-10-06 18:32:57" + - "\t2021-10-06 18:32:57.482786\t2021\t3020399000000\t3020399000000\t00000000010100000000000000000000000000000000000000\t10\t1\t11\tvarbinary\tRED\n") + check_query( + clickhouse_node, + "SELECT v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, hex(v25), v26, v28, v29, v30, v32 FROM test_database_datatype.t1 FORMAT TSV", + "1\t1\t11\t9223372036854775807\t-1\t1\t11\t18446744073709551615\t-1.1\t1.1\t-1.111\t1.111\t1.1111\t2021-10-06\ttext\tvarchar\tBLOB\t2021-10-06 18:32:57\t2021-10-06 18:32:57.482786\t2021-10-06 18:32:57" + + "\t2021-10-06 18:32:57.482786\t2021\t3020399000000\t3020399000000\t00000000010100000000000000000000000000000000000000\t10\t1\t11\tvarbinary\tRED\n", + ) - mysql_node.query(""" + mysql_node.query( + """ INSERT INTO test_database_datatype.t1 (v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v28, v29, v30, v31, v32) values (2, 22, 9223372036854775807, -2, 2, 22, 18446744073709551615, -2.2, 2.2, -2.22, 2.222, 2.2222, '2021-10-07', 'text', 'varchar', 'BLOB', '2021-10-07 18:32:57', '2021-10-07 18:32:57.482786', '2021-10-07 18:32:57', '2021-10-07 18:32:57.482786', '2021', '-838:59:59', '-12:59:58.000001', ST_GeometryFromText('point(120.153576 30.287459)'), b'1011', 'a,c', 22, 'varbinary', 'binary', 'GREEN' ); - """) + """ + ) # 
increment synchronization check - check_query(clickhouse_node, "SELECT v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, hex(v25), v26, v28, v29, v30, v32 FROM test_database_datatype.t1 FORMAT TSV", - "1\t1\t11\t9223372036854775807\t-1\t1\t11\t18446744073709551615\t-1.1\t1.1\t-1.111\t1.111\t1.1111\t2021-10-06\ttext\tvarchar\tBLOB\t2021-10-06 18:32:57\t2021-10-06 18:32:57.482786\t2021-10-06 18:32:57\t2021-10-06 18:32:57.482786" + - "\t2021\t3020399000000\t3020399000000\t00000000010100000000000000000000000000000000000000\t10\t1\t11\tvarbinary\tRED\n" + - "2\t2\t22\t9223372036854775807\t-2\t2\t22\t18446744073709551615\t-2.2\t2.2\t-2.22\t2.222\t2.2222\t2021-10-07\ttext\tvarchar\tBLOB\t2021-10-07 18:32:57\t2021-10-07 18:32:57.482786\t2021-10-07 18:32:57\t2021-10-07 18:32:57.482786" + - "\t2021\t-3020399000000\t-46798000001\t000000000101000000D55C6E30D4095E40DCF0BBE996493E40\t11\t3\t22\tvarbinary\tGREEN\n") + check_query( + clickhouse_node, + "SELECT v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, hex(v25), v26, v28, v29, v30, v32 FROM test_database_datatype.t1 FORMAT TSV", + "1\t1\t11\t9223372036854775807\t-1\t1\t11\t18446744073709551615\t-1.1\t1.1\t-1.111\t1.111\t1.1111\t2021-10-06\ttext\tvarchar\tBLOB\t2021-10-06 18:32:57\t2021-10-06 18:32:57.482786\t2021-10-06 18:32:57\t2021-10-06 18:32:57.482786" + + "\t2021\t3020399000000\t3020399000000\t00000000010100000000000000000000000000000000000000\t10\t1\t11\tvarbinary\tRED\n" + + "2\t2\t22\t9223372036854775807\t-2\t2\t22\t18446744073709551615\t-2.2\t2.2\t-2.22\t2.222\t2.2222\t2021-10-07\ttext\tvarchar\tBLOB\t2021-10-07 18:32:57\t2021-10-07 18:32:57.482786\t2021-10-07 18:32:57\t2021-10-07 18:32:57.482786" + + "\t2021\t-3020399000000\t-46798000001\t000000000101000000D55C6E30D4095E40DCF0BBE996493E40\t11\t3\t22\tvarbinary\tGREEN\n", + ) -def materialized_database_settings_materialized_mysql_tables_list(clickhouse_node, mysql_node, service_name): +def materialized_database_settings_materialized_mysql_tables_list( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS test_database") clickhouse_node.query("DROP DATABASE IF EXISTS test_database") mysql_node.query("CREATE DATABASE test_database") - mysql_node.query("CREATE TABLE test_database.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))") + mysql_node.query( + "CREATE TABLE test_database.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))" + ) mysql_node.query("INSERT INTO test_database.a VALUES(1, 'foo')") mysql_node.query("INSERT INTO test_database.a VALUES(2, 'bar')") # table b(include json type, not in materialized_mysql_tables_list) can be skip - mysql_node.query("CREATE TABLE test_database.b (id INT(11) NOT NULL PRIMARY KEY, value JSON)") + mysql_node.query( + "CREATE TABLE test_database.b (id INT(11) NOT NULL PRIMARY KEY, value JSON)" + ) - clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse') SETTINGS materialized_mysql_tables_list = ' a,c,d'".format(service_name)) + clickhouse_node.query( + "CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse') SETTINGS materialized_mysql_tables_list = ' a,c,d'".format( + service_name + ) + ) - check_query(clickhouse_node, "SELECT name from system.tables where database = 'test_database' FORMAT TSV", "a\n") - check_query(clickhouse_node, "SELECT COUNT() FROM 
test_database.a FORMAT TSV", "2\n") + check_query( + clickhouse_node, + "SELECT name from system.tables where database = 'test_database' FORMAT TSV", + "a\n", + ) + check_query( + clickhouse_node, "SELECT COUNT() FROM test_database.a FORMAT TSV", "2\n" + ) # mysql data(binlog) can be skip - mysql_node.query("INSERT INTO test_database.b VALUES(1, '{\"name\":\"testjson\"}')") - mysql_node.query("INSERT INTO test_database.b VALUES(2, '{\"name\":\"testjson\"}')") + mysql_node.query('INSERT INTO test_database.b VALUES(1, \'{"name":"testjson"}\')') + mysql_node.query('INSERT INTO test_database.b VALUES(2, \'{"name":"testjson"}\')') # irrelevant database can be skip mysql_node.query("DROP DATABASE IF EXISTS other_database") mysql_node.query("CREATE DATABASE other_database") - mysql_node.query("CREATE TABLE other_database.d (id INT(11) NOT NULL PRIMARY KEY, value json)") - mysql_node.query("INSERT INTO other_database.d VALUES(1, '{\"name\":\"testjson\"}')") + mysql_node.query( + "CREATE TABLE other_database.d (id INT(11) NOT NULL PRIMARY KEY, value json)" + ) + mysql_node.query('INSERT INTO other_database.d VALUES(1, \'{"name":"testjson"}\')') - mysql_node.query("CREATE TABLE test_database.c (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))") + mysql_node.query( + "CREATE TABLE test_database.c (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))" + ) mysql_node.query("INSERT INTO test_database.c VALUES(1, 'foo')") mysql_node.query("INSERT INTO test_database.c VALUES(2, 'bar')") - check_query(clickhouse_node, "SELECT name from system.tables where database = 'test_database' FORMAT TSV", "a\nc\n") - check_query(clickhouse_node, "SELECT COUNT() FROM test_database.c FORMAT TSV", "2\n") + check_query( + clickhouse_node, + "SELECT name from system.tables where database = 'test_database' FORMAT TSV", + "a\nc\n", + ) + check_query( + clickhouse_node, "SELECT COUNT() FROM test_database.c FORMAT TSV", "2\n" + ) clickhouse_node.query("DROP DATABASE test_database") mysql_node.query("DROP DATABASE test_database") -def materialized_database_mysql_date_type_to_date32(clickhouse_node, mysql_node, service_name): +def materialized_database_mysql_date_type_to_date32( + clickhouse_node, mysql_node, service_name +): mysql_node.query("DROP DATABASE IF EXISTS test_database") clickhouse_node.query("DROP DATABASE IF EXISTS test_database") mysql_node.query("CREATE DATABASE test_database") - mysql_node.query("CREATE TABLE test_database.a (a INT(11) NOT NULL PRIMARY KEY, b date DEFAULT NULL)") + mysql_node.query( + "CREATE TABLE test_database.a (a INT(11) NOT NULL PRIMARY KEY, b date DEFAULT NULL)" + ) # can't support date that less than 1925 year for now mysql_node.query("INSERT INTO test_database.a VALUES(1, '1900-04-16')") # test date that is older than 1925 mysql_node.query("INSERT INTO test_database.a VALUES(3, '1971-02-16')") mysql_node.query("INSERT INTO test_database.a VALUES(4, '2101-05-16')") - clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(service_name)) - check_query(clickhouse_node, "SELECT b from test_database.a order by a FORMAT TSV", "1970-01-01\n1971-02-16\n2101-05-16\n") + clickhouse_node.query( + "CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format( + service_name + ) + ) + check_query( + clickhouse_node, + "SELECT b from test_database.a order by a FORMAT TSV", + "1970-01-01\n1971-02-16\n2101-05-16\n", + ) mysql_node.query("INSERT INTO test_database.a 
VALUES(6, '2022-02-16')") mysql_node.query("INSERT INTO test_database.a VALUES(7, '2104-06-06')") - check_query(clickhouse_node, "SELECT b from test_database.a order by a FORMAT TSV", "1970-01-01\n1971-02-16\n2101-05-16\n2022-02-16\n" + - "2104-06-06\n") + check_query( + clickhouse_node, + "SELECT b from test_database.a order by a FORMAT TSV", + "1970-01-01\n1971-02-16\n2101-05-16\n2022-02-16\n" + "2104-06-06\n", + ) diff --git a/tests/integration/test_materialized_mysql_database/test.py b/tests/integration/test_materialized_mysql_database/test.py index f4fb957a547..a672ec72275 100644 --- a/tests/integration/test_materialized_mysql_database/test.py +++ b/tests/integration/test_materialized_mysql_database/test.py @@ -5,7 +5,12 @@ import pwd import re import pymysql.cursors import pytest -from helpers.cluster import ClickHouseCluster, ClickHouseInstance, get_docker_compose_path, run_and_check +from helpers.cluster import ( + ClickHouseCluster, + ClickHouseInstance, + get_docker_compose_path, + run_and_check, +) import docker import logging @@ -17,9 +22,28 @@ cluster = ClickHouseCluster(__file__) mysql_node = None mysql8_node = None -node_db = cluster.add_instance('node1', main_configs=["configs/timezone_config.xml"], user_configs=["configs/users.xml"], with_mysql=True, with_mysql8=True, stay_alive=True) -node_disable_bytes_settings = cluster.add_instance('node2', main_configs=["configs/timezone_config.xml"], user_configs=["configs/users_disable_bytes_settings.xml"], with_mysql=False, stay_alive=True) -node_disable_rows_settings = cluster.add_instance('node3', main_configs=["configs/timezone_config.xml"], user_configs=["configs/users_disable_rows_settings.xml"], with_mysql=False, stay_alive=True) +node_db = cluster.add_instance( + "node1", + main_configs=["configs/timezone_config.xml"], + user_configs=["configs/users.xml"], + with_mysql=True, + with_mysql8=True, + stay_alive=True, +) +node_disable_bytes_settings = cluster.add_instance( + "node2", + main_configs=["configs/timezone_config.xml"], + user_configs=["configs/users_disable_bytes_settings.xml"], + with_mysql=False, + stay_alive=True, +) +node_disable_rows_settings = cluster.add_instance( + "node3", + main_configs=["configs/timezone_config.xml"], + user_configs=["configs/users_disable_rows_settings.xml"], + with_mysql=False, + stay_alive=True, +) @pytest.fixture(scope="module") @@ -32,7 +56,15 @@ def started_cluster(): class MySQLConnection: - def __init__(self, port, user='root', password='clickhouse', ip_address=None, docker_compose=None, project_name=cluster.project_name): + def __init__( + self, + port, + user="root", + password="clickhouse", + ip_address=None, + docker_compose=None, + project_name=cluster.project_name, + ): self.user = user self.port = port self.ip_address = ip_address @@ -44,11 +76,20 @@ class MySQLConnection: for _ in range(5): try: if self.mysql_connection is None: - self.mysql_connection = pymysql.connect(user=self.user, password=self.password, host=self.ip_address, - port=self.port, autocommit=True) + self.mysql_connection = pymysql.connect( + user=self.user, + password=self.password, + host=self.ip_address, + port=self.port, + autocommit=True, + ) else: self.mysql_connection.ping(reconnect=True) - logging.debug("MySQL Connection establised: {}:{}".format(self.ip_address, self.port)) + logging.debug( + "MySQL Connection establised: {}:{}".format( + self.ip_address, self.port + ) + ) return self.mysql_connection except Exception as e: errors += [str(e)] @@ -63,8 +104,12 @@ class MySQLConnection: 
self.query("CREATE USER '" + user + "'@'%' IDENTIFIED BY '" + password + "'") self.grant_min_priv_for_user(user) - def grant_min_priv_for_user(self, user, db='priv_err_db'): - self.query("GRANT REPLICATION SLAVE, REPLICATION CLIENT, RELOAD ON *.* TO '" + user + "'@'%'") + def grant_min_priv_for_user(self, user, db="priv_err_db"): + self.query( + "GRANT REPLICATION SLAVE, REPLICATION CLIENT, RELOAD ON *.* TO '" + + user + + "'@'%'" + ) self.query("GRANT SELECT ON " + db + ".* TO '" + user + "'@'%'") def result(self, execution_query): @@ -85,183 +130,382 @@ class MySQLConnection: @pytest.fixture(scope="module") def started_mysql_5_7(): - mysql_node = MySQLConnection(cluster.mysql_port, 'root', 'clickhouse', cluster.mysql_ip) + mysql_node = MySQLConnection( + cluster.mysql_port, "root", "clickhouse", cluster.mysql_ip + ) yield mysql_node @pytest.fixture(scope="module") def started_mysql_8_0(): - mysql8_node = MySQLConnection(cluster.mysql8_port, 'root', 'clickhouse', cluster.mysql8_ip) + mysql8_node = MySQLConnection( + cluster.mysql8_port, "root", "clickhouse", cluster.mysql8_ip + ) yield mysql8_node -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def clickhouse_node(): yield node_db -def test_materialized_database_dml_with_mysql_5_7(started_cluster, started_mysql_5_7, clickhouse_node: ClickHouseInstance): - materialize_with_ddl.dml_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.materialized_mysql_database_with_views(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.materialized_mysql_database_with_datetime_and_decimal(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.move_to_prewhere_and_column_filtering(clickhouse_node, started_mysql_5_7, "mysql57") +def test_materialized_database_dml_with_mysql_5_7( + started_cluster, started_mysql_5_7, clickhouse_node: ClickHouseInstance +): + materialize_with_ddl.dml_with_materialized_mysql_database( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.materialized_mysql_database_with_views( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.materialized_mysql_database_with_datetime_and_decimal( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.move_to_prewhere_and_column_filtering( + clickhouse_node, started_mysql_5_7, "mysql57" + ) -def test_materialized_database_dml_with_mysql_8_0(started_cluster, started_mysql_8_0, clickhouse_node): - materialize_with_ddl.dml_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.materialized_mysql_database_with_views(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.materialized_mysql_database_with_datetime_and_decimal(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.move_to_prewhere_and_column_filtering(clickhouse_node, started_mysql_8_0, "mysql80") +def test_materialized_database_dml_with_mysql_8_0( + started_cluster, started_mysql_8_0, clickhouse_node +): + materialize_with_ddl.dml_with_materialized_mysql_database( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.materialized_mysql_database_with_views( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.materialized_mysql_database_with_datetime_and_decimal( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.move_to_prewhere_and_column_filtering( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def 
test_materialized_database_ddl_with_mysql_5_7(started_cluster, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.drop_table_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.create_table_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.rename_table_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.alter_add_column_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.alter_drop_column_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") +def test_materialized_database_ddl_with_mysql_5_7( + started_cluster, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.drop_table_with_materialized_mysql_database( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.create_table_with_materialized_mysql_database( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.rename_table_with_materialized_mysql_database( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.alter_add_column_with_materialized_mysql_database( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.alter_drop_column_with_materialized_mysql_database( + clickhouse_node, started_mysql_5_7, "mysql57" + ) # mysql 5.7 cannot support alter rename column # materialize_with_ddl.alter_rename_column_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.alter_rename_table_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.alter_modify_column_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.create_table_like_with_materialize_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") + materialize_with_ddl.alter_rename_table_with_materialized_mysql_database( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.alter_modify_column_with_materialized_mysql_database( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.create_table_like_with_materialize_mysql_database( + clickhouse_node, started_mysql_5_7, "mysql57" + ) -def test_materialized_database_ddl_with_mysql_8_0(started_cluster, started_mysql_8_0, clickhouse_node): - materialize_with_ddl.drop_table_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.create_table_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.rename_table_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.alter_add_column_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.alter_drop_column_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.alter_rename_table_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.alter_rename_column_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.alter_modify_column_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.create_table_like_with_materialize_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") +def 
test_materialized_database_ddl_with_mysql_8_0( + started_cluster, started_mysql_8_0, clickhouse_node +): + materialize_with_ddl.drop_table_with_materialized_mysql_database( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.create_table_with_materialized_mysql_database( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.rename_table_with_materialized_mysql_database( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.alter_add_column_with_materialized_mysql_database( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.alter_drop_column_with_materialized_mysql_database( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.alter_rename_table_with_materialized_mysql_database( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.alter_rename_column_with_materialized_mysql_database( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.alter_modify_column_with_materialized_mysql_database( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.create_table_like_with_materialize_mysql_database( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_materialized_database_ddl_with_empty_transaction_5_7(started_cluster, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.query_event_with_empty_transaction(clickhouse_node, started_mysql_5_7, "mysql57") +def test_materialized_database_ddl_with_empty_transaction_5_7( + started_cluster, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.query_event_with_empty_transaction( + clickhouse_node, started_mysql_5_7, "mysql57" + ) -def test_materialized_database_ddl_with_empty_transaction_8_0(started_cluster, started_mysql_8_0, clickhouse_node): - materialize_with_ddl.query_event_with_empty_transaction(clickhouse_node, started_mysql_8_0, "mysql80") +def test_materialized_database_ddl_with_empty_transaction_8_0( + started_cluster, started_mysql_8_0, clickhouse_node +): + materialize_with_ddl.query_event_with_empty_transaction( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_select_without_columns_5_7(started_cluster, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.select_without_columns(clickhouse_node, started_mysql_5_7, "mysql57") +def test_select_without_columns_5_7( + started_cluster, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.select_without_columns( + clickhouse_node, started_mysql_5_7, "mysql57" + ) -def test_select_without_columns_8_0(started_cluster, started_mysql_8_0, clickhouse_node): - materialize_with_ddl.select_without_columns(clickhouse_node, started_mysql_8_0, "mysql80") +def test_select_without_columns_8_0( + started_cluster, started_mysql_8_0, clickhouse_node +): + materialize_with_ddl.select_without_columns( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_insert_with_modify_binlog_checksum_5_7(started_cluster, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.insert_with_modify_binlog_checksum(clickhouse_node, started_mysql_5_7, "mysql57") +def test_insert_with_modify_binlog_checksum_5_7( + started_cluster, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.insert_with_modify_binlog_checksum( + clickhouse_node, started_mysql_5_7, "mysql57" + ) -def test_insert_with_modify_binlog_checksum_8_0(started_cluster, started_mysql_8_0, clickhouse_node): - materialize_with_ddl.insert_with_modify_binlog_checksum(clickhouse_node, started_mysql_8_0, "mysql80") +def 
test_insert_with_modify_binlog_checksum_8_0( + started_cluster, started_mysql_8_0, clickhouse_node +): + materialize_with_ddl.insert_with_modify_binlog_checksum( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_materialized_database_err_sync_user_privs_5_7(started_cluster, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, started_mysql_5_7, "mysql57") +def test_materialized_database_err_sync_user_privs_5_7( + started_cluster, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.err_sync_user_privs_with_materialized_mysql_database( + clickhouse_node, started_mysql_5_7, "mysql57" + ) -def test_materialized_database_err_sync_user_privs_8_0(started_cluster, started_mysql_8_0, clickhouse_node): - materialize_with_ddl.err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, started_mysql_8_0, "mysql80") +def test_materialized_database_err_sync_user_privs_8_0( + started_cluster, started_mysql_8_0, clickhouse_node +): + materialize_with_ddl.err_sync_user_privs_with_materialized_mysql_database( + clickhouse_node, started_mysql_8_0, "mysql80" + ) def test_network_partition_5_7(started_cluster, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.network_partition_test(clickhouse_node, started_mysql_5_7, "mysql57") + materialize_with_ddl.network_partition_test( + clickhouse_node, started_mysql_5_7, "mysql57" + ) def test_network_partition_8_0(started_cluster, started_mysql_8_0, clickhouse_node): - materialize_with_ddl.network_partition_test(clickhouse_node, started_mysql_8_0, "mysql80") + materialize_with_ddl.network_partition_test( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_mysql_kill_sync_thread_restore_5_7(started_cluster, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.mysql_kill_sync_thread_restore_test(clickhouse_node, started_mysql_5_7, "mysql57") +def test_mysql_kill_sync_thread_restore_5_7( + started_cluster, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.mysql_kill_sync_thread_restore_test( + clickhouse_node, started_mysql_5_7, "mysql57" + ) -def test_mysql_kill_sync_thread_restore_8_0(started_cluster, started_mysql_8_0, clickhouse_node): - materialize_with_ddl.mysql_kill_sync_thread_restore_test(clickhouse_node, started_mysql_8_0, "mysql80") +def test_mysql_kill_sync_thread_restore_8_0( + started_cluster, started_mysql_8_0, clickhouse_node +): + materialize_with_ddl.mysql_kill_sync_thread_restore_test( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_mysql_killed_while_insert_5_7(started_cluster, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.mysql_killed_while_insert(clickhouse_node, started_mysql_5_7, "mysql57") +def test_mysql_killed_while_insert_5_7( + started_cluster, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.mysql_killed_while_insert( + clickhouse_node, started_mysql_5_7, "mysql57" + ) -def test_mysql_killed_while_insert_8_0(started_cluster, started_mysql_8_0, clickhouse_node): - materialize_with_ddl.mysql_killed_while_insert(clickhouse_node, started_mysql_8_0, "mysql80") +def test_mysql_killed_while_insert_8_0( + started_cluster, started_mysql_8_0, clickhouse_node +): + materialize_with_ddl.mysql_killed_while_insert( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_clickhouse_killed_while_insert_5_7(started_cluster, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.clickhouse_killed_while_insert(clickhouse_node, started_mysql_5_7, "mysql57") +def 
test_clickhouse_killed_while_insert_5_7( + started_cluster, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.clickhouse_killed_while_insert( + clickhouse_node, started_mysql_5_7, "mysql57" + ) -def test_clickhouse_killed_while_insert_8_0(started_cluster, started_mysql_8_0, clickhouse_node): - materialize_with_ddl.clickhouse_killed_while_insert(clickhouse_node, started_mysql_8_0, "mysql80") +def test_clickhouse_killed_while_insert_8_0( + started_cluster, started_mysql_8_0, clickhouse_node +): + materialize_with_ddl.clickhouse_killed_while_insert( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_utf8mb4(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): +def test_utf8mb4( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): materialize_with_ddl.utf8mb4_test(clickhouse_node, started_mysql_5_7, "mysql57") materialize_with_ddl.utf8mb4_test(clickhouse_node, started_mysql_8_0, "mysql80") def test_system_parts_table(started_cluster, started_mysql_8_0, clickhouse_node): - materialize_with_ddl.system_parts_test(clickhouse_node, started_mysql_8_0, "mysql80") + materialize_with_ddl.system_parts_test( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_multi_table_update(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.multi_table_update_test(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.multi_table_update_test(clickhouse_node, started_mysql_8_0, "mysql80") +def test_multi_table_update( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.multi_table_update_test( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.multi_table_update_test( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_system_tables_table(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.system_tables_test(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.system_tables_test(clickhouse_node, started_mysql_8_0, "mysql80") +def test_system_tables_table( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.system_tables_test( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.system_tables_test( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_materialized_with_column_comments(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.materialize_with_column_comments_test(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.materialize_with_column_comments_test(clickhouse_node, started_mysql_8_0, "mysql80") +def test_materialized_with_column_comments( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.materialize_with_column_comments_test( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.materialize_with_column_comments_test( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_materialized_with_enum(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.materialize_with_enum8_test(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.materialize_with_enum16_test(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.alter_enum8_to_enum16_test(clickhouse_node, started_mysql_5_7, "mysql57") - 
materialize_with_ddl.materialize_with_enum8_test(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.materialize_with_enum16_test(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.alter_enum8_to_enum16_test(clickhouse_node, started_mysql_8_0, "mysql80") +def test_materialized_with_enum( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.materialize_with_enum8_test( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.materialize_with_enum16_test( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.alter_enum8_to_enum16_test( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.materialize_with_enum8_test( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.materialize_with_enum16_test( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.alter_enum8_to_enum16_test( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -@pytest.mark.parametrize(('clickhouse_node'), [node_disable_bytes_settings, node_disable_rows_settings]) -def test_mysql_settings(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.mysql_settings_test(clickhouse_node, started_mysql_5_7, "mysql57") - materialize_with_ddl.mysql_settings_test(clickhouse_node, started_mysql_8_0, "mysql80") +@pytest.mark.parametrize( + ("clickhouse_node"), [node_disable_bytes_settings, node_disable_rows_settings] +) +def test_mysql_settings( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.mysql_settings_test( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + materialize_with_ddl.mysql_settings_test( + clickhouse_node, started_mysql_8_0, "mysql80" + ) -def test_large_transaction(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.materialized_mysql_large_transaction(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.materialized_mysql_large_transaction(clickhouse_node, started_mysql_5_7, "mysql57") +def test_large_transaction( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.materialized_mysql_large_transaction( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.materialized_mysql_large_transaction( + clickhouse_node, started_mysql_5_7, "mysql57" + ) -def test_table_table(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): + +def test_table_table( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): materialize_with_ddl.table_table(clickhouse_node, started_mysql_8_0, "mysql80") materialize_with_ddl.table_table(clickhouse_node, started_mysql_5_7, "mysql57") -def test_table_overrides(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): + +def test_table_overrides( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): materialize_with_ddl.table_overrides(clickhouse_node, started_mysql_5_7, "mysql57") materialize_with_ddl.table_overrides(clickhouse_node, started_mysql_8_0, "mysql80") -def test_materialized_database_support_all_kinds_of_mysql_datatype(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.materialized_database_support_all_kinds_of_mysql_datatype(clickhouse_node, started_mysql_8_0, "mysql80") - 
materialize_with_ddl.materialized_database_support_all_kinds_of_mysql_datatype(clickhouse_node, started_mysql_5_7, "mysql57") -def test_materialized_database_settings_materialized_mysql_tables_list(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.materialized_database_settings_materialized_mysql_tables_list(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.materialized_database_settings_materialized_mysql_tables_list(clickhouse_node, started_mysql_5_7, "mysql57") +def test_materialized_database_support_all_kinds_of_mysql_datatype( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.materialized_database_support_all_kinds_of_mysql_datatype( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.materialized_database_support_all_kinds_of_mysql_datatype( + clickhouse_node, started_mysql_5_7, "mysql57" + ) -def test_materialized_database_mysql_date_type_to_date32(started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node): - materialize_with_ddl.materialized_database_mysql_date_type_to_date32(clickhouse_node, started_mysql_8_0, "mysql80") - materialize_with_ddl.materialized_database_mysql_date_type_to_date32(clickhouse_node, started_mysql_5_7, "mysql57") + +def test_materialized_database_settings_materialized_mysql_tables_list( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.materialized_database_settings_materialized_mysql_tables_list( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.materialized_database_settings_materialized_mysql_tables_list( + clickhouse_node, started_mysql_5_7, "mysql57" + ) + + +def test_materialized_database_mysql_date_type_to_date32( + started_cluster, started_mysql_8_0, started_mysql_5_7, clickhouse_node +): + materialize_with_ddl.materialized_database_mysql_date_type_to_date32( + clickhouse_node, started_mysql_8_0, "mysql80" + ) + materialize_with_ddl.materialized_database_mysql_date_type_to_date32( + clickhouse_node, started_mysql_5_7, "mysql57" + ) diff --git a/tests/integration/test_max_http_connections_for_replication/test.py b/tests/integration/test_max_http_connections_for_replication/test.py index 67b3c5b53aa..bcb779ee913 100644 --- a/tests/integration/test_max_http_connections_for_replication/test.py +++ b/tests/integration/test_max_http_connections_for_replication/test.py @@ -9,7 +9,7 @@ from helpers.test_tools import assert_eq_with_retry def _fill_nodes(nodes, shard, connections_count): for node in nodes: node.query( - ''' + """ CREATE DATABASE test; CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) @@ -19,14 +19,25 @@ def _fill_nodes(nodes, shard, connections_count): SETTINGS replicated_max_parallel_fetches_for_host={connections}, index_granularity=8192; - '''.format(shard=shard, replica=node.name, connections=connections_count)) + """.format( + shard=shard, replica=node.name, connections=connections_count + ) + ) cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', user_configs=[], - main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', user_configs=[], - main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + user_configs=[], + main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, +) +node2 = cluster.add_instance( + "node2", + user_configs=[], + 
main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -45,6 +56,7 @@ def start_small_cluster(): def test_single_endpoint_connections_count(start_small_cluster): node1.query("TRUNCATE TABLE test_table") node2.query("SYSTEM SYNC REPLICA test_table") + def task(count): print(("Inserting ten times from {}".format(count))) for i in range(count, count + 10): @@ -56,7 +68,12 @@ def test_single_endpoint_connections_count(start_small_cluster): assert_eq_with_retry(node1, "select count() from test_table", "100") assert_eq_with_retry(node2, "select count() from test_table", "100") - assert node2.query("SELECT value FROM system.events where event='CreatedHTTPConnections'") == '1\n' + assert ( + node2.query( + "SELECT value FROM system.events where event='CreatedHTTPConnections'" + ) + == "1\n" + ) def test_keepalive_timeout(start_small_cluster): @@ -75,12 +92,29 @@ def test_keepalive_timeout(start_small_cluster): assert_eq_with_retry(node2, "select count() from test_table", str(2)) - assert not node2.contains_in_log("No message received"), "Found 'No message received' in clickhouse-server.log" + assert not node2.contains_in_log( + "No message received" + ), "Found 'No message received' in clickhouse-server.log" -node3 = cluster.add_instance('node3', user_configs=[], main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node4 = cluster.add_instance('node4', user_configs=[], main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node5 = cluster.add_instance('node5', user_configs=[], main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node3 = cluster.add_instance( + "node3", + user_configs=[], + main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, +) +node4 = cluster.add_instance( + "node4", + user_configs=[], + main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, +) +node5 = cluster.add_instance( + "node5", + user_configs=[], + main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -115,4 +149,9 @@ def test_multiple_endpoint_connections_count(start_big_cluster): assert_eq_with_retry(node5, "select count() from test_table", "100") # Two per each host or sometimes less, if fetches are not performed in parallel. But not more. 
- assert node5.query("SELECT value FROM system.events where event='CreatedHTTPConnections'") <= '4\n' + assert ( + node5.query( + "SELECT value FROM system.events where event='CreatedHTTPConnections'" + ) + <= "4\n" + ) diff --git a/tests/integration/test_max_suspicious_broken_parts/test.py b/tests/integration/test_max_suspicious_broken_parts/test.py index 31f53fdbc3c..c1f34adbb62 100644 --- a/tests/integration/test_max_suspicious_broken_parts/test.py +++ b/tests/integration/test_max_suspicious_broken_parts/test.py @@ -8,9 +8,10 @@ from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True) +node = cluster.add_instance("node", stay_alive=True) -@pytest.fixture(scope='module', autouse=True) + +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -18,52 +19,69 @@ def start_cluster(): finally: cluster.shutdown() + def break_part(table, part_name): - node.exec_in_container(['bash', '-c', f'rm /var/lib/clickhouse/data/default/{table}/{part_name}/columns.txt']) + node.exec_in_container( + [ + "bash", + "-c", + f"rm /var/lib/clickhouse/data/default/{table}/{part_name}/columns.txt", + ] + ) + def remove_part(table, part_name): - node.exec_in_container(['bash', '-c', f'rm -r /var/lib/clickhouse/data/default/{table}/{part_name}']) + node.exec_in_container( + ["bash", "-c", f"rm -r /var/lib/clickhouse/data/default/{table}/{part_name}"] + ) + def get_count(table): - return int(node.query(f'SELECT count() FROM {table}').strip()) + return int(node.query(f"SELECT count() FROM {table}").strip()) + def detach_table(table): - node.query(f'DETACH TABLE {table}') + node.query(f"DETACH TABLE {table}") + + def attach_table(table): - node.query(f'ATTACH TABLE {table}') + node.query(f"ATTACH TABLE {table}") + def check_table(table): rows = 900 per_part_rows = 90 - node.query(f'INSERT INTO {table} SELECT * FROM numbers(900)') + node.query(f"INSERT INTO {table} SELECT * FROM numbers(900)") assert get_count(table) == rows # break one part, and check that clickhouse will be alive - break_part(table, '0_1_1_0') + break_part(table, "0_1_1_0") rows -= per_part_rows detach_table(table) attach_table(table) assert get_count(table) == rows # break two parts, and check that clickhouse will not start - break_part(table, '1_2_2_0') - break_part(table, '2_3_3_0') - rows -= per_part_rows*2 + break_part(table, "1_2_2_0") + break_part(table, "2_3_3_0") + rows -= per_part_rows * 2 detach_table(table) with pytest.raises(QueryRuntimeException): attach_table(table) # now remove one part, and check - remove_part(table, '1_2_2_0') + remove_part(table, "1_2_2_0") attach_table(table) assert get_count(table) == rows - node.query(f'DROP TABLE {table}') + node.query(f"DROP TABLE {table}") + def test_max_suspicious_broken_parts(): - node.query(""" + node.query( + """ CREATE TABLE test_max_suspicious_broken_parts ( key Int ) @@ -72,11 +90,14 @@ def test_max_suspicious_broken_parts(): PARTITION BY key%10 SETTINGS max_suspicious_broken_parts = 1; - """) - check_table('test_max_suspicious_broken_parts') + """ + ) + check_table("test_max_suspicious_broken_parts") + def test_max_suspicious_broken_parts_bytes(): - node.query(""" + node.query( + """ CREATE TABLE test_max_suspicious_broken_parts_bytes ( key Int ) @@ -87,11 +108,14 @@ def test_max_suspicious_broken_parts_bytes(): max_suspicious_broken_parts = 10, /* one part takes ~751 byte, so we allow failure of one part with these limit 
*/ max_suspicious_broken_parts_bytes = 1000; - """) - check_table('test_max_suspicious_broken_parts_bytes') + """ + ) + check_table("test_max_suspicious_broken_parts_bytes") + def test_max_suspicious_broken_parts__wide(): - node.query(""" + node.query( + """ CREATE TABLE test_max_suspicious_broken_parts__wide ( key Int ) @@ -101,11 +125,14 @@ def test_max_suspicious_broken_parts__wide(): SETTINGS min_bytes_for_wide_part = 0, max_suspicious_broken_parts = 1; - """) - check_table('test_max_suspicious_broken_parts__wide') + """ + ) + check_table("test_max_suspicious_broken_parts__wide") + def test_max_suspicious_broken_parts_bytes__wide(): - node.query(""" + node.query( + """ CREATE TABLE test_max_suspicious_broken_parts_bytes__wide ( key Int ) @@ -117,5 +144,6 @@ def test_max_suspicious_broken_parts_bytes__wide(): max_suspicious_broken_parts = 10, /* one part takes ~750 byte, so we allow failure of one part with these limit */ max_suspicious_broken_parts_bytes = 1000; - """) - check_table('test_max_suspicious_broken_parts_bytes__wide') + """ + ) + check_table("test_max_suspicious_broken_parts_bytes__wide") diff --git a/tests/integration/test_merge_table_over_distributed/test.py b/tests/integration/test_merge_table_over_distributed/test.py index ab294867126..5ee542079a7 100644 --- a/tests/integration/test_merge_table_over_distributed/test.py +++ b/tests/integration/test_merge_table_over_distributed/test.py @@ -5,8 +5,8 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml']) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml']) +node1 = cluster.add_instance("node1", main_configs=["configs/remote_servers.xml"]) +node2 = cluster.add_instance("node2", main_configs=["configs/remote_servers.xml"]) @pytest.fixture(scope="module") @@ -15,19 +15,23 @@ def started_cluster(): cluster.start() for node in (node1, node2): - node.query(''' + node.query( + """ CREATE TABLE local_table(id UInt32, val String) ENGINE = MergeTree ORDER BY id; CREATE TABLE local_table_2(id UInt32, val String) ENGINE = MergeTree ORDER BY id; -''') +""" + ) node1.query("INSERT INTO local_table VALUES (1, 'node1')") node2.query("INSERT INTO local_table VALUES (2, 'node2')") - node1.query(''' + node1.query( + """ CREATE TABLE distributed_table(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table); CREATE TABLE distributed_table_2(id UInt32, val String) ENGINE = Distributed(test_cluster, default, local_table_2); CREATE TABLE merge_table(id UInt32, val String) ENGINE = Merge(default, '^distributed_table') -''') +""" + ) yield cluster @@ -36,26 +40,47 @@ CREATE TABLE merge_table(id UInt32, val String) ENGINE = Merge(default, '^distri def test_global_in(started_cluster): - assert node1.query( - "SELECT val FROM distributed_table WHERE id GLOBAL IN (SELECT toUInt32(3 - id) FROM local_table)").rstrip() \ - == 'node2' + assert ( + node1.query( + "SELECT val FROM distributed_table WHERE id GLOBAL IN (SELECT toUInt32(3 - id) FROM local_table)" + ).rstrip() + == "node2" + ) - assert node1.query( - "SELECT val FROM merge_table WHERE id GLOBAL IN (SELECT toUInt32(3 - id) FROM local_table)").rstrip() \ - == 'node2' + assert ( + node1.query( + "SELECT val FROM merge_table WHERE id GLOBAL IN (SELECT toUInt32(3 - id) FROM local_table)" + ).rstrip() + == "node2" + ) def test_filtering(started_cluster): - assert node1.query("SELECT id, val FROM merge_table WHERE id = 1").rstrip() == 
'1\tnode1' + assert ( + node1.query("SELECT id, val FROM merge_table WHERE id = 1").rstrip() + == "1\tnode1" + ) - assert node1.query("SELECT id + 1, val FROM merge_table WHERE id = 1").rstrip() == '2\tnode1' + assert ( + node1.query("SELECT id + 1, val FROM merge_table WHERE id = 1").rstrip() + == "2\tnode1" + ) - assert node1.query("SELECT id + 1 FROM merge_table WHERE val = 'node1'").rstrip() == '2' + assert ( + node1.query("SELECT id + 1 FROM merge_table WHERE val = 'node1'").rstrip() + == "2" + ) - assert node1.query( - "SELECT id + 1, val FROM merge_table PREWHERE id = 1 WHERE _table != '_dummy'").rstrip() == '2\tnode1' + assert ( + node1.query( + "SELECT id + 1, val FROM merge_table PREWHERE id = 1 WHERE _table != '_dummy'" + ).rstrip() + == "2\tnode1" + ) - assert node1.query("SELECT count() FROM merge_table PREWHERE id = 1").rstrip() == '1' + assert ( + node1.query("SELECT count() FROM merge_table PREWHERE id = 1").rstrip() == "1" + ) def test_select_table_name_from_merge_over_distributed(started_cluster): @@ -63,10 +88,12 @@ def test_select_table_name_from_merge_over_distributed(started_cluster): node2.query("INSERT INTO local_table_2 VALUES (2, 'node2')") node1.query("select _table == 'distributed_table' from merge_table") - node1.query("select * from (select _table == 'distributed_table' from merge_table limit 1)") + node1.query( + "select * from (select _table == 'distributed_table' from merge_table limit 1)" + ) -if __name__ == '__main__': +if __name__ == "__main__": with contextmanager(started_cluster)() as cluster: for name, instance in list(cluster.instances.items()): print(name, instance.ip_address) diff --git a/tests/integration/test_merge_tree_azure_blob_storage/test.py b/tests/integration/test_merge_tree_azure_blob_storage/test.py index 92b9d52cf86..bc549210b39 100644 --- a/tests/integration/test_merge_tree_azure_blob_storage/test.py +++ b/tests/integration/test_merge_tree_azure_blob_storage/test.py @@ -8,7 +8,10 @@ from helpers.utility import generate_values, replace_config, SafeThread SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/node/configs/config.d/storage_conf.xml'.format(get_instances_dir())) +CONFIG_PATH = os.path.join( + SCRIPT_DIR, + "./{}/node/configs/config.d/storage_conf.xml".format(get_instances_dir()), +) NODE_NAME = "node" TABLE_NAME = "blob_storage_table" @@ -21,9 +24,14 @@ CONTAINER_NAME = "cont" def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance(NODE_NAME, - main_configs=["configs/config.d/storage_conf.xml", "configs/config.d/bg_processing_pool_conf.xml"], - with_azurite=True) + cluster.add_instance( + NODE_NAME, + main_configs=[ + "configs/config.d/storage_conf.xml", + "configs/config.d/bg_processing_pool_conf.xml", + ], + with_azurite=True, + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -32,6 +40,7 @@ def cluster(): finally: cluster.shutdown() + # Note: use this for selects and inserts and create table queries. # For inserts there is no guarantee that retries will not result in duplicates. 
# But it is better to retry anyway because 'Connection was closed by the server' error @@ -42,7 +51,7 @@ def azure_query(node, query, try_num=3): return node.query(query) except Exception as ex: retriable_errors = [ - 'DB::Exception: Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response', + "DB::Exception: Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response", ] retry = False for error in retriable_errors: @@ -54,11 +63,12 @@ def azure_query(node, query, try_num=3): raise Exception(ex) continue + def create_table(node, table_name, **additional_settings): settings = { "storage_policy": "blob_storage_policy", "old_parts_lifetime": 1, - "index_granularity": 512 + "index_granularity": 512, } settings.update(additional_settings) @@ -75,7 +85,9 @@ def create_table(node, table_name, **additional_settings): node.query(f"DROP TABLE IF EXISTS {table_name}") azure_query(node, create_table_statement) - assert azure_query(node, f"SELECT COUNT(*) FROM {table_name} FORMAT Values") == "(0)" + assert ( + azure_query(node, f"SELECT COUNT(*) FROM {table_name} FORMAT Values") == "(0)" + ) def test_create_table(cluster): @@ -92,10 +104,16 @@ def test_read_after_cache_is_wiped(cluster): azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {values}") # Wipe cache - cluster.exec_in_container(cluster.get_container_id(NODE_NAME), ["rm", "-rf", "/var/lib/clickhouse/disks/blob_storage_disk/cache/"]) + cluster.exec_in_container( + cluster.get_container_id(NODE_NAME), + ["rm", "-rf", "/var/lib/clickhouse/disks/blob_storage_disk/cache/"], + ) # After cache is populated again, only .bin files should be accessed from Blob Storage. - assert azure_query(node, f"SELECT * FROM {TABLE_NAME} order by dt, id FORMAT Values") == values + assert ( + azure_query(node, f"SELECT * FROM {TABLE_NAME} order by dt, id FORMAT Values") + == values + ) def test_simple_insert_select(cluster): @@ -104,55 +122,96 @@ def test_simple_insert_select(cluster): values = "('2021-11-13',3,'hello')" azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {values}") - assert azure_query(node, f"SELECT dt, id, data FROM {TABLE_NAME} FORMAT Values") == values - blob_container_client = cluster.blob_service_client.get_container_client(CONTAINER_NAME) - assert len(list(blob_container_client.list_blobs())) >= 12 # 1 format file + 2 skip index files + 9 regular MergeTree files + leftovers from other tests + assert ( + azure_query(node, f"SELECT dt, id, data FROM {TABLE_NAME} FORMAT Values") + == values + ) + blob_container_client = cluster.blob_service_client.get_container_client( + CONTAINER_NAME + ) + assert ( + len(list(blob_container_client.list_blobs())) >= 12 + ) # 1 format file + 2 skip index files + 9 regular MergeTree files + leftovers from other tests def test_inserts_selects(cluster): node = cluster.instances[NODE_NAME] create_table(node, TABLE_NAME) - values1 = generate_values('2020-01-03', 4096) + values1 = generate_values("2020-01-03", 4096) azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {values1}") - assert azure_query(node, f"SELECT * FROM {TABLE_NAME} order by dt, id FORMAT Values") == values1 + assert ( + azure_query(node, f"SELECT * FROM {TABLE_NAME} order by dt, id FORMAT Values") + == values1 + ) - values2 = generate_values('2020-01-04', 4096) + values2 = generate_values("2020-01-04", 4096) azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {values2}") - assert azure_query(node, f"SELECT * FROM {TABLE_NAME} ORDER BY dt, id FORMAT 
Values") == values1 + "," + values2 + assert ( + azure_query(node, f"SELECT * FROM {TABLE_NAME} ORDER BY dt, id FORMAT Values") + == values1 + "," + values2 + ) - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} where id = 1 FORMAT Values") == "(2)" + assert ( + azure_query( + node, f"SELECT count(*) FROM {TABLE_NAME} where id = 1 FORMAT Values" + ) + == "(2)" + ) @pytest.mark.parametrize( - "merge_vertical", [ + "merge_vertical", + [ (True), (False), -]) + ], +) def test_insert_same_partition_and_merge(cluster, merge_vertical): settings = {} if merge_vertical: - settings['vertical_merge_algorithm_min_rows_to_activate'] = 0 - settings['vertical_merge_algorithm_min_columns_to_activate'] = 0 + settings["vertical_merge_algorithm_min_rows_to_activate"] = 0 + settings["vertical_merge_algorithm_min_columns_to_activate"] = 0 node = cluster.instances[NODE_NAME] create_table(node, TABLE_NAME, **settings) node.query(f"SYSTEM STOP MERGES {TABLE_NAME}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 1024)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 2048)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 1024, -1)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 2048, -1)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096, -1)}") + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 1024)}" + ) + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 2048)}" + ) + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}" + ) + azure_query( + node, + f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 1024, -1)}", + ) + azure_query( + node, + f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 2048, -1)}", + ) + azure_query( + node, + f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096, -1)}", + ) assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)" - assert azure_query(node, f"SELECT count(distinct(id)) FROM {TABLE_NAME} FORMAT Values") == "(8192)" + assert ( + azure_query(node, f"SELECT count(distinct(id)) FROM {TABLE_NAME} FORMAT Values") + == "(8192)" + ) node.query(f"SYSTEM START MERGES {TABLE_NAME}") # Wait for merges and old parts deletion for attempt in range(0, 10): - parts_count = azure_query(node, f"SELECT COUNT(*) FROM system.parts WHERE table = '{TABLE_NAME}' FORMAT Values") + parts_count = azure_query( + node, + f"SELECT COUNT(*) FROM system.parts WHERE table = '{TABLE_NAME}' FORMAT Values", + ) if parts_count == "(1)": break @@ -162,63 +221,123 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical): time.sleep(1) assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)" - assert azure_query(node, f"SELECT count(distinct(id)) FROM {TABLE_NAME} FORMAT Values") == "(8192)" + assert ( + azure_query(node, f"SELECT count(distinct(id)) FROM {TABLE_NAME} FORMAT Values") + == "(8192)" + ) def test_alter_table_columns(cluster): node = cluster.instances[NODE_NAME] create_table(node, TABLE_NAME) - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096, -1)}") + 
azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}" + ) + azure_query( + node, + f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096, -1)}", + ) node.query(f"ALTER TABLE {TABLE_NAME} ADD COLUMN col1 UInt64 DEFAULT 1") # To ensure parts have been merged node.query(f"OPTIMIZE TABLE {TABLE_NAME}") - assert azure_query(node, f"SELECT sum(col1) FROM {TABLE_NAME} FORMAT Values") == "(8192)" - assert azure_query(node, f"SELECT sum(col1) FROM {TABLE_NAME} WHERE id > 0 FORMAT Values") == "(4096)" + assert ( + azure_query(node, f"SELECT sum(col1) FROM {TABLE_NAME} FORMAT Values") + == "(8192)" + ) + assert ( + azure_query( + node, f"SELECT sum(col1) FROM {TABLE_NAME} WHERE id > 0 FORMAT Values" + ) + == "(4096)" + ) - node.query(f"ALTER TABLE {TABLE_NAME} MODIFY COLUMN col1 String", settings={"mutations_sync": 2}) + node.query( + f"ALTER TABLE {TABLE_NAME} MODIFY COLUMN col1 String", + settings={"mutations_sync": 2}, + ) - assert azure_query(node, f"SELECT distinct(col1) FROM {TABLE_NAME} FORMAT Values") == "('1')" + assert ( + azure_query(node, f"SELECT distinct(col1) FROM {TABLE_NAME} FORMAT Values") + == "('1')" + ) def test_attach_detach_partition(cluster): node = cluster.instances[NODE_NAME] create_table(node, TABLE_NAME) - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}") - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(8192)" + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}" + ) + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}" + ) + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(8192)" + ) node.query(f"ALTER TABLE {TABLE_NAME} DETACH PARTITION '2020-01-03'") - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(4096)" + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(4096)" + ) node.query(f"ALTER TABLE {TABLE_NAME} ATTACH PARTITION '2020-01-03'") - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(8192)" + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(8192)" + ) node.query(f"ALTER TABLE {TABLE_NAME} DROP PARTITION '2020-01-03'") - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(4096)" + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(4096)" + ) node.query(f"ALTER TABLE {TABLE_NAME} DETACH PARTITION '2020-01-04'") - node.query(f"ALTER TABLE {TABLE_NAME} DROP DETACHED PARTITION '2020-01-04'", settings={"allow_drop_detached": 1}) - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(0)" + node.query( + f"ALTER TABLE {TABLE_NAME} DROP DETACHED PARTITION '2020-01-04'", + settings={"allow_drop_detached": 1}, + ) + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(0)" + ) def test_move_partition_to_another_disk(cluster): node = cluster.instances[NODE_NAME] create_table(node, TABLE_NAME) - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}") - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(8192)" + 
azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}" + ) + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}" + ) + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(8192)" + ) - node.query(f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-04' TO DISK '{LOCAL_DISK}'") - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(8192)" + node.query( + f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-04' TO DISK '{LOCAL_DISK}'" + ) + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(8192)" + ) - node.query(f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-04' TO DISK '{AZURE_BLOB_STORAGE_DISK}'") - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(8192)" + node.query( + f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-04' TO DISK '{AZURE_BLOB_STORAGE_DISK}'" + ) + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(8192)" + ) def test_table_manipulations(cluster): @@ -227,21 +346,33 @@ def test_table_manipulations(cluster): renamed_table = TABLE_NAME + "_renamed" - node.query_with_retry(f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}") - node.query_with_retry(f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}") + node.query_with_retry( + f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}" + ) + node.query_with_retry( + f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}" + ) node.query(f"RENAME TABLE {TABLE_NAME} TO {renamed_table}") - assert azure_query(node, f"SELECT count(*) FROM {renamed_table} FORMAT Values") == "(8192)" + assert ( + azure_query(node, f"SELECT count(*) FROM {renamed_table} FORMAT Values") + == "(8192)" + ) node.query(f"RENAME TABLE {renamed_table} TO {TABLE_NAME}") assert node.query(f"CHECK TABLE {TABLE_NAME} FORMAT Values") == "(1)" node.query(f"DETACH TABLE {TABLE_NAME}") node.query(f"ATTACH TABLE {TABLE_NAME}") - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(8192)" + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(8192)" + ) node.query(f"TRUNCATE TABLE {TABLE_NAME}") - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(0)" + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(0)" + ) @pytest.mark.long_run @@ -251,38 +382,87 @@ def test_move_replace_partition_to_another_table(cluster): table_clone_name = TABLE_NAME + "_clone" - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 256)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 256)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-05', 256, -1)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-06', 256, -1)}") + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 256)}" + ) + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 256)}" + ) + azure_query( + node, + f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-05', 256, -1)}", + ) + azure_query( + node, + f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-06', 256, -1)}", + ) assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)" - assert 
azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(1024)" + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(1024)" + ) create_table(node, table_clone_name) - node.query(f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-03' TO TABLE {table_clone_name}") - node.query(f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-05' TO TABLE {table_clone_name}") + node.query( + f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-03' TO TABLE {table_clone_name}" + ) + node.query( + f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-05' TO TABLE {table_clone_name}" + ) assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)" - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(512)" - assert azure_query(node, f"SELECT sum(id) FROM {table_clone_name} FORMAT Values") == "(0)" - assert azure_query(node, f"SELECT count(*) FROM {table_clone_name} FORMAT Values") == "(512)" + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(512)" + ) + assert ( + azure_query(node, f"SELECT sum(id) FROM {table_clone_name} FORMAT Values") + == "(0)" + ) + assert ( + azure_query(node, f"SELECT count(*) FROM {table_clone_name} FORMAT Values") + == "(512)" + ) # Add new partitions to source table, but with different values and replace them from copied table. - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 256, -1)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-05', 256)}") + azure_query( + node, + f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 256, -1)}", + ) + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-05', 256)}" + ) assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)" - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(1024)" + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(1024)" + ) - node.query(f"ALTER TABLE {TABLE_NAME} REPLACE PARTITION '2020-01-03' FROM {table_clone_name}") - node.query(f"ALTER TABLE {TABLE_NAME} REPLACE PARTITION '2020-01-05' FROM {table_clone_name}") + node.query( + f"ALTER TABLE {TABLE_NAME} REPLACE PARTITION '2020-01-03' FROM {table_clone_name}" + ) + node.query( + f"ALTER TABLE {TABLE_NAME} REPLACE PARTITION '2020-01-05' FROM {table_clone_name}" + ) assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)" - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(1024)" - assert azure_query(node, f"SELECT sum(id) FROM {table_clone_name} FORMAT Values") == "(0)" - assert azure_query(node, f"SELECT count(*) FROM {table_clone_name} FORMAT Values") == "(512)" + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(1024)" + ) + assert ( + azure_query(node, f"SELECT sum(id) FROM {table_clone_name} FORMAT Values") + == "(0)" + ) + assert ( + azure_query(node, f"SELECT count(*) FROM {table_clone_name} FORMAT Values") + == "(512)" + ) node.query(f"DROP TABLE {table_clone_name} NO DELAY") assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)" - assert azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(1024)" + assert ( + azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") + == "(1024)" + ) node.query(f"ALTER TABLE {TABLE_NAME} FREEZE") @@ -293,18 +473,24 @@ def 
test_freeze_unfreeze(cluster): node = cluster.instances[NODE_NAME] create_table(node, TABLE_NAME) - backup1 = 'backup1' - backup2 = 'backup2' + backup1 = "backup1" + backup2 = "backup2" - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}") + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}" + ) node.query(f"ALTER TABLE {TABLE_NAME} FREEZE WITH NAME '{backup1}'") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}") + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}" + ) node.query(f"ALTER TABLE {TABLE_NAME} FREEZE WITH NAME '{backup2}'") azure_query(node, f"TRUNCATE TABLE {TABLE_NAME}") # Unfreeze single partition from backup1. - node.query(f"ALTER TABLE {TABLE_NAME} UNFREEZE PARTITION '2020-01-03' WITH NAME '{backup1}'") + node.query( + f"ALTER TABLE {TABLE_NAME} UNFREEZE PARTITION '2020-01-03' WITH NAME '{backup1}'" + ) # Unfreeze all partitions from backup2. node.query(f"ALTER TABLE {TABLE_NAME} UNFREEZE WITH NAME '{backup2}'") @@ -313,16 +499,22 @@ def test_apply_new_settings(cluster): node = cluster.instances[NODE_NAME] create_table(node, TABLE_NAME) - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}") + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}" + ) # Force multi-part upload mode. replace_config( CONFIG_PATH, "33554432", - "4096") + "4096", + ) node.query("SYSTEM RELOAD CONFIG") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096, -1)}") + azure_query( + node, + f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096, -1)}", + ) # NOTE: this test takes a couple of minutes when run together with other tests @@ -332,16 +524,25 @@ def test_restart_during_load(cluster): create_table(node, TABLE_NAME) # Force multi-part upload mode. 
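# A minimal sketch of the retry pattern that `azure_query` (defined earlier in this file) applies
# around transient Azure transport failures; the function name, parameter names, and generalized
# error list are illustrative, not code from this commit. As the note above `azure_query` says,
# retried INSERTs are not guaranteed to be duplicate-free, so this is only suitable for tests
# where an occasional duplicate is acceptable.
def query_with_retries(node, query, retriable_errors, try_num=3):
    last_error = None
    for _ in range(try_num):
        try:
            return node.query(query)
        except Exception as ex:
            # Re-raise immediately on errors that are not known to be transient.
            if not any(err in str(ex) for err in retriable_errors):
                raise
            last_error = ex
    raise last_error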
- replace_config(CONFIG_PATH, "false", "") - - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}") - azure_query(node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-05', 4096, -1)}") + replace_config( + CONFIG_PATH, "false", "" + ) + azure_query( + node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}" + ) + azure_query( + node, + f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-05', 4096, -1)}", + ) def read(): for ii in range(0, 5): logging.info(f"Executing {ii} query") - assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)" + assert ( + azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") + == "(0)" + ) logging.info(f"Query {ii} executed") time.sleep(0.2) @@ -368,5 +569,8 @@ def test_restart_during_load(cluster): def test_big_insert(cluster): node = cluster.instances[NODE_NAME] create_table(node, TABLE_NAME) - azure_query(node, f"INSERT INTO {TABLE_NAME} select '2020-01-03', number, toString(number) from numbers(5000000)") + azure_query( + node, + f"INSERT INTO {TABLE_NAME} select '2020-01-03', number, toString(number) from numbers(5000000)", + ) assert int(azure_query(node, f"SELECT count() FROM {TABLE_NAME}")) == 5000000 diff --git a/tests/integration/test_merge_tree_empty_parts/test.py b/tests/integration/test_merge_tree_empty_parts/test.py index bc2679d4c92..7ca275e96de 100644 --- a/tests/integration/test_merge_tree_empty_parts/test.py +++ b/tests/integration/test_merge_tree_empty_parts/test.py @@ -5,7 +5,11 @@ from helpers.test_tools import assert_eq_with_retry cluster = helpers.cluster.ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/cleanup_thread.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml", "configs/cleanup_thread.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -17,22 +21,38 @@ def started_cluster(): finally: cluster.shutdown() + def test_empty_parts_alter_delete(started_cluster): - node1.query("CREATE TABLE empty_parts_delete (d Date, key UInt64, value String) \ - ENGINE = ReplicatedMergeTree('/clickhouse/tables/empty_parts_delete', 'r1', d, key, 8192)") + node1.query( + "CREATE TABLE empty_parts_delete (d Date, key UInt64, value String) \ + ENGINE = ReplicatedMergeTree('/clickhouse/tables/empty_parts_delete', 'r1', d, key, 8192)" + ) node1.query("INSERT INTO empty_parts_delete VALUES (toDate('2020-10-10'), 1, 'a')") - node1.query("ALTER TABLE empty_parts_delete DELETE WHERE 1 SETTINGS mutations_sync = 2") + node1.query( + "ALTER TABLE empty_parts_delete DELETE WHERE 1 SETTINGS mutations_sync = 2" + ) print(node1.query("SELECT count() FROM empty_parts_delete")) - assert_eq_with_retry(node1, "SELECT count() FROM system.parts WHERE table = 'empty_parts_delete' AND active", "0") + assert_eq_with_retry( + node1, + "SELECT count() FROM system.parts WHERE table = 'empty_parts_delete' AND active", + "0", + ) + def test_empty_parts_summing(started_cluster): - node1.query("CREATE TABLE empty_parts_summing (d Date, key UInt64, value Int64) \ - ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/empty_parts_summing', 'r1', d, key, 8192)") + node1.query( + "CREATE TABLE empty_parts_summing (d Date, key UInt64, value Int64) \ + ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/empty_parts_summing', 'r1', d, key, 8192)" + ) node1.query("INSERT INTO empty_parts_summing VALUES 
(toDate('2020-10-10'), 1, 1)") node1.query("INSERT INTO empty_parts_summing VALUES (toDate('2020-10-10'), 1, -1)") node1.query("OPTIMIZE TABLE empty_parts_summing FINAL") - assert_eq_with_retry(node1, "SELECT count() FROM system.parts WHERE table = 'empty_parts_summing' AND active", "0") + assert_eq_with_retry( + node1, + "SELECT count() FROM system.parts WHERE table = 'empty_parts_summing' AND active", + "0", + ) diff --git a/tests/integration/test_merge_tree_hdfs/test.py b/tests/integration/test_merge_tree_hdfs/test.py index d6e3315e45d..132e1027586 100644 --- a/tests/integration/test_merge_tree_hdfs/test.py +++ b/tests/integration/test_merge_tree_hdfs/test.py @@ -9,7 +9,9 @@ from helpers.utility import generate_values from pyhdfs import HdfsClient SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/node/configs/config.d/storage_conf.xml') +CONFIG_PATH = os.path.join( + SCRIPT_DIR, "./_instances/node/configs/config.d/storage_conf.xml" +) def create_table(cluster, table_name, additional_settings=None): @@ -26,7 +28,9 @@ def create_table(cluster, table_name, additional_settings=None): storage_policy='hdfs', old_parts_lifetime=0, index_granularity=512 - """.format(table_name) + """.format( + table_name + ) if additional_settings: create_table_statement += "," @@ -45,13 +49,15 @@ FILES_OVERHEAD_PER_PART_COMPACT = 10 + 1 def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", main_configs=["configs/config.d/storage_conf.xml"], with_hdfs=True) + cluster.add_instance( + "node", main_configs=["configs/config.d/storage_conf.xml"], with_hdfs=True + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") fs = HdfsClient(hosts=cluster.hdfs_ip) - fs.mkdirs('/clickhouse') + fs.mkdirs("/clickhouse") logging.info("Created HDFS directory") @@ -63,12 +69,12 @@ def cluster(): def wait_for_delete_hdfs_objects(cluster, expected, num_tries=30): fs = HdfsClient(hosts=cluster.hdfs_ip) while num_tries > 0: - num_hdfs_objects = len(fs.listdir('/clickhouse')) + num_hdfs_objects = len(fs.listdir("/clickhouse")) if num_hdfs_objects == expected: break num_tries -= 1 time.sleep(1) - assert(len(fs.listdir('/clickhouse')) == expected) + assert len(fs.listdir("/clickhouse")) == expected @pytest.fixture(autouse=True) @@ -76,46 +82,63 @@ def drop_table(cluster): node = cluster.instances["node"] fs = HdfsClient(hosts=cluster.hdfs_ip) - hdfs_objects = fs.listdir('/clickhouse') - print('Number of hdfs objects to delete:', len(hdfs_objects), sep=' ') + hdfs_objects = fs.listdir("/clickhouse") + print("Number of hdfs objects to delete:", len(hdfs_objects), sep=" ") node.query("DROP TABLE IF EXISTS hdfs_test SYNC") try: wait_for_delete_hdfs_objects(cluster, 0) finally: - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") if len(hdfs_objects) == 0: return - print("Manually removing extra objects to prevent tests cascade failing: ", hdfs_objects) + print( + "Manually removing extra objects to prevent tests cascade failing: ", + hdfs_objects, + ) for path in hdfs_objects: fs.delete(path) -@pytest.mark.parametrize("min_rows_for_wide_part,files_per_part", [(0, FILES_OVERHEAD_PER_PART_WIDE), (8192, FILES_OVERHEAD_PER_PART_COMPACT)]) +@pytest.mark.parametrize( + "min_rows_for_wide_part,files_per_part", + [(0, FILES_OVERHEAD_PER_PART_WIDE), (8192, FILES_OVERHEAD_PER_PART_COMPACT)], +) def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part): - 
create_table(cluster, "hdfs_test", additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part)) + create_table( + cluster, + "hdfs_test", + additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part), + ) node = cluster.instances["node"] - values1 = generate_values('2020-01-03', 4096) + values1 = generate_values("2020-01-03", 4096) node.query("INSERT INTO hdfs_test VALUES {}".format(values1)) - assert node.query("SELECT * FROM hdfs_test order by dt, id FORMAT Values") == values1 + assert ( + node.query("SELECT * FROM hdfs_test order by dt, id FORMAT Values") == values1 + ) fs = HdfsClient(hosts=cluster.hdfs_ip) - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") print(hdfs_objects) assert len(hdfs_objects) == FILES_OVERHEAD + files_per_part - values2 = generate_values('2020-01-04', 4096) + values2 = generate_values("2020-01-04", 4096) node.query("INSERT INTO hdfs_test VALUES {}".format(values2)) - assert node.query("SELECT * FROM hdfs_test ORDER BY dt, id FORMAT Values") == values1 + "," + values2 + assert ( + node.query("SELECT * FROM hdfs_test ORDER BY dt, id FORMAT Values") + == values1 + "," + values2 + ) - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + files_per_part * 2 - assert node.query("SELECT count(*) FROM hdfs_test where id = 1 FORMAT Values") == "(2)" + assert ( + node.query("SELECT count(*) FROM hdfs_test where id = 1 FORMAT Values") == "(2)" + ) def test_alter_table_columns(cluster): @@ -124,27 +147,47 @@ def test_alter_table_columns(cluster): node = cluster.instances["node"] fs = HdfsClient(hosts=cluster.hdfs_ip) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096, -1))) + node.query( + "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO hdfs_test VALUES {}".format( + generate_values("2020-01-03", 4096, -1) + ) + ) node.query("ALTER TABLE hdfs_test ADD COLUMN col1 UInt64 DEFAULT 1") # To ensure parts have merged node.query("OPTIMIZE TABLE hdfs_test") assert node.query("SELECT sum(col1) FROM hdfs_test FORMAT Values") == "(8192)" - assert node.query("SELECT sum(col1) FROM hdfs_test WHERE id > 0 FORMAT Values") == "(4096)" - wait_for_delete_hdfs_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN) + assert ( + node.query("SELECT sum(col1) FROM hdfs_test WHERE id > 0 FORMAT Values") + == "(4096)" + ) + wait_for_delete_hdfs_objects( + cluster, + FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN, + ) - node.query("ALTER TABLE hdfs_test MODIFY COLUMN col1 String", settings={"mutations_sync": 2}) + node.query( + "ALTER TABLE hdfs_test MODIFY COLUMN col1 String", + settings={"mutations_sync": 2}, + ) assert node.query("SELECT distinct(col1) FROM hdfs_test FORMAT Values") == "('1')" # and file with mutation - wait_for_delete_hdfs_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1) + wait_for_delete_hdfs_objects( + cluster, + FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1, + ) node.query("ALTER TABLE hdfs_test DROP COLUMN col1", settings={"mutations_sync": 2}) # and 2 files with mutations - wait_for_delete_hdfs_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2) + wait_for_delete_hdfs_objects( + 
cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2 + ) def test_attach_detach_partition(cluster): @@ -153,36 +196,43 @@ def test_attach_detach_partition(cluster): node = cluster.instances["node"] fs = HdfsClient(hosts=cluster.hdfs_ip) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096))) + node.query( + "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-04", 4096)) + ) assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 node.query("ALTER TABLE hdfs_test DETACH PARTITION '2020-01-03'") assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(4096)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 node.query("ALTER TABLE hdfs_test ATTACH PARTITION '2020-01-03'") assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 node.query("ALTER TABLE hdfs_test DROP PARTITION '2020-01-03'") assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(4096)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE node.query("ALTER TABLE hdfs_test DETACH PARTITION '2020-01-04'") - node.query("ALTER TABLE hdfs_test DROP DETACHED PARTITION '2020-01-04'", settings={"allow_drop_detached": 1}) + node.query( + "ALTER TABLE hdfs_test DROP DETACHED PARTITION '2020-01-04'", + settings={"allow_drop_detached": 1}, + ) assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(0)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD @@ -192,23 +242,27 @@ def test_move_partition_to_another_disk(cluster): node = cluster.instances["node"] fs = HdfsClient(hosts=cluster.hdfs_ip) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096))) + node.query( + "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-04", 4096)) + ) assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 node.query("ALTER TABLE hdfs_test MOVE PARTITION '2020-01-04' TO DISK 'hdd'") assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE node.query("ALTER TABLE hdfs_test MOVE PARTITION '2020-01-04' TO DISK 'hdfs'") assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)" - hdfs_objects = 
fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 @@ -218,13 +272,17 @@ def test_table_manipulations(cluster): node = cluster.instances["node"] fs = HdfsClient(hosts=cluster.hdfs_ip) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096))) + node.query( + "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-04", 4096)) + ) node.query("RENAME TABLE hdfs_test TO hdfs_renamed") assert node.query("SELECT count(*) FROM hdfs_renamed FORMAT Values") == "(8192)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 node.query("RENAME TABLE hdfs_renamed TO hdfs_test") @@ -234,13 +292,13 @@ def test_table_manipulations(cluster): node.query("ATTACH TABLE hdfs_test") assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 node.query("TRUNCATE TABLE hdfs_test") assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(0)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD @@ -250,14 +308,26 @@ def test_move_replace_partition_to_another_table(cluster): node = cluster.instances["node"] fs = HdfsClient(hosts=cluster.hdfs_ip) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096))) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-05', 4096, -1))) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-06', 4096, -1))) + node.query( + "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-04", 4096)) + ) + node.query( + "INSERT INTO hdfs_test VALUES {}".format( + generate_values("2020-01-05", 4096, -1) + ) + ) + node.query( + "INSERT INTO hdfs_test VALUES {}".format( + generate_values("2020-01-06", 4096, -1) + ) + ) assert node.query("SELECT sum(id) FROM hdfs_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(16384)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 create_table(cluster, "hdfs_clone") @@ -270,16 +340,22 @@ def test_move_replace_partition_to_another_table(cluster): assert node.query("SELECT count(*) FROM hdfs_clone FORMAT Values") == "(8192)" # Number of objects in HDFS should be unchanged. - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4 # Add new partitions to source table, but with different values and replace them from copied table. 
- node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096, -1))) - node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-05', 4096))) + node.query( + "INSERT INTO hdfs_test VALUES {}".format( + generate_values("2020-01-03", 4096, -1) + ) + ) + node.query( + "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-05", 4096)) + ) assert node.query("SELECT sum(id) FROM hdfs_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(16384)" - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 6 node.query("ALTER TABLE hdfs_test REPLACE PARTITION '2020-01-03' FROM hdfs_clone") @@ -291,13 +367,14 @@ def test_move_replace_partition_to_another_table(cluster): # Wait for outdated partitions deletion. print(1) - wait_for_delete_hdfs_objects(cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4) + wait_for_delete_hdfs_objects( + cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4 + ) node.query("DROP TABLE hdfs_clone NO DELAY") assert node.query("SELECT sum(id) FROM hdfs_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(16384)" # Data should remain in hdfs - hdfs_objects = fs.listdir('/clickhouse') + hdfs_objects = fs.listdir("/clickhouse") assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 - diff --git a/tests/integration/test_merge_tree_s3/s3_mocks/unstable_proxy.py b/tests/integration/test_merge_tree_s3/s3_mocks/unstable_proxy.py index 1f8fcc4bbfd..c4a1e5ea1f5 100644 --- a/tests/integration/test_merge_tree_s3/s3_mocks/unstable_proxy.py +++ b/tests/integration/test_merge_tree_s3/s3_mocks/unstable_proxy.py @@ -11,13 +11,19 @@ random.seed("Unstable proxy/1.0") def request(command, url, headers={}, data=None): - """ Mini-requests. 
""" + """Mini-requests.""" + class Dummy: pass parts = urllib.parse.urlparse(url) c = http.client.HTTPConnection(parts.hostname, parts.port) - c.request(command, urllib.parse.urlunparse(parts._replace(scheme='', netloc='')), headers=headers, body=data) + c.request( + command, + urllib.parse.urlunparse(parts._replace(scheme="", netloc="")), + headers=headers, + body=data, + ) r = c.getresponse() result = Dummy() result.status_code = r.status @@ -45,13 +51,18 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): def do_HEAD(self): content_length = self.headers.get("Content-Length") data = self.rfile.read(int(content_length)) if content_length else None - r = request(self.command, f"http://{UPSTREAM_HOST}{self.path}", headers=self.headers, data=data) + r = request( + self.command, + f"http://{UPSTREAM_HOST}{self.path}", + headers=self.headers, + data=data, + ) self.send_response(r.status_code) for k, v in r.headers.items(): self.send_header(k, v) self.end_headers() - if random.random() < 0.25 and len(r.content) > 1024*1024: - r.content = r.content[:len(r.content)//2] + if random.random() < 0.25 and len(r.content) > 1024 * 1024: + r.content = r.content[: len(r.content) // 2] self.wfile.write(r.content) self.wfile.close() diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py index 04981523432..b7ef3ce3ef2 100644 --- a/tests/integration/test_merge_tree_s3/test.py +++ b/tests/integration/test_merge_tree_s3/test.py @@ -8,17 +8,24 @@ from helpers.utility import generate_values, replace_config, SafeThread SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/node/configs/config.d/storage_conf.xml'.format(get_instances_dir())) +CONFIG_PATH = os.path.join( + SCRIPT_DIR, + "./{}/node/configs/config.d/storage_conf.xml".format(get_instances_dir()), +) @pytest.fixture(scope="module") def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", - main_configs=["configs/config.d/storage_conf.xml", - "configs/config.d/bg_processing_pool_conf.xml"], - with_minio=True) + cluster.add_instance( + "node", + main_configs=[ + "configs/config.d/storage_conf.xml", + "configs/config.d/bg_processing_pool_conf.xml", + ], + with_minio=True, + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -39,7 +46,7 @@ def create_table(node, table_name, **additional_settings): settings = { "storage_policy": "s3", "old_parts_lifetime": 0, - "index_granularity": 512 + "index_granularity": 512, } settings.update(additional_settings) @@ -60,28 +67,39 @@ def create_table(node, table_name, **additional_settings): def run_s3_mocks(cluster): logging.info("Starting s3 mocks") - mocks = ( - ("unstable_proxy.py", "resolver", "8081"), - ) + mocks = (("unstable_proxy.py", "resolver", "8081"),) for mock_filename, container, port in mocks: container_id = cluster.get_container_id(container) current_dir = os.path.dirname(__file__) - cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_mocks", mock_filename), mock_filename) - cluster.exec_in_container(container_id, ["python", mock_filename, port], detach=True) + cluster.copy_file_to_container( + container_id, + os.path.join(current_dir, "s3_mocks", mock_filename), + mock_filename, + ) + cluster.exec_in_container( + container_id, ["python", mock_filename, port], detach=True + ) # Wait for S3 mocks to start for mock_filename, container, port in mocks: num_attempts = 100 for attempt in range(num_attempts): - 
ping_response = cluster.exec_in_container(cluster.get_container_id(container), - ["curl", "-s", f"http://localhost:{port}/"], nothrow=True) + ping_response = cluster.exec_in_container( + cluster.get_container_id(container), + ["curl", "-s", f"http://localhost:{port}/"], + nothrow=True, + ) if ping_response != "OK": if attempt == num_attempts - 1: - assert ping_response == "OK", f'Expected "OK", but got "{ping_response}"' + assert ( + ping_response == "OK" + ), f'Expected "OK", but got "{ping_response}"' else: time.sleep(1) else: - logging.debug(f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}") + logging.debug( + f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}" + ) break logging.info("S3 mocks started") @@ -90,11 +108,11 @@ def run_s3_mocks(cluster): def wait_for_delete_s3_objects(cluster, expected, timeout=30): minio = cluster.minio_client while timeout > 0: - if len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == expected: + if len(list(minio.list_objects(cluster.minio_bucket, "data/"))) == expected: return timeout -= 1 time.sleep(1) - assert(len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == expected) + assert len(list(minio.list_objects(cluster.minio_bucket, "data/"))) == expected @pytest.fixture(autouse=True) @@ -110,7 +128,7 @@ def drop_table(cluster, node_name): wait_for_delete_s3_objects(cluster, 0) finally: # Remove extra objects to prevent tests cascade failing - for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')): + for obj in list(minio.list_objects(cluster.minio_bucket, "data/")): minio.remove_object(cluster.minio_bucket, obj.object_name) @@ -118,59 +136,86 @@ def drop_table(cluster, node_name): "min_rows_for_wide_part,files_per_part,node_name", [ (0, FILES_OVERHEAD_PER_PART_WIDE, "node"), - (8192, FILES_OVERHEAD_PER_PART_COMPACT, "node") - ] + (8192, FILES_OVERHEAD_PER_PART_COMPACT, "node"), + ], ) -def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part, node_name): +def test_simple_insert_select( + cluster, min_rows_for_wide_part, files_per_part, node_name +): node = cluster.instances[node_name] create_table(node, "s3_test", min_rows_for_wide_part=min_rows_for_wide_part) minio = cluster.minio_client - values1 = generate_values('2020-01-03', 4096) + values1 = generate_values("2020-01-03", 4096) node.query("INSERT INTO s3_test VALUES {}".format(values1)) assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values") == values1 - assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + files_per_part + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + files_per_part + ) - values2 = generate_values('2020-01-04', 4096) + values2 = generate_values("2020-01-04", 4096) node.query("INSERT INTO s3_test VALUES {}".format(values2)) - assert node.query("SELECT * FROM s3_test ORDER BY dt, id FORMAT Values") == values1 + "," + values2 - assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + files_per_part * 2 + assert ( + node.query("SELECT * FROM s3_test ORDER BY dt, id FORMAT Values") + == values1 + "," + values2 + ) + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + files_per_part * 2 + ) - assert node.query("SELECT count(*) FROM s3_test where id = 1 FORMAT Values") == "(2)" + assert ( + node.query("SELECT count(*) FROM s3_test where id = 1 FORMAT Values") == "(2)" + ) -@pytest.mark.parametrize( - 
"merge_vertical,node_name", [ - (True, "node"), - (False, "node") -]) +@pytest.mark.parametrize("merge_vertical,node_name", [(True, "node"), (False, "node")]) def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name): settings = {} if merge_vertical: - settings['vertical_merge_algorithm_min_rows_to_activate'] = 0 - settings['vertical_merge_algorithm_min_columns_to_activate'] = 0 + settings["vertical_merge_algorithm_min_rows_to_activate"] = 0 + settings["vertical_merge_algorithm_min_columns_to_activate"] = 0 node = cluster.instances[node_name] create_table(node, "s3_test", **settings) minio = cluster.minio_client node.query("SYSTEM STOP MERGES s3_test") - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 1024))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 2048))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 1024, -1))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 2048, -1))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1))) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 1024)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 2048)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 1024, -1)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 2048, -1)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096, -1)) + ) assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)" - assert node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)" - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD_PER_PART_WIDE * 6 + FILES_OVERHEAD + assert ( + node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)" + ) + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD_PER_PART_WIDE * 6 + FILES_OVERHEAD + ) node.query("SYSTEM START MERGES s3_test") # Wait for merges and old parts deletion for attempt in range(0, 10): - parts_count = node.query("SELECT COUNT(*) FROM system.parts WHERE table = 's3_test' FORMAT Values") + parts_count = node.query( + "SELECT COUNT(*) FROM system.parts WHERE table = 's3_test' FORMAT Values" + ) if parts_count == "(1)": break @@ -180,7 +225,9 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name): time.sleep(1) assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)" - assert node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)" + assert ( + node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)" + ) wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD) @@ -190,27 +237,44 @@ def test_alter_table_columns(cluster, node_name): create_table(node, "s3_test") minio = cluster.minio_client - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1))) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) 
+ node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096, -1)) + ) node.query("ALTER TABLE s3_test ADD COLUMN col1 UInt64 DEFAULT 1") # To ensure parts have merged node.query("OPTIMIZE TABLE s3_test") assert node.query("SELECT sum(col1) FROM s3_test FORMAT Values") == "(8192)" - assert node.query("SELECT sum(col1) FROM s3_test WHERE id > 0 FORMAT Values") == "(4096)" - wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN) + assert ( + node.query("SELECT sum(col1) FROM s3_test WHERE id > 0 FORMAT Values") + == "(4096)" + ) + wait_for_delete_s3_objects( + cluster, + FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN, + ) - node.query("ALTER TABLE s3_test MODIFY COLUMN col1 String", settings={"mutations_sync": 2}) + node.query( + "ALTER TABLE s3_test MODIFY COLUMN col1 String", settings={"mutations_sync": 2} + ) assert node.query("SELECT distinct(col1) FROM s3_test FORMAT Values") == "('1')" # and file with mutation - wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1) + wait_for_delete_s3_objects( + cluster, + FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1, + ) node.query("ALTER TABLE s3_test DROP COLUMN col1", settings={"mutations_sync": 2}) # and 2 files with mutations - wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2) + wait_for_delete_s3_objects( + cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2 + ) @pytest.mark.parametrize("node_name", ["node"]) @@ -219,30 +283,48 @@ def test_attach_detach_partition(cluster, node_name): create_table(node, "s3_test") minio = cluster.minio_client - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096))) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096)) + ) assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + ) node.query("ALTER TABLE s3_test DETACH PARTITION '2020-01-03'") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)" - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + ) node.query("ALTER TABLE s3_test ATTACH PARTITION '2020-01-03'") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + ) node.query("ALTER TABLE s3_test DROP PARTITION '2020-01-03'") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)" - assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + assert ( + 
len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + ) node.query("ALTER TABLE s3_test DETACH PARTITION '2020-01-04'") - node.query("ALTER TABLE s3_test DROP DETACHED PARTITION '2020-01-04'", settings={"allow_drop_detached": 1}) + node.query( + "ALTER TABLE s3_test DROP DETACHED PARTITION '2020-01-04'", + settings={"allow_drop_detached": 1}, + ) assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)" - assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) == FILES_OVERHEAD + ) @pytest.mark.parametrize("node_name", ["node"]) @@ -251,20 +333,31 @@ def test_move_partition_to_another_disk(cluster, node_name): create_table(node, "s3_test") minio = cluster.minio_client - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096))) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096)) + ) assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + ) node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 'hdd'") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" - assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + ) node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 's3'") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + ) @pytest.mark.parametrize("node_name", ["node"]) @@ -273,13 +366,19 @@ def test_table_manipulations(cluster, node_name): create_table(node, "s3_test") minio = cluster.minio_client - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096))) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096)) + ) node.query("RENAME TABLE s3_test TO s3_renamed") assert node.query("SELECT count(*) FROM s3_renamed FORMAT Values") == "(8192)" - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + ) node.query("RENAME TABLE s3_renamed TO s3_test") assert node.query("CHECK TABLE s3_test FORMAT Values") == "(1)" @@ -287,12 +386,16 @@ def test_table_manipulations(cluster, node_name): node.query("DETACH TABLE s3_test") node.query("ATTACH 
TABLE s3_test") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + ) node.query("TRUNCATE TABLE s3_test") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)" - assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) == FILES_OVERHEAD + ) @pytest.mark.parametrize("node_name", ["node"]) @@ -301,14 +404,24 @@ def test_move_replace_partition_to_another_table(cluster, node_name): create_table(node, "s3_test") minio = cluster.minio_client - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-05', 4096, -1))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-06', 4096, -1))) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-05", 4096, -1)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-06", 4096, -1)) + ) assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)" - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 + ) create_table(node, "s3_clone") @@ -319,16 +432,24 @@ def test_move_replace_partition_to_another_table(cluster, node_name): assert node.query("SELECT sum(id) FROM s3_clone FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)" # Number of objects in S3 should be unchanged. - assert len(list( - minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4 + ) # Add new partitions to source table, but with different values and replace them from copied table. 
- node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-05', 4096))) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096, -1)) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-05", 4096)) + ) assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)" - assert len(list( - minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 6 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 6 + ) node.query("ALTER TABLE s3_test REPLACE PARTITION '2020-01-03' FROM s3_clone") node.query("ALTER TABLE s3_test REPLACE PARTITION '2020-01-05' FROM s3_clone") @@ -338,26 +459,32 @@ def test_move_replace_partition_to_another_table(cluster, node_name): assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)" # Wait for outdated partitions deletion. - wait_for_delete_s3_objects(cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4) + wait_for_delete_s3_objects( + cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4 + ) node.query("DROP TABLE s3_clone NO DELAY") assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)" # Data should remain in S3 - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 + ) node.query("ALTER TABLE s3_test FREEZE") # Number S3 objects should be unchanged. - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 + ) node.query("DROP TABLE s3_test NO DELAY") # Backup data should remain in S3. wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE * 4) - for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')): + for obj in list(minio.list_objects(cluster.minio_bucket, "data/")): minio.remove_object(cluster.minio_bucket, obj.object_name) @@ -367,23 +494,32 @@ def test_freeze_unfreeze(cluster, node_name): create_table(node, "s3_test") minio = cluster.minio_client - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096))) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) node.query("ALTER TABLE s3_test FREEZE WITH NAME 'backup1'") - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096))) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096)) + ) node.query("ALTER TABLE s3_test FREEZE WITH NAME 'backup2'") node.query("TRUNCATE TABLE s3_test") - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) + == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + ) # Unfreeze single partition from backup1. 
- node.query("ALTER TABLE s3_test UNFREEZE PARTITION '2020-01-03' WITH NAME 'backup1'") + node.query( + "ALTER TABLE s3_test UNFREEZE PARTITION '2020-01-03' WITH NAME 'backup1'" + ) # Unfreeze all partitions from backup2. node.query("ALTER TABLE s3_test UNFREEZE WITH NAME 'backup2'") # Data should be removed from S3. - assert len( - list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + assert ( + len(list(minio.list_objects(cluster.minio_bucket, "data/"))) == FILES_OVERHEAD + ) @pytest.mark.parametrize("node_name", ["node"]) @@ -393,21 +529,31 @@ def test_s3_disk_apply_new_settings(cluster, node_name): def get_s3_requests(): node.query("SYSTEM FLUSH LOGS") - return int(node.query("SELECT value FROM system.events WHERE event='S3WriteRequestsCount'")) + return int( + node.query( + "SELECT value FROM system.events WHERE event='S3WriteRequestsCount'" + ) + ) s3_requests_before = get_s3_requests() - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096))) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) s3_requests_to_write_partition = get_s3_requests() - s3_requests_before # Force multi-part upload mode. - replace_config(CONFIG_PATH, + replace_config( + CONFIG_PATH, "33554432", - "0") + "0", + ) node.query("SYSTEM RELOAD CONFIG") s3_requests_before = get_s3_requests() - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096, -1))) + node.query( + "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096, -1)) + ) # There should be 3 times more S3 requests because multi-part upload mode uses 3 requests to upload object. assert get_s3_requests() - s3_requests_before == s3_requests_to_write_partition * 3 @@ -418,8 +564,16 @@ def test_s3_disk_restart_during_load(cluster, node_name): node = cluster.instances[node_name] create_table(node, "s3_test") - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 1024 * 1024))) - node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-05', 1024 * 1024, -1))) + node.query( + "INSERT INTO s3_test VALUES {}".format( + generate_values("2020-01-04", 1024 * 1024) + ) + ) + node.query( + "INSERT INTO s3_test VALUES {}".format( + generate_values("2020-01-05", 1024 * 1024, -1) + ) + ) def read(): for ii in range(0, 20): @@ -451,21 +605,29 @@ def test_s3_disk_restart_during_load(cluster, node_name): @pytest.mark.parametrize("node_name", ["node"]) def test_s3_disk_reads_on_unstable_connection(cluster, node_name): node = cluster.instances[node_name] - create_table(node, "s3_test", storage_policy='unstable_s3') - node.query("INSERT INTO s3_test SELECT today(), *, toString(*) FROM system.numbers LIMIT 9000000") + create_table(node, "s3_test", storage_policy="unstable_s3") + node.query( + "INSERT INTO s3_test SELECT today(), *, toString(*) FROM system.numbers LIMIT 9000000" + ) for i in range(30): print(f"Read sequence {i}") - assert node.query("SELECT sum(id) FROM s3_test").splitlines() == ["40499995500000"] + assert node.query("SELECT sum(id) FROM s3_test").splitlines() == [ + "40499995500000" + ] @pytest.mark.parametrize("node_name", ["node"]) def test_lazy_seek_optimization_for_async_read(cluster, node_name): node = cluster.instances[node_name] node.query("DROP TABLE IF EXISTS s3_test NO DELAY") - node.query("CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3';") - node.query("INSERT INTO s3_test SELECT * FROM 
generateRandom('key UInt32, value String') LIMIT 10000000") + node.query( + "CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3';" + ) + node.query( + "INSERT INTO s3_test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000000" + ) node.query("SELECT * FROM s3_test WHERE value LIKE '%abc%' ORDER BY value LIMIT 10") node.query("DROP TABLE IF EXISTS s3_test NO DELAY") minio = cluster.minio_client - for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')): + for obj in list(minio.list_objects(cluster.minio_bucket, "data/")): minio.remove_object(cluster.minio_bucket, obj.object_name) diff --git a/tests/integration/test_merge_tree_s3_failover/s3_endpoint/endpoint.py b/tests/integration/test_merge_tree_s3_failover/s3_endpoint/endpoint.py index 9f5c7b1c8ce..b6567dfebc5 100644 --- a/tests/integration/test_merge_tree_s3_failover/s3_endpoint/endpoint.py +++ b/tests/integration/test_merge_tree_s3_failover/s3_endpoint/endpoint.py @@ -10,73 +10,75 @@ cache = {} mutex = Lock() -@route('/fail_request/<_request_number>') +@route("/fail_request/<_request_number>") def fail_request(_request_number): request_number = int(_request_number) if request_number > 0: - cache['request_number'] = request_number + cache["request_number"] = request_number else: - cache.pop('request_number', None) - return 'OK' + cache.pop("request_number", None) + return "OK" -@route('/throttle_request/<_request_number>') +@route("/throttle_request/<_request_number>") def fail_request(_request_number): request_number = int(_request_number) if request_number > 0: - cache['throttle_request_number'] = request_number + cache["throttle_request_number"] = request_number else: - cache.pop('throttle_request_number', None) - return 'OK' + cache.pop("throttle_request_number", None) + return "OK" # Handle for MultipleObjectsDelete. -@route('/<_bucket>', ['POST']) +@route("/<_bucket>", ["POST"]) def delete(_bucket): - response.set_header("Location", "http://minio1:9001/" + _bucket + "?" + request.query_string) + response.set_header( + "Location", "http://minio1:9001/" + _bucket + "?" 
+ request.query_string + ) response.status = 307 - return 'Redirected' + return "Redirected" -@route('/<_bucket>/<_path:path>', ['GET', 'POST', 'PUT', 'DELETE']) +@route("/<_bucket>/<_path:path>", ["GET", "POST", "PUT", "DELETE"]) def server(_bucket, _path): # It's delete query for failed part - if _path.endswith('delete'): - response.set_header("Location", "http://minio1:9001/" + _bucket + '/' + _path) + if _path.endswith("delete"): + response.set_header("Location", "http://minio1:9001/" + _bucket + "/" + _path) response.status = 307 - return 'Redirected' + return "Redirected" mutex.acquire() try: - if cache.get('request_number', None): - request_number = cache.pop('request_number') - 1 + if cache.get("request_number", None): + request_number = cache.pop("request_number") - 1 if request_number > 0: - cache['request_number'] = request_number + cache["request_number"] = request_number else: response.status = 500 - response.content_type = 'text/xml' + response.content_type = "text/xml" return '<?xml version="1.0" encoding="UTF-8"?><Error><Code>ExpectedError</Code><Message>Expected Error</Message><RequestId>txfbd566d03042474888193-00608d7537</RequestId></Error>' - if cache.get('throttle_request_number', None): - request_number = cache.pop('throttle_request_number') - 1 + if cache.get("throttle_request_number", None): + request_number = cache.pop("throttle_request_number") - 1 if request_number > 0: - cache['throttle_request_number'] = request_number + cache["throttle_request_number"] = request_number else: response.status = 429 - response.content_type = 'text/xml' + response.content_type = "text/xml" return '<?xml version="1.0" encoding="UTF-8"?><Error><Code>TooManyRequestsException</Code><Message>Please reduce your request rate.</Message><RequestId>txfbd566d03042474888193-00608d7538</RequestId></Error>' finally: mutex.release() - response.set_header("Location", "http://minio1:9001/" + _bucket + '/' + _path) + response.set_header("Location", "http://minio1:9001/" + _bucket + "/" + _path) response.status = 307 - return 'Redirected' + return "Redirected" -@route('/') +@route("/") def ping(): - return 'OK' + return "OK" -run(host='0.0.0.0', port=8080) +run(host="0.0.0.0", port=8080) diff --git a/tests/integration/test_merge_tree_s3_failover/test.py b/tests/integration/test_merge_tree_s3_failover/test.py index 44e7e0ae5ad..d4c691fdb55 100644 --- a/tests/integration/test_merge_tree_s3_failover/test.py +++ b/tests/integration/test_merge_tree_s3_failover/test.py @@ -11,19 +11,28 @@ from helpers.cluster import ClickHouseCluster # Runs custom python-based S3 endpoint. 
def run_endpoint(cluster): logging.info("Starting custom S3 endpoint") - container_id = cluster.get_container_id('resolver') + container_id = cluster.get_container_id("resolver") current_dir = os.path.dirname(__file__) - cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_endpoint", "endpoint.py"), "endpoint.py") + cluster.copy_file_to_container( + container_id, + os.path.join(current_dir, "s3_endpoint", "endpoint.py"), + "endpoint.py", + ) cluster.exec_in_container(container_id, ["python", "endpoint.py"], detach=True) # Wait for S3 endpoint start num_attempts = 100 for attempt in range(num_attempts): - ping_response = cluster.exec_in_container(cluster.get_container_id('resolver'), - ["curl", "-s", "http://resolver:8080/"], nothrow=True) - if ping_response != 'OK': + ping_response = cluster.exec_in_container( + cluster.get_container_id("resolver"), + ["curl", "-s", "http://resolver:8080/"], + nothrow=True, + ) + if ping_response != "OK": if attempt == num_attempts - 1: - assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response) + assert ping_response == "OK", 'Expected "OK", but got "{}"'.format( + ping_response + ) else: time.sleep(1) else: @@ -33,25 +42,34 @@ def run_endpoint(cluster): def fail_request(cluster, request): - response = cluster.exec_in_container(cluster.get_container_id('resolver'), - ["curl", "-s", "http://resolver:8080/fail_request/{}".format(request)]) - assert response == 'OK', 'Expected "OK", but got "{}"'.format(response) + response = cluster.exec_in_container( + cluster.get_container_id("resolver"), + ["curl", "-s", "http://resolver:8080/fail_request/{}".format(request)], + ) + assert response == "OK", 'Expected "OK", but got "{}"'.format(response) + def throttle_request(cluster, request): - response = cluster.exec_in_container(cluster.get_container_id('resolver'), - ["curl", "-s", "http://resolver:8080/throttle_request/{}".format(request)]) - assert response == 'OK', 'Expected "OK", but got "{}"'.format(response) + response = cluster.exec_in_container( + cluster.get_container_id("resolver"), + ["curl", "-s", "http://resolver:8080/throttle_request/{}".format(request)], + ) + assert response == "OK", 'Expected "OK", but got "{}"'.format(response) @pytest.fixture(scope="module") def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", - main_configs=["configs/config.d/storage_conf.xml", - "configs/config.d/instant_moves.xml", - "configs/config.d/part_log.xml"], - with_minio=True) + cluster.add_instance( + "node", + main_configs=[ + "configs/config.d/storage_conf.xml", + "configs/config.d/instant_moves.xml", + "configs/config.d/part_log.xml", + ], + with_minio=True, + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -72,7 +90,9 @@ def drop_table(cluster): # S3 request will be failed for an appropriate part file write. FILES_PER_PART_BASE = 5 # partition.dat, default_compression_codec.txt, count.txt, columns.txt, checksums.txt -FILES_PER_PART_WIDE = FILES_PER_PART_BASE + 1 + 1 + 3 * 2 # Primary index, MinMax, Mark and data file for column(s) +FILES_PER_PART_WIDE = ( + FILES_PER_PART_BASE + 1 + 1 + 3 * 2 +) # Primary index, MinMax, Mark and data file for column(s) # In debug build there are additional requests (from MergeTreeDataPartWriterWide.cpp:554 due to additional validation). 
FILES_PER_PART_WIDE_DEBUG = 2 # Additional requests to S3 in debug build @@ -85,10 +105,12 @@ FILES_PER_PART_COMPACT_DEBUG = 0 "min_bytes_for_wide_part,request_count,debug_request_count", [ (0, FILES_PER_PART_WIDE, FILES_PER_PART_WIDE_DEBUG), - (1024 * 1024, FILES_PER_PART_COMPACT, FILES_PER_PART_COMPACT_DEBUG) - ] + (1024 * 1024, FILES_PER_PART_COMPACT, FILES_PER_PART_COMPACT_DEBUG), + ], ) -def test_write_failover(cluster, min_bytes_for_wide_part, request_count, debug_request_count): +def test_write_failover( + cluster, min_bytes_for_wide_part, request_count, debug_request_count +): node = cluster.instances["node"] node.query( @@ -101,8 +123,9 @@ def test_write_failover(cluster, min_bytes_for_wide_part, request_count, debug_r ORDER BY id PARTITION BY dt SETTINGS storage_policy='s3', min_bytes_for_wide_part={} - """ - .format(min_bytes_for_wide_part) + """.format( + min_bytes_for_wide_part + ) ) is_debug_mode = False @@ -113,7 +136,9 @@ def test_write_failover(cluster, min_bytes_for_wide_part, request_count, debug_r fail_request(cluster, request + 1) data = "('2020-03-01',0,'data'),('2020-03-01',1,'data')" - positive = request >= (request_count + debug_request_count if is_debug_mode else request_count) + positive = request >= ( + request_count + debug_request_count if is_debug_mode else request_count + ) try: node.query("INSERT INTO s3_failover_test VALUES {}".format(data)) assert positive, "Insert query should be failed, request {}".format(request) @@ -123,17 +148,26 @@ def test_write_failover(cluster, min_bytes_for_wide_part, request_count, debug_r is_debug_mode = True positive = False - assert not positive, "Insert query shouldn't be failed, request {}".format(request) - assert str(e).find("Expected Error") != -1, "Unexpected error {}".format(str(e)) + assert not positive, "Insert query shouldn't be failed, request {}".format( + request + ) + assert str(e).find("Expected Error") != -1, "Unexpected error {}".format( + str(e) + ) if positive: # Disable request failing. fail_request(cluster, 0) - assert node.query("CHECK TABLE s3_failover_test") == '1\n' - assert success_count > 1 or node.query("SELECT * FROM s3_failover_test FORMAT Values") == data + assert node.query("CHECK TABLE s3_failover_test") == "1\n" + assert ( + success_count > 1 + or node.query("SELECT * FROM s3_failover_test FORMAT Values") == data + ) - assert success_count == (1 if is_debug_mode else debug_request_count + 1), "Insert query should be successful at least once" + assert success_count == ( + 1 if is_debug_mode else debug_request_count + 1 + ), "Insert query should be successful at least once" # Check that second data part move is ended successfully if first attempt was failed. @@ -156,15 +190,21 @@ def test_move_failover(cluster): # Fail a request to S3 to break first TTL move. fail_request(cluster, 1) - node.query("INSERT INTO s3_failover_test VALUES (now() - 2, 0, 'data'), (now() - 2, 1, 'data')") + node.query( + "INSERT INTO s3_failover_test VALUES (now() - 2, 0, 'data'), (now() - 2, 1, 'data')" + ) # Wait for part move to S3. 
max_attempts = 10 for attempt in range(max_attempts + 1): - disk = node.query("SELECT disk_name FROM system.parts WHERE table='s3_failover_test' LIMIT 1") + disk = node.query( + "SELECT disk_name FROM system.parts WHERE table='s3_failover_test' LIMIT 1" + ) if disk != "s3\n": if attempt == max_attempts: - assert disk == "s3\n", "Expected move to S3 while part still on disk " + disk + assert disk == "s3\n", ( + "Expected move to S3 while part still on disk " + disk + ) else: time.sleep(1) else: @@ -174,23 +214,33 @@ def test_move_failover(cluster): node.query("SYSTEM FLUSH LOGS") # There should be 2 attempts to move part. - assert node.query(""" + assert ( + node.query( + """ SELECT count(*) FROM system.part_log WHERE event_type='MovePart' AND table='s3_failover_test' - """) == '2\n' + """ + ) + == "2\n" + ) # First attempt should be failed with expected error. - exception = node.query(""" + exception = node.query( + """ SELECT exception FROM system.part_log WHERE event_type='MovePart' AND table='s3_failover_test' AND notEmpty(exception) ORDER BY event_time LIMIT 1 - """) + """ + ) assert exception.find("Expected Error") != -1, exception # Ensure data is not corrupted. - assert node.query("CHECK TABLE s3_failover_test") == '1\n' - assert node.query("SELECT id,data FROM s3_failover_test FORMAT Values") == "(0,'data'),(1,'data')" + assert node.query("CHECK TABLE s3_failover_test") == "1\n" + assert ( + node.query("SELECT id,data FROM s3_failover_test FORMAT Values") + == "(0,'data'),(1,'data')" + ) # Check that throttled request retries and does not cause an error on disk with default `retry_attempts` (>0) @@ -212,6 +262,11 @@ def test_throttle_retry(cluster): throttle_request(cluster, 1) - assert node.query(""" + assert ( + node.query( + """ SELECT * FROM s3_throttle_retry_test - """) == '42\n' + """ + ) + == "42\n" + ) diff --git a/tests/integration/test_merge_tree_s3_restore/test.py b/tests/integration/test_merge_tree_s3_restore/test.py index acbcd8c04cf..6ae63db52ef 100644 --- a/tests/integration/test_merge_tree_s3_restore/test.py +++ b/tests/integration/test_merge_tree_s3_restore/test.py @@ -9,16 +9,24 @@ from helpers.cluster import ClickHouseCluster, get_instances_dir SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -NOT_RESTORABLE_CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/node_not_restorable/configs/config.d/storage_conf_not_restorable.xml'.format(get_instances_dir())) -COMMON_CONFIGS = ["configs/config.d/bg_processing_pool_conf.xml", "configs/config.d/clusters.xml"] +NOT_RESTORABLE_CONFIG_PATH = os.path.join( + SCRIPT_DIR, + "./{}/node_not_restorable/configs/config.d/storage_conf_not_restorable.xml".format( + get_instances_dir() + ), +) +COMMON_CONFIGS = [ + "configs/config.d/bg_processing_pool_conf.xml", + "configs/config.d/clusters.xml", +] def replace_config(old, new): - config = open(NOT_RESTORABLE_CONFIG_PATH, 'r') + config = open(NOT_RESTORABLE_CONFIG_PATH, "r") config_lines = config.readlines() config.close() config_lines = [line.replace(old, new) for line in config_lines] - config = open(NOT_RESTORABLE_CONFIG_PATH, 'w') + config = open(NOT_RESTORABLE_CONFIG_PATH, "w") config.writelines(config_lines) config.close() @@ -28,20 +36,34 @@ def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", - main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"], - macros={"cluster": "node", "replica": "0"}, - with_minio=True, with_zookeeper=True, stay_alive=True) - cluster.add_instance("node_another_bucket", - main_configs=COMMON_CONFIGS 
+ ["configs/config.d/storage_conf_another_bucket.xml"], - macros={"cluster": "node_another_bucket", "replica": "0"}, - with_zookeeper=True, stay_alive=True) - cluster.add_instance("node_another_bucket_path", - main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf_another_bucket_path.xml"], - stay_alive=True) - cluster.add_instance("node_not_restorable", - main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf_not_restorable.xml"], - stay_alive=True) + cluster.add_instance( + "node", + main_configs=COMMON_CONFIGS + ["configs/config.d/storage_conf.xml"], + macros={"cluster": "node", "replica": "0"}, + with_minio=True, + with_zookeeper=True, + stay_alive=True, + ) + cluster.add_instance( + "node_another_bucket", + main_configs=COMMON_CONFIGS + + ["configs/config.d/storage_conf_another_bucket.xml"], + macros={"cluster": "node_another_bucket", "replica": "0"}, + with_zookeeper=True, + stay_alive=True, + ) + cluster.add_instance( + "node_another_bucket_path", + main_configs=COMMON_CONFIGS + + ["configs/config.d/storage_conf_another_bucket_path.xml"], + stay_alive=True, + ) + cluster.add_instance( + "node_not_restorable", + main_configs=COMMON_CONFIGS + + ["configs/config.d/storage_conf_not_restorable.xml"], + stay_alive=True, + ) logging.info("Starting cluster...") cluster.start() @@ -54,7 +76,7 @@ def cluster(): def random_string(length): letters = string.ascii_letters - return ''.join(random.choice(letters) for i in range(length)) + return "".join(random.choice(letters) for i in range(length)) def generate_values(date_str, count, sign=1): @@ -63,8 +85,14 @@ def generate_values(date_str, count, sign=1): return ",".join(["('{}',{},'{}',{})".format(x, y, z, 0) for x, y, z in data]) -def create_table(node, table_name, attach=False, replicated=False, db_atomic=False, uuid=""): - node.query("CREATE DATABASE IF NOT EXISTS s3 ENGINE = {engine}".format(engine="Atomic" if db_atomic else "Ordinary")) +def create_table( + node, table_name, attach=False, replicated=False, db_atomic=False, uuid="" +): + node.query( + "CREATE DATABASE IF NOT EXISTS s3 ENGINE = {engine}".format( + engine="Atomic" if db_atomic else "Ordinary" + ) + ) create_table_statement = """ {create} TABLE s3.{table_name} {uuid} {on_cluster} ( @@ -80,11 +108,15 @@ def create_table(node, table_name, attach=False, replicated=False, db_atomic=Fal storage_policy='s3', old_parts_lifetime=600, index_granularity=512 - """.format(create="ATTACH" if attach else "CREATE", - table_name=table_name, - uuid="UUID '{uuid}'".format(uuid=uuid) if db_atomic and uuid else "", - on_cluster="ON CLUSTER '{}'".format(node.name) if replicated else "", - engine="ReplicatedMergeTree('/clickhouse/tables/{cluster}/test', '{replica}')" if replicated else "MergeTree()") + """.format( + create="ATTACH" if attach else "CREATE", + table_name=table_name, + uuid="UUID '{uuid}'".format(uuid=uuid) if db_atomic and uuid else "", + on_cluster="ON CLUSTER '{}'".format(node.name) if replicated else "", + engine="ReplicatedMergeTree('/clickhouse/tables/{cluster}/test', '{replica}')" + if replicated + else "MergeTree()", + ) node.query(create_table_statement) @@ -98,37 +130,68 @@ def purge_s3(cluster, bucket): def drop_s3_metadata(node): - node.exec_in_container(['bash', '-c', 'rm -rf /var/lib/clickhouse/disks/s3/*'], user='root') + node.exec_in_container( + ["bash", "-c", "rm -rf /var/lib/clickhouse/disks/s3/*"], user="root" + ) def drop_shadow_information(node): - node.exec_in_container(['bash', '-c', 'rm -rf /var/lib/clickhouse/shadow/*'], user='root') + 
node.exec_in_container( + ["bash", "-c", "rm -rf /var/lib/clickhouse/shadow/*"], user="root" + ) def create_restore_file(node, revision=None, bucket=None, path=None, detached=None): - node.exec_in_container(['bash', '-c', 'mkdir -p /var/lib/clickhouse/disks/s3/'], user='root') - node.exec_in_container(['bash', '-c', 'touch /var/lib/clickhouse/disks/s3/restore'], user='root') + node.exec_in_container( + ["bash", "-c", "mkdir -p /var/lib/clickhouse/disks/s3/"], user="root" + ) + node.exec_in_container( + ["bash", "-c", "touch /var/lib/clickhouse/disks/s3/restore"], user="root" + ) add_restore_option = 'echo -en "{}={}\n" >> /var/lib/clickhouse/disks/s3/restore' if revision: - node.exec_in_container(['bash', '-c', add_restore_option.format('revision', revision)], user='root') + node.exec_in_container( + ["bash", "-c", add_restore_option.format("revision", revision)], user="root" + ) if bucket: - node.exec_in_container(['bash', '-c', add_restore_option.format('source_bucket', bucket)], user='root') + node.exec_in_container( + ["bash", "-c", add_restore_option.format("source_bucket", bucket)], + user="root", + ) if path: - node.exec_in_container(['bash', '-c', add_restore_option.format('source_path', path)], user='root') + node.exec_in_container( + ["bash", "-c", add_restore_option.format("source_path", path)], user="root" + ) if detached: - node.exec_in_container(['bash', '-c', add_restore_option.format('detached', 'true')], user='root') + node.exec_in_container( + ["bash", "-c", add_restore_option.format("detached", "true")], user="root" + ) def get_revision_counter(node, backup_number): - return int(node.exec_in_container( - ['bash', '-c', 'cat /var/lib/clickhouse/disks/s3/shadow/{}/revision.txt'.format(backup_number)], user='root')) + return int( + node.exec_in_container( + [ + "bash", + "-c", + "cat /var/lib/clickhouse/disks/s3/shadow/{}/revision.txt".format( + backup_number + ), + ], + user="root", + ) + ) def get_table_uuid(node, db_atomic, table): uuid = "" if db_atomic: - uuid = node.query("SELECT uuid FROM system.tables WHERE database='s3' AND table='{}' FORMAT TabSeparated".format(table)).strip() + uuid = node.query( + "SELECT uuid FROM system.tables WHERE database='s3' AND table='{}' FORMAT TabSeparated".format( + table + ) + ).strip() return uuid @@ -136,7 +199,12 @@ def get_table_uuid(node, db_atomic, table): def drop_table(cluster): yield - node_names = ["node", "node_another_bucket", "node_another_bucket_path", "node_not_restorable"] + node_names = [ + "node", + "node_another_bucket", + "node_another_bucket_path", + "node_not_restorable", + ] for node_name in node_names: node = cluster.instances[node_name] @@ -151,21 +219,25 @@ def drop_table(cluster): purge_s3(cluster, bucket) -@pytest.mark.parametrize( - "replicated", [False, True] -) -@pytest.mark.parametrize( - "db_atomic", [False, True] -) +@pytest.mark.parametrize("replicated", [False, True]) +@pytest.mark.parametrize("db_atomic", [False, True]) def test_full_restore(cluster, replicated, db_atomic): node = cluster.instances["node"] create_table(node, "test", attach=False, replicated=replicated, db_atomic=db_atomic) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-04', 4096, -1))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-05', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-05', 4096, -1))) + node.query( + "INSERT INTO s3.test VALUES 
{}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-04", 4096, -1)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-05", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-05", 4096, -1)) + ) node.query("DETACH TABLE s3.test") drop_s3_metadata(node) @@ -173,66 +245,94 @@ def test_full_restore(cluster, replicated, db_atomic): node.query("SYSTEM RESTART DISK s3") node.query("ATTACH TABLE s3.test") - assert node.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 4) + assert node.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format( + 4096 * 4 + ) assert node.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) -@pytest.mark.parametrize( - "db_atomic", [False, True] -) +@pytest.mark.parametrize("db_atomic", [False, True]) def test_restore_another_bucket_path(cluster, db_atomic): node = cluster.instances["node"] create_table(node, "test", db_atomic=db_atomic) uuid = get_table_uuid(node, db_atomic, "test") - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-04', 4096, -1))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-05', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-05', 4096, -1))) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-04", 4096, -1)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-05", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-05", 4096, -1)) + ) # To ensure parts have merged node.query("OPTIMIZE TABLE s3.test") - assert node.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 4) + assert node.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format( + 4096 * 4 + ) assert node.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) node_another_bucket = cluster.instances["node_another_bucket"] create_restore_file(node_another_bucket, bucket="root") node_another_bucket.query("SYSTEM RESTART DISK s3") - create_table(node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid) + create_table( + node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid + ) - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 4) - assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 4) + assert node_another_bucket.query( + "SELECT sum(id) FROM s3.test FORMAT Values" + ) == "({})".format(0) node_another_bucket_path = cluster.instances["node_another_bucket_path"] create_restore_file(node_another_bucket_path, bucket="root2", path="data") node_another_bucket_path.query("SYSTEM RESTART DISK s3") - create_table(node_another_bucket_path, "test", attach=True, db_atomic=db_atomic, uuid=uuid) + create_table( + node_another_bucket_path, "test", attach=True, db_atomic=db_atomic, uuid=uuid + ) - assert node_another_bucket_path.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 4) - assert 
node_another_bucket_path.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) + assert node_another_bucket_path.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 4) + assert node_another_bucket_path.query( + "SELECT sum(id) FROM s3.test FORMAT Values" + ) == "({})".format(0) -@pytest.mark.parametrize( - "db_atomic", [False, True] -) +@pytest.mark.parametrize("db_atomic", [False, True]) def test_restore_different_revisions(cluster, db_atomic): node = cluster.instances["node"] create_table(node, "test", db_atomic=db_atomic) uuid = get_table_uuid(node, db_atomic, "test") - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-04', 4096, -1))) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-04", 4096, -1)) + ) node.query("ALTER TABLE s3.test FREEZE") revision1 = get_revision_counter(node, 1) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-05', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-05', 4096, -1))) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-05", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-05", 4096, -1)) + ) node.query("ALTER TABLE s3.test FREEZE") revision2 = get_revision_counter(node, 2) @@ -243,20 +343,33 @@ def test_restore_different_revisions(cluster, db_atomic): node.query("ALTER TABLE s3.test FREEZE") revision3 = get_revision_counter(node, 3) - assert node.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 4) + assert node.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format( + 4096 * 4 + ) assert node.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) - assert node.query("SELECT count(*) from system.parts where table = 'test'") == '5\n' + assert node.query("SELECT count(*) from system.parts where table = 'test'") == "5\n" node_another_bucket = cluster.instances["node_another_bucket"] # Restore to revision 1 (2 parts). create_restore_file(node_another_bucket, revision=revision1, bucket="root") node_another_bucket.query("SYSTEM RESTART DISK s3") - create_table(node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid) + create_table( + node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid + ) - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 2) - assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) - assert node_another_bucket.query("SELECT count(*) from system.parts where table = 'test'") == '2\n' + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 2) + assert node_another_bucket.query( + "SELECT sum(id) FROM s3.test FORMAT Values" + ) == "({})".format(0) + assert ( + node_another_bucket.query( + "SELECT count(*) from system.parts where table = 'test'" + ) + == "2\n" + ) # Restore to revision 2 (4 parts). 
node_another_bucket.query("DETACH TABLE s3.test") @@ -264,9 +377,18 @@ def test_restore_different_revisions(cluster, db_atomic): node_another_bucket.query("SYSTEM RESTART DISK s3") node_another_bucket.query("ATTACH TABLE s3.test") - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 4) - assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) - assert node_another_bucket.query("SELECT count(*) from system.parts where table = 'test'") == '4\n' + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 4) + assert node_another_bucket.query( + "SELECT sum(id) FROM s3.test FORMAT Values" + ) == "({})".format(0) + assert ( + node_another_bucket.query( + "SELECT count(*) from system.parts where table = 'test'" + ) + == "4\n" + ) # Restore to revision 3 (4 parts + 1 merged). node_another_bucket.query("DETACH TABLE s3.test") @@ -274,27 +396,40 @@ def test_restore_different_revisions(cluster, db_atomic): node_another_bucket.query("SYSTEM RESTART DISK s3") node_another_bucket.query("ATTACH TABLE s3.test") - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 4) - assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) - assert node_another_bucket.query("SELECT count(*) from system.parts where table = 'test'") == '5\n' + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 4) + assert node_another_bucket.query( + "SELECT sum(id) FROM s3.test FORMAT Values" + ) == "({})".format(0) + assert ( + node_another_bucket.query( + "SELECT count(*) from system.parts where table = 'test'" + ) + == "5\n" + ) -@pytest.mark.parametrize( - "db_atomic", [False, True] -) +@pytest.mark.parametrize("db_atomic", [False, True]) def test_restore_mutations(cluster, db_atomic): node = cluster.instances["node"] create_table(node, "test", db_atomic=db_atomic) uuid = get_table_uuid(node, db_atomic, "test") - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096, -1))) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-03", 4096, -1)) + ) node.query("ALTER TABLE s3.test FREEZE") revision_before_mutation = get_revision_counter(node, 1) - node.query("ALTER TABLE s3.test UPDATE counter = 1 WHERE 1", settings={"mutations_sync": 2}) + node.query( + "ALTER TABLE s3.test UPDATE counter = 1 WHERE 1", settings={"mutations_sync": 2} + ) node.query("ALTER TABLE s3.test FREEZE") revision_after_mutation = get_revision_counter(node, 2) @@ -302,24 +437,44 @@ def test_restore_mutations(cluster, db_atomic): node_another_bucket = cluster.instances["node_another_bucket"] # Restore to revision before mutation. 
- create_restore_file(node_another_bucket, revision=revision_before_mutation, bucket="root") + create_restore_file( + node_another_bucket, revision=revision_before_mutation, bucket="root" + ) node_another_bucket.query("SYSTEM RESTART DISK s3") - create_table(node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid) + create_table( + node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid + ) - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 2) - assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) - assert node_another_bucket.query("SELECT sum(counter) FROM s3.test FORMAT Values") == "({})".format(0) + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 2) + assert node_another_bucket.query( + "SELECT sum(id) FROM s3.test FORMAT Values" + ) == "({})".format(0) + assert node_another_bucket.query( + "SELECT sum(counter) FROM s3.test FORMAT Values" + ) == "({})".format(0) # Restore to revision after mutation. node_another_bucket.query("DETACH TABLE s3.test") - create_restore_file(node_another_bucket, revision=revision_after_mutation, bucket="root") + create_restore_file( + node_another_bucket, revision=revision_after_mutation, bucket="root" + ) node_another_bucket.query("SYSTEM RESTART DISK s3") node_another_bucket.query("ATTACH TABLE s3.test") - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 2) - assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) - assert node_another_bucket.query("SELECT sum(counter) FROM s3.test FORMAT Values") == "({})".format(4096 * 2) - assert node_another_bucket.query("SELECT sum(counter) FROM s3.test WHERE id > 0 FORMAT Values") == "({})".format(4096) + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 2) + assert node_another_bucket.query( + "SELECT sum(id) FROM s3.test FORMAT Values" + ) == "({})".format(0) + assert node_another_bucket.query( + "SELECT sum(counter) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 2) + assert node_another_bucket.query( + "SELECT sum(counter) FROM s3.test WHERE id > 0 FORMAT Values" + ) == "({})".format(4096) # Restore to revision in the middle of mutation. # Unfinished mutation should be completed after table startup. @@ -332,31 +487,51 @@ def test_restore_mutations(cluster, db_atomic): # Wait for unfinished mutation completion. 
time.sleep(3) - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 2) - assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) - assert node_another_bucket.query("SELECT sum(counter) FROM s3.test FORMAT Values") == "({})".format(4096 * 2) - assert node_another_bucket.query("SELECT sum(counter) FROM s3.test WHERE id > 0 FORMAT Values") == "({})".format(4096) + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 2) + assert node_another_bucket.query( + "SELECT sum(id) FROM s3.test FORMAT Values" + ) == "({})".format(0) + assert node_another_bucket.query( + "SELECT sum(counter) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 2) + assert node_another_bucket.query( + "SELECT sum(counter) FROM s3.test WHERE id > 0 FORMAT Values" + ) == "({})".format(4096) -@pytest.mark.parametrize( - "db_atomic", [False, True] -) +@pytest.mark.parametrize("db_atomic", [False, True]) def test_migrate_to_restorable_schema(cluster, db_atomic): node = cluster.instances["node_not_restorable"] create_table(node, "test", db_atomic=db_atomic) uuid = get_table_uuid(node, db_atomic, "test") - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-04', 4096, -1))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-05', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-05', 4096, -1))) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-04", 4096, -1)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-05", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-05", 4096, -1)) + ) - replace_config("false", "true") + replace_config( + "false", "true" + ) node.restart_clickhouse() - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-06', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-06', 4096, -1))) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-06", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-06", 4096, -1)) + ) node.query("ALTER TABLE s3.test FREEZE") revision = get_revision_counter(node, 1) @@ -366,34 +541,50 @@ def test_migrate_to_restorable_schema(cluster, db_atomic): node_another_bucket = cluster.instances["node_another_bucket"] # Restore to revision before mutation. 
- create_restore_file(node_another_bucket, revision=revision, bucket="root", path="another_data") + create_restore_file( + node_another_bucket, revision=revision, bucket="root", path="another_data" + ) node_another_bucket.query("SYSTEM RESTART DISK s3") - create_table(node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid) + create_table( + node_another_bucket, "test", attach=True, db_atomic=db_atomic, uuid=uuid + ) - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 6) - assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 6) + assert node_another_bucket.query( + "SELECT sum(id) FROM s3.test FORMAT Values" + ) == "({})".format(0) -@pytest.mark.parametrize( - "replicated", [False, True] -) -@pytest.mark.parametrize( - "db_atomic", [False, True] -) +@pytest.mark.parametrize("replicated", [False, True]) +@pytest.mark.parametrize("db_atomic", [False, True]) def test_restore_to_detached(cluster, replicated, db_atomic): node = cluster.instances["node"] create_table(node, "test", attach=False, replicated=replicated, db_atomic=db_atomic) uuid = get_table_uuid(node, db_atomic, "test") - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-04', 4096, -1))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-05', 4096))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-06', 4096, -1))) - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-07', 4096, 0))) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-03", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-04", 4096, -1)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-05", 4096)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-06", 4096, -1)) + ) + node.query( + "INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-07", 4096, 0)) + ) # Add some mutation. - node.query("ALTER TABLE s3.test UPDATE counter = 1 WHERE 1", settings={"mutations_sync": 2}) + node.query( + "ALTER TABLE s3.test UPDATE counter = 1 WHERE 1", settings={"mutations_sync": 2} + ) # Detach some partition. 
node.query("ALTER TABLE s3.test DETACH PARTITION '2020-01-07'") @@ -403,42 +594,64 @@ def test_restore_to_detached(cluster, replicated, db_atomic): node_another_bucket = cluster.instances["node_another_bucket"] - create_restore_file(node_another_bucket, revision=revision, bucket="root", path="data", detached=True) + create_restore_file( + node_another_bucket, + revision=revision, + bucket="root", + path="data", + detached=True, + ) node_another_bucket.query("SYSTEM RESTART DISK s3") - create_table(node_another_bucket, "test", replicated=replicated, db_atomic=db_atomic, uuid=uuid) + create_table( + node_another_bucket, + "test", + replicated=replicated, + db_atomic=db_atomic, + uuid=uuid, + ) - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(0) + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(0) node_another_bucket.query("ALTER TABLE s3.test ATTACH PARTITION '2020-01-03'") node_another_bucket.query("ALTER TABLE s3.test ATTACH PARTITION '2020-01-04'") node_another_bucket.query("ALTER TABLE s3.test ATTACH PARTITION '2020-01-05'") node_another_bucket.query("ALTER TABLE s3.test ATTACH PARTITION '2020-01-06'") - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 4) - assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) - assert node_another_bucket.query("SELECT sum(counter) FROM s3.test FORMAT Values") == "({})".format(4096 * 4) + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 4) + assert node_another_bucket.query( + "SELECT sum(id) FROM s3.test FORMAT Values" + ) == "({})".format(0) + assert node_another_bucket.query( + "SELECT sum(counter) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 4) # Attach partition that was already detached before backup-restore. 
node_another_bucket.query("ALTER TABLE s3.test ATTACH PARTITION '2020-01-07'") - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(4096 * 5) - assert node_another_bucket.query("SELECT sum(id) FROM s3.test FORMAT Values") == "({})".format(0) - assert node_another_bucket.query("SELECT sum(counter) FROM s3.test FORMAT Values") == "({})".format(4096 * 5) + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 5) + assert node_another_bucket.query( + "SELECT sum(id) FROM s3.test FORMAT Values" + ) == "({})".format(0) + assert node_another_bucket.query( + "SELECT sum(counter) FROM s3.test FORMAT Values" + ) == "({})".format(4096 * 5) -@pytest.mark.parametrize( - "replicated", [False, True] -) -@pytest.mark.parametrize( - "db_atomic", [False, True] -) +@pytest.mark.parametrize("replicated", [False, True]) +@pytest.mark.parametrize("db_atomic", [False, True]) def test_restore_without_detached(cluster, replicated, db_atomic): node = cluster.instances["node"] create_table(node, "test", attach=False, replicated=replicated, db_atomic=db_atomic) uuid = get_table_uuid(node, db_atomic, "test") - node.query("INSERT INTO s3.test VALUES {}".format(generate_values('2020-01-03', 1))) + node.query("INSERT INTO s3.test VALUES {}".format(generate_values("2020-01-03", 1))) assert node.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(1) @@ -447,12 +660,28 @@ def test_restore_without_detached(cluster, replicated, db_atomic): node_another_bucket = cluster.instances["node_another_bucket"] - create_restore_file(node_another_bucket, revision=revision, bucket="root", path="data", detached=True) + create_restore_file( + node_another_bucket, + revision=revision, + bucket="root", + path="data", + detached=True, + ) node_another_bucket.query("SYSTEM RESTART DISK s3") - create_table(node_another_bucket, "test", replicated=replicated, db_atomic=db_atomic, uuid=uuid) + create_table( + node_another_bucket, + "test", + replicated=replicated, + db_atomic=db_atomic, + uuid=uuid, + ) - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(0) + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(0) node_another_bucket.query("ALTER TABLE s3.test ATTACH PARTITION '2020-01-03'") - assert node_another_bucket.query("SELECT count(*) FROM s3.test FORMAT Values") == "({})".format(1) + assert node_another_bucket.query( + "SELECT count(*) FROM s3.test FORMAT Values" + ) == "({})".format(1) diff --git a/tests/integration/test_merge_tree_s3_with_cache/test.py b/tests/integration/test_merge_tree_s3_with_cache/test.py index be3d2709873..89b5a400b1b 100644 --- a/tests/integration/test_merge_tree_s3_with_cache/test.py +++ b/tests/integration/test_merge_tree_s3_with_cache/test.py @@ -8,9 +8,16 @@ from helpers.cluster import ClickHouseCluster def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", main_configs=["configs/config.d/storage_conf.xml", "configs/config.d/ssl_conf.xml", - "configs/config.d/query_log.xml"], - user_configs=["configs/config.d/users.xml"], with_minio=True) + cluster.add_instance( + "node", + main_configs=[ + "configs/config.d/storage_conf.xml", + "configs/config.d/ssl_conf.xml", + "configs/config.d/query_log.xml", + ], + user_configs=["configs/config.d/users.xml"], + with_minio=True, + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -23,12 +30,16 
@@ def cluster(): def get_query_stat(instance, hint): result = {} instance.query("SYSTEM FLUSH LOGS") - events = instance.query(''' + events = instance.query( + """ SELECT ProfileEvents.keys, ProfileEvents.values FROM system.query_log ARRAY JOIN ProfileEvents WHERE type != 1 AND query LIKE '%{}%' - '''.format(hint.replace("'", "\\'"))).split("\n") + """.format( + hint.replace("'", "\\'") + ) + ).split("\n") for event in events: ev = event.split("\t") if len(ev) == 2: @@ -36,6 +47,7 @@ def get_query_stat(instance, hint): result[ev[0]] = int(ev[1]) return result + @pytest.mark.parametrize("min_rows_for_wide_part,read_requests", [(0, 2), (8192, 1)]) def test_write_is_cached(cluster, min_rows_for_wide_part, read_requests): node = cluster.instances["node"] @@ -48,7 +60,9 @@ def test_write_is_cached(cluster, min_rows_for_wide_part, read_requests): ) ENGINE=MergeTree() ORDER BY id SETTINGS storage_policy='s3', min_rows_for_wide_part={} - """.format(min_rows_for_wide_part) + """.format( + min_rows_for_wide_part + ) ) node.query("SYSTEM FLUSH LOGS") @@ -65,8 +79,13 @@ def test_write_is_cached(cluster, min_rows_for_wide_part, read_requests): node.query("DROP TABLE IF EXISTS s3_test NO DELAY") -@pytest.mark.parametrize("min_rows_for_wide_part,all_files,bin_files", [(0, 4, 2), (8192, 2, 1)]) -def test_read_after_cache_is_wiped(cluster, min_rows_for_wide_part, all_files, bin_files): + +@pytest.mark.parametrize( + "min_rows_for_wide_part,all_files,bin_files", [(0, 4, 2), (8192, 2, 1)] +) +def test_read_after_cache_is_wiped( + cluster, min_rows_for_wide_part, all_files, bin_files +): node = cluster.instances["node"] node.query( @@ -77,7 +96,9 @@ def test_read_after_cache_is_wiped(cluster, min_rows_for_wide_part, all_files, b ) ENGINE=MergeTree() ORDER BY id SETTINGS storage_policy='s3', min_rows_for_wide_part={} - """.format(min_rows_for_wide_part) + """.format( + min_rows_for_wide_part + ) ) node.query("SYSTEM FLUSH LOGS") @@ -86,7 +107,10 @@ def test_read_after_cache_is_wiped(cluster, min_rows_for_wide_part, all_files, b node.query("INSERT INTO s3_test VALUES (0,'data'),(1,'data')") # Wipe cache - cluster.exec_in_container(cluster.get_container_id("node"), ["rm", "-rf", "/var/lib/clickhouse/disks/s3/cache/"]) + cluster.exec_in_container( + cluster.get_container_id("node"), + ["rm", "-rf", "/var/lib/clickhouse/disks/s3/cache/"], + ) select_query = "SELECT * FROM s3_test" node.query(select_query) @@ -99,7 +123,7 @@ def test_read_after_cache_is_wiped(cluster, min_rows_for_wide_part, all_files, b assert node.query(select_query) == "(0,'data'),(1,'data')" # With async reads profile events are not updated because reads are done in a separate thread. 
- #stat = get_query_stat(node, select_query) - #assert stat["S3ReadRequestsCount"] == bin_files + # stat = get_query_stat(node, select_query) + # assert stat["S3ReadRequestsCount"] == bin_files node.query("DROP TABLE IF EXISTS s3_test NO DELAY") diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index e2b30b8f90e..be07e4a1b8c 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -12,21 +12,31 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', - main_configs=['configs/logs_config.xml', 'configs/config.d/storage_configuration.xml', - 'configs/config.d/cluster.xml'], - with_zookeeper=True, - stay_alive=True, - tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'], - macros={"shard": 0, "replica": 1}) +node1 = cluster.add_instance( + "node1", + main_configs=[ + "configs/logs_config.xml", + "configs/config.d/storage_configuration.xml", + "configs/config.d/cluster.xml", + ], + with_zookeeper=True, + stay_alive=True, + tmpfs=["/jbod1:size=40M", "/jbod2:size=40M", "/external:size=200M"], + macros={"shard": 0, "replica": 1}, +) -node2 = cluster.add_instance('node2', - main_configs=['configs/logs_config.xml', 'configs/config.d/storage_configuration.xml', - 'configs/config.d/cluster.xml'], - with_zookeeper=True, - stay_alive=True, - tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'], - macros={"shard": 0, "replica": 2}) +node2 = cluster.add_instance( + "node2", + main_configs=[ + "configs/logs_config.xml", + "configs/config.d/storage_configuration.xml", + "configs/config.d/cluster.xml", + ], + with_zookeeper=True, + stay_alive=True, + tmpfs=["/jbod1:size=40M", "/jbod2:size=40M", "/external:size=200M"], + macros={"shard": 0, "replica": 2}, +) @pytest.fixture(scope="module") @@ -44,28 +54,31 @@ def test_system_tables(start_cluster): { "name": "default", "path": "/var/lib/clickhouse/", - "keep_free_space": '1024', + "keep_free_space": "1024", }, { "name": "jbod1", "path": "/jbod1/", - "keep_free_space": '0', + "keep_free_space": "0", }, { "name": "jbod2", "path": "/jbod2/", - "keep_free_space": '10485760', + "keep_free_space": "10485760", }, { "name": "external", "path": "/external/", - "keep_free_space": '0', - } + "keep_free_space": "0", + }, ] - click_disk_data = json.loads(node1.query("SELECT name, path, keep_free_space FROM system.disks FORMAT JSON"))[ - "data"] - assert sorted(click_disk_data, key=lambda x: x["name"]) == sorted(expected_disks_data, key=lambda x: x["name"]) + click_disk_data = json.loads( + node1.query("SELECT name, path, keep_free_space FROM system.disks FORMAT JSON") + )["data"] + assert sorted(click_disk_data, key=lambda x: x["name"]) == sorted( + expected_disks_data, key=lambda x: x["name"] + ) expected_policies_data = [ { @@ -230,131 +243,211 @@ def test_system_tables(start_cluster): }, ] - clickhouse_policies_data = \ - json.loads(node1.query("SELECT * FROM system.storage_policies WHERE policy_name != 'default' FORMAT JSON"))[ - "data"] + clickhouse_policies_data = json.loads( + node1.query( + "SELECT * FROM system.storage_policies WHERE policy_name != 'default' FORMAT JSON" + ) + )["data"] def key(x): return (x["policy_name"], x["volume_name"], x["volume_priority"]) - assert sorted(clickhouse_policies_data, key=key) == sorted(expected_policies_data, key=key) + assert sorted(clickhouse_policies_data, key=key) == sorted( + expected_policies_data, key=key + ) 
def test_query_parser(start_cluster): try: with pytest.raises(QueryRuntimeException): - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS table_with_absent_policy ( d UInt64 ) ENGINE = MergeTree() ORDER BY d SETTINGS storage_policy='very_exciting_policy' - """) + """ + ) with pytest.raises(QueryRuntimeException): - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS table_with_absent_policy ( d UInt64 ) ENGINE = MergeTree() ORDER BY d SETTINGS storage_policy='jbod1' - """) + """ + ) - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS table_with_normal_policy ( d UInt64 ) ENGINE = MergeTree() ORDER BY d SETTINGS storage_policy='default' - """) + """ + ) node1.query("INSERT INTO table_with_normal_policy VALUES (5)") with pytest.raises(QueryRuntimeException): - node1.query("ALTER TABLE table_with_normal_policy MOVE PARTITION tuple() TO VOLUME 'some_volume'") - - with pytest.raises(QueryRuntimeException): - node1.query("ALTER TABLE table_with_normal_policy MOVE PARTITION tuple() TO DISK 'some_volume'") - - with pytest.raises(QueryRuntimeException): - node1.query("ALTER TABLE table_with_normal_policy MOVE PART 'xxxxx' TO DISK 'jbod1'") - - with pytest.raises(QueryRuntimeException): - node1.query("ALTER TABLE table_with_normal_policy MOVE PARTITION 'yyyy' TO DISK 'jbod1'") + node1.query( + "ALTER TABLE table_with_normal_policy MOVE PARTITION tuple() TO VOLUME 'some_volume'" + ) with pytest.raises(QueryRuntimeException): node1.query( - "ALTER TABLE table_with_normal_policy MODIFY SETTING storage_policy='moving_jbod_with_external'") + "ALTER TABLE table_with_normal_policy MOVE PARTITION tuple() TO DISK 'some_volume'" + ) + + with pytest.raises(QueryRuntimeException): + node1.query( + "ALTER TABLE table_with_normal_policy MOVE PART 'xxxxx' TO DISK 'jbod1'" + ) + + with pytest.raises(QueryRuntimeException): + node1.query( + "ALTER TABLE table_with_normal_policy MOVE PARTITION 'yyyy' TO DISK 'jbod1'" + ) + + with pytest.raises(QueryRuntimeException): + node1.query( + "ALTER TABLE table_with_normal_policy MODIFY SETTING storage_policy='moving_jbod_with_external'" + ) finally: node1.query("DROP TABLE IF EXISTS table_with_normal_policy SYNC") -@pytest.mark.parametrize("name,engine", [ - pytest.param("test_alter_policy", "MergeTree()", id="mt"), - pytest.param("replicated_test_alter_policy", "ReplicatedMergeTree('/clickhouse/test_alter_policy', '1')", id="replicated"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("test_alter_policy", "MergeTree()", id="mt"), + pytest.param( + "replicated_test_alter_policy", + "ReplicatedMergeTree('/clickhouse/test_alter_policy', '1')", + id="replicated", + ), + ], +) def test_alter_policy(start_cluster, name, engine): try: - node1.query_with_retry(""" + node1.query_with_retry( + """ CREATE TABLE IF NOT EXISTS {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) - assert node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format( - name=name)) == "small_jbod_with_external\n" + assert ( + node1.query( + """SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format( + name=name + ) + ) + == "small_jbod_with_external\n" + ) with pytest.raises(QueryRuntimeException): node1.query( """ALTER TABLE {name} MODIFY SETTING storage_policy='one_more_small_jbod_with_external'""".format( - name=name)) + name=name + ) + ) - assert 
node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format( - name=name)) == "small_jbod_with_external\n" + assert ( + node1.query( + """SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format( + name=name + ) + ) + == "small_jbod_with_external\n" + ) - node1.query_with_retry("""ALTER TABLE {name} MODIFY SETTING storage_policy='jbods_with_external'""".format(name=name)) + node1.query_with_retry( + """ALTER TABLE {name} MODIFY SETTING storage_policy='jbods_with_external'""".format( + name=name + ) + ) - assert node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format( - name=name)) == "jbods_with_external\n" + assert ( + node1.query( + """SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format( + name=name + ) + ) + == "jbods_with_external\n" + ) with pytest.raises(QueryRuntimeException): node1.query( - """ALTER TABLE {name} MODIFY SETTING storage_policy='small_jbod_with_external'""".format(name=name)) + """ALTER TABLE {name} MODIFY SETTING storage_policy='small_jbod_with_external'""".format( + name=name + ) + ) - assert node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format( - name=name)) == "jbods_with_external\n" + assert ( + node1.query( + """SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format( + name=name + ) + ) + == "jbods_with_external\n" + ) finally: node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC") def get_random_string(length): - return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length)) + return "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(length) + ) def get_used_disks_for_table(node, table_name): - return tuple(node.query( - "select disk_name from system.parts where table == '{}' and active=1 order by modification_time".format( - table_name)).strip().split('\n')) + return tuple( + node.query( + "select disk_name from system.parts where table == '{}' and active=1 order by modification_time".format( + table_name + ) + ) + .strip() + .split("\n") + ) def get_used_parts_for_table(node, table_name): - return node.query("SELECT name FROM system.parts WHERE table = '{}' AND active = 1 ORDER BY modification_time".format(table_name)).splitlines() + return node.query( + "SELECT name FROM system.parts WHERE table = '{}' AND active = 1 ORDER BY modification_time".format( + table_name + ) + ).splitlines() + def test_no_warning_about_zero_max_data_part_size(start_cluster): def get_log(node): - return node.exec_in_container(["bash", "-c", "cat /var/log/clickhouse-server/clickhouse-server.log"]) + return node.exec_in_container( + ["bash", "-c", "cat /var/log/clickhouse-server/clickhouse-server.log"] + ) for node in (node1, node2): - node.query(""" + node.query( + """ CREATE TABLE IF NOT EXISTS default.test_warning_table ( s String ) ENGINE = MergeTree ORDER BY tuple() SETTINGS storage_policy='small_jbod_with_external' - """) + """ + ) node.query("DROP TABLE IF EXISTS default.test_warning_table SYNC") log = get_log(node) assert not re.search("Warning.*Volume.*special_warning_zero_volume", log) @@ -363,38 +456,55 @@ def test_no_warning_about_zero_max_data_part_size(start_cluster): assert not re.search("Warning.*Volume.*special_warning_big_volume", log) -@pytest.mark.parametrize("name,engine", [ - pytest.param("mt_on_jbod", "MergeTree()", id="mt"), - pytest.param("replicated_mt_on_jbod", "ReplicatedMergeTree('/clickhouse/replicated_mt_on_jbod', '1')", id="replicated"), -]) 
+@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("mt_on_jbod", "MergeTree()", id="mt"), + pytest.param( + "replicated_mt_on_jbod", + "ReplicatedMergeTree('/clickhouse/replicated_mt_on_jbod', '1')", + id="replicated", + ), + ], +) def test_round_robin(start_cluster, name, engine): try: - node1.query_with_retry(""" + node1.query_with_retry( + """ CREATE TABLE IF NOT EXISTS {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) # first should go to the jbod1 - node1.query_with_retry("insert into {} select * from numbers(10000)".format(name)) + node1.query_with_retry( + "insert into {} select * from numbers(10000)".format(name) + ) used_disk = get_used_disks_for_table(node1, name) - assert len(used_disk) == 1, 'More than one disk used for single insert' + assert len(used_disk) == 1, "More than one disk used for single insert" # sleep is required because we order disks by their modification time, and if insert will be fast # modification time of two disks will be equal, then sort will not provide deterministic results time.sleep(5) - node1.query_with_retry("insert into {} select * from numbers(10000, 10000)".format(name)) + node1.query_with_retry( + "insert into {} select * from numbers(10000, 10000)".format(name) + ) used_disks = get_used_disks_for_table(node1, name) - assert len(used_disks) == 2, 'Two disks should be used for two parts' + assert len(used_disks) == 2, "Two disks should be used for two parts" assert used_disks[0] != used_disks[1], "Should write to different disks" time.sleep(5) - node1.query_with_retry("insert into {} select * from numbers(20000, 10000)".format(name)) + node1.query_with_retry( + "insert into {} select * from numbers(20000, 10000)".format(name) + ) used_disks = get_used_disks_for_table(node1, name) # jbod1 -> jbod2 -> jbod1 -> jbod2 ... 
etc @@ -405,46 +515,79 @@ def test_round_robin(start_cluster, name, engine): node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC") -@pytest.mark.parametrize("name,engine", [ - pytest.param("mt_with_huge_part", "MergeTree()", id="mt"), - pytest.param("replicated_mt_with_huge_part", "ReplicatedMergeTree('/clickhouse/replicated_mt_with_huge_part', '1')", id="replicated"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("mt_with_huge_part", "MergeTree()", id="mt"), + pytest.param( + "replicated_mt_with_huge_part", + "ReplicatedMergeTree('/clickhouse/replicated_mt_with_huge_part', '1')", + id="replicated", + ), + ], +) def test_max_data_part_size(start_cluster, name, engine): try: - assert int(*node1.query("""SELECT max_data_part_size FROM system.storage_policies WHERE policy_name = 'jbods_with_external' AND volume_name = 'main'""").splitlines()) == 10*1024*1024 + assert ( + int( + *node1.query( + """SELECT max_data_part_size FROM system.storage_policies WHERE policy_name = 'jbods_with_external' AND volume_name = 'main'""" + ).splitlines() + ) + == 10 * 1024 * 1024 + ) - node1.query_with_retry(""" + node1.query_with_retry( + """ CREATE TABLE IF NOT EXISTS {name} ( s1 String ) ENGINE = {engine} ORDER BY tuple() SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) data = [] # 10MB in total for i in range(10): data.append(get_random_string(1024 * 1024)) # 1MB row - node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data]))) + node1.query_with_retry( + "INSERT INTO {} VALUES {}".format( + name, ",".join(["('" + x + "')" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert len(used_disks) == 1 - assert used_disks[0] == 'external' + assert used_disks[0] == "external" finally: node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC") -@pytest.mark.parametrize("name,engine", [ - pytest.param("mt_with_overflow", "MergeTree()", id="mt"), - pytest.param("replicated_mt_with_overflow", "ReplicatedMergeTree('/clickhouse/replicated_mt_with_overflow', '1')", id="replicated"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("mt_with_overflow", "MergeTree()", id="mt"), + pytest.param( + "replicated_mt_with_overflow", + "ReplicatedMergeTree('/clickhouse/replicated_mt_with_overflow', '1')", + id="replicated", + ), + ], +) def test_jbod_overflow(start_cluster, name, engine): try: - node1.query_with_retry(""" + node1.query_with_retry( + """ CREATE TABLE IF NOT EXISTS {name} ( s1 String ) ENGINE = {engine} ORDER BY tuple() SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) node1.query(f"SYSTEM STOP MERGES {name}") @@ -453,21 +596,29 @@ def test_jbod_overflow(start_cluster, name, engine): data = [] # 5MB in total for i in range(5): data.append(get_random_string(1024 * 1024)) # 1MB row - node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data]))) + node1.query_with_retry( + "INSERT INTO {} VALUES {}".format( + name, ",".join(["('" + x + "')" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) - assert used_disks == tuple('jbod1' for _ in used_disks) + assert used_disks == tuple("jbod1" for _ in used_disks) # should go to the external disk (jbod is overflown) data = [] # 10MB in total for i in range(10): data.append(get_random_string(1024 * 1024)) # 1MB row - 
node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data]))) + node1.query_with_retry( + "INSERT INTO {} VALUES {}".format( + name, ",".join(["('" + x + "')" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) - assert used_disks[-1] == 'external' + assert used_disks[-1] == "external" node1.query(f"SYSTEM START MERGES {name}") time.sleep(1) @@ -475,29 +626,46 @@ def test_jbod_overflow(start_cluster, name, engine): node1.query_with_retry("OPTIMIZE TABLE {} FINAL".format(name)) time.sleep(2) - disks_for_merges = tuple(node1.query( - "SELECT disk_name FROM system.parts WHERE table == '{}' AND level >= 1 and active = 1 ORDER BY modification_time".format( - name)).strip().split('\n')) + disks_for_merges = tuple( + node1.query( + "SELECT disk_name FROM system.parts WHERE table == '{}' AND level >= 1 and active = 1 ORDER BY modification_time".format( + name + ) + ) + .strip() + .split("\n") + ) - assert disks_for_merges == tuple('external' for _ in disks_for_merges) + assert disks_for_merges == tuple("external" for _ in disks_for_merges) finally: node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC") -@pytest.mark.parametrize("name,engine", [ - pytest.param("moving_mt", "MergeTree()", id="mt"), - pytest.param("moving_replicated_mt", "ReplicatedMergeTree('/clickhouse/moving_replicated_mt', '1')", id="replicated"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("moving_mt", "MergeTree()", id="mt"), + pytest.param( + "moving_replicated_mt", + "ReplicatedMergeTree('/clickhouse/moving_replicated_mt', '1')", + id="replicated", + ), + ], +) def test_background_move(start_cluster, name, engine): try: - node1.query_with_retry(""" + node1.query_with_retry( + """ CREATE TABLE IF NOT EXISTS {name} ( s1 String ) ENGINE = {engine} ORDER BY tuple() SETTINGS storage_policy='moving_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) node1.query(f"SYSTEM STOP MERGES {name}") @@ -506,25 +674,31 @@ def test_background_move(start_cluster, name, engine): for i in range(5): data.append(get_random_string(1024 * 1024)) # 1MB row # small jbod size is 40MB, so lets insert 5MB batch 5 times - node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data]))) + node1.query_with_retry( + "INSERT INTO {} VALUES {}".format( + name, ",".join(["('" + x + "')" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) retry = 20 i = 0 - while not sum(1 for x in used_disks if x == 'jbod1') <= 2 and i < retry: + while not sum(1 for x in used_disks if x == "jbod1") <= 2 and i < retry: time.sleep(0.5) used_disks = get_used_disks_for_table(node1, name) i += 1 - assert sum(1 for x in used_disks if x == 'jbod1') <= 2 + assert sum(1 for x in used_disks if x == "jbod1") <= 2 # first (oldest) part was moved to external - assert used_disks[0] == 'external' + assert used_disks[0] == "external" path = node1.query( "SELECT path_on_disk FROM system.part_log WHERE table = '{}' AND event_type='MovePart' ORDER BY event_time LIMIT 1".format( - name)) + name + ) + ) # first (oldest) part was moved to external assert path.startswith("/external") @@ -535,19 +709,30 @@ def test_background_move(start_cluster, name, engine): node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC") -@pytest.mark.parametrize("name,engine", [ - pytest.param("stopped_moving_mt", "MergeTree()", id="mt"), - pytest.param("stopped_moving_replicated_mt", 
"ReplicatedMergeTree('/clickhouse/stopped_moving_replicated_mt', '1')", id="replicated"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("stopped_moving_mt", "MergeTree()", id="mt"), + pytest.param( + "stopped_moving_replicated_mt", + "ReplicatedMergeTree('/clickhouse/stopped_moving_replicated_mt', '1')", + id="replicated", + ), + ], +) def test_start_stop_moves(start_cluster, name, engine): try: - node1.query_with_retry(""" + node1.query_with_retry( + """ CREATE TABLE IF NOT EXISTS {name} ( s1 String ) ENGINE = {engine} ORDER BY tuple() SETTINGS storage_policy='moving_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) node1.query_with_retry("INSERT INTO {} VALUES ('HELLO')".format(name)) node1.query_with_retry("INSERT INTO {} VALUES ('WORLD')".format(name)) @@ -557,23 +742,37 @@ def test_start_stop_moves(start_cluster, name, engine): first_part = node1.query( "SELECT name FROM system.parts WHERE table = '{}' and active = 1 ORDER BY modification_time LIMIT 1".format( - name)).strip() + name + ) + ).strip() node1.query("SYSTEM STOP MOVES") with pytest.raises(QueryRuntimeException): - node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, first_part)) + node1.query( + "ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format( + name, first_part + ) + ) used_disks = get_used_disks_for_table(node1, name) - assert all(d == "jbod1" for d in used_disks), "Blocked moves doesn't actually move something" + assert all( + d == "jbod1" for d in used_disks + ), "Blocked moves doesn't actually move something" node1.query("SYSTEM START MOVES") - node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, first_part)) + node1.query( + "ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format( + name, first_part + ) + ) disk = node1.query( - "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name, - first_part)).strip() + "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format( + name, first_part + ) + ).strip() assert disk == "external" @@ -587,36 +786,40 @@ def test_start_stop_moves(start_cluster, name, engine): for i in range(5): data.append(get_random_string(1024 * 1024)) # 1MB row # jbod size is 40MB, so lets insert 5MB batch 7 times - node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data]))) + node1.query_with_retry( + "INSERT INTO {} VALUES {}".format( + name, ",".join(["('" + x + "')" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) retry = 5 i = 0 - while not sum(1 for x in used_disks if x == 'jbod1') <= 2 and i < retry: + while not sum(1 for x in used_disks if x == "jbod1") <= 2 and i < retry: time.sleep(0.1) used_disks = get_used_disks_for_table(node1, name) i += 1 # first (oldest) part doesn't move anywhere - assert used_disks[0] == 'jbod1' + assert used_disks[0] == "jbod1" node1.query("SYSTEM START MOVES {}".format(name)) # wait sometime until background backoff finishes retry = 30 i = 0 - while not sum(1 for x in used_disks if x == 'jbod1') <= 2 and i < retry: + while not sum(1 for x in used_disks if x == "jbod1") <= 2 and i < retry: time.sleep(1) used_disks = get_used_disks_for_table(node1, name) i += 1 node1.query("SYSTEM START MERGES {}".format(name)) - assert sum(1 for x in used_disks if x == 'jbod1') <= 2 + assert sum(1 for x in used_disks if x == "jbod1") <= 2 # first (oldest) part moved to external - assert 
used_disks[0] == 'external' + assert used_disks[0] == "external" finally: node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC") @@ -626,7 +829,9 @@ def get_path_for_part_from_part_log(node, table, part_name): node.query("SYSTEM FLUSH LOGS") path = node.query( "SELECT path_on_disk FROM system.part_log WHERE table = '{}' and part_name = '{}' ORDER BY event_time DESC LIMIT 1".format( - table, part_name)) + table, part_name + ) + ) return path.strip() @@ -634,18 +839,24 @@ def get_paths_for_partition_from_part_log(node, table, partition_id): node.query("SYSTEM FLUSH LOGS") paths = node.query( "SELECT path_on_disk FROM system.part_log WHERE table = '{}' and partition_id = '{}' ORDER BY event_time DESC".format( - table, partition_id)) - return paths.strip().split('\n') + table, partition_id + ) + ) + return paths.strip().split("\n") -@pytest.mark.parametrize("name,engine", [ - pytest.param("altering_mt", "MergeTree()", id="mt"), - # ("altering_replicated_mt","ReplicatedMergeTree('/clickhouse/altering_replicated_mt', '1')",), - # SYSTEM STOP MERGES doesn't disable merges assignments -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("altering_mt", "MergeTree()", id="mt"), + # ("altering_replicated_mt","ReplicatedMergeTree('/clickhouse/altering_replicated_mt', '1')",), + # SYSTEM STOP MERGES doesn't disable merges assignments + ], +) def test_alter_move(start_cluster, name, engine): try: - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS {name} ( EventDate Date, number UInt64 @@ -653,7 +864,10 @@ def test_alter_move(start_cluster, name, engine): ORDER BY tuple() PARTITION BY toYYYYMM(EventDate) SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) node1.query("SYSTEM STOP MERGES {}".format(name)) # to avoid conflicts @@ -662,47 +876,83 @@ def test_alter_move(start_cluster, name, engine): node1.query("INSERT INTO {} VALUES(toDate('2019-04-10'), 42)".format(name)) node1.query("INSERT INTO {} VALUES(toDate('2019-04-11'), 43)".format(name)) used_disks = get_used_disks_for_table(node1, name) - assert all(d.startswith("jbod") for d in used_disks), "All writes should go to jbods" + assert all( + d.startswith("jbod") for d in used_disks + ), "All writes should go to jbods" first_part = node1.query( "SELECT name FROM system.parts WHERE table = '{}' and active = 1 ORDER BY modification_time LIMIT 1".format( - name)).strip() + name + ) + ).strip() time.sleep(1) - node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, first_part)) + node1.query( + "ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format( + name, first_part + ) + ) disk = node1.query( - "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name, - first_part)).strip() - assert disk == 'external' - assert get_path_for_part_from_part_log(node1, name, first_part).startswith("/external") + "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format( + name, first_part + ) + ).strip() + assert disk == "external" + assert get_path_for_part_from_part_log(node1, name, first_part).startswith( + "/external" + ) time.sleep(1) - node1.query("ALTER TABLE {} MOVE PART '{}' TO DISK 'jbod1'".format(name, first_part)) + node1.query( + "ALTER TABLE {} MOVE PART '{}' TO DISK 'jbod1'".format(name, first_part) + ) disk = node1.query( - "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name, - 
first_part)).strip() - assert disk == 'jbod1' - assert get_path_for_part_from_part_log(node1, name, first_part).startswith("/jbod1") + "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format( + name, first_part + ) + ).strip() + assert disk == "jbod1" + assert get_path_for_part_from_part_log(node1, name, first_part).startswith( + "/jbod1" + ) time.sleep(1) - node1.query("ALTER TABLE {} MOVE PARTITION 201904 TO VOLUME 'external'".format(name)) - disks = node1.query( - "SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201904' and active = 1".format( - name)).strip().split('\n') + node1.query( + "ALTER TABLE {} MOVE PARTITION 201904 TO VOLUME 'external'".format(name) + ) + disks = ( + node1.query( + "SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201904' and active = 1".format( + name + ) + ) + .strip() + .split("\n") + ) assert len(disks) == 2 assert all(d == "external" for d in disks) assert all( - path.startswith("/external") for path in get_paths_for_partition_from_part_log(node1, name, '201904')[:2]) + path.startswith("/external") + for path in get_paths_for_partition_from_part_log(node1, name, "201904")[:2] + ) time.sleep(1) node1.query("ALTER TABLE {} MOVE PARTITION 201904 TO DISK 'jbod2'".format(name)) - disks = node1.query( - "SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201904' and active = 1".format( - name)).strip().split('\n') + disks = ( + node1.query( + "SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201904' and active = 1".format( + name + ) + ) + .strip() + .split("\n") + ) assert len(disks) == 2 assert all(d == "jbod2" for d in disks) assert all( - path.startswith("/jbod2") for path in get_paths_for_partition_from_part_log(node1, name, '201904')[:2]) + path.startswith("/jbod2") + for path in get_paths_for_partition_from_part_log(node1, name, "201904")[:2] + ) assert node1.query("SELECT COUNT() FROM {}".format(name)) == "4\n" @@ -710,15 +960,13 @@ def test_alter_move(start_cluster, name, engine): node1.query(f"DROP TABLE IF EXISTS {name} SYNC") -@pytest.mark.parametrize("volume_or_disk", [ - "DISK", - "VOLUME" -]) +@pytest.mark.parametrize("volume_or_disk", ["DISK", "VOLUME"]) def test_alter_move_half_of_partition(start_cluster, volume_or_disk): name = "alter_move_half_of_partition" engine = "MergeTree()" try: - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS {name} ( EventDate Date, number UInt64 @@ -726,31 +974,49 @@ def test_alter_move_half_of_partition(start_cluster, volume_or_disk): ORDER BY tuple() PARTITION BY toYYYYMM(EventDate) SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) node1.query("SYSTEM STOP MERGES {}".format(name)) node1.query("INSERT INTO {} VALUES(toDate('2019-03-15'), 65)".format(name)) node1.query("INSERT INTO {} VALUES(toDate('2019-03-16'), 42)".format(name)) used_disks = get_used_disks_for_table(node1, name) - assert all(d.startswith("jbod") for d in used_disks), "All writes should go to jbods" + assert all( + d.startswith("jbod") for d in used_disks + ), "All writes should go to jbods" time.sleep(1) - parts = node1.query("SELECT name FROM system.parts WHERE table = '{}' and active = 1".format(name)).splitlines() + parts = node1.query( + "SELECT name FROM system.parts WHERE table = '{}' and active = 1".format( + name + ) + ).splitlines() assert len(parts) == 2 - node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 
'external'".format(name, parts[0])) + node1.query( + "ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, parts[0]) + ) disks = node1.query( - "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name, parts[ - 0])).splitlines() + "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format( + name, parts[0] + ) + ).splitlines() assert disks == ["external"] time.sleep(1) - node1.query("ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format(name, - volume_or_disk=volume_or_disk)) + node1.query( + "ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format( + name, volume_or_disk=volume_or_disk + ) + ) disks = node1.query( "SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201903' and active = 1".format( - name)).splitlines() + name + ) + ).splitlines() assert disks == ["external"] * 2 assert node1.query("SELECT COUNT() FROM {}".format(name)) == "2\n" @@ -759,15 +1025,13 @@ def test_alter_move_half_of_partition(start_cluster, volume_or_disk): node1.query(f"DROP TABLE IF EXISTS {name} SYNC") -@pytest.mark.parametrize("volume_or_disk", [ - "DISK", - "VOLUME" -]) +@pytest.mark.parametrize("volume_or_disk", ["DISK", "VOLUME"]) def test_alter_double_move_partition(start_cluster, volume_or_disk): name = "alter_double_move_partition" engine = "MergeTree()" try: - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS {name} ( EventDate Date, number UInt64 @@ -775,29 +1039,42 @@ def test_alter_double_move_partition(start_cluster, volume_or_disk): ORDER BY tuple() PARTITION BY toYYYYMM(EventDate) SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) node1.query("SYSTEM STOP MERGES {}".format(name)) node1.query("INSERT INTO {} VALUES(toDate('2019-03-15'), 65)".format(name)) node1.query("INSERT INTO {} VALUES(toDate('2019-03-16'), 42)".format(name)) used_disks = get_used_disks_for_table(node1, name) - assert all(d.startswith("jbod") for d in used_disks), "All writes should go to jbods" + assert all( + d.startswith("jbod") for d in used_disks + ), "All writes should go to jbods" time.sleep(1) - node1.query("ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format(name, - volume_or_disk=volume_or_disk)) + node1.query( + "ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format( + name, volume_or_disk=volume_or_disk + ) + ) disks = node1.query( "SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201903' and active = 1".format( - name)).splitlines() + name + ) + ).splitlines() assert disks == ["external"] * 2 assert node1.query("SELECT COUNT() FROM {}".format(name)) == "2\n" time.sleep(1) with pytest.raises(QueryRuntimeException): - node1.query("ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format(name, - volume_or_disk=volume_or_disk)) + node1.query( + "ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format( + name, volume_or_disk=volume_or_disk + ) + ) finally: node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @@ -808,8 +1085,15 @@ def produce_alter_move(node, name): if move_type == "PART": for _ in range(10): try: - parts = node1.query( - "SELECT name from system.parts where table = '{}' and active = 1".format(name)).strip().split('\n') + parts = ( + node1.query( + "SELECT name from system.parts where table = '{}' and active = 1".format( + name + ) + ) + .strip() + 
.split("\n") + ) break except QueryRuntimeException: pass @@ -826,20 +1110,30 @@ def produce_alter_move(node, name): else: move_volume = random.choice(["'main'", "'external'"]) try: - node1.query("ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format( - name, mt=move_type, mp=move_part, md=move_disk, mv=move_volume)) + node1.query( + "ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format( + name, mt=move_type, mp=move_part, md=move_disk, mv=move_volume + ) + ) except QueryRuntimeException as ex: pass -@pytest.mark.parametrize("name,engine", [ - pytest.param("concurrently_altering_mt", "MergeTree()", id="mt"), - pytest.param("concurrently_altering_replicated_mt", - "ReplicatedMergeTree('/clickhouse/concurrently_altering_replicated_mt', '1')", id="replicated"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("concurrently_altering_mt", "MergeTree()", id="mt"), + pytest.param( + "concurrently_altering_replicated_mt", + "ReplicatedMergeTree('/clickhouse/concurrently_altering_replicated_mt', '1')", + id="replicated", + ), + ], +) def test_concurrent_alter_move(start_cluster, name, engine): try: - node1.query_with_retry(""" + node1.query_with_retry( + """ CREATE TABLE IF NOT EXISTS {name} ( EventDate Date, number UInt64 @@ -847,7 +1141,10 @@ def test_concurrent_alter_move(start_cluster, name, engine): ORDER BY tuple() PARTITION BY toYYYYMM(EventDate) SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) values = list({random.randint(1, 1000000) for _ in range(0, 1000)}) @@ -855,8 +1152,12 @@ def test_concurrent_alter_move(start_cluster, name, engine): for i in range(num): day = random.randint(11, 30) value = values.pop() - month = '0' + str(random.choice([3, 4])) - node1.query_with_retry("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value)) + month = "0" + str(random.choice([3, 4])) + node1.query_with_retry( + "INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format( + name, m=month, d=day, v=value + ) + ) def alter_move(num): for i in range(num): @@ -864,7 +1165,9 @@ def test_concurrent_alter_move(start_cluster, name, engine): def alter_update(num): for i in range(num): - node1.query("ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name)) + node1.query( + "ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name) + ) def optimize_table(num): for i in range(num): @@ -887,14 +1190,21 @@ def test_concurrent_alter_move(start_cluster, name, engine): node1.query(f"DROP TABLE IF EXISTS {name} SYNC") -@pytest.mark.parametrize("name,engine", [ - pytest.param("concurrently_dropping_mt", "MergeTree()", id="mt"), - pytest.param("concurrently_dropping_replicated_mt", - "ReplicatedMergeTree('/clickhouse/concurrently_dropping_replicated_mt', '1')", id="replicated"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("concurrently_dropping_mt", "MergeTree()", id="mt"), + pytest.param( + "concurrently_dropping_replicated_mt", + "ReplicatedMergeTree('/clickhouse/concurrently_dropping_replicated_mt', '1')", + id="replicated", + ), + ], +) def test_concurrent_alter_move_and_drop(start_cluster, name, engine): try: - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS {name} ( EventDate Date, number UInt64 @@ -902,7 +1212,10 @@ def test_concurrent_alter_move_and_drop(start_cluster, name, engine): ORDER BY tuple() PARTITION BY toYYYYMM(EventDate) SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + 
""".format( + name=name, engine=engine + ) + ) values = list({random.randint(1, 1000000) for _ in range(0, 1000)}) @@ -910,8 +1223,12 @@ def test_concurrent_alter_move_and_drop(start_cluster, name, engine): for i in range(num): day = random.randint(11, 30) value = values.pop() - month = '0' + str(random.choice([3, 4])) - node1.query_with_retry("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value)) + month = "0" + str(random.choice([3, 4])) + node1.query_with_retry( + "INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format( + name, m=month, d=day, v=value + ) + ) def alter_move(num): for i in range(num): @@ -921,7 +1238,9 @@ def test_concurrent_alter_move_and_drop(start_cluster, name, engine): for i in range(num): partition = random.choice([201903, 201904]) drach = random.choice(["drop", "detach"]) - node1.query("ALTER TABLE {} {} PARTITION {}".format(name, drach, partition)) + node1.query( + "ALTER TABLE {} {} PARTITION {}".format(name, drach, partition) + ) insert(100) p = Pool(15) @@ -940,29 +1259,49 @@ def test_concurrent_alter_move_and_drop(start_cluster, name, engine): node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC") -@pytest.mark.parametrize("name,engine", [ - pytest.param("detach_attach_mt", "MergeTree()", id="mt"), - pytest.param("replicated_detach_attach_mt", "ReplicatedMergeTree('/clickhouse/replicated_detach_attach_mt', '1')", id="replicated"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("detach_attach_mt", "MergeTree()", id="mt"), + pytest.param( + "replicated_detach_attach_mt", + "ReplicatedMergeTree('/clickhouse/replicated_detach_attach_mt', '1')", + id="replicated", + ), + ], +) def test_detach_attach(start_cluster, name, engine): try: - node1.query_with_retry(""" + node1.query_with_retry( + """ CREATE TABLE IF NOT EXISTS {name} ( s1 String ) ENGINE = {engine} ORDER BY tuple() SETTINGS storage_policy='moving_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) data = [] # 5MB in total for i in range(5): data.append(get_random_string(1024 * 1024)) # 1MB row - node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data]))) + node1.query_with_retry( + "INSERT INTO {} VALUES {}".format( + name, ",".join(["('" + x + "')" for x in data]) + ) + ) node1.query("ALTER TABLE {} DETACH PARTITION tuple()".format(name)) assert node1.query("SELECT count() FROM {}".format(name)).strip() == "0" - assert node1.query("SELECT disk FROM system.detached_parts WHERE table = '{}'".format(name)).strip() == "jbod1" + assert ( + node1.query( + "SELECT disk FROM system.detached_parts WHERE table = '{}'".format(name) + ).strip() + == "jbod1" + ) node1.query("ALTER TABLE {} ATTACH PARTITION tuple()".format(name)) assert node1.query("SELECT count() FROM {}".format(name)).strip() == "5" @@ -971,59 +1310,101 @@ def test_detach_attach(start_cluster, name, engine): node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC") -@pytest.mark.parametrize("name,engine", [ - pytest.param("mutating_mt", "MergeTree()", id="mt"), - pytest.param("replicated_mutating_mt", "ReplicatedMergeTree('/clickhouse/replicated_mutating_mt', '1')", id="replicated"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("mutating_mt", "MergeTree()", id="mt"), + pytest.param( + "replicated_mutating_mt", + "ReplicatedMergeTree('/clickhouse/replicated_mutating_mt', '1')", + id="replicated", + ), + ], +) def test_mutate_to_another_disk(start_cluster, 
name, engine): try: - node1.query_with_retry(""" + node1.query_with_retry( + """ CREATE TABLE IF NOT EXISTS {name} ( s1 String ) ENGINE = {engine} ORDER BY tuple() SETTINGS storage_policy='moving_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) for i in range(5): data = [] # 5MB in total for i in range(5): data.append(get_random_string(1024 * 1024)) # 1MB row - node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data]))) + node1.query_with_retry( + "INSERT INTO {} VALUES {}".format( + name, ",".join(["('" + x + "')" for x in data]) + ) + ) node1.query("ALTER TABLE {} UPDATE s1 = concat(s1, 'x') WHERE 1".format(name)) retry = 20 - while node1.query("SELECT * FROM system.mutations WHERE is_done = 0") != "" and retry > 0: + while ( + node1.query("SELECT * FROM system.mutations WHERE is_done = 0") != "" + and retry > 0 + ): retry -= 1 time.sleep(0.5) - if node1.query("SELECT latest_fail_reason FROM system.mutations WHERE table = '{}'".format(name)) == "": - assert node1.query("SELECT sum(endsWith(s1, 'x')) FROM {}".format(name)) == "25\n" + if ( + node1.query( + "SELECT latest_fail_reason FROM system.mutations WHERE table = '{}'".format( + name + ) + ) + == "" + ): + assert ( + node1.query("SELECT sum(endsWith(s1, 'x')) FROM {}".format(name)) + == "25\n" + ) else: # mutation failed, let's try on another disk print("Mutation failed") node1.query_with_retry("OPTIMIZE TABLE {} FINAL".format(name)) - node1.query("ALTER TABLE {} UPDATE s1 = concat(s1, 'x') WHERE 1".format(name)) + node1.query( + "ALTER TABLE {} UPDATE s1 = concat(s1, 'x') WHERE 1".format(name) + ) retry = 20 - while node1.query("SELECT * FROM system.mutations WHERE is_done = 0") != "" and retry > 0: + while ( + node1.query("SELECT * FROM system.mutations WHERE is_done = 0") != "" + and retry > 0 + ): retry -= 1 time.sleep(0.5) - assert node1.query("SELECT sum(endsWith(s1, 'x')) FROM {}".format(name)) == "25\n" - - + assert ( + node1.query("SELECT sum(endsWith(s1, 'x')) FROM {}".format(name)) + == "25\n" + ) finally: node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC") -@pytest.mark.parametrize("name,engine", [ - pytest.param("alter_modifying_mt", "MergeTree()", id="mt"), - pytest.param("replicated_alter_modifying_mt", "ReplicatedMergeTree('/clickhouse/replicated_alter_modifying_mt', '1')", id="replicated"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("alter_modifying_mt", "MergeTree()", id="mt"), + pytest.param( + "replicated_alter_modifying_mt", + "ReplicatedMergeTree('/clickhouse/replicated_alter_modifying_mt', '1')", + id="replicated", + ), + ], +) def test_concurrent_alter_modify(start_cluster, name, engine): try: - node1.query_with_retry(""" + node1.query_with_retry( + """ CREATE TABLE IF NOT EXISTS {name} ( EventDate Date, number UInt64 @@ -1031,7 +1412,10 @@ def test_concurrent_alter_modify(start_cluster, name, engine): ORDER BY tuple() PARTITION BY toYYYYMM(EventDate) SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) values = list({random.randint(1, 1000000) for _ in range(0, 1000)}) @@ -1039,8 +1423,12 @@ def test_concurrent_alter_modify(start_cluster, name, engine): for i in range(num): day = random.randint(11, 30) value = values.pop() - month = '0' + str(random.choice([3, 4])) - node1.query_with_retry("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value)) + month = 
"0" + str(random.choice([3, 4])) + node1.query_with_retry( + "INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format( + name, m=month, d=day, v=value + ) + ) def alter_move(num): for i in range(num): @@ -1050,7 +1438,11 @@ def test_concurrent_alter_modify(start_cluster, name, engine): for i in range(num): column_type = random.choice(["UInt64", "String"]) try: - node1.query("ALTER TABLE {} MODIFY COLUMN number {}".format(name, column_type)) + node1.query( + "ALTER TABLE {} MODIFY COLUMN number {}".format( + name, column_type + ) + ) except: if "Replicated" not in engine: raise @@ -1078,13 +1470,17 @@ def test_concurrent_alter_modify(start_cluster, name, engine): def test_simple_replication_and_moves(start_cluster): try: for i, node in enumerate([node1, node2]): - node.query_with_retry(""" + node.query_with_retry( + """ CREATE TABLE IF NOT EXISTS replicated_table_for_moves ( s1 String ) ENGINE = ReplicatedMergeTree('/clickhouse/replicated_table_for_moves', '{}') ORDER BY tuple() SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=2 - """.format(i + 1)) + """.format( + i + 1 + ) + ) def insert(num): for i in range(num): @@ -1092,8 +1488,11 @@ def test_simple_replication_and_moves(start_cluster): data = [] # 1MB in total for i in range(2): data.append(get_random_string(512 * 1024)) # 500KB value - node.query_with_retry("INSERT INTO replicated_table_for_moves VALUES {}".format( - ','.join(["('" + x + "')" for x in data]))) + node.query_with_retry( + "INSERT INTO replicated_table_for_moves VALUES {}".format( + ",".join(["('" + x + "')" for x in data]) + ) + ) def optimize(num): for i in range(num): @@ -1108,7 +1507,10 @@ def test_simple_replication_and_moves(start_cluster): for task in tasks: task.get(timeout=60) - node1.query_with_retry("SYSTEM SYNC REPLICA ON CLUSTER test_cluster replicated_table_for_moves", timeout=5) + node1.query_with_retry( + "SYSTEM SYNC REPLICA ON CLUSTER test_cluster replicated_table_for_moves", + timeout=5, + ) node1.query("SELECT COUNT() FROM replicated_table_for_moves") == "40\n" node2.query("SELECT COUNT() FROM replicated_table_for_moves") == "40\n" @@ -1122,9 +1524,15 @@ def test_simple_replication_and_moves(start_cluster): node2.query("SYSTEM STOP MERGES") node1.query_with_retry( - "INSERT INTO replicated_table_for_moves VALUES {}".format(','.join(["('" + x + "')" for x in data]))) + "INSERT INTO replicated_table_for_moves VALUES {}".format( + ",".join(["('" + x + "')" for x in data]) + ) + ) node2.query_with_retry( - "INSERT INTO replicated_table_for_moves VALUES {}".format(','.join(["('" + x + "')" for x in data]))) + "INSERT INTO replicated_table_for_moves VALUES {}".format( + ",".join(["('" + x + "')" for x in data]) + ) + ) time.sleep(3) # nothing was moved @@ -1143,24 +1551,33 @@ def test_simple_replication_and_moves(start_cluster): def test_download_appropriate_disk(start_cluster): try: for i, node in enumerate([node1, node2]): - node.query_with_retry(""" + node.query_with_retry( + """ CREATE TABLE IF NOT EXISTS replicated_table_for_download ( s1 String ) ENGINE = ReplicatedMergeTree('/clickhouse/replicated_table_for_download', '{}') ORDER BY tuple() SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=2 - """.format(i + 1)) + """.format( + i + 1 + ) + ) data = [] for i in range(50): data.append(get_random_string(1024 * 1024)) # 1MB value node1.query_with_retry( - "INSERT INTO 
replicated_table_for_download VALUES {}".format(','.join(["('" + x + "')" for x in data]))) + "INSERT INTO replicated_table_for_download VALUES {}".format( + ",".join(["('" + x + "')" for x in data]) + ) + ) for _ in range(10): try: print("Syncing replica") - node2.query_with_retry("SYSTEM SYNC REPLICA replicated_table_for_download") + node2.query_with_retry( + "SYSTEM SYNC REPLICA replicated_table_for_download" + ) break except: time.sleep(0.5) @@ -1171,24 +1588,32 @@ def test_download_appropriate_disk(start_cluster): finally: for node in [node1, node2]: - node.query_with_retry("DROP TABLE IF EXISTS replicated_table_for_download SYNC") + node.query_with_retry( + "DROP TABLE IF EXISTS replicated_table_for_download SYNC" + ) def test_rename(start_cluster): try: - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS default.renaming_table ( s String ) ENGINE = MergeTree ORDER BY tuple() SETTINGS storage_policy='small_jbod_with_external' - """) + """ + ) for _ in range(5): data = [] for i in range(10): data.append(get_random_string(1024 * 1024)) # 1MB value - node1.query("INSERT INTO renaming_table VALUES {}".format(','.join(["('" + x + "')" for x in data]))) + node1.query( + "INSERT INTO renaming_table VALUES {}".format( + ",".join(["('" + x + "')" for x in data]) + ) + ) disks = get_used_disks_for_table(node1, "renaming_table") assert len(disks) > 1 @@ -1215,7 +1640,8 @@ def test_rename(start_cluster): def test_freeze(start_cluster): try: - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS default.freezing_table ( d Date, s String @@ -1223,7 +1649,8 @@ def test_freeze(start_cluster): ORDER BY tuple() PARTITION BY toYYYYMM(d) SETTINGS storage_policy='small_jbod_with_external' - """) + """ + ) for _ in range(5): data = [] @@ -1231,8 +1658,11 @@ def test_freeze(start_cluster): for i in range(10): data.append(get_random_string(1024 * 1024)) # 1MB value dates.append("toDate('2019-03-05')") - node1.query("INSERT INTO freezing_table VALUES {}".format( - ','.join(["(" + d + ", '" + s + "')" for d, s in zip(dates, data)]))) + node1.query( + "INSERT INTO freezing_table VALUES {}".format( + ",".join(["(" + d + ", '" + s + "')" for d, s in zip(dates, data)]) + ) + ) disks = get_used_disks_for_table(node1, "freezing_table") assert len(disks) > 1 @@ -1240,8 +1670,12 @@ def test_freeze(start_cluster): node1.query("ALTER TABLE freezing_table FREEZE PARTITION 201903") # check shadow files (backups) exists - node1.exec_in_container(["bash", "-c", "find /jbod1/shadow -name '*.mrk2' | grep '.*'"]) - node1.exec_in_container(["bash", "-c", "find /external/shadow -name '*.mrk2' | grep '.*'"]) + node1.exec_in_container( + ["bash", "-c", "find /jbod1/shadow -name '*.mrk2' | grep '.*'"] + ) + node1.exec_in_container( + ["bash", "-c", "find /external/shadow -name '*.mrk2' | grep '.*'"] + ) finally: node1.query("DROP TABLE IF EXISTS default.freezing_table SYNC") @@ -1252,19 +1686,27 @@ def test_kill_while_insert(start_cluster): try: name = "test_kill_while_insert" - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS {name} ( s String ) ENGINE = MergeTree ORDER BY tuple() SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name)) + """.format( + name=name + ) + ) data = [] dates = [] for i in range(10): data.append(get_random_string(1024 * 1024)) # 1MB value - node1.query("INSERT INTO {name} VALUES {}".format(','.join(["('" + s + "')" for s in data]), name=name)) + node1.query( + "INSERT INTO {name} VALUES {}".format( + ",".join(["('" + s + "')" for s in data]), 
name=name + ) + ) disks = get_used_disks_for_table(node1, name) assert set(disks) == {"jbod1"} @@ -1276,12 +1718,19 @@ def test_kill_while_insert(start_cluster): """(っಠ‿ಠ)っ""" start_time = time.time() - long_select = threading.Thread(target=ignore_exceptions, args=(node1.query, "SELECT sleep(3) FROM {name}".format(name=name))) + long_select = threading.Thread( + target=ignore_exceptions, + args=(node1.query, "SELECT sleep(3) FROM {name}".format(name=name)), + ) long_select.start() time.sleep(0.5) - node1.query("ALTER TABLE {name} MOVE PARTITION tuple() TO DISK 'external'".format(name=name)) + node1.query( + "ALTER TABLE {name} MOVE PARTITION tuple() TO DISK 'external'".format( + name=name + ) + ) assert time.time() - start_time < 2 node1.restart_clickhouse(kill=True) @@ -1290,7 +1739,9 @@ def test_kill_while_insert(start_cluster): except: """""" - assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["10"] + assert node1.query( + "SELECT count() FROM {name}".format(name=name) + ).splitlines() == ["10"] finally: try: @@ -1303,13 +1754,17 @@ def test_move_while_merge(start_cluster): try: name = "test_move_while_merge" - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS {name} ( n Int64 ) ENGINE = MergeTree ORDER BY sleep(2) SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name)) + """.format( + name=name + ) + ) node1.query("INSERT INTO {name} VALUES (1)".format(name=name)) node1.query("INSERT INTO {name} VALUES (2)".format(name=name)) @@ -1326,7 +1781,11 @@ def test_move_while_merge(start_cluster): time.sleep(0.5) with pytest.raises(QueryRuntimeException): - node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0])) + node1.query( + "ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format( + name=name, part=parts[0] + ) + ) exiting = False no_exception = {} @@ -1335,8 +1794,11 @@ def test_move_while_merge(start_cluster): while not exiting: try: node1.query( - "ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0])) - no_exception['missing'] = 'exception' + "ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format( + name=name, part=parts[0] + ) + ) + no_exception["missing"] = "exception" break except QueryRuntimeException: """""" @@ -1352,7 +1814,9 @@ def test_move_while_merge(start_cluster): alter_thread.join() assert len(no_exception) == 0 - assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["2"] + assert node1.query( + "SELECT count() FROM {name}".format(name=name) + ).splitlines() == ["2"] finally: node1.query(f"DROP TABLE IF EXISTS {name} SYNC") @@ -1362,47 +1826,85 @@ def test_move_across_policies_does_not_work(start_cluster): try: name = "test_move_across_policies_does_not_work" - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS {name} ( n Int64 ) ENGINE = MergeTree ORDER BY tuple() SETTINGS storage_policy='jbods_with_external' - """.format(name=name)) + """.format( + name=name + ) + ) - node1.query(""" + node1.query( + """ CREATE TABLE IF NOT EXISTS {name}2 ( n Int64 ) ENGINE = MergeTree ORDER BY tuple() SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name)) + """.format( + name=name + ) + ) node1.query("""INSERT INTO {name} VALUES (1)""".format(name=name)) try: - node1.query("""ALTER TABLE {name} MOVE PARTITION tuple() TO DISK 'jbod2'""".format(name=name)) + node1.query( + """ALTER TABLE {name} MOVE PARTITION tuple() TO DISK 'jbod2'""".format( + 
name=name + ) + ) except QueryRuntimeException: """All parts of partition 'all' are already on disk 'jbod2'.""" - with pytest.raises(QueryRuntimeException, match='.*because disk does not belong to storage policy.*'): - node1.query("""ALTER TABLE {name}2 ATTACH PARTITION tuple() FROM {name}""".format(name=name)) + with pytest.raises( + QueryRuntimeException, + match=".*because disk does not belong to storage policy.*", + ): + node1.query( + """ALTER TABLE {name}2 ATTACH PARTITION tuple() FROM {name}""".format( + name=name + ) + ) - with pytest.raises(QueryRuntimeException, match='.*because disk does not belong to storage policy.*'): - node1.query("""ALTER TABLE {name}2 REPLACE PARTITION tuple() FROM {name}""".format(name=name)) + with pytest.raises( + QueryRuntimeException, + match=".*because disk does not belong to storage policy.*", + ): + node1.query( + """ALTER TABLE {name}2 REPLACE PARTITION tuple() FROM {name}""".format( + name=name + ) + ) - with pytest.raises(QueryRuntimeException, match='.*should have the same storage policy of source table.*'): - node1.query("""ALTER TABLE {name} MOVE PARTITION tuple() TO TABLE {name}2""".format(name=name)) + with pytest.raises( + QueryRuntimeException, + match=".*should have the same storage policy of source table.*", + ): + node1.query( + """ALTER TABLE {name} MOVE PARTITION tuple() TO TABLE {name}2""".format( + name=name + ) + ) - assert node1.query("""SELECT * FROM {name}""".format(name=name)).splitlines() == ["1"] + assert node1.query( + """SELECT * FROM {name}""".format(name=name) + ).splitlines() == ["1"] finally: node1.query(f"DROP TABLE IF EXISTS {name} SYNC") node1.query(f"DROP TABLE IF EXISTS {name}2 SYNC") -def _insert_merge_execute(node, name, policy, parts, cmds, parts_before_cmds, parts_after_cmds): +def _insert_merge_execute( + node, name, policy, parts, cmds, parts_before_cmds, parts_after_cmds +): try: - node.query(""" + node.query( + """ CREATE TABLE IF NOT EXISTS {name} ( n Int64 ) ENGINE = MergeTree @@ -1410,7 +1912,10 @@ def _insert_merge_execute(node, name, policy, parts, cmds, parts_before_cmds, pa PARTITION BY tuple() TTL now()-1 TO VOLUME 'external' SETTINGS storage_policy='{policy}' - """.format(name=name, policy=policy)) + """.format( + name=name, policy=policy + ) + ) for i in range(parts): node.query("""INSERT INTO {name} VALUES ({n})""".format(name=name, n=i)) @@ -1437,29 +1942,45 @@ def _insert_merge_execute(node, name, policy, parts, cmds, parts_before_cmds, pa def _check_merges_are_working(node, storage_policy, volume, shall_work): try: - name = "_check_merges_are_working_{storage_policy}_{volume}".format(storage_policy=storage_policy, volume=volume) + name = "_check_merges_are_working_{storage_policy}_{volume}".format( + storage_policy=storage_policy, volume=volume + ) - node.query(""" + node.query( + """ CREATE TABLE IF NOT EXISTS {name} ( n Int64 ) ENGINE = MergeTree ORDER BY tuple() PARTITION BY tuple() SETTINGS storage_policy='{storage_policy}' - """.format(name=name, storage_policy=storage_policy)) + """.format( + name=name, storage_policy=storage_policy + ) + ) created_parts = 24 for i in range(created_parts): node.query("""INSERT INTO {name} VALUES ({n})""".format(name=name, n=i)) try: - node.query("""ALTER TABLE {name} MOVE PARTITION tuple() TO VOLUME '{volume}' """.format(name=name, volume=volume)) + node.query( + """ALTER TABLE {name} MOVE PARTITION tuple() TO VOLUME '{volume}' """.format( + name=name, volume=volume + ) + ) except: """Ignore 'nothing to move'.""" - expected_disks = set(node.query(""" + 
expected_disks = set( + node.query( + """ SELECT disks FROM system.storage_policies ARRAY JOIN disks WHERE volume_name = '{volume_name}' - """.format(volume_name=volume)).splitlines()) + """.format( + volume_name=volume + ) + ).splitlines() + ) disks = get_used_disks_for_table(node, name) assert set(disks) <= expected_disks @@ -1474,11 +1995,22 @@ def _check_merges_are_working(node, storage_policy, volume, shall_work): def _get_prefer_not_to_merge_for_storage_policy(node, storage_policy): - return list(map(int, node.query("SELECT prefer_not_to_merge FROM system.storage_policies WHERE policy_name = '{}' ORDER BY volume_priority".format(storage_policy)).splitlines())) + return list( + map( + int, + node.query( + "SELECT prefer_not_to_merge FROM system.storage_policies WHERE policy_name = '{}' ORDER BY volume_priority".format( + storage_policy + ) + ).splitlines(), + ) + ) def test_simple_merge_tree_merges_are_disabled(start_cluster): - _check_merges_are_working(node1, "small_jbod_with_external_no_merges", "external", False) + _check_merges_are_working( + node1, "small_jbod_with_external_no_merges", "external", False + ) def test_no_merges_in_configuration_allow_from_query_without_reload(start_cluster): @@ -1489,9 +2021,15 @@ def test_no_merges_in_configuration_allow_from_query_without_reload(start_cluste assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1] _check_merges_are_working(node1, policy, "external", False) - _insert_merge_execute(node1, name, policy, 2, [ - "SYSTEM START MERGES ON VOLUME {}.external".format(policy) - ], 2, 1) + _insert_merge_execute( + node1, + name, + policy, + 2, + ["SYSTEM START MERGES ON VOLUME {}.external".format(policy)], + 2, + 1, + ) assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0] _check_merges_are_working(node1, policy, "external", True) @@ -1507,17 +2045,28 @@ def test_no_merges_in_configuration_allow_from_query_with_reload(start_cluster): assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1] _check_merges_are_working(node1, policy, "external", False) - _insert_merge_execute(node1, name, policy, 2, [ + _insert_merge_execute( + node1, + name, + policy, + 2, + [ "SYSTEM START MERGES ON VOLUME {}.external".format(policy), - "SYSTEM RELOAD CONFIG" - ], 2, 1) + "SYSTEM RELOAD CONFIG", + ], + 2, + 1, + ) assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0] _check_merges_are_working(node1, policy, "external", True) finally: node1.query("SYSTEM STOP MERGES ON VOLUME {}.external".format(policy)) -def test_no_merges_in_configuration_allow_from_query_with_reload_on_cluster(start_cluster): + +def test_no_merges_in_configuration_allow_from_query_with_reload_on_cluster( + start_cluster, +): try: name = "test_no_merges_in_configuration_allow_from_query_with_reload" policy = "small_jbod_with_external_no_merges" @@ -1525,15 +2074,29 @@ def test_no_merges_in_configuration_allow_from_query_with_reload_on_cluster(star assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1] _check_merges_are_working(node1, policy, "external", False) - _insert_merge_execute(node1, name, policy, 2, [ - "SYSTEM START MERGES ON CLUSTER test_cluster ON VOLUME {}.external".format(policy), - "SYSTEM RELOAD CONFIG ON CLUSTER test_cluster" - ], 2, 1) + _insert_merge_execute( + node1, + name, + policy, + 2, + [ + "SYSTEM START MERGES ON CLUSTER test_cluster ON VOLUME {}.external".format( + policy + ), + "SYSTEM RELOAD CONFIG ON CLUSTER test_cluster", + ], + 2, + 1, + ) assert 
_get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0] _check_merges_are_working(node1, policy, "external", True) finally: - node1.query("SYSTEM STOP MERGES ON CLUSTER test_cluster ON VOLUME {}.external".format(policy)) + node1.query( + "SYSTEM STOP MERGES ON CLUSTER test_cluster ON VOLUME {}.external".format( + policy + ) + ) def test_yes_merges_in_configuration_disallow_from_query_without_reload(start_cluster): @@ -1544,10 +2107,18 @@ def test_yes_merges_in_configuration_disallow_from_query_without_reload(start_cl assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0] _check_merges_are_working(node1, policy, "external", True) - _insert_merge_execute(node1, name, policy, 2, [ + _insert_merge_execute( + node1, + name, + policy, + 2, + [ "SYSTEM STOP MERGES ON VOLUME {}.external".format(policy), - "INSERT INTO {name} VALUES (2)".format(name=name) - ], 1, 2) + "INSERT INTO {name} VALUES (2)".format(name=name), + ], + 1, + 2, + ) assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1] _check_merges_are_working(node1, policy, "external", False) @@ -1563,11 +2134,19 @@ def test_yes_merges_in_configuration_disallow_from_query_with_reload(start_clust assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0] _check_merges_are_working(node1, policy, "external", True) - _insert_merge_execute(node1, name, policy, 2, [ + _insert_merge_execute( + node1, + name, + policy, + 2, + [ "SYSTEM STOP MERGES ON VOLUME {}.external".format(policy), "INSERT INTO {name} VALUES (2)".format(name=name), - "SYSTEM RELOAD CONFIG" - ], 1, 2) + "SYSTEM RELOAD CONFIG", + ], + 1, + 2, + ) assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1] _check_merges_are_working(node1, policy, "external", False) diff --git a/tests/integration/test_mutations_hardlinks/test.py b/tests/integration/test_mutations_hardlinks/test.py index 7ac7fe12108..f70cbccefa5 100644 --- a/tests/integration/test_mutations_hardlinks/test.py +++ b/tests/integration/test_mutations_hardlinks/test.py @@ -8,7 +8,7 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/wide_parts_only.xml']) +node1 = cluster.add_instance("node1", main_configs=["configs/wide_parts_only.xml"]) @pytest.fixture(scope="module") @@ -21,42 +21,61 @@ def started_cluster(): def check_hardlinks(table, part_path, column_file, count): - column_path = os.path.join("/var/lib/clickhouse/data/default", table, part_path, column_file) + column_path = os.path.join( + "/var/lib/clickhouse/data/default", table, part_path, column_file + ) script = """ export INODE=`ls -i {column_path} | awk '{{print $1}}'` export COUNT=`find /var/lib/clickhouse -inum $INODE | wc -l` test $COUNT = {count} - """.format(column_path=column_path, count=count) + """.format( + column_path=column_path, count=count + ) node1.exec_in_container(["bash", "-c", script]) def check_exists(table, part_path, column_file): - column_path = os.path.join("/var/lib/clickhouse/data/default", table, part_path, column_file) + column_path = os.path.join( + "/var/lib/clickhouse/data/default", table, part_path, column_file + ) node1.exec_in_container(["bash", "-c", "test -f {}".format(column_path)]) def test_update_mutation(started_cluster): node1.query( - "CREATE TABLE table_for_update(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()") + "CREATE TABLE table_for_update(key UInt64, value1 UInt64, value2 String) ENGINE 
MergeTree() ORDER BY tuple()" + ) - node1.query("INSERT INTO table_for_update SELECT number, number, toString(number) from numbers(100)") + node1.query( + "INSERT INTO table_for_update SELECT number, number, toString(number) from numbers(100)" + ) - assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(range(100)) + assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum( + range(100) + ) - node1.query("ALTER TABLE table_for_update UPDATE value1 = value1 * value1 WHERE 1", - settings={"mutations_sync": "2"}) - assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(i * i for i in range(100)) + node1.query( + "ALTER TABLE table_for_update UPDATE value1 = value1 * value1 WHERE 1", + settings={"mutations_sync": "2"}, + ) + assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum( + i * i for i in range(100) + ) check_hardlinks("table_for_update", "all_1_1_0_2", "key.bin", 2) check_hardlinks("table_for_update", "all_1_1_0_2", "value2.bin", 2) check_hardlinks("table_for_update", "all_1_1_0_2", "value1.bin", 1) - node1.query("ALTER TABLE table_for_update UPDATE key=key, value1=value1, value2=value2 WHERE 1", - settings={"mutations_sync": "2"}) + node1.query( + "ALTER TABLE table_for_update UPDATE key=key, value1=value1, value2=value2 WHERE 1", + settings={"mutations_sync": "2"}, + ) - assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(i * i for i in range(100)) + assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum( + i * i for i in range(100) + ) check_hardlinks("table_for_update", "all_1_1_0_3", "key.bin", 1) check_hardlinks("table_for_update", "all_1_1_0_3", "value1.bin", 1) @@ -65,15 +84,25 @@ def test_update_mutation(started_cluster): def test_modify_mutation(started_cluster): node1.query( - "CREATE TABLE table_for_modify(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()") + "CREATE TABLE table_for_modify(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()" + ) - node1.query("INSERT INTO table_for_modify SELECT number, number, toString(number) from numbers(100)") + node1.query( + "INSERT INTO table_for_modify SELECT number, number, toString(number) from numbers(100)" + ) - assert int(node1.query("SELECT sum(value1) FROM table_for_modify").strip()) == sum(range(100)) + assert int(node1.query("SELECT sum(value1) FROM table_for_modify").strip()) == sum( + range(100) + ) - node1.query("ALTER TABLE table_for_modify MODIFY COLUMN value2 UInt64", settings={"mutations_sync": "2"}) + node1.query( + "ALTER TABLE table_for_modify MODIFY COLUMN value2 UInt64", + settings={"mutations_sync": "2"}, + ) - assert int(node1.query("SELECT sum(value2) FROM table_for_modify").strip()) == sum(range(100)) + assert int(node1.query("SELECT sum(value2) FROM table_for_modify").strip()) == sum( + range(100) + ) check_hardlinks("table_for_modify", "all_1_1_0_2", "key.bin", 2) check_hardlinks("table_for_modify", "all_1_1_0_2", "value1.bin", 2) @@ -82,13 +111,21 @@ def test_modify_mutation(started_cluster): def test_drop_mutation(started_cluster): node1.query( - "CREATE TABLE table_for_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()") + "CREATE TABLE table_for_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()" + ) - node1.query("INSERT INTO table_for_drop SELECT number, number, toString(number) from numbers(100)") + node1.query( + 
"INSERT INTO table_for_drop SELECT number, number, toString(number) from numbers(100)" + ) - assert int(node1.query("SELECT sum(value1) FROM table_for_drop").strip()) == sum(range(100)) + assert int(node1.query("SELECT sum(value1) FROM table_for_drop").strip()) == sum( + range(100) + ) - node1.query("ALTER TABLE table_for_drop DROP COLUMN value2", settings={"mutations_sync": "2"}) + node1.query( + "ALTER TABLE table_for_drop DROP COLUMN value2", + settings={"mutations_sync": "2"}, + ) check_hardlinks("table_for_drop", "all_1_1_0_2", "key.bin", 2) check_hardlinks("table_for_drop", "all_1_1_0_2", "value1.bin", 2) @@ -101,23 +138,31 @@ def test_drop_mutation(started_cluster): def test_delete_and_drop_mutation(started_cluster): node1.query( - "CREATE TABLE table_for_delete_and_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()") + "CREATE TABLE table_for_delete_and_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()" + ) - node1.query("INSERT INTO table_for_delete_and_drop SELECT number, number, toString(number) from numbers(100)") + node1.query( + "INSERT INTO table_for_delete_and_drop SELECT number, number, toString(number) from numbers(100)" + ) - assert int(node1.query("SELECT sum(value1) FROM table_for_delete_and_drop").strip()) == sum(range(100)) + assert int( + node1.query("SELECT sum(value1) FROM table_for_delete_and_drop").strip() + ) == sum(range(100)) node1.query("SYSTEM STOP MERGES") def mutate(): - node1.query("ALTER TABLE table_for_delete_and_drop DELETE WHERE key % 2 == 0, DROP COLUMN value2") + node1.query( + "ALTER TABLE table_for_delete_and_drop DELETE WHERE key % 2 == 0, DROP COLUMN value2" + ) p = Pool(2) p.apply_async(mutate) for _ in range(1, 100): result = node1.query( - "SELECT COUNT() FROM system.mutations WHERE table = 'table_for_delete_and_drop' and is_done=0") + "SELECT COUNT() FROM system.mutations WHERE table = 'table_for_delete_and_drop' and is_done=0" + ) try: if int(result.strip()) == 2: break @@ -129,8 +174,11 @@ def test_delete_and_drop_mutation(started_cluster): node1.query("SYSTEM START MERGES") - assert_eq_with_retry(node1, "SELECT COUNT() FROM table_for_delete_and_drop", - str(sum(1 for i in range(100) if i % 2 != 0))) + assert_eq_with_retry( + node1, + "SELECT COUNT() FROM table_for_delete_and_drop", + str(sum(1 for i in range(100) if i % 2 != 0)), + ) check_hardlinks("table_for_delete_and_drop", "all_1_1_0_3", "key.bin", 1) check_hardlinks("table_for_delete_and_drop", "all_1_1_0_3", "value1.bin", 1) diff --git a/tests/integration/test_mutations_in_partitions_of_merge_tree/test.py b/tests/integration/test_mutations_in_partitions_of_merge_tree/test.py index 2abeaf50cbd..2ab5816e5b1 100644 --- a/tests/integration/test_mutations_in_partitions_of_merge_tree/test.py +++ b/tests/integration/test_mutations_in_partitions_of_merge_tree/test.py @@ -5,11 +5,19 @@ import helpers.cluster cluster = helpers.cluster.ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/logs_config.xml', 'configs/cluster.xml'], - with_zookeeper=True, stay_alive=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/logs_config.xml", "configs/cluster.xml"], + with_zookeeper=True, + stay_alive=True, +) -node2 = cluster.add_instance('node2', main_configs=['configs/logs_config.xml', 'configs/cluster.xml'], - with_zookeeper=True, stay_alive=True) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/logs_config.xml", "configs/cluster.xml"], + with_zookeeper=True, + 
stay_alive=True, +) @pytest.fixture(scope="module") @@ -26,19 +34,39 @@ def test_trivial_alter_in_partition_merge_tree_without_where(started_cluster): try: name = "test_trivial_alter_in_partition_merge_tree_without_where" node1.query("DROP TABLE IF EXISTS {}".format(name)) - node1.query("CREATE TABLE {} (p Int64, x Int64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY p".format(name)) + node1.query( + "CREATE TABLE {} (p Int64, x Int64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY p".format( + name + ) + ) node1.query("INSERT INTO {} VALUES (1, 2), (2, 3)".format(name)) with pytest.raises(helpers.client.QueryRuntimeException): - node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 1 SETTINGS mutations_sync = 2".format(name)) + node1.query( + "ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 1 SETTINGS mutations_sync = 2".format( + name + ) + ) assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["5"] with pytest.raises(helpers.client.QueryRuntimeException): - node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 2 SETTINGS mutations_sync = 2".format(name)) + node1.query( + "ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 2 SETTINGS mutations_sync = 2".format( + name + ) + ) assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["5"] with pytest.raises(helpers.client.QueryRuntimeException): - node1.query("ALTER TABLE {} DELETE IN PARTITION 1 SETTINGS mutations_sync = 2".format(name)) + node1.query( + "ALTER TABLE {} DELETE IN PARTITION 1 SETTINGS mutations_sync = 2".format( + name + ) + ) assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["5"] with pytest.raises(helpers.client.QueryRuntimeException): - node1.query("ALTER TABLE {} DELETE IN PARTITION 2 SETTINGS mutations_sync = 2".format(name)) + node1.query( + "ALTER TABLE {} DELETE IN PARTITION 2 SETTINGS mutations_sync = 2".format( + name + ) + ) assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["5"] finally: node1.query("DROP TABLE IF EXISTS {}".format(name)) @@ -48,16 +76,39 @@ def test_trivial_alter_in_partition_merge_tree_with_where(started_cluster): try: name = "test_trivial_alter_in_partition_merge_tree_with_where" node1.query("DROP TABLE IF EXISTS {}".format(name)) - node1.query("CREATE TABLE {} (p Int64, x Int64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY p".format(name)) + node1.query( + "CREATE TABLE {} (p Int64, x Int64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY p".format( + name + ) + ) node1.query("INSERT INTO {} VALUES (1, 2), (2, 3)".format(name)) - node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 2 WHERE p = 2 SETTINGS mutations_sync = 2".format(name)) - assert node1.query("SELECT x FROM {} ORDER BY p".format(name)).splitlines() == ["2", "4"] + node1.query( + "ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 2 WHERE p = 2 SETTINGS mutations_sync = 2".format( + name + ) + ) + assert node1.query("SELECT x FROM {} ORDER BY p".format(name)).splitlines() == [ + "2", + "4", + ] assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["6"] - node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format(name)) + node1.query( + "ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format( + name + ) + ) assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["6"] - node1.query("ALTER TABLE {} DELETE IN PARTITION 2 WHERE p = 2 SETTINGS mutations_sync = 2".format(name)) + node1.query( + "ALTER TABLE {} DELETE 
IN PARTITION 2 WHERE p = 2 SETTINGS mutations_sync = 2".format( + name + ) + ) assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["2"] - node1.query("ALTER TABLE {} DELETE IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format(name)) + node1.query( + "ALTER TABLE {} DELETE IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format( + name + ) + ) assert node1.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["2"] finally: node1.query("DROP TABLE IF EXISTS {}".format(name)) @@ -72,28 +123,60 @@ def test_trivial_alter_in_partition_replicated_merge_tree(started_cluster): for node in (node1, node2): node.query( - "CREATE TABLE {name} (p Int64, x Int64) ENGINE=ReplicatedMergeTree('/clickhouse/{name}', '{{instance}}') ORDER BY tuple() PARTITION BY p" - .format(name=name)) + "CREATE TABLE {name} (p Int64, x Int64) ENGINE=ReplicatedMergeTree('/clickhouse/{name}', '{{instance}}') ORDER BY tuple() PARTITION BY p".format( + name=name + ) + ) node1.query("INSERT INTO {} VALUES (1, 2)".format(name)) node2.query("INSERT INTO {} VALUES (2, 3)".format(name)) - node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 2 WHERE 1 SETTINGS mutations_sync = 2".format(name)) + node1.query( + "ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 2 WHERE 1 SETTINGS mutations_sync = 2".format( + name + ) + ) for node in (node1, node2): - assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["6"] - node1.query("ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format(name)) + assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == [ + "6" + ] + node1.query( + "ALTER TABLE {} UPDATE x = x + 1 IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format( + name + ) + ) for node in (node1, node2): - assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["6"] + assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == [ + "6" + ] with pytest.raises(helpers.client.QueryRuntimeException): - node1.query("ALTER TABLE {} DELETE IN PARTITION 2 SETTINGS mutations_sync = 2".format(name)) + node1.query( + "ALTER TABLE {} DELETE IN PARTITION 2 SETTINGS mutations_sync = 2".format( + name + ) + ) for node in (node1, node2): - assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["6"] - node1.query("ALTER TABLE {} DELETE IN PARTITION 2 WHERE p = 2 SETTINGS mutations_sync = 2".format(name)) + assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == [ + "6" + ] + node1.query( + "ALTER TABLE {} DELETE IN PARTITION 2 WHERE p = 2 SETTINGS mutations_sync = 2".format( + name + ) + ) for node in (node1, node2): - assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["2"] - node1.query("ALTER TABLE {} DELETE IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format(name)) + assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == [ + "2" + ] + node1.query( + "ALTER TABLE {} DELETE IN PARTITION 1 WHERE p = 2 SETTINGS mutations_sync = 2".format( + name + ) + ) for node in (node1, node2): - assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == ["2"] + assert node.query("SELECT sum(x) FROM {}".format(name)).splitlines() == [ + "2" + ] finally: node1.query("DROP TABLE IF EXISTS {}".format(name)) node2.query("DROP TABLE IF EXISTS {}".format(name)) diff --git a/tests/integration/test_mutations_with_merge_tree/test.py b/tests/integration/test_mutations_with_merge_tree/test.py index 72ef8c9a373..d1843017b9f 100644 --- 
a/tests/integration/test_mutations_with_merge_tree/test.py +++ b/tests/integration/test_mutations_with_merge_tree/test.py @@ -5,8 +5,11 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance_test_mutations = cluster.add_instance('test_mutations_with_merge_tree', main_configs=['configs/config.xml'], - user_configs=['configs/users.xml']) +instance_test_mutations = cluster.add_instance( + "test_mutations_with_merge_tree", + main_configs=["configs/config.xml"], + user_configs=["configs/users.xml"], +) @pytest.fixture(scope="module") @@ -14,9 +17,11 @@ def started_cluster(): try: cluster.start() instance_test_mutations.query( - '''CREATE TABLE test_mutations_with_ast_elements(date Date, a UInt64, b String) ENGINE = MergeTree(date, (a, date), 8192)''') + """CREATE TABLE test_mutations_with_ast_elements(date Date, a UInt64, b String) ENGINE = MergeTree(date, (a, date), 8192)""" + ) instance_test_mutations.query( - '''INSERT INTO test_mutations_with_ast_elements SELECT '2019-07-29' AS date, 1, toString(number) FROM numbers(1) SETTINGS force_index_by_date = 0, force_primary_key = 0''') + """INSERT INTO test_mutations_with_ast_elements SELECT '2019-07-29' AS date, 1, toString(number) FROM numbers(1) SETTINGS force_index_by_date = 0, force_primary_key = 0""" + ) yield cluster finally: cluster.shutdown() @@ -28,110 +33,161 @@ def test_mutations_in_partition_background(started_cluster): name = "test_mutations_in_partition" instance_test_mutations.query( - f'''CREATE TABLE {name} (date Date, a UInt64, b String) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY a''') + f"""CREATE TABLE {name} (date Date, a UInt64, b String) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY a""" + ) instance_test_mutations.query( - f'''INSERT INTO {name} SELECT '2019-07-29' AS date, number, toString(number) FROM numbers({numbers})''') + f"""INSERT INTO {name} SELECT '2019-07-29' AS date, number, toString(number) FROM numbers({numbers})""" + ) for i in range(0, numbers, 3): - instance_test_mutations.query(f'''ALTER TABLE {name} DELETE IN PARTITION {i} WHERE a = {i}''') + instance_test_mutations.query( + f"""ALTER TABLE {name} DELETE IN PARTITION {i} WHERE a = {i}""" + ) for i in range(1, numbers, 3): - instance_test_mutations.query(f'''ALTER TABLE {name} UPDATE b = 'changed' IN PARTITION {i} WHERE a = {i} ''') + instance_test_mutations.query( + f"""ALTER TABLE {name} UPDATE b = 'changed' IN PARTITION {i} WHERE a = {i} """ + ) def count_and_changed(): - return instance_test_mutations.query(f"SELECT count(), countIf(b == 'changed') FROM {name} SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV").splitlines() + return instance_test_mutations.query( + f"SELECT count(), countIf(b == 'changed') FROM {name} SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV" + ).splitlines() all_done = False - for wait_times_for_mutation in range(100): # wait for replication 80 seconds max + for wait_times_for_mutation in range( + 100 + ): # wait for replication 80 seconds max time.sleep(0.8) if count_and_changed() == ["66,33"]: all_done = True break - print(instance_test_mutations.query( - f"SELECT mutation_id, command, parts_to_do, is_done, latest_failed_part, latest_fail_reason, parts_to_do_names FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames")) + print( + instance_test_mutations.query( + f"SELECT mutation_id, command, parts_to_do, is_done, latest_failed_part, latest_fail_reason, 
parts_to_do_names FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames" + ) + ) assert (count_and_changed(), all_done) == (["66,33"], True) - assert instance_test_mutations.query(f"SELECT count(), sum(is_done) FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV").splitlines() == ["67,67"] + assert instance_test_mutations.query( + f"SELECT count(), sum(is_done) FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV" + ).splitlines() == ["67,67"] finally: - instance_test_mutations.query(f'''DROP TABLE {name}''') + instance_test_mutations.query(f"""DROP TABLE {name}""") -@pytest.mark.parametrize("sync", [ - ("last",), - ("all",) -]) +@pytest.mark.parametrize("sync", [("last",), ("all",)]) def test_mutations_in_partition_sync(started_cluster, sync): try: numbers = 10 name = "test_mutations_in_partition_sync" instance_test_mutations.query( - f'''CREATE TABLE {name} (date Date, a UInt64, b String) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY a''') + f"""CREATE TABLE {name} (date Date, a UInt64, b String) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY a""" + ) instance_test_mutations.query( - f'''INSERT INTO {name} SELECT '2019-07-29' AS date, number, toString(number) FROM numbers({numbers})''') + f"""INSERT INTO {name} SELECT '2019-07-29' AS date, number, toString(number) FROM numbers({numbers})""" + ) for i in range(0, numbers, 3): - instance_test_mutations.query(f'''ALTER TABLE {name} DELETE IN PARTITION {i} WHERE a = {i}''' - + (' SETTINGS mutations_sync = 1' if sync == 'all' else '')) + instance_test_mutations.query( + f"""ALTER TABLE {name} DELETE IN PARTITION {i} WHERE a = {i}""" + + (" SETTINGS mutations_sync = 1" if sync == "all" else "") + ) - for reverse_index, i in reversed(list(enumerate(reversed(range(1, numbers, 3))))): - instance_test_mutations.query(f'''ALTER TABLE {name} UPDATE b = 'changed' IN PARTITION {i} WHERE a = {i}''' - + (' SETTINGS mutations_sync = 1' if not reverse_index or sync == 'all' else '')) + for reverse_index, i in reversed( + list(enumerate(reversed(range(1, numbers, 3)))) + ): + instance_test_mutations.query( + f"""ALTER TABLE {name} UPDATE b = 'changed' IN PARTITION {i} WHERE a = {i}""" + + ( + " SETTINGS mutations_sync = 1" + if not reverse_index or sync == "all" + else "" + ) + ) def count_and_changed(): - return instance_test_mutations.query(f"SELECT count(), countIf(b == 'changed') FROM {name} SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV").splitlines() + return instance_test_mutations.query( + f"SELECT count(), countIf(b == 'changed') FROM {name} SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV" + ).splitlines() - print(instance_test_mutations.query( - f"SELECT mutation_id, command, parts_to_do, is_done, latest_failed_part, latest_fail_reason FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames")) + print( + instance_test_mutations.query( + f"SELECT mutation_id, command, parts_to_do, is_done, latest_failed_part, latest_fail_reason FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames" + ) + ) assert count_and_changed() == ["6,3"] - assert instance_test_mutations.query(f"SELECT count(), sum(is_done) FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, 
force_primary_key = 0 FORMAT CSV").splitlines() == ["7,7"] + assert instance_test_mutations.query( + f"SELECT count(), sum(is_done) FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV" + ).splitlines() == ["7,7"] finally: - instance_test_mutations.query(f'''DROP TABLE {name}''') + instance_test_mutations.query(f"""DROP TABLE {name}""") def test_mutations_with_merge_background_task(started_cluster): - instance_test_mutations.query('''SYSTEM STOP MERGES test_mutations_with_ast_elements''') + instance_test_mutations.query( + """SYSTEM STOP MERGES test_mutations_with_ast_elements""" + ) ## The number of asts per query is 15 for execution_times_for_mutation in range(100): instance_test_mutations.query( - '''ALTER TABLE test_mutations_with_ast_elements DELETE WHERE 1 = 1 AND toUInt32(b) IN (1)''') + """ALTER TABLE test_mutations_with_ast_elements DELETE WHERE 1 = 1 AND toUInt32(b) IN (1)""" + ) all_done = False for wait_times_for_mutation in range(100): # wait for replication 80 seconds max time.sleep(0.8) def get_done_mutations(instance): - instance_test_mutations.query('''DETACH TABLE test_mutations_with_ast_elements''') - instance_test_mutations.query('''ATTACH TABLE test_mutations_with_ast_elements''') - return int(instance.query( - "SELECT sum(is_done) FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' SETTINGS force_index_by_date = 0, force_primary_key = 0").rstrip()) + instance_test_mutations.query( + """DETACH TABLE test_mutations_with_ast_elements""" + ) + instance_test_mutations.query( + """ATTACH TABLE test_mutations_with_ast_elements""" + ) + return int( + instance.query( + "SELECT sum(is_done) FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' SETTINGS force_index_by_date = 0, force_primary_key = 0" + ).rstrip() + ) if get_done_mutations(instance_test_mutations) == 100: all_done = True break - print(instance_test_mutations.query( - "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames")) + print( + instance_test_mutations.query( + "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations_with_ast_elements' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames" + ) + ) assert all_done def test_mutations_with_truncate_table(started_cluster): - instance_test_mutations.query('''SYSTEM STOP MERGES test_mutations_with_ast_elements''') + instance_test_mutations.query( + """SYSTEM STOP MERGES test_mutations_with_ast_elements""" + ) ## The number of asts per query is 15 for execute_number in range(100): instance_test_mutations.query( - '''ALTER TABLE test_mutations_with_ast_elements DELETE WHERE 1 = 1 AND toUInt32(b) IN (1)''') + """ALTER TABLE test_mutations_with_ast_elements DELETE WHERE 1 = 1 AND toUInt32(b) IN (1)""" + ) instance_test_mutations.query("TRUNCATE TABLE test_mutations_with_ast_elements") - assert instance_test_mutations.query( - "SELECT COUNT() FROM system.mutations WHERE table = 'test_mutations_with_ast_elements SETTINGS force_index_by_date = 0, force_primary_key = 0'").rstrip() == '0' + assert ( + instance_test_mutations.query( + "SELECT COUNT() FROM system.mutations WHERE table = 'test_mutations_with_ast_elements SETTINGS force_index_by_date = 0, force_primary_key = 0'" + ).rstrip() + == "0" + ) def 
test_mutations_will_not_hang_for_non_existing_parts_sync(started_cluster): @@ -140,21 +196,32 @@ def test_mutations_will_not_hang_for_non_existing_parts_sync(started_cluster): name = "test_mutations_will_not_hang_for_non_existing_parts_sync" instance_test_mutations.query( - f"""CREATE TABLE {name} (date Date, a UInt64, b String) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY a""") + f"""CREATE TABLE {name} (date Date, a UInt64, b String) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY a""" + ) instance_test_mutations.query( - f"""INSERT INTO {name} SELECT '2019-07-29' AS date, number, toString(number) FROM numbers({numbers})""") + f"""INSERT INTO {name} SELECT '2019-07-29' AS date, number, toString(number) FROM numbers({numbers})""" + ) for i in range(0, numbers, 3): - instance_test_mutations.query(f"""ALTER TABLE {name} DELETE IN PARTITION {i+1000} WHERE a = {i} SETTINGS mutations_sync = 1""") + instance_test_mutations.query( + f"""ALTER TABLE {name} DELETE IN PARTITION {i+1000} WHERE a = {i} SETTINGS mutations_sync = 1""" + ) def count(): - return instance_test_mutations.query(f"SELECT count() FROM {name} SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV").splitlines() + return instance_test_mutations.query( + f"SELECT count() FROM {name} SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV" + ).splitlines() - print(instance_test_mutations.query( - f"SELECT mutation_id, command, parts_to_do, is_done, latest_failed_part, latest_fail_reason, parts_to_do_names FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames")) + print( + instance_test_mutations.query( + f"SELECT mutation_id, command, parts_to_do, is_done, latest_failed_part, latest_fail_reason, parts_to_do_names FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames" + ) + ) assert count() == [f"{numbers}"] - assert instance_test_mutations.query(f"SELECT count(), sum(is_done) FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV").splitlines() == [f"34,34"] + assert instance_test_mutations.query( + f"SELECT count(), sum(is_done) FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV" + ).splitlines() == [f"34,34"] finally: instance_test_mutations.query(f"""DROP TABLE {name}""") @@ -166,29 +233,42 @@ def test_mutations_will_not_hang_for_non_existing_parts_async(started_cluster): name = "test_mutations_will_not_hang_for_non_existing_parts_async" instance_test_mutations.query( - f"""CREATE TABLE {name} (date Date, a UInt64, b String) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY a""") + f"""CREATE TABLE {name} (date Date, a UInt64, b String) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY a""" + ) instance_test_mutations.query( - f"""INSERT INTO {name} SELECT '2019-07-29' AS date, number, toString(number) FROM numbers({numbers})""") + f"""INSERT INTO {name} SELECT '2019-07-29' AS date, number, toString(number) FROM numbers({numbers})""" + ) for i in range(0, numbers, 3): - instance_test_mutations.query(f"""ALTER TABLE {name} DELETE IN PARTITION {i+1000} WHERE a = {i}""") + instance_test_mutations.query( + f"""ALTER TABLE {name} DELETE IN PARTITION {i+1000} WHERE a = {i}""" + ) def count(): - return instance_test_mutations.query(f"SELECT count() FROM {name} SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV").splitlines() + return 
instance_test_mutations.query( + f"SELECT count() FROM {name} SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV" + ).splitlines() def count_and_sum_is_done(): - return instance_test_mutations.query(f"SELECT count(), sum(is_done) FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV").splitlines() + return instance_test_mutations.query( + f"SELECT count(), sum(is_done) FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT CSV" + ).splitlines() all_done = False - for wait_times_for_mutation in range(100): # wait for replication 80 seconds max + for wait_times_for_mutation in range( + 100 + ): # wait for replication 80 seconds max time.sleep(0.8) if count_and_sum_is_done() == ["34,34"]: all_done = True break - print(instance_test_mutations.query( - f"SELECT mutation_id, command, parts_to_do, is_done, latest_failed_part, latest_fail_reason, parts_to_do_names FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames")) + print( + instance_test_mutations.query( + f"SELECT mutation_id, command, parts_to_do, is_done, latest_failed_part, latest_fail_reason, parts_to_do_names FROM system.mutations WHERE table = '{name}' SETTINGS force_index_by_date = 0, force_primary_key = 0 FORMAT TSVWithNames" + ) + ) assert count() == [f"{numbers}"] assert count_and_sum_is_done() == ["34,34"] diff --git a/tests/integration/test_mysql_database_engine/test.py b/tests/integration/test_mysql_database_engine/test.py index 35d6d6e72b6..96a4e2d692c 100644 --- a/tests/integration/test_mysql_database_engine/test.py +++ b/tests/integration/test_mysql_database_engine/test.py @@ -9,7 +9,12 @@ from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) -clickhouse_node = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/named_collections.xml'], with_mysql=True, stay_alive=True) +clickhouse_node = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml", "configs/named_collections.xml"], + with_mysql=True, + stay_alive=True, +) @pytest.fixture(scope="module") @@ -32,12 +37,17 @@ class MySQLNodeInstance: def query(self, execution_query): if self.mysql_connection is None: - self.mysql_connection = pymysql.connect(user=self.user, password=self.password, host=self.hostname, - port=self.port) + self.mysql_connection = pymysql.connect( + user=self.user, + password=self.password, + host=self.hostname, + port=self.port, + ) with self.mysql_connection.cursor() as cursor: + def execute(query): res = cursor.execute(query) - if query.lstrip().lower().startswith(('select', 'show')): + if query.lstrip().lower().startswith(("select", "show")): # Mimic output of the ClickHouseInstance, which is: # tab-sparated values and newline (\n)-separated rows. 
rows = [] @@ -57,168 +67,279 @@ class MySQLNodeInstance: def test_mysql_ddl_for_mysql_database(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, started_cluster.mysql_port)) as mysql_node: + with contextlib.closing( + MySQLNodeInstance( + "root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port + ) + ) as mysql_node: mysql_node.query("DROP DATABASE IF EXISTS test_database") mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") clickhouse_node.query( - "CREATE DATABASE test_database ENGINE = MySQL('mysql57:3306', 'test_database', 'root', 'clickhouse')") - assert 'test_database' in clickhouse_node.query('SHOW DATABASES') + "CREATE DATABASE test_database ENGINE = MySQL('mysql57:3306', 'test_database', 'root', 'clickhouse')" + ) + assert "test_database" in clickhouse_node.query("SHOW DATABASES") mysql_node.query( - 'CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;') - assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') + "CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) + assert "test_table" in clickhouse_node.query("SHOW TABLES FROM test_database") time.sleep( - 3) # Because the unit of MySQL modification time is seconds, modifications made in the same second cannot be obtained - mysql_node.query('ALTER TABLE `test_database`.`test_table` ADD COLUMN `add_column` int(11)') - assert 'add_column' in clickhouse_node.query( - "SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") + 3 + ) # Because the unit of MySQL modification time is seconds, modifications made in the same second cannot be obtained + mysql_node.query( + "ALTER TABLE `test_database`.`test_table` ADD COLUMN `add_column` int(11)" + ) + assert "add_column" in clickhouse_node.query( + "SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'" + ) time.sleep( - 3) # Because the unit of MySQL modification time is seconds, modifications made in the same second cannot be obtained - mysql_node.query('ALTER TABLE `test_database`.`test_table` DROP COLUMN `add_column`') - assert 'add_column' not in clickhouse_node.query( - "SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") + 3 + ) # Because the unit of MySQL modification time is seconds, modifications made in the same second cannot be obtained + mysql_node.query( + "ALTER TABLE `test_database`.`test_table` DROP COLUMN `add_column`" + ) + assert "add_column" not in clickhouse_node.query( + "SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'" + ) - mysql_node.query('DROP TABLE `test_database`.`test_table`;') - assert 'test_table' not in clickhouse_node.query('SHOW TABLES FROM test_database') + mysql_node.query("DROP TABLE `test_database`.`test_table`;") + assert "test_table" not in clickhouse_node.query( + "SHOW TABLES FROM test_database" + ) clickhouse_node.query("DROP DATABASE test_database") - assert 'test_database' not in clickhouse_node.query('SHOW DATABASES') + assert "test_database" not in clickhouse_node.query("SHOW DATABASES") mysql_node.query("DROP DATABASE test_database") def test_clickhouse_ddl_for_mysql_database(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, started_cluster.mysql_port)) as mysql_node: + with contextlib.closing( + 
MySQLNodeInstance( + "root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port + ) + ) as mysql_node: mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") mysql_node.query( - 'CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;') + "CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) clickhouse_node.query( - "CREATE DATABASE test_database ENGINE = MySQL('mysql57:3306', 'test_database', 'root', 'clickhouse')") + "CREATE DATABASE test_database ENGINE = MySQL('mysql57:3306', 'test_database', 'root', 'clickhouse')" + ) - assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') + assert "test_table" in clickhouse_node.query("SHOW TABLES FROM test_database") clickhouse_node.query("DROP TABLE test_database.test_table") - assert 'test_table' not in clickhouse_node.query('SHOW TABLES FROM test_database') + assert "test_table" not in clickhouse_node.query( + "SHOW TABLES FROM test_database" + ) clickhouse_node.query("ATTACH TABLE test_database.test_table") - assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') + assert "test_table" in clickhouse_node.query("SHOW TABLES FROM test_database") clickhouse_node.query("DETACH TABLE test_database.test_table") - assert 'test_table' not in clickhouse_node.query('SHOW TABLES FROM test_database') + assert "test_table" not in clickhouse_node.query( + "SHOW TABLES FROM test_database" + ) clickhouse_node.query("ATTACH TABLE test_database.test_table") - assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_database') + assert "test_table" in clickhouse_node.query("SHOW TABLES FROM test_database") clickhouse_node.query("DROP DATABASE test_database") - assert 'test_database' not in clickhouse_node.query('SHOW DATABASES') + assert "test_database" not in clickhouse_node.query("SHOW DATABASES") mysql_node.query("DROP DATABASE test_database") def test_clickhouse_dml_for_mysql_database(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, started_cluster.mysql_port)) as mysql_node: + with contextlib.closing( + MySQLNodeInstance( + "root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port + ) + ) as mysql_node: mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") mysql_node.query( - 'CREATE TABLE `test_database`.`test_table` ( `i``d` int(11) NOT NULL, PRIMARY KEY (`i``d`)) ENGINE=InnoDB;') + "CREATE TABLE `test_database`.`test_table` ( `i``d` int(11) NOT NULL, PRIMARY KEY (`i``d`)) ENGINE=InnoDB;" + ) clickhouse_node.query( - "CREATE DATABASE test_database ENGINE = MySQL('mysql57:3306', test_database, 'root', 'clickhouse')") + "CREATE DATABASE test_database ENGINE = MySQL('mysql57:3306', test_database, 'root', 'clickhouse')" + ) - assert clickhouse_node.query("SELECT count() FROM `test_database`.`test_table`").rstrip() == '0' - clickhouse_node.query("INSERT INTO `test_database`.`test_table`(`i``d`) select number from numbers(10000)") - assert clickhouse_node.query("SELECT count() FROM `test_database`.`test_table`").rstrip() == '10000' + assert ( + clickhouse_node.query( + "SELECT count() FROM `test_database`.`test_table`" + ).rstrip() + == "0" + ) + clickhouse_node.query( + "INSERT INTO `test_database`.`test_table`(`i``d`) select number from numbers(10000)" + ) + assert ( + clickhouse_node.query( + "SELECT count() FROM `test_database`.`test_table`" + ).rstrip() + == 
"10000" + ) clickhouse_node.query("DROP DATABASE test_database") - assert 'test_database' not in clickhouse_node.query('SHOW DATABASES') + assert "test_database" not in clickhouse_node.query("SHOW DATABASES") mysql_node.query("DROP DATABASE test_database") def test_clickhouse_join_for_mysql_database(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, started_cluster.mysql_port)) as mysql_node: - mysql_node.query("CREATE DATABASE IF NOT EXISTS test DEFAULT CHARACTER SET 'utf8'") - mysql_node.query("CREATE TABLE test.t1_mysql_local (" - "pays VARCHAR(55) DEFAULT 'FRA' NOT NULL," - "service VARCHAR(5) DEFAULT '' NOT NULL," - "opco CHAR(3) DEFAULT '' NOT NULL" - ")") - mysql_node.query("CREATE TABLE test.t2_mysql_local (" - "service VARCHAR(5) DEFAULT '' NOT NULL," - "opco VARCHAR(5) DEFAULT ''" - ")") + with contextlib.closing( + MySQLNodeInstance( + "root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port + ) + ) as mysql_node: + mysql_node.query( + "CREATE DATABASE IF NOT EXISTS test DEFAULT CHARACTER SET 'utf8'" + ) + mysql_node.query( + "CREATE TABLE test.t1_mysql_local (" + "pays VARCHAR(55) DEFAULT 'FRA' NOT NULL," + "service VARCHAR(5) DEFAULT '' NOT NULL," + "opco CHAR(3) DEFAULT '' NOT NULL" + ")" + ) + mysql_node.query( + "CREATE TABLE test.t2_mysql_local (" + "service VARCHAR(5) DEFAULT '' NOT NULL," + "opco VARCHAR(5) DEFAULT ''" + ")" + ) clickhouse_node.query( - "CREATE TABLE default.t1_remote_mysql AS mysql('mysql57:3306','test','t1_mysql_local','root','clickhouse')") + "CREATE TABLE default.t1_remote_mysql AS mysql('mysql57:3306','test','t1_mysql_local','root','clickhouse')" + ) clickhouse_node.query( - "CREATE TABLE default.t2_remote_mysql AS mysql('mysql57:3306','test','t2_mysql_local','root','clickhouse')") - clickhouse_node.query("INSERT INTO `default`.`t1_remote_mysql` VALUES ('EN','A',''),('RU','B','AAA')") - clickhouse_node.query("INSERT INTO `default`.`t2_remote_mysql` VALUES ('A','AAA'),('Z','')") + "CREATE TABLE default.t2_remote_mysql AS mysql('mysql57:3306','test','t2_mysql_local','root','clickhouse')" + ) + clickhouse_node.query( + "INSERT INTO `default`.`t1_remote_mysql` VALUES ('EN','A',''),('RU','B','AAA')" + ) + clickhouse_node.query( + "INSERT INTO `default`.`t2_remote_mysql` VALUES ('A','AAA'),('Z','')" + ) - assert clickhouse_node.query("SELECT s.pays " - "FROM default.t1_remote_mysql AS s " - "LEFT JOIN default.t1_remote_mysql AS s_ref " - "ON (s_ref.opco = s.opco AND s_ref.service = s.service) " - "WHERE s_ref.opco != '' AND s.opco != '' ").rstrip() == 'RU' + assert ( + clickhouse_node.query( + "SELECT s.pays " + "FROM default.t1_remote_mysql AS s " + "LEFT JOIN default.t1_remote_mysql AS s_ref " + "ON (s_ref.opco = s.opco AND s_ref.service = s.service) " + "WHERE s_ref.opco != '' AND s.opco != '' " + ).rstrip() + == "RU" + ) mysql_node.query("DROP DATABASE test") def test_bad_arguments_for_mysql_database_engine(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, port=started_cluster.mysql_port)) as mysql_node: + with contextlib.closing( + MySQLNodeInstance( + "root", + "clickhouse", + started_cluster.mysql_ip, + port=started_cluster.mysql_port, + ) + ) as mysql_node: with pytest.raises(QueryRuntimeException) as exception: - mysql_node.query("CREATE DATABASE IF NOT EXISTS test_bad_arguments DEFAULT CHARACTER SET 'utf8'") + mysql_node.query( + "CREATE DATABASE IF NOT EXISTS test_bad_arguments DEFAULT CHARACTER SET 'utf8'" + 
) clickhouse_node.query( - "CREATE DATABASE test_database_bad_arguments ENGINE = MySQL('mysql57:3306', test_bad_arguments, root, 'clickhouse')") - assert 'Database engine MySQL requested literal argument.' in str(exception.value) + "CREATE DATABASE test_database_bad_arguments ENGINE = MySQL('mysql57:3306', test_bad_arguments, root, 'clickhouse')" + ) + assert "Database engine MySQL requested literal argument." in str( + exception.value + ) mysql_node.query("DROP DATABASE test_bad_arguments") + def test_column_comments_for_mysql_database_engine(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, started_cluster.mysql_port)) as mysql_node: + with contextlib.closing( + MySQLNodeInstance( + "root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port + ) + ) as mysql_node: mysql_node.query("DROP DATABASE IF EXISTS test_database") mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") clickhouse_node.query( - "CREATE DATABASE test_database ENGINE = MySQL('mysql57:3306', 'test_database', 'root', 'clickhouse')") - assert 'test_database' in clickhouse_node.query('SHOW DATABASES') + "CREATE DATABASE test_database ENGINE = MySQL('mysql57:3306', 'test_database', 'root', 'clickhouse')" + ) + assert "test_database" in clickhouse_node.query("SHOW DATABASES") mysql_node.query( - "CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`), `test` int COMMENT 'test comment') ENGINE=InnoDB;") - assert 'test comment' in clickhouse_node.query('DESCRIBE TABLE `test_database`.`test_table`') + "CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`), `test` int COMMENT 'test comment') ENGINE=InnoDB;" + ) + assert "test comment" in clickhouse_node.query( + "DESCRIBE TABLE `test_database`.`test_table`" + ) time.sleep( - 3) # Because the unit of MySQL modification time is seconds, modifications made in the same second cannot be obtained - mysql_node.query("ALTER TABLE `test_database`.`test_table` ADD COLUMN `add_column` int(11) COMMENT 'add_column comment'") - assert 'add_column comment' in clickhouse_node.query( - "SELECT comment FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") + 3 + ) # Because the unit of MySQL modification time is seconds, modifications made in the same second cannot be obtained + mysql_node.query( + "ALTER TABLE `test_database`.`test_table` ADD COLUMN `add_column` int(11) COMMENT 'add_column comment'" + ) + assert "add_column comment" in clickhouse_node.query( + "SELECT comment FROM system.columns WHERE table = 'test_table' AND database = 'test_database'" + ) clickhouse_node.query("DROP DATABASE test_database") mysql_node.query("DROP DATABASE test_database") def test_data_types_support_level_for_mysql_database_engine(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, started_cluster.mysql_port)) as mysql_node: - mysql_node.query("CREATE DATABASE IF NOT EXISTS test DEFAULT CHARACTER SET 'utf8'") - clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL('mysql57:3306', test, 'root', 'clickhouse')", - settings={"mysql_datatypes_support_level": "decimal,datetime64"}) + with contextlib.closing( + MySQLNodeInstance( + "root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port + ) + ) as mysql_node: + mysql_node.query( + "CREATE DATABASE IF NOT EXISTS test DEFAULT CHARACTER SET 'utf8'" + ) + clickhouse_node.query( + "CREATE DATABASE 
test_database ENGINE = MySQL('mysql57:3306', test, 'root', 'clickhouse')", + settings={"mysql_datatypes_support_level": "decimal,datetime64"}, + ) - assert "SETTINGS mysql_datatypes_support_level = \\'decimal,datetime64\\'" in clickhouse_node.query("SHOW CREATE DATABASE test_database FORMAT TSV") + assert ( + "SETTINGS mysql_datatypes_support_level = \\'decimal,datetime64\\'" + in clickhouse_node.query("SHOW CREATE DATABASE test_database FORMAT TSV") + ) clickhouse_node.query("DETACH DATABASE test_database") # without context settings clickhouse_node.query("ATTACH DATABASE test_database") - assert "SETTINGS mysql_datatypes_support_level = \\'decimal,datetime64\\'" in clickhouse_node.query("SHOW CREATE DATABASE test_database FORMAT TSV") + assert ( + "SETTINGS mysql_datatypes_support_level = \\'decimal,datetime64\\'" + in clickhouse_node.query("SHOW CREATE DATABASE test_database FORMAT TSV") + ) clickhouse_node.query( "CREATE DATABASE test_database_1 ENGINE = MySQL('mysql57:3306', test, 'root', 'clickhouse') SETTINGS mysql_datatypes_support_level = 'decimal,datetime64'", - settings={"mysql_datatypes_support_level": "decimal"}) + settings={"mysql_datatypes_support_level": "decimal"}, + ) - assert "SETTINGS mysql_datatypes_support_level = \\'decimal,datetime64\\'" in clickhouse_node.query("SHOW CREATE DATABASE test_database_1 FORMAT TSV") + assert ( + "SETTINGS mysql_datatypes_support_level = \\'decimal,datetime64\\'" + in clickhouse_node.query("SHOW CREATE DATABASE test_database_1 FORMAT TSV") + ) clickhouse_node.query("DETACH DATABASE test_database_1") # without context settings clickhouse_node.query("ATTACH DATABASE test_database_1") - assert "SETTINGS mysql_datatypes_support_level = \\'decimal,datetime64\\'" in clickhouse_node.query("SHOW CREATE DATABASE test_database_1 FORMAT TSV") + assert ( + "SETTINGS mysql_datatypes_support_level = \\'decimal,datetime64\\'" + in clickhouse_node.query("SHOW CREATE DATABASE test_database_1 FORMAT TSV") + ) clickhouse_node.query("DROP DATABASE test_database") clickhouse_node.query("DROP DATABASE test_database_1") - assert 'test_database' not in clickhouse_node.query('SHOW DATABASES') + assert "test_database" not in clickhouse_node.query("SHOW DATABASES") mysql_node.query("DROP DATABASE test") @@ -226,7 +347,10 @@ def test_data_types_support_level_for_mysql_database_engine(started_cluster): # float_values = ['NULL'] # float_values = [0] mysql returns 0 while clickhouse returns 0.0, so cannot compare using == directly int32_values = [0, 1, -1, 2147483647, -2147483648] -uint32_values = [0, 1] # [FIXME] seems client have issue with value 4294967295, it returns -1 for it +uint32_values = [ + 0, + 1, +] # [FIXME] seems client have issue with value 4294967295, it returns -1 for it mint_values = [0, 1, -1, 8388607, -8388608] umint_values = [0, 1, 16777215] int16_values = [0, 1, -1, 32767, -32768] @@ -235,221 +359,548 @@ int8_values = [0, 1, -1, 127, -128] uint8_values = [0, 1, 255] # string_values = ["'ClickHouse'", 'NULL'] string_values = ["'ClickHouse'"] -date_values=["'1970-01-01'"] -date2Date32_values=["'1925-01-01'", "'2283-11-11'"] -date2String_values=["'1000-01-01'", "'9999-12-31'"] +date_values = ["'1970-01-01'"] +date2Date32_values = ["'1925-01-01'", "'2283-11-11'"] +date2String_values = ["'1000-01-01'", "'9999-12-31'"] -decimal_values = [0, 0.123, 0.4, 5.67, 8.91011, 123456789.123, -0.123, -0.4, -5.67, -8.91011, -123456789.123] +decimal_values = [ + 0, + 0.123, + 0.4, + 5.67, + 8.91011, + 123456789.123, + -0.123, + -0.4, + -5.67, + -8.91011, + 
-123456789.123, +] timestamp_values = ["'2015-05-18 07:40:01.123'", "'2019-09-16 19:20:11.123'"] timestamp_values_no_subsecond = ["'2015-05-18 07:40:01'", "'2019-09-16 19:20:11'"] -@pytest.mark.parametrize("case_name, mysql_type, expected_ch_type, mysql_values, setting_mysql_datatypes_support_level", - [ - # test common type mapping - # ("common_types", "FLOAT", "Nullable(Float32)", float_values, ""), - # ("common_types", "FLOAT UNSIGNED", "Nullable(Float32)", float_values, ""), - - pytest.param("common_types", "INT", "Nullable(Int32)", int32_values, "", id="common_types_1"), - pytest.param("common_types", "INT NOT NULL", "Int32", int32_values, "", id="common_types_2"), - pytest.param("common_types", "INT UNSIGNED NOT NULL", "UInt32", uint32_values, "", id="common_types_3"), - pytest.param("common_types", "INT UNSIGNED", "Nullable(UInt32)", uint32_values, "", id="common_types_4"), - pytest.param("common_types", "INT UNSIGNED DEFAULT NULL", "Nullable(UInt32)", uint32_values, "", id="common_types_5"), - pytest.param("common_types", "INT UNSIGNED DEFAULT '1'", "Nullable(UInt32)", uint32_values, "", id="common_types_6"), - pytest.param("common_types", "INT(10)", "Nullable(Int32)", int32_values, "", id="common_types_7"), - pytest.param("common_types", "INT(10) NOT NULL", "Int32", int32_values, "", id="common_types_8"), - pytest.param("common_types", "INT(10) UNSIGNED NOT NULL", "UInt32", uint32_values, "", id="common_types_8"), - pytest.param("common_types", "INT(10) UNSIGNED", "Nullable(UInt32)", uint32_values, "", id="common_types_9"), - pytest.param("common_types", "INT(10) UNSIGNED DEFAULT NULL", "Nullable(UInt32)", uint32_values, "", id="common_types_10"), - pytest.param("common_types", "INT(10) UNSIGNED DEFAULT '1'", "Nullable(UInt32)", uint32_values, "", id="common_types_11"), - pytest.param("common_types", "INTEGER", "Nullable(Int32)", int32_values, "", id="common_types_12"), - pytest.param("common_types", "INTEGER UNSIGNED", "Nullable(UInt32)", uint32_values, "", id="common_types_13"), - - pytest.param("common_types", "MEDIUMINT", "Nullable(Int32)", mint_values, "", id="common_types_14"), - pytest.param("common_types", "MEDIUMINT UNSIGNED", "Nullable(UInt32)", umint_values, "", id="common_types_15"), - - pytest.param("common_types", "SMALLINT", "Nullable(Int16)", int16_values, "", id="common_types_16"), - pytest.param("common_types", "SMALLINT UNSIGNED", "Nullable(UInt16)", uint16_values, "", id="common_types_17"), - - pytest.param("common_types", "TINYINT", "Nullable(Int8)", int8_values, "", id="common_types_18"), - pytest.param("common_types", "TINYINT UNSIGNED", "Nullable(UInt8)", uint8_values, "", id="common_types_19"), - - pytest.param("common_types", "VARCHAR(10)", "Nullable(String)", string_values, "", id="common_types_20"), - - pytest.param("common_types", "DATE", "Nullable(Date)", date_values, "", id="common_types_21"), - pytest.param("common_types", "DATE", "Nullable(Date32)", date2Date32_values, "date2Date32", id="common_types_22"), - pytest.param("common_types", "DATE", "Nullable(String)", date2String_values, "date2String", id="common_types_23"), - - pytest.param("decimal_default", "decimal NOT NULL", "Decimal(10, 0)", decimal_values, - "decimal,datetime64", id="decimal_1"), - pytest.param("decimal_default_nullable", "decimal", "Nullable(Decimal(10, 0))", decimal_values, - "decimal,datetime64", id="decimal_2"), - pytest.param("decimal_18_6", "decimal(18, 6) NOT NULL", "Decimal(18, 6)", decimal_values, - "decimal,datetime64", id="decimal_3"), - pytest.param("decimal_38_6", 
"decimal(38, 6) NOT NULL", "Decimal(38, 6)", decimal_values, - "decimal,datetime64", id="decimal_4"), - - # Due to python DB driver roundtrip MySQL timestamp and datetime values - # are printed with 6 digits after decimal point, so to simplify tests a bit, - # we only validate precision of 0 and 6. - pytest.param("timestamp_default", "timestamp", "DateTime", timestamp_values, "decimal,datetime64", id="timestamp_default"), - pytest.param("timestamp_6", "timestamp(6)", "DateTime64(6)", timestamp_values, "decimal,datetime64", id="timestamp_6"), - pytest.param("datetime_default", "DATETIME NOT NULL", "DateTime64(0)", timestamp_values, - "decimal,datetime64", id="datetime_default"), - pytest.param("datetime_6", "DATETIME(6) NOT NULL", "DateTime64(6)", timestamp_values, - "decimal,datetime64", id="datetime_6_1"), - - # right now precision bigger than 39 is not supported by ClickHouse's Decimal, hence fall back to String - pytest.param("decimal_40_6", "decimal(40, 6) NOT NULL", "String", decimal_values, - "decimal,datetime64", id="decimal_40_6"), - pytest.param("decimal_18_6", "decimal(18, 6) NOT NULL", "String", decimal_values, "datetime64", id="decimal_18_6_1"), - pytest.param("decimal_18_6", "decimal(18, 6) NOT NULL", "String", decimal_values, "", id="decimal_18_6_2"), - pytest.param("datetime_6", "DATETIME(6) NOT NULL", "DateTime", timestamp_values_no_subsecond, - "decimal", id="datetime_6_2"), - pytest.param("datetime_6", "DATETIME(6) NOT NULL", "DateTime", timestamp_values_no_subsecond, "", id="datetime_6_3"), - ]) -def test_mysql_types(started_cluster, case_name, mysql_type, expected_ch_type, mysql_values, - setting_mysql_datatypes_support_level): - """ Verify that values written to MySQL can be read on ClickHouse side via DB engine MySQL, +@pytest.mark.parametrize( + "case_name, mysql_type, expected_ch_type, mysql_values, setting_mysql_datatypes_support_level", + [ + # test common type mapping + # ("common_types", "FLOAT", "Nullable(Float32)", float_values, ""), + # ("common_types", "FLOAT UNSIGNED", "Nullable(Float32)", float_values, ""), + pytest.param( + "common_types", + "INT", + "Nullable(Int32)", + int32_values, + "", + id="common_types_1", + ), + pytest.param( + "common_types", + "INT NOT NULL", + "Int32", + int32_values, + "", + id="common_types_2", + ), + pytest.param( + "common_types", + "INT UNSIGNED NOT NULL", + "UInt32", + uint32_values, + "", + id="common_types_3", + ), + pytest.param( + "common_types", + "INT UNSIGNED", + "Nullable(UInt32)", + uint32_values, + "", + id="common_types_4", + ), + pytest.param( + "common_types", + "INT UNSIGNED DEFAULT NULL", + "Nullable(UInt32)", + uint32_values, + "", + id="common_types_5", + ), + pytest.param( + "common_types", + "INT UNSIGNED DEFAULT '1'", + "Nullable(UInt32)", + uint32_values, + "", + id="common_types_6", + ), + pytest.param( + "common_types", + "INT(10)", + "Nullable(Int32)", + int32_values, + "", + id="common_types_7", + ), + pytest.param( + "common_types", + "INT(10) NOT NULL", + "Int32", + int32_values, + "", + id="common_types_8", + ), + pytest.param( + "common_types", + "INT(10) UNSIGNED NOT NULL", + "UInt32", + uint32_values, + "", + id="common_types_8", + ), + pytest.param( + "common_types", + "INT(10) UNSIGNED", + "Nullable(UInt32)", + uint32_values, + "", + id="common_types_9", + ), + pytest.param( + "common_types", + "INT(10) UNSIGNED DEFAULT NULL", + "Nullable(UInt32)", + uint32_values, + "", + id="common_types_10", + ), + pytest.param( + "common_types", + "INT(10) UNSIGNED DEFAULT '1'", + "Nullable(UInt32)", + 
uint32_values, + "", + id="common_types_11", + ), + pytest.param( + "common_types", + "INTEGER", + "Nullable(Int32)", + int32_values, + "", + id="common_types_12", + ), + pytest.param( + "common_types", + "INTEGER UNSIGNED", + "Nullable(UInt32)", + uint32_values, + "", + id="common_types_13", + ), + pytest.param( + "common_types", + "MEDIUMINT", + "Nullable(Int32)", + mint_values, + "", + id="common_types_14", + ), + pytest.param( + "common_types", + "MEDIUMINT UNSIGNED", + "Nullable(UInt32)", + umint_values, + "", + id="common_types_15", + ), + pytest.param( + "common_types", + "SMALLINT", + "Nullable(Int16)", + int16_values, + "", + id="common_types_16", + ), + pytest.param( + "common_types", + "SMALLINT UNSIGNED", + "Nullable(UInt16)", + uint16_values, + "", + id="common_types_17", + ), + pytest.param( + "common_types", + "TINYINT", + "Nullable(Int8)", + int8_values, + "", + id="common_types_18", + ), + pytest.param( + "common_types", + "TINYINT UNSIGNED", + "Nullable(UInt8)", + uint8_values, + "", + id="common_types_19", + ), + pytest.param( + "common_types", + "VARCHAR(10)", + "Nullable(String)", + string_values, + "", + id="common_types_20", + ), + pytest.param( + "common_types", + "DATE", + "Nullable(Date)", + date_values, + "", + id="common_types_21", + ), + pytest.param( + "common_types", + "DATE", + "Nullable(Date32)", + date2Date32_values, + "date2Date32", + id="common_types_22", + ), + pytest.param( + "common_types", + "DATE", + "Nullable(String)", + date2String_values, + "date2String", + id="common_types_23", + ), + pytest.param( + "decimal_default", + "decimal NOT NULL", + "Decimal(10, 0)", + decimal_values, + "decimal,datetime64", + id="decimal_1", + ), + pytest.param( + "decimal_default_nullable", + "decimal", + "Nullable(Decimal(10, 0))", + decimal_values, + "decimal,datetime64", + id="decimal_2", + ), + pytest.param( + "decimal_18_6", + "decimal(18, 6) NOT NULL", + "Decimal(18, 6)", + decimal_values, + "decimal,datetime64", + id="decimal_3", + ), + pytest.param( + "decimal_38_6", + "decimal(38, 6) NOT NULL", + "Decimal(38, 6)", + decimal_values, + "decimal,datetime64", + id="decimal_4", + ), + # Due to python DB driver roundtrip MySQL timestamp and datetime values + # are printed with 6 digits after decimal point, so to simplify tests a bit, + # we only validate precision of 0 and 6. 
+ pytest.param( + "timestamp_default", + "timestamp", + "DateTime", + timestamp_values, + "decimal,datetime64", + id="timestamp_default", + ), + pytest.param( + "timestamp_6", + "timestamp(6)", + "DateTime64(6)", + timestamp_values, + "decimal,datetime64", + id="timestamp_6", + ), + pytest.param( + "datetime_default", + "DATETIME NOT NULL", + "DateTime64(0)", + timestamp_values, + "decimal,datetime64", + id="datetime_default", + ), + pytest.param( + "datetime_6", + "DATETIME(6) NOT NULL", + "DateTime64(6)", + timestamp_values, + "decimal,datetime64", + id="datetime_6_1", + ), + # right now precision bigger than 39 is not supported by ClickHouse's Decimal, hence fall back to String + pytest.param( + "decimal_40_6", + "decimal(40, 6) NOT NULL", + "String", + decimal_values, + "decimal,datetime64", + id="decimal_40_6", + ), + pytest.param( + "decimal_18_6", + "decimal(18, 6) NOT NULL", + "String", + decimal_values, + "datetime64", + id="decimal_18_6_1", + ), + pytest.param( + "decimal_18_6", + "decimal(18, 6) NOT NULL", + "String", + decimal_values, + "", + id="decimal_18_6_2", + ), + pytest.param( + "datetime_6", + "DATETIME(6) NOT NULL", + "DateTime", + timestamp_values_no_subsecond, + "decimal", + id="datetime_6_2", + ), + pytest.param( + "datetime_6", + "DATETIME(6) NOT NULL", + "DateTime", + timestamp_values_no_subsecond, + "", + id="datetime_6_3", + ), + ], +) +def test_mysql_types( + started_cluster, + case_name, + mysql_type, + expected_ch_type, + mysql_values, + setting_mysql_datatypes_support_level, +): + """Verify that values written to MySQL can be read on ClickHouse side via DB engine MySQL, or Table engine MySQL, or mysql() table function. Make sure that type is converted properly and values match exactly. """ substitutes = dict( - mysql_db='decimal_support', + mysql_db="decimal_support", table_name=case_name, mysql_type=mysql_type, - mysql_values=', '.join('({})'.format(x) for x in mysql_values), - ch_mysql_db='mysql_db', - ch_mysql_table='mysql_table_engine_' + case_name, + mysql_values=", ".join("({})".format(x) for x in mysql_values), + ch_mysql_db="mysql_db", + ch_mysql_table="mysql_table_engine_" + case_name, expected_ch_type=expected_ch_type, ) clickhouse_query_settings = dict( mysql_datatypes_support_level=setting_mysql_datatypes_support_level, - output_format_decimal_trailing_zeros=1 + output_format_decimal_trailing_zeros=1, ) def execute_query(node, query, **kwargs): def do_execute(query): query = Template(query).safe_substitute(substitutes) res = node.query(query, **kwargs) - return res if isinstance(res, int) else res.rstrip('\n\r') + return res if isinstance(res, int) else res.rstrip("\n\r") if isinstance(query, (str, bytes)): return do_execute(query) else: return [do_execute(q) for q in query] - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, port=started_cluster.mysql_port)) as mysql_node: - execute_query(mysql_node, [ - "DROP DATABASE IF EXISTS ${mysql_db}", - "CREATE DATABASE ${mysql_db} DEFAULT CHARACTER SET 'utf8'", - "CREATE TABLE `${mysql_db}`.`${table_name}` (value ${mysql_type})", - "INSERT INTO `${mysql_db}`.`${table_name}` (value) VALUES ${mysql_values}", - "SELECT * FROM `${mysql_db}`.`${table_name}`", - "FLUSH TABLES" - ]) + with contextlib.closing( + MySQLNodeInstance( + "root", + "clickhouse", + started_cluster.mysql_ip, + port=started_cluster.mysql_port, + ) + ) as mysql_node: + execute_query( + mysql_node, + [ + "DROP DATABASE IF EXISTS ${mysql_db}", + "CREATE DATABASE ${mysql_db} DEFAULT CHARACTER SET 
'utf8'", + "CREATE TABLE `${mysql_db}`.`${table_name}` (value ${mysql_type})", + "INSERT INTO `${mysql_db}`.`${table_name}` (value) VALUES ${mysql_values}", + "SELECT * FROM `${mysql_db}`.`${table_name}`", + "FLUSH TABLES", + ], + ) - assert execute_query(mysql_node, "SELECT COUNT(*) FROM ${mysql_db}.${table_name}") \ - == \ - "{}".format(len(mysql_values)) + assert execute_query( + mysql_node, "SELECT COUNT(*) FROM ${mysql_db}.${table_name}" + ) == "{}".format(len(mysql_values)) # MySQL TABLE ENGINE - execute_query(clickhouse_node, [ - "DROP TABLE IF EXISTS ${ch_mysql_table};", - "CREATE TABLE ${ch_mysql_table} (value ${expected_ch_type}) ENGINE = MySQL('mysql57:3306', '${mysql_db}', '${table_name}', 'root', 'clickhouse')", - ], settings=clickhouse_query_settings) + execute_query( + clickhouse_node, + [ + "DROP TABLE IF EXISTS ${ch_mysql_table};", + "CREATE TABLE ${ch_mysql_table} (value ${expected_ch_type}) ENGINE = MySQL('mysql57:3306', '${mysql_db}', '${table_name}', 'root', 'clickhouse')", + ], + settings=clickhouse_query_settings, + ) # Validate type - assert \ - execute_query(clickhouse_node, "SELECT toTypeName(value) FROM ${ch_mysql_table} LIMIT 1", - settings=clickhouse_query_settings) \ - == \ - expected_ch_type + assert ( + execute_query( + clickhouse_node, + "SELECT toTypeName(value) FROM ${ch_mysql_table} LIMIT 1", + settings=clickhouse_query_settings, + ) + == expected_ch_type + ) # Validate values - assert \ - execute_query(clickhouse_node, "SELECT value FROM ${ch_mysql_table}", - settings=clickhouse_query_settings) \ - == \ - execute_query(mysql_node, "SELECT value FROM ${mysql_db}.${table_name}") + assert execute_query( + clickhouse_node, + "SELECT value FROM ${ch_mysql_table}", + settings=clickhouse_query_settings, + ) == execute_query(mysql_node, "SELECT value FROM ${mysql_db}.${table_name}") # MySQL DATABASE ENGINE - execute_query(clickhouse_node, [ - "DROP DATABASE IF EXISTS ${ch_mysql_db}", - "CREATE DATABASE ${ch_mysql_db} ENGINE = MySQL('mysql57:3306', '${mysql_db}', 'root', 'clickhouse')" - ], settings=clickhouse_query_settings) + execute_query( + clickhouse_node, + [ + "DROP DATABASE IF EXISTS ${ch_mysql_db}", + "CREATE DATABASE ${ch_mysql_db} ENGINE = MySQL('mysql57:3306', '${mysql_db}', 'root', 'clickhouse')", + ], + settings=clickhouse_query_settings, + ) # Validate type - assert \ - execute_query(clickhouse_node, "SELECT toTypeName(value) FROM ${ch_mysql_db}.${table_name} LIMIT 1", - settings=clickhouse_query_settings) \ - == \ - expected_ch_type + assert ( + execute_query( + clickhouse_node, + "SELECT toTypeName(value) FROM ${ch_mysql_db}.${table_name} LIMIT 1", + settings=clickhouse_query_settings, + ) + == expected_ch_type + ) # Validate values - assert \ - execute_query(clickhouse_node, "SELECT value FROM ${ch_mysql_db}.${table_name}", - settings=clickhouse_query_settings) \ - == \ - execute_query(mysql_node, "SELECT value FROM ${mysql_db}.${table_name}") + assert execute_query( + clickhouse_node, + "SELECT value FROM ${ch_mysql_db}.${table_name}", + settings=clickhouse_query_settings, + ) == execute_query(mysql_node, "SELECT value FROM ${mysql_db}.${table_name}") # MySQL TABLE FUNCTION # Validate type - assert \ - execute_query(clickhouse_node, - "SELECT toTypeName(value) FROM mysql('mysql57:3306', '${mysql_db}', '${table_name}', 'root', 'clickhouse') LIMIT 1", - settings=clickhouse_query_settings) \ - == \ - expected_ch_type + assert ( + execute_query( + clickhouse_node, + "SELECT toTypeName(value) FROM mysql('mysql57:3306', '${mysql_db}', 
'${table_name}', 'root', 'clickhouse') LIMIT 1", + settings=clickhouse_query_settings, + ) + == expected_ch_type + ) # Validate values - assert \ - execute_query(mysql_node, "SELECT value FROM ${mysql_db}.${table_name}") \ - == \ - execute_query(clickhouse_node, - "SELECT value FROM mysql('mysql57:3306', '${mysql_db}', '${table_name}', 'root', 'clickhouse')", - settings=clickhouse_query_settings) + assert execute_query( + mysql_node, "SELECT value FROM ${mysql_db}.${table_name}" + ) == execute_query( + clickhouse_node, + "SELECT value FROM mysql('mysql57:3306', '${mysql_db}', '${table_name}', 'root', 'clickhouse')", + settings=clickhouse_query_settings, + ) def test_predefined_connection_configuration(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, started_cluster.mysql_port)) as mysql_node: + with contextlib.closing( + MySQLNodeInstance( + "root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port + ) + ) as mysql_node: mysql_node.query("DROP DATABASE IF EXISTS test_database") mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'") - mysql_node.query('CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;') + mysql_node.query( + "CREATE TABLE `test_database`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) clickhouse_node.query("DROP DATABASE IF EXISTS test_database") clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL(mysql1)") - clickhouse_node.query("INSERT INTO `test_database`.`test_table` select number from numbers(100)") - assert clickhouse_node.query("SELECT count() FROM `test_database`.`test_table`").rstrip() == '100' + clickhouse_node.query( + "INSERT INTO `test_database`.`test_table` select number from numbers(100)" + ) + assert ( + clickhouse_node.query( + "SELECT count() FROM `test_database`.`test_table`" + ).rstrip() + == "100" + ) clickhouse_node.query("DROP DATABASE test_database") - clickhouse_node.query_and_get_error("CREATE DATABASE test_database ENGINE = MySQL(mysql2)") - clickhouse_node.query_and_get_error("CREATE DATABASE test_database ENGINE = MySQL(unknown_collection)") - clickhouse_node.query_and_get_error("CREATE DATABASE test_database ENGINE = MySQL(mysql1, 1)") + clickhouse_node.query_and_get_error( + "CREATE DATABASE test_database ENGINE = MySQL(mysql2)" + ) + clickhouse_node.query_and_get_error( + "CREATE DATABASE test_database ENGINE = MySQL(unknown_collection)" + ) + clickhouse_node.query_and_get_error( + "CREATE DATABASE test_database ENGINE = MySQL(mysql1, 1)" + ) - clickhouse_node.query("CREATE DATABASE test_database ENGINE = MySQL(mysql1, port=3306)") - assert clickhouse_node.query("SELECT count() FROM `test_database`.`test_table`").rstrip() == '100' + clickhouse_node.query( + "CREATE DATABASE test_database ENGINE = MySQL(mysql1, port=3306)" + ) + assert ( + clickhouse_node.query( + "SELECT count() FROM `test_database`.`test_table`" + ).rstrip() + == "100" + ) def test_restart_server(started_cluster): - with contextlib.closing(MySQLNodeInstance('root', 'clickhouse', started_cluster.mysql_ip, started_cluster.mysql_port)) as mysql_node: + with contextlib.closing( + MySQLNodeInstance( + "root", "clickhouse", started_cluster.mysql_ip, started_cluster.mysql_port + ) + ) as mysql_node: mysql_node.query("DROP DATABASE IF EXISTS test_restart") clickhouse_node.query("DROP DATABASE IF EXISTS test_restart") - clickhouse_node.query_and_get_error("CREATE DATABASE test_restart 
ENGINE = MySQL('mysql57:3306', 'test_restart', 'root', 'clickhouse')") - assert 'test_restart' not in clickhouse_node.query('SHOW DATABASES') + clickhouse_node.query_and_get_error( + "CREATE DATABASE test_restart ENGINE = MySQL('mysql57:3306', 'test_restart', 'root', 'clickhouse')" + ) + assert "test_restart" not in clickhouse_node.query("SHOW DATABASES") mysql_node.query("CREATE DATABASE test_restart DEFAULT CHARACTER SET 'utf8'") - mysql_node.query("CREATE TABLE `test_restart`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;") - clickhouse_node.query("CREATE DATABASE test_restart ENGINE = MySQL('mysql57:3306', 'test_restart', 'root', 'clickhouse')") + mysql_node.query( + "CREATE TABLE `test_restart`.`test_table` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;" + ) + clickhouse_node.query( + "CREATE DATABASE test_restart ENGINE = MySQL('mysql57:3306', 'test_restart', 'root', 'clickhouse')" + ) - assert 'test_restart' in clickhouse_node.query('SHOW DATABASES') - assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_restart') + assert "test_restart" in clickhouse_node.query("SHOW DATABASES") + assert "test_table" in clickhouse_node.query("SHOW TABLES FROM test_restart") with PartitionManager() as pm: - pm.partition_instances(clickhouse_node, mysql_node, action='REJECT --reject-with tcp-reset') + pm.partition_instances( + clickhouse_node, mysql_node, action="REJECT --reject-with tcp-reset" + ) clickhouse_node.restart_clickhouse() - clickhouse_node.query_and_get_error('SHOW TABLES FROM test_restart') - assert 'test_table' in clickhouse_node.query('SHOW TABLES FROM test_restart') + clickhouse_node.query_and_get_error("SHOW TABLES FROM test_restart") + assert "test_table" in clickhouse_node.query("SHOW TABLES FROM test_restart") diff --git a/tests/integration/test_mysql_protocol/test.py b/tests/integration/test_mysql_protocol/test.py index 0b3f6ea95af..78049e0f123 100644 --- a/tests/integration/test_mysql_protocol/test.py +++ b/tests/integration/test_mysql_protocol/test.py @@ -16,12 +16,23 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) DOCKER_COMPOSE_PATH = get_docker_compose_path() cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=["configs/ssl_conf.xml", "configs/mysql.xml", "configs/dhparam.pem", - "configs/server.crt", "configs/server.key"], - user_configs=["configs/users.xml"], env_variables={'UBSAN_OPTIONS': 'print_stacktrace=1'}, with_mysql_client=True) +node = cluster.add_instance( + "node", + main_configs=[ + "configs/ssl_conf.xml", + "configs/mysql.xml", + "configs/dhparam.pem", + "configs/server.crt", + "configs/server.key", + ], + user_configs=["configs/users.xml"], + env_variables={"UBSAN_OPTIONS": "print_stacktrace=1"}, + with_mysql_client=True, +) server_port = 9001 + @pytest.fixture(scope="module") def started_cluster(): cluster.start() @@ -31,77 +42,177 @@ def started_cluster(): cluster.shutdown() -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def golang_container(): - docker_compose = os.path.join(DOCKER_COMPOSE_PATH, 'docker_compose_mysql_golang_client.yml') + docker_compose = os.path.join( + DOCKER_COMPOSE_PATH, "docker_compose_mysql_golang_client.yml" + ) run_and_check( - ['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--no-build']) - yield docker.DockerClient(base_url='unix:///var/run/docker.sock', version=cluster.docker_api_version, timeout=600).containers.get(cluster.project_name + '_golang1_1') + [ + 
"docker-compose", + "-p", + cluster.project_name, + "-f", + docker_compose, + "up", + "--no-recreate", + "-d", + "--no-build", + ] + ) + yield docker.DockerClient( + base_url="unix:///var/run/docker.sock", + version=cluster.docker_api_version, + timeout=600, + ).containers.get(cluster.project_name + "_golang1_1") -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def php_container(): - docker_compose = os.path.join(DOCKER_COMPOSE_PATH, 'docker_compose_mysql_php_client.yml') + docker_compose = os.path.join( + DOCKER_COMPOSE_PATH, "docker_compose_mysql_php_client.yml" + ) run_and_check( - ['docker-compose', '--env-file', cluster.instances["node"].env_file, '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--no-build']) - yield docker.DockerClient(base_url='unix:///var/run/docker.sock', version=cluster.docker_api_version, timeout=600).containers.get(cluster.project_name + '_php1_1') + [ + "docker-compose", + "--env-file", + cluster.instances["node"].env_file, + "-p", + cluster.project_name, + "-f", + docker_compose, + "up", + "--no-recreate", + "-d", + "--no-build", + ] + ) + yield docker.DockerClient( + base_url="unix:///var/run/docker.sock", + version=cluster.docker_api_version, + timeout=600, + ).containers.get(cluster.project_name + "_php1_1") -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def nodejs_container(): - docker_compose = os.path.join(DOCKER_COMPOSE_PATH, 'docker_compose_mysql_js_client.yml') + docker_compose = os.path.join( + DOCKER_COMPOSE_PATH, "docker_compose_mysql_js_client.yml" + ) run_and_check( - ['docker-compose', '--env-file', cluster.instances["node"].env_file, '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--no-build']) - yield docker.DockerClient(base_url='unix:///var/run/docker.sock', version=cluster.docker_api_version, timeout=600).containers.get(cluster.project_name + '_mysqljs1_1') + [ + "docker-compose", + "--env-file", + cluster.instances["node"].env_file, + "-p", + cluster.project_name, + "-f", + docker_compose, + "up", + "--no-recreate", + "-d", + "--no-build", + ] + ) + yield docker.DockerClient( + base_url="unix:///var/run/docker.sock", + version=cluster.docker_api_version, + timeout=600, + ).containers.get(cluster.project_name + "_mysqljs1_1") -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def java_container(): - docker_compose = os.path.join(DOCKER_COMPOSE_PATH, 'docker_compose_mysql_java_client.yml') + docker_compose = os.path.join( + DOCKER_COMPOSE_PATH, "docker_compose_mysql_java_client.yml" + ) run_and_check( - ['docker-compose', '--env-file', cluster.instances["node"].env_file, '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--no-build']) - yield docker.DockerClient(base_url='unix:///var/run/docker.sock', version=cluster.docker_api_version, timeout=600).containers.get(cluster.project_name + '_java1_1') + [ + "docker-compose", + "--env-file", + cluster.instances["node"].env_file, + "-p", + cluster.project_name, + "-f", + docker_compose, + "up", + "--no-recreate", + "-d", + "--no-build", + ] + ) + yield docker.DockerClient( + base_url="unix:///var/run/docker.sock", + version=cluster.docker_api_version, + timeout=600, + ).containers.get(cluster.project_name + "_java1_1") def test_mysql_client(started_cluster): # type: (Container, str) -> None - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql 
--protocol tcp -h {host} -P {port} default -u user_with_double_sha1 --password=abacaba -e "SELECT 1;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) logging.debug(f"test_mysql_client code:{code} stdout:{stdout}, stderr:{stderr}") - assert stdout.decode() == '\n'.join(['1', '1', '']) + assert stdout.decode() == "\n".join(["1", "1", ""]) - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "SELECT 1 as a;" -e "SELECT 'тест' as b;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) - assert stdout.decode() == '\n'.join(['a', '1', 'b', 'тест', '']) + assert stdout.decode() == "\n".join(["a", "1", "b", "тест", ""]) - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=abc -e "select 1 as a;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) - assert stderr.decode() == 'mysql: [Warning] Using a password on the command line interface can be insecure.\n' \ - 'ERROR 516 (00000): default: Authentication failed: password is incorrect or there is no user with such name\n' + assert ( + stderr.decode() + == "mysql: [Warning] Using a password on the command line interface can be insecure.\n" + "ERROR 516 (00000): default: Authentication failed: password is incorrect or there is no user with such name\n" + ) - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "use system;" -e "select count(*) from (select name from tables limit 1);" -e "use system2;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) - assert stdout.decode() == 'count()\n1\n' - expected_msg = '\n'.join([ - "mysql: [Warning] Using a password on the command line interface can be insecure.", - "ERROR 81 (00000) at line 1: Code: 81. DB::Exception: Database system2 doesn't exist", - ]) - assert stderr[:len(expected_msg)].decode() == expected_msg + assert stdout.decode() == "count()\n1\n" + expected_msg = "\n".join( + [ + "mysql: [Warning] Using a password on the command line interface can be insecure.", + "ERROR 81 (00000) at line 1: Code: 81. 
DB::Exception: Database system2 doesn't exist", + ] + ) + assert stderr[: len(expected_msg)].decode() == expected_msg - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "CREATE DATABASE x;" -e "USE x;" @@ -113,134 +224,223 @@ def test_mysql_client(started_cluster): -e "CREATE TEMPORARY TABLE tmp (tmp_column UInt32);" -e "INSERT INTO tmp VALUES (0), (1);" -e "SELECT * FROM tmp ORDER BY tmp_column;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) - assert stdout.decode() == '\n'.join(['column', '0', '0', '1', '1', '5', '5', 'tmp_column', '0', '1', '']) + assert stdout.decode() == "\n".join( + ["column", "0", "0", "1", "1", "5", "5", "tmp_column", "0", "1", ""] + ) def test_mysql_client_exception(started_cluster): # Poco exception. - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "CREATE TABLE default.t1_remote_mysql AS mysql('127.0.0.1:10086','default','t1_local','default','');" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) - expected_msg = '\n'.join([ - "mysql: [Warning] Using a password on the command line interface can be insecure.", - "ERROR 1000 (00000) at line 1: Poco::Exception. Code: 1000, e.code() = 0, Exception: Connections to all replicas failed: default@127.0.0.1:10086 as user default", - ]) - assert stderr[:len(expected_msg)].decode() == expected_msg + expected_msg = "\n".join( + [ + "mysql: [Warning] Using a password on the command line interface can be insecure.", + "ERROR 1000 (00000) at line 1: Poco::Exception. 
Code: 1000, e.code() = 0, Exception: Connections to all replicas failed: default@127.0.0.1:10086 as user default", + ] + ) + assert stderr[: len(expected_msg)].decode() == expected_msg def test_mysql_affected_rows(started_cluster): - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "CREATE TABLE IF NOT EXISTS default.t1 (n UInt64) ENGINE MergeTree() ORDER BY tuple();" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql -vvv --protocol tcp -h {host} -P {port} default -u default --password=123 -e "INSERT INTO default.t1(n) VALUES(1);" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 assert "1 row affected" in stdout.decode() - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql -vvv --protocol tcp -h {host} -P {port} default -u default --password=123 -e "INSERT INTO default.t1(n) SELECT * FROM numbers(1000)" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 assert "1000 rows affected" in stdout.decode() - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "DROP TABLE default.t1;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 def test_mysql_replacement_query(started_cluster): # SHOW TABLE STATUS LIKE. - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "show table status like 'xx';" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 # SHOW VARIABLES. - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "show variables;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 # KILL QUERY. 
- code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "kill query 0;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "kill query where query_id='mysql:0';" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 # SELECT DATABASE(). - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "select database();" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 - assert stdout.decode() == 'DATABASE()\ndefault\n' + assert stdout.decode() == "DATABASE()\ndefault\n" - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "select DATABASE();" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 - assert stdout.decode() == 'DATABASE()\ndefault\n' + assert stdout.decode() == "DATABASE()\ndefault\n" def test_mysql_select_user(started_cluster): - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "select user();" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 - assert stdout.decode() == 'currentUser()\ndefault\n' + assert stdout.decode() == "currentUser()\ndefault\n" + def test_mysql_explain(started_cluster): # EXPLAIN SELECT 1 - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "EXPLAIN SELECT 1;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 # EXPLAIN AST SELECT 1 - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "EXPLAIN AST SELECT 1;" 
- '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 # EXPLAIN PLAN SELECT 1 - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "EXPLAIN PLAN SELECT 1;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 # EXPLAIN PIPELINE graph=1 SELECT 1 - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e "EXPLAIN PIPELINE graph=1 SELECT 1;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 @@ -248,25 +448,40 @@ def test_mysql_federated(started_cluster): # For some reason it occasionally fails without retries. retries = 100 for try_num in range(retries): - node.query('''DROP DATABASE IF EXISTS mysql_federated''', settings={"password": "123"}) - node.query('''CREATE DATABASE mysql_federated''', settings={"password": "123"}) - node.query('''CREATE TABLE mysql_federated.test (col UInt32) ENGINE = Log''', settings={"password": "123"}) - node.query('''INSERT INTO mysql_federated.test VALUES (0), (1), (5)''', settings={"password": "123"}) + node.query( + """DROP DATABASE IF EXISTS mysql_federated""", settings={"password": "123"} + ) + node.query("""CREATE DATABASE mysql_federated""", settings={"password": "123"}) + node.query( + """CREATE TABLE mysql_federated.test (col UInt32) ENGINE = Log""", + settings={"password": "123"}, + ) + node.query( + """INSERT INTO mysql_federated.test VALUES (0), (1), (5)""", + settings={"password": "123"}, + ) def check_retryable_error_in_stderr(stderr): stderr = stderr.decode() - return ("Can't connect to local MySQL server through socket" in stderr - or "MySQL server has gone away" in stderr - or "Server shutdown in progress" in stderr) + return ( + "Can't connect to local MySQL server through socket" in stderr + or "MySQL server has gone away" in stderr + or "Server shutdown in progress" in stderr + ) - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql -e "DROP SERVER IF EXISTS clickhouse;" -e "CREATE SERVER clickhouse FOREIGN DATA WRAPPER mysql OPTIONS (USER 'default', PASSWORD '123', HOST '{host}', PORT {port}, DATABASE 'mysql_federated');" -e "DROP DATABASE IF EXISTS mysql_federated;" -e "CREATE DATABASE mysql_federated;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) if code != 0: print(("stdout", stdout)) @@ -276,11 +491,16 @@ def test_mysql_federated(started_cluster): continue assert code == 0 - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql -e "CREATE TABLE 
mysql_federated.test(`col` int UNSIGNED) ENGINE=FEDERATED CONNECTION='clickhouse';" -e "SELECT * FROM mysql_federated.test ORDER BY col;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) if code != 0: print(("stdout", stdout)) @@ -290,13 +510,18 @@ def test_mysql_federated(started_cluster): continue assert code == 0 - assert stdout.decode() == '\n'.join(['col', '0', '1', '5', '']) + assert stdout.decode() == "\n".join(["col", "0", "1", "5", ""]) - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql -e "INSERT INTO mysql_federated.test VALUES (0), (1), (5);" -e "SELECT * FROM mysql_federated.test ORDER BY col;" - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) if code != 0: print(("stdout", stdout)) @@ -306,11 +531,12 @@ def test_mysql_federated(started_cluster): continue assert code == 0 - assert stdout.decode() == '\n'.join(['col', '0', '0', '1', '1', '5', '5', '']) + assert stdout.decode() == "\n".join(["col", "0", "0", "1", "1", "5", "5", ""]) def test_mysql_set_variables(started_cluster): - code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(''' + code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run( + """ mysql --protocol tcp -h {host} -P {port} default -u default --password=123 -e " @@ -322,82 +548,118 @@ def test_mysql_set_variables(started_cluster): SET @@wait_timeout = 2147483; SET SESSION TRANSACTION ISOLATION LEVEL READ; " - '''.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + """.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 - def test_python_client(started_cluster): - client = pymysql.connections.Connection(host=started_cluster.get_instance_ip('node'), user='user_with_double_sha1', password='abacaba', - database='default', port=server_port) + client = pymysql.connections.Connection( + host=started_cluster.get_instance_ip("node"), + user="user_with_double_sha1", + password="abacaba", + database="default", + port=server_port, + ) with pytest.raises(pymysql.InternalError) as exc_info: - client.query('select name from tables') + client.query("select name from tables") - assert exc_info.value.args[1].startswith("Code: 60. DB::Exception: Table default.tables doesn't exist"), exc_info.value.args[1] + assert exc_info.value.args[1].startswith( + "Code: 60. 
DB::Exception: Table default.tables doesn't exist" + ), exc_info.value.args[1] cursor = client.cursor(pymysql.cursors.DictCursor) cursor.execute("select 1 as a, 'тест' as b") - assert cursor.fetchall() == [{'a': 1, 'b': 'тест'}] + assert cursor.fetchall() == [{"a": 1, "b": "тест"}] with pytest.raises(pymysql.InternalError) as exc_info: - pymysql.connections.Connection(host=started_cluster.get_instance_ip('node'), user='default', password='abacab', database='default', - port=server_port) + pymysql.connections.Connection( + host=started_cluster.get_instance_ip("node"), + user="default", + password="abacab", + database="default", + port=server_port, + ) assert exc_info.value.args == ( - 516, 'default: Authentication failed: password is incorrect or there is no user with such name') + 516, + "default: Authentication failed: password is incorrect or there is no user with such name", + ) - client = pymysql.connections.Connection(host=started_cluster.get_instance_ip('node'), user='default', password='123', database='default', - port=server_port) + client = pymysql.connections.Connection( + host=started_cluster.get_instance_ip("node"), + user="default", + password="123", + database="default", + port=server_port, + ) with pytest.raises(pymysql.InternalError) as exc_info: - client.query('select name from tables') + client.query("select name from tables") - assert exc_info.value.args[1].startswith("Code: 60. DB::Exception: Table default.tables doesn't exist"), exc_info.value.args[1] + assert exc_info.value.args[1].startswith( + "Code: 60. DB::Exception: Table default.tables doesn't exist" + ), exc_info.value.args[1] cursor = client.cursor(pymysql.cursors.DictCursor) cursor.execute("select 1 as a, 'тест' as b") - assert cursor.fetchall() == [{'a': 1, 'b': 'тест'}] + assert cursor.fetchall() == [{"a": 1, "b": "тест"}] - client.select_db('system') + client.select_db("system") with pytest.raises(pymysql.InternalError) as exc_info: - client.select_db('system2') + client.select_db("system2") - assert exc_info.value.args[1].startswith("Code: 81. DB::Exception: Database system2 doesn't exist"), exc_info.value.args[1] + assert exc_info.value.args[1].startswith( + "Code: 81. 
DB::Exception: Database system2 doesn't exist" + ), exc_info.value.args[1] cursor = client.cursor(pymysql.cursors.DictCursor) - cursor.execute('CREATE DATABASE x') - client.select_db('x') + cursor.execute("CREATE DATABASE x") + client.select_db("x") cursor.execute("CREATE TABLE table1 (a UInt32) ENGINE = Memory") cursor.execute("INSERT INTO table1 VALUES (1), (3)") cursor.execute("INSERT INTO table1 VALUES (1), (4)") cursor.execute("SELECT * FROM table1 ORDER BY a") - assert cursor.fetchall() == [{'a': 1}, {'a': 1}, {'a': 3}, {'a': 4}] + assert cursor.fetchall() == [{"a": 1}, {"a": 1}, {"a": 3}, {"a": 4}] def test_golang_client(started_cluster, golang_container): # type: (str, Container) -> None - with open(os.path.join(SCRIPT_DIR, 'golang.reference'), 'rb') as fp: + with open(os.path.join(SCRIPT_DIR, "golang.reference"), "rb") as fp: reference = fp.read() code, (stdout, stderr) = golang_container.exec_run( - './main --host {host} --port {port} --user default --password 123 --database ' - 'abc'.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + "./main --host {host} --port {port} --user default --password 123 --database " + "abc".format(host=started_cluster.get_instance_ip("node"), port=server_port), + demux=True, + ) assert code == 1 assert stderr.decode() == "Error 81: Database abc doesn't exist\n" code, (stdout, stderr) = golang_container.exec_run( - './main --host {host} --port {port} --user default --password 123 --database ' - 'default'.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + "./main --host {host} --port {port} --user default --password 123 --database " + "default".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 assert stdout == reference code, (stdout, stderr) = golang_container.exec_run( - './main --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database ' - 'default'.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + "./main --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database " + "default".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 assert stdout == reference @@ -405,86 +667,135 @@ def test_golang_client(started_cluster, golang_container): def test_php_client(started_cluster, php_container): # type: (str, Container) -> None code, (stdout, stderr) = php_container.exec_run( - 'php -f test.php {host} {port} default 123'.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + "php -f test.php {host} {port} default 123".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 - assert stdout.decode() == 'tables\ntables\n' + assert stdout.decode() == "tables\ntables\n" code, (stdout, stderr) = php_container.exec_run( - 'php -f test_ssl.php {host} {port} default 123'.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + "php -f test_ssl.php {host} {port} default 123".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 - assert stdout.decode() == 'tables\ntables\n' + assert stdout.decode() == "tables\ntables\n" code, (stdout, stderr) = php_container.exec_run( - 'php -f test.php {host} {port} user_with_double_sha1 abacaba'.format(host=started_cluster.get_instance_ip('node'), port=server_port), - 
demux=True) + "php -f test.php {host} {port} user_with_double_sha1 abacaba".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 - assert stdout.decode() == 'tables\ntables\n' + assert stdout.decode() == "tables\ntables\n" code, (stdout, stderr) = php_container.exec_run( - 'php -f test_ssl.php {host} {port} user_with_double_sha1 abacaba'.format(host=started_cluster.get_instance_ip('node'), port=server_port), - demux=True) + "php -f test_ssl.php {host} {port} user_with_double_sha1 abacaba".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 - assert stdout.decode() == 'tables\ntables\n' + assert stdout.decode() == "tables\ntables\n" def test_mysqljs_client(started_cluster, nodejs_container): code, (_, stderr) = nodejs_container.exec_run( - 'node test.js {host} {port} user_with_sha256 abacaba'.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + "node test.js {host} {port} user_with_sha256 abacaba".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 1 - assert 'MySQL is requesting the sha256_password authentication method, which is not supported.' in stderr.decode() + assert ( + "MySQL is requesting the sha256_password authentication method, which is not supported." + in stderr.decode() + ) code, (_, stderr) = nodejs_container.exec_run( - 'node test.js {host} {port} user_with_empty_password ""'.format(host=started_cluster.get_instance_ip('node'), port=server_port), - demux=True) + 'node test.js {host} {port} user_with_empty_password ""'.format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 code, (_, _) = nodejs_container.exec_run( - 'node test.js {host} {port} user_with_double_sha1 abacaba'.format(host=started_cluster.get_instance_ip('node'), port=server_port), - demux=True) + "node test.js {host} {port} user_with_double_sha1 abacaba".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 code, (_, _) = nodejs_container.exec_run( - 'node test.js {host} {port} user_with_empty_password 123'.format(host=started_cluster.get_instance_ip('node'), port=server_port), - demux=True) + "node test.js {host} {port} user_with_empty_password 123".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 1 def test_java_client(started_cluster, java_container): # type: (str, Container) -> None - with open(os.path.join(SCRIPT_DIR, 'java.reference')) as fp: + with open(os.path.join(SCRIPT_DIR, "java.reference")) as fp: reference = fp.read() # database not exists exception. code, (stdout, stderr) = java_container.exec_run( - 'java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database ' - 'abc'.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + "java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database " + "abc".format(host=started_cluster.get_instance_ip("node"), port=server_port), + demux=True, + ) assert code == 1 # empty password passed. 
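# --- Editor's note: illustrative sketch only, not part of the patch above. ---
# The golang, php, mysqljs and java checks in this file all follow one pattern:
# run the bundled client program inside its container with the server's
# host/port and credentials, then compare the exit code and stdout against a
# reference. A hypothetical wrapper for that pattern is sketched below; the
# container fixtures and `server_port` come from the surrounding tests, the
# wrapper itself is an assumption.
def run_client(container, cmd_template, cluster, **params):
    cmd = cmd_template.format(
        host=cluster.get_instance_ip("node"), port=server_port, **params
    )
    # demux=True keeps stdout and stderr separate so each can be asserted on.
    return container.exec_run(cmd, demux=True)


# Example, mirroring the java checks around this point:
#   code, (stdout, stderr) = run_client(
#       java_container,
#       "java JavaConnectorTest --host {host} --port {port} "
#       "--user user_with_empty_password --database default",
#       started_cluster,
#   )
#   assert code == 0 and stdout.decode() == reference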
code, (stdout, stderr) = java_container.exec_run( - 'java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database ' - 'default'.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + "java JavaConnectorTest --host {host} --port {port} --user user_with_empty_password --database " + "default".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 assert stdout.decode() == reference # non-empty password passed. code, (stdout, stderr) = java_container.exec_run( - 'java JavaConnectorTest --host {host} --port {port} --user default --password 123 --database ' - 'default'.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + "java JavaConnectorTest --host {host} --port {port} --user default --password 123 --database " + "default".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 assert stdout.decode() == reference # double-sha1 password passed. code, (stdout, stderr) = java_container.exec_run( - 'java JavaConnectorTest --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database ' - 'default'.format(host=started_cluster.get_instance_ip('node'), port=server_port), demux=True) + "java JavaConnectorTest --host {host} --port {port} --user user_with_double_sha1 --password abacaba --database " + "default".format( + host=started_cluster.get_instance_ip("node"), port=server_port + ), + demux=True, + ) assert code == 0 assert stdout.decode() == reference def test_types(started_cluster): - client = pymysql.connections.Connection(host=started_cluster.get_instance_ip('node'), user='default', password='123', database='default', - port=server_port) + client = pymysql.connections.Connection( + host=started_cluster.get_instance_ip("node"), + user="default", + password="123", + database="default", + port=server_port, + ) cursor = client.cursor(pymysql.cursors.DictCursor) cursor.execute( @@ -511,24 +822,24 @@ def test_types(started_cluster): result = cursor.fetchall()[0] expected = [ - ('Int8_column', -2 ** 7), - ('UInt8_column', 2 ** 8 - 1), - ('Int16_column', -2 ** 15), - ('UInt16_column', 2 ** 16 - 1), - ('Int32_column', -2 ** 31), - ('UInt32_column', 2 ** 32 - 1), - ('Int64_column', -2 ** 63), - ('UInt64_column', 2 ** 64 - 1), - ('String_column', 'тест'), - ('FixedString_column', 'тест'), - ('Float32_column', 1.5), - ('Float64_column', 1.5), - ('Float32_NaN_column', float('nan')), - ('Float64_Inf_column', float('-inf')), - ('Date_column', datetime.date(2019, 12, 8)), - ('Date_min_column', datetime.date(1970, 1, 1)), - ('Date_after_min_column', datetime.date(1970, 1, 2)), - ('DateTime_column', datetime.datetime(2019, 12, 8, 8, 24, 3)), + ("Int8_column", -(2**7)), + ("UInt8_column", 2**8 - 1), + ("Int16_column", -(2**15)), + ("UInt16_column", 2**16 - 1), + ("Int32_column", -(2**31)), + ("UInt32_column", 2**32 - 1), + ("Int64_column", -(2**63)), + ("UInt64_column", 2**64 - 1), + ("String_column", "тест"), + ("FixedString_column", "тест"), + ("Float32_column", 1.5), + ("Float64_column", 1.5), + ("Float32_NaN_column", float("nan")), + ("Float64_Inf_column", float("-inf")), + ("Date_column", datetime.date(2019, 12, 8)), + ("Date_min_column", datetime.date(1970, 1, 1)), + ("Date_after_min_column", datetime.date(1970, 1, 2)), + ("DateTime_column", datetime.datetime(2019, 12, 8, 8, 24, 3)), ] for key, value in expected: diff --git a/tests/integration/test_nlp/test.py 
b/tests/integration/test_nlp/test.py index 24935153608..e15c9ecfaa6 100644 --- a/tests/integration/test_nlp/test.py +++ b/tests/integration/test_nlp/test.py @@ -10,38 +10,140 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', main_configs=['configs/dicts_config.xml']) +instance = cluster.add_instance("instance", main_configs=["configs/dicts_config.xml"]) + def copy_file_to_container(local_path, dist_path, container_id): - os.system("docker cp {local} {cont_id}:{dist}".format(local=local_path, cont_id=container_id, dist=dist_path)) + os.system( + "docker cp {local} {cont_id}:{dist}".format( + local=local_path, cont_id=container_id, dist=dist_path + ) + ) + @pytest.fixture(scope="module") def start_cluster(): try: cluster.start() - copy_file_to_container(os.path.join(SCRIPT_DIR, 'dictionaries/.'), '/etc/clickhouse-server/dictionaries', instance.docker_id) + copy_file_to_container( + os.path.join(SCRIPT_DIR, "dictionaries/."), + "/etc/clickhouse-server/dictionaries", + instance.docker_id, + ) yield cluster finally: cluster.shutdown() + def test_lemmatize(start_cluster): - assert instance.query("SELECT lemmatize('en', 'wolves')", settings={"allow_experimental_nlp_functions": 1}) == "wolf\n" - assert instance.query("SELECT lemmatize('en', 'dogs')", settings={"allow_experimental_nlp_functions": 1}) == "dog\n" - assert instance.query("SELECT lemmatize('en', 'looking')", settings={"allow_experimental_nlp_functions": 1}) == "look\n" - assert instance.query("SELECT lemmatize('en', 'took')", settings={"allow_experimental_nlp_functions": 1}) == "take\n" - assert instance.query("SELECT lemmatize('en', 'imported')", settings={"allow_experimental_nlp_functions": 1}) == "import\n" - assert instance.query("SELECT lemmatize('en', 'tokenized')", settings={"allow_experimental_nlp_functions": 1}) == "tokenize\n" - assert instance.query("SELECT lemmatize('en', 'flown')", settings={"allow_experimental_nlp_functions": 1}) == "fly\n" + assert ( + instance.query( + "SELECT lemmatize('en', 'wolves')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "wolf\n" + ) + assert ( + instance.query( + "SELECT lemmatize('en', 'dogs')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "dog\n" + ) + assert ( + instance.query( + "SELECT lemmatize('en', 'looking')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "look\n" + ) + assert ( + instance.query( + "SELECT lemmatize('en', 'took')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "take\n" + ) + assert ( + instance.query( + "SELECT lemmatize('en', 'imported')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "import\n" + ) + assert ( + instance.query( + "SELECT lemmatize('en', 'tokenized')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "tokenize\n" + ) + assert ( + instance.query( + "SELECT lemmatize('en', 'flown')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "fly\n" + ) + def test_synonyms_extensions(start_cluster): - assert instance.query("SELECT synonyms('en', 'crucial')", settings={"allow_experimental_nlp_functions": 1}) == "['important','big','critical','crucial','essential']\n" - assert instance.query("SELECT synonyms('en', 'cheerful')", settings={"allow_experimental_nlp_functions": 1}) == "['happy','cheerful','delighted','ecstatic']\n" - assert instance.query("SELECT synonyms('en', 'yet')", settings={"allow_experimental_nlp_functions": 1}) == "['however','nonetheless','but','yet']\n" - assert 
instance.query("SELECT synonyms('en', 'quiz')", settings={"allow_experimental_nlp_functions": 1}) == "['quiz','query','check','exam']\n" - - assert instance.query("SELECT synonyms('ru', 'главный')", settings={"allow_experimental_nlp_functions": 1}) == "['важный','большой','высокий','хороший','главный']\n" - assert instance.query("SELECT synonyms('ru', 'веселый')", settings={"allow_experimental_nlp_functions": 1}) == "['веселый','счастливый','живой','яркий','смешной']\n" - assert instance.query("SELECT synonyms('ru', 'правда')", settings={"allow_experimental_nlp_functions": 1}) == "['хотя','однако','но','правда']\n" - assert instance.query("SELECT synonyms('ru', 'экзамен')", settings={"allow_experimental_nlp_functions": 1}) == "['экзамен','испытание','проверка']\n" + assert ( + instance.query( + "SELECT synonyms('en', 'crucial')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "['important','big','critical','crucial','essential']\n" + ) + assert ( + instance.query( + "SELECT synonyms('en', 'cheerful')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "['happy','cheerful','delighted','ecstatic']\n" + ) + assert ( + instance.query( + "SELECT synonyms('en', 'yet')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "['however','nonetheless','but','yet']\n" + ) + assert ( + instance.query( + "SELECT synonyms('en', 'quiz')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "['quiz','query','check','exam']\n" + ) + + assert ( + instance.query( + "SELECT synonyms('ru', 'главный')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "['важный','большой','высокий','хороший','главный']\n" + ) + assert ( + instance.query( + "SELECT synonyms('ru', 'веселый')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "['веселый','счастливый','живой','яркий','смешной']\n" + ) + assert ( + instance.query( + "SELECT synonyms('ru', 'правда')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "['хотя','однако','но','правда']\n" + ) + assert ( + instance.query( + "SELECT synonyms('ru', 'экзамен')", + settings={"allow_experimental_nlp_functions": 1}, + ) + == "['экзамен','испытание','проверка']\n" + ) diff --git a/tests/integration/test_no_local_metadata_node/test.py b/tests/integration/test_no_local_metadata_node/test.py index f976cc005bd..a4f04035a11 100644 --- a/tests/integration/test_no_local_metadata_node/test.py +++ b/tests/integration/test_no_local_metadata_node/test.py @@ -3,7 +3,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) @pytest.fixture(scope="module") @@ -17,11 +17,13 @@ def start_cluster(): def test_table_start_without_metadata(start_cluster): - node1.query(""" + node1.query( + """ CREATE TABLE test (date Date) ENGINE = ReplicatedMergeTree('/clickhouse/table/test_table', '1') ORDER BY tuple() - """) + """ + ) node1.query("INSERT INTO test VALUES(toDate('2019-12-01'))") @@ -33,7 +35,7 @@ def test_table_start_without_metadata(start_cluster): assert node1.query("SELECT date FROM test") == "2019-12-01\n" node1.query("DETACH TABLE test") - zk_cli = cluster.get_kazoo_client('zoo1') + zk_cli = cluster.get_kazoo_client("zoo1") # simulate update from old version zk_cli.delete("/clickhouse/table/test_table/replicas/1/metadata") diff --git a/tests/integration/test_non_default_compression/test.py b/tests/integration/test_non_default_compression/test.py index 
0cfffd28e12..e0a67a5db95 100644 --- a/tests/integration/test_non_default_compression/test.py +++ b/tests/integration/test_non_default_compression/test.py @@ -6,19 +6,42 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/zstd_compression_by_default.xml'], - user_configs=['configs/allow_suspicious_codecs.xml']) -node2 = cluster.add_instance('node2', main_configs=['configs/lz4hc_compression_by_default.xml'], - user_configs=['configs/allow_suspicious_codecs.xml']) -node3 = cluster.add_instance('node3', main_configs=['configs/custom_compression_by_default.xml'], - user_configs=['configs/allow_suspicious_codecs.xml']) -node4 = cluster.add_instance('node4', user_configs=['configs/enable_uncompressed_cache.xml', - 'configs/allow_suspicious_codecs.xml']) -node5 = cluster.add_instance('node5', main_configs=['configs/zstd_compression_by_default.xml'], - user_configs=['configs/enable_uncompressed_cache.xml', - 'configs/allow_suspicious_codecs.xml']) -node6 = cluster.add_instance('node6', main_configs=['configs/allow_experimental_codecs.xml'], - user_configs=['configs/allow_suspicious_codecs.xml']) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/zstd_compression_by_default.xml"], + user_configs=["configs/allow_suspicious_codecs.xml"], +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/lz4hc_compression_by_default.xml"], + user_configs=["configs/allow_suspicious_codecs.xml"], +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/custom_compression_by_default.xml"], + user_configs=["configs/allow_suspicious_codecs.xml"], +) +node4 = cluster.add_instance( + "node4", + user_configs=[ + "configs/enable_uncompressed_cache.xml", + "configs/allow_suspicious_codecs.xml", + ], +) +node5 = cluster.add_instance( + "node5", + main_configs=["configs/zstd_compression_by_default.xml"], + user_configs=[ + "configs/enable_uncompressed_cache.xml", + "configs/allow_suspicious_codecs.xml", + ], +) +node6 = cluster.add_instance( + "node6", + main_configs=["configs/allow_experimental_codecs.xml"], + user_configs=["configs/allow_suspicious_codecs.xml"], +) + @pytest.fixture(scope="module") def start_cluster(): @@ -32,109 +55,192 @@ def start_cluster(): def test_preconfigured_default_codec(start_cluster): for node in [node1, node2]: - node.query(""" + node.query( + """ CREATE TABLE compression_codec_multiple_with_key ( somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC), data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4), somecolumn Float64 ) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2; - """) + """ + ) node.query( - "INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), 100000, 'hello', 88.88), (toDate('2018-10-12'), 100002, 'world', 99.99), (toDate('2018-10-12'), 1111, '!', 777.777)") - assert node.query("SELECT COUNT(*) FROM compression_codec_multiple_with_key WHERE id % 2 == 0") == "2\n" - assert node.query( - "SELECT DISTINCT somecolumn FROM compression_codec_multiple_with_key ORDER BY id") == "777.777\n88.88\n99.99\n" - assert node.query( - "SELECT data FROM compression_codec_multiple_with_key WHERE id >= 1112 AND somedate = toDate('2018-10-12') AND somecolumn <= 100") == "hello\nworld\n" + "INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), 100000, 'hello', 88.88), (toDate('2018-10-12'), 100002, 'world', 99.99), (toDate('2018-10-12'), 1111, 
'!', 777.777)" + ) + assert ( + node.query( + "SELECT COUNT(*) FROM compression_codec_multiple_with_key WHERE id % 2 == 0" + ) + == "2\n" + ) + assert ( + node.query( + "SELECT DISTINCT somecolumn FROM compression_codec_multiple_with_key ORDER BY id" + ) + == "777.777\n88.88\n99.99\n" + ) + assert ( + node.query( + "SELECT data FROM compression_codec_multiple_with_key WHERE id >= 1112 AND somedate = toDate('2018-10-12') AND somecolumn <= 100" + ) + == "hello\nworld\n" + ) node.query( - "INSERT INTO compression_codec_multiple_with_key SELECT toDate('2018-10-12'), number, toString(number), 1.0 FROM system.numbers LIMIT 10000") + "INSERT INTO compression_codec_multiple_with_key SELECT toDate('2018-10-12'), number, toString(number), 1.0 FROM system.numbers LIMIT 10000" + ) - assert node.query("SELECT COUNT(id) FROM compression_codec_multiple_with_key WHERE id % 10 == 0") == "1001\n" - assert node.query("SELECT SUM(somecolumn) FROM compression_codec_multiple_with_key") == str( - 777.777 + 88.88 + 99.99 + 1.0 * 10000) + "\n" - assert node.query("SELECT count(*) FROM compression_codec_multiple_with_key GROUP BY somedate") == "10003\n" + assert ( + node.query( + "SELECT COUNT(id) FROM compression_codec_multiple_with_key WHERE id % 10 == 0" + ) + == "1001\n" + ) + assert ( + node.query( + "SELECT SUM(somecolumn) FROM compression_codec_multiple_with_key" + ) + == str(777.777 + 88.88 + 99.99 + 1.0 * 10000) + "\n" + ) + assert ( + node.query( + "SELECT count(*) FROM compression_codec_multiple_with_key GROUP BY somedate" + ) + == "10003\n" + ) def test_preconfigured_custom_codec(start_cluster): - node3.query(""" + node3.query( + """ CREATE TABLE compression_codec_multiple_with_key ( somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC), data String, somecolumn Float64 CODEC(ZSTD(2), LZ4HC, NONE, NONE, NONE, LZ4HC(5)) ) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2; - """) + """ + ) node3.query( - "INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), 100000, 'hello', 88.88), (toDate('2018-10-12'), 100002, 'world', 99.99), (toDate('2018-10-12'), 1111, '!', 777.777)") - assert node3.query("SELECT COUNT(*) FROM compression_codec_multiple_with_key WHERE id % 2 == 0") == "2\n" - assert node3.query( - "SELECT DISTINCT somecolumn FROM compression_codec_multiple_with_key ORDER BY id") == "777.777\n88.88\n99.99\n" - assert node3.query( - "SELECT data FROM compression_codec_multiple_with_key WHERE id >= 1112 AND somedate = toDate('2018-10-12') AND somecolumn <= 100") == "hello\nworld\n" + "INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), 100000, 'hello', 88.88), (toDate('2018-10-12'), 100002, 'world', 99.99), (toDate('2018-10-12'), 1111, '!', 777.777)" + ) + assert ( + node3.query( + "SELECT COUNT(*) FROM compression_codec_multiple_with_key WHERE id % 2 == 0" + ) + == "2\n" + ) + assert ( + node3.query( + "SELECT DISTINCT somecolumn FROM compression_codec_multiple_with_key ORDER BY id" + ) + == "777.777\n88.88\n99.99\n" + ) + assert ( + node3.query( + "SELECT data FROM compression_codec_multiple_with_key WHERE id >= 1112 AND somedate = toDate('2018-10-12') AND somecolumn <= 100" + ) + == "hello\nworld\n" + ) node3.query( "INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), 100000, '{}', 88.88)".format( - ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10000)))) + "".join( + random.choice(string.ascii_uppercase + string.digits) + 
for _ in range(10000) + ) + ) + ) node3.query("OPTIMIZE TABLE compression_codec_multiple_with_key FINAL") - assert node3.query( - "SELECT max(length(data)) from compression_codec_multiple_with_key GROUP BY data ORDER BY max(length(data)) DESC LIMIT 1") == "10000\n" + assert ( + node3.query( + "SELECT max(length(data)) from compression_codec_multiple_with_key GROUP BY data ORDER BY max(length(data)) DESC LIMIT 1" + ) + == "10000\n" + ) for i in range(10): node3.query( - "INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), {}, '{}', 88.88)".format(i, - ''.join( - random.choice( - string.ascii_uppercase + string.digits) - for - _ - in - range( - 10000)))) + "INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), {}, '{}', 88.88)".format( + i, + "".join( + random.choice(string.ascii_uppercase + string.digits) + for _ in range(10000) + ), + ) + ) node3.query("OPTIMIZE TABLE compression_codec_multiple_with_key FINAL") - assert node3.query("SELECT COUNT(*) from compression_codec_multiple_with_key WHERE length(data) = 10000") == "11\n" + assert ( + node3.query( + "SELECT COUNT(*) from compression_codec_multiple_with_key WHERE length(data) = 10000" + ) + == "11\n" + ) def test_uncompressed_cache_custom_codec(start_cluster): - node4.query(""" + node4.query( + """ CREATE TABLE compression_codec_multiple_with_key ( somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC), data String, somecolumn Float64 CODEC(ZSTD(2), LZ4HC, NONE, NONE, NONE, LZ4HC(5)) ) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2; - """) + """ + ) node4.query( "INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), 100000, '{}', 88.88)".format( - ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10000)))) + "".join( + random.choice(string.ascii_uppercase + string.digits) + for _ in range(10000) + ) + ) + ) # two equal requests one by one, to get into UncompressedCache for the first block - assert node4.query( - "SELECT max(length(data)) from compression_codec_multiple_with_key GROUP BY data ORDER BY max(length(data)) DESC LIMIT 1") == "10000\n" + assert ( + node4.query( + "SELECT max(length(data)) from compression_codec_multiple_with_key GROUP BY data ORDER BY max(length(data)) DESC LIMIT 1" + ) + == "10000\n" + ) - assert node4.query( - "SELECT max(length(data)) from compression_codec_multiple_with_key GROUP BY data ORDER BY max(length(data)) DESC LIMIT 1") == "10000\n" + assert ( + node4.query( + "SELECT max(length(data)) from compression_codec_multiple_with_key GROUP BY data ORDER BY max(length(data)) DESC LIMIT 1" + ) + == "10000\n" + ) def test_uncompressed_cache_plus_zstd_codec(start_cluster): - node5.query(""" + node5.query( + """ CREATE TABLE compression_codec_multiple_with_key ( somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC), data String, somecolumn Float64 CODEC(ZSTD(2), LZ4HC, NONE, NONE, NONE, LZ4HC(5)) ) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2; - """) + """ + ) node5.query( "INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), 100000, '{}', 88.88)".format( - 'a' * 10000)) + "a" * 10000 + ) + ) - assert node5.query( - "SELECT max(length(data)) from compression_codec_multiple_with_key GROUP BY data ORDER BY max(length(data)) DESC LIMIT 1") == "10000\n" + assert ( + node5.query( + "SELECT max(length(data)) from 
compression_codec_multiple_with_key GROUP BY data ORDER BY max(length(data)) DESC LIMIT 1" + ) + == "10000\n" + ) diff --git a/tests/integration/test_odbc_interaction/test.py b/tests/integration/test_odbc_interaction/test.py index 613d6a98030..06028af63c5 100644 --- a/tests/integration/test_odbc_interaction/test.py +++ b/tests/integration/test_odbc_interaction/test.py @@ -12,11 +12,19 @@ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT from multiprocessing.dummy import Pool cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_odbc_drivers=True, with_mysql=True, with_postgres=True, - main_configs=['configs/openssl.xml', 'configs/odbc_logging.xml'], - dictionaries=['configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml', - 'configs/dictionaries/sqlite3_odbc_cached_dictionary.xml', - 'configs/dictionaries/postgres_odbc_hashed_dictionary.xml'], stay_alive=True) +node1 = cluster.add_instance( + "node1", + with_odbc_drivers=True, + with_mysql=True, + with_postgres=True, + main_configs=["configs/openssl.xml", "configs/odbc_logging.xml"], + dictionaries=[ + "configs/dictionaries/sqlite3_odbc_hashed_dictionary.xml", + "configs/dictionaries/sqlite3_odbc_cached_dictionary.xml", + "configs/dictionaries/postgres_odbc_hashed_dictionary.xml", + ], + stay_alive=True, +) drop_table_sql_template = """ @@ -45,10 +53,17 @@ def get_mysql_conn(): for _ in range(15): try: if conn is None: - conn = pymysql.connect(user='root', password='clickhouse', host=cluster.mysql_ip, port=cluster.mysql_port) + conn = pymysql.connect( + user="root", + password="clickhouse", + host=cluster.mysql_ip, + port=cluster.mysql_port, + ) else: conn.ping(reconnect=True) - logging.debug(f"MySQL Connection establised: {cluster.mysql_ip}:{cluster.mysql_port}") + logging.debug( + f"MySQL Connection establised: {cluster.mysql_ip}:{cluster.mysql_port}" + ) return conn except Exception as e: errors += [str(e)] @@ -70,7 +85,9 @@ def create_mysql_table(conn, table_name): def get_postgres_conn(started_cluster): - conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(started_cluster.postgres_ip, started_cluster.postgres_port) + conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format( + started_cluster.postgres_ip, started_cluster.postgres_port + ) errors = [] for _ in range(15): try: @@ -83,7 +100,9 @@ def get_postgres_conn(started_cluster): errors += [str(e)] time.sleep(1) - raise Exception("Postgre connection not establised DSN={}, {}".format(conn_string, errors)) + raise Exception( + "Postgre connection not establised DSN={}, {}".format(conn_string, errors) + ) def create_postgres_db(conn, name): @@ -99,36 +118,67 @@ def started_cluster(): logging.debug(f"sqlite data received: {sqlite_db}") node1.exec_in_container( - ["sqlite3", sqlite_db, "CREATE TABLE t1(id INTEGER PRIMARY KEY ASC, x INTEGER, y, z);"], - privileged=True, user='root') + [ + "sqlite3", + sqlite_db, + "CREATE TABLE t1(id INTEGER PRIMARY KEY ASC, x INTEGER, y, z);", + ], + privileged=True, + user="root", + ) node1.exec_in_container( - ["sqlite3", sqlite_db, "CREATE TABLE t2(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);"], - privileged=True, user='root') + [ + "sqlite3", + sqlite_db, + "CREATE TABLE t2(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);", + ], + privileged=True, + user="root", + ) node1.exec_in_container( - ["sqlite3", sqlite_db, "CREATE TABLE t3(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);"], - privileged=True, user='root') + [ + "sqlite3", + sqlite_db, + "CREATE TABLE 
t3(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);", + ], + privileged=True, + user="root", + ) node1.exec_in_container( - ["sqlite3", sqlite_db, "CREATE TABLE t4(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);"], - privileged=True, user='root') + [ + "sqlite3", + sqlite_db, + "CREATE TABLE t4(id INTEGER PRIMARY KEY ASC, X INTEGER, Y, Z);", + ], + privileged=True, + user="root", + ) node1.exec_in_container( - ["sqlite3", sqlite_db, "CREATE TABLE tf1(id INTEGER PRIMARY KEY ASC, x INTEGER, y, z);"], - privileged=True, user='root') + [ + "sqlite3", + sqlite_db, + "CREATE TABLE tf1(id INTEGER PRIMARY KEY ASC, x INTEGER, y, z);", + ], + privileged=True, + user="root", + ) logging.debug("sqlite tables created") mysql_conn = get_mysql_conn() logging.debug("mysql connection received") ## create mysql db and table - create_mysql_db(mysql_conn, 'clickhouse') + create_mysql_db(mysql_conn, "clickhouse") logging.debug("mysql database created") postgres_conn = get_postgres_conn(cluster) logging.debug("postgres connection received") - create_postgres_db(postgres_conn, 'clickhouse') + create_postgres_db(postgres_conn, "clickhouse") logging.debug("postgres db created") cursor = postgres_conn.cursor() cursor.execute( - "create table if not exists clickhouse.test_table (id int primary key, column1 int not null, column2 varchar(40) not null)") + "create table if not exists clickhouse.test_table (id int primary key, column1 int not null, column2 varchar(40) not null)" + ) yield cluster @@ -144,7 +194,7 @@ def test_mysql_simple_select_works(started_cluster): mysql_setup = node1.odbc_drivers["MySQL"] - table_name = 'test_insert_select' + table_name = "test_insert_select" conn = get_mysql_conn() create_mysql_table(conn, table_name) @@ -152,27 +202,66 @@ def test_mysql_simple_select_works(started_cluster): with conn.cursor() as cursor: cursor.execute( "INSERT INTO clickhouse.{} VALUES(50, 'null-guy', 127, 255, NULL), (100, 'non-null-guy', 127, 255, 511);".format( - table_name)) + table_name + ) + ) conn.commit() - assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name), - settings={"external_table_functions_use_nulls": "1"}) == '\\N\n511\n' - assert node1.query("SELECT column_x FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name), - settings={"external_table_functions_use_nulls": "0"}) == '0\n511\n' + assert ( + node1.query( + "SELECT column_x FROM odbc('DSN={}', '{}')".format( + mysql_setup["DSN"], table_name + ), + settings={"external_table_functions_use_nulls": "1"}, + ) + == "\\N\n511\n" + ) + assert ( + node1.query( + "SELECT column_x FROM odbc('DSN={}', '{}')".format( + mysql_setup["DSN"], table_name + ), + settings={"external_table_functions_use_nulls": "0"}, + ) + == "0\n511\n" + ) - node1.query(''' + node1.query( + """ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, column_x Nullable(UInt32)) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse'); -'''.format(table_name, table_name)) +""".format( + table_name, table_name + ) + ) node1.query( "INSERT INTO {}(id, name, money, column_x) select number, concat('name_', toString(number)), 3, NULL from numbers(49) ".format( - table_name)) + table_name + ) + ) node1.query( "INSERT INTO {}(id, name, money, column_x) select number, concat('name_', toString(number)), 3, 42 from numbers(51, 49) ".format( - table_name)) + table_name + ) + ) - assert node1.query("SELECT COUNT () FROM {} WHERE column_x IS NOT NULL".format(table_name)) == '50\n' - assert node1.query("SELECT COUNT 
() FROM {} WHERE column_x IS NULL".format(table_name)) == '50\n' - assert node1.query("SELECT count(*) FROM odbc('DSN={}', '{}')".format(mysql_setup["DSN"], table_name)) == '100\n' + assert ( + node1.query( + "SELECT COUNT () FROM {} WHERE column_x IS NOT NULL".format(table_name) + ) + == "50\n" + ) + assert ( + node1.query("SELECT COUNT () FROM {} WHERE column_x IS NULL".format(table_name)) + == "50\n" + ) + assert ( + node1.query( + "SELECT count(*) FROM odbc('DSN={}', '{}')".format( + mysql_setup["DSN"], table_name + ) + ) + == "100\n" + ) # previously this test fails with segfault # just to be sure :) @@ -185,23 +274,40 @@ def test_mysql_insert(started_cluster): skip_test_msan(node1) mysql_setup = node1.odbc_drivers["MySQL"] - table_name = 'test_insert' + table_name = "test_insert" conn = get_mysql_conn() create_mysql_table(conn, table_name) - odbc_args = "'DSN={}', '{}', '{}'".format(mysql_setup["DSN"], mysql_setup["Database"], table_name) + odbc_args = "'DSN={}', '{}', '{}'".format( + mysql_setup["DSN"], mysql_setup["Database"], table_name + ) node1.query( "create table mysql_insert (id Int64, name String, age UInt8, money Float, column_x Nullable(Int16)) Engine=ODBC({})".format( - odbc_args)) - node1.query("insert into mysql_insert values (1, 'test', 11, 111, 1111), (2, 'odbc', 22, 222, NULL)") - assert node1.query("select * from mysql_insert") == "1\ttest\t11\t111\t1111\n2\todbc\t22\t222\t\\N\n" + odbc_args + ) + ) + node1.query( + "insert into mysql_insert values (1, 'test', 11, 111, 1111), (2, 'odbc', 22, 222, NULL)" + ) + assert ( + node1.query("select * from mysql_insert") + == "1\ttest\t11\t111\t1111\n2\todbc\t22\t222\t\\N\n" + ) - node1.query("insert into table function odbc({}) values (3, 'insert', 33, 333, 3333)".format(odbc_args)) + node1.query( + "insert into table function odbc({}) values (3, 'insert', 33, 333, 3333)".format( + odbc_args + ) + ) node1.query( "insert into table function odbc({}) (id, name, age, money) select id*4, upper(name), age*4, money*4 from odbc({}) where id=1".format( - odbc_args, odbc_args)) - assert node1.query( - "select * from mysql_insert where id in (3, 4)") == "3\tinsert\t33\t333\t3333\n4\tTEST\t44\t444\t\\N\n" + odbc_args, odbc_args + ) + ) + assert ( + node1.query("select * from mysql_insert where id in (3, 4)") + == "3\tinsert\t33\t333\t3333\n4\tTEST\t44\t444\t\\N\n" + ) def test_sqlite_simple_select_function_works(started_cluster): @@ -210,17 +316,57 @@ def test_sqlite_simple_select_function_works(started_cluster): sqlite_setup = node1.odbc_drivers["SQLite3"] sqlite_db = sqlite_setup["Database"] - node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t1 values(1, 1, 2, 3);"], - privileged=True, user='root') - assert node1.query("select * from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\t1\t2\t3\n" + node1.exec_in_container( + ["sqlite3", sqlite_db, "INSERT INTO t1 values(1, 1, 2, 3);"], + privileged=True, + user="root", + ) + assert ( + node1.query( + "select * from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1") + ) + == "1\t1\t2\t3\n" + ) + + assert ( + node1.query( + "select y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1") + ) + == "2\n" + ) + assert ( + node1.query( + "select z from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1") + ) + == "3\n" + ) + assert ( + node1.query( + "select x from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1") + ) + == "1\n" + ) + assert ( + node1.query( + "select x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1") + ) + == 
"1\t2\n" + ) + assert ( + node1.query( + "select z, x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], "t1") + ) + == "3\t1\t2\n" + ) + assert ( + node1.query( + "select count(), sum(x) from odbc('DSN={}', '{}') group by x".format( + sqlite_setup["DSN"], "t1" + ) + ) + == "1\t1\n" + ) - assert node1.query("select y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "2\n" - assert node1.query("select z from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "3\n" - assert node1.query("select x from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\n" - assert node1.query("select x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "1\t2\n" - assert node1.query("select z, x, y from odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 't1')) == "3\t1\t2\n" - assert node1.query( - "select count(), sum(x) from odbc('DSN={}', '{}') group by x".format(sqlite_setup["DSN"], 't1')) == "1\t1\n" def test_sqlite_table_function(started_cluster): skip_test_msan(node1) @@ -228,9 +374,16 @@ def test_sqlite_table_function(started_cluster): sqlite_setup = node1.odbc_drivers["SQLite3"] sqlite_db = sqlite_setup["Database"] - node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO tf1 values(1, 1, 2, 3);"], - privileged=True, user='root') - node1.query("create table odbc_tf as odbc('DSN={}', '{}')".format(sqlite_setup["DSN"], 'tf1')) + node1.exec_in_container( + ["sqlite3", sqlite_db, "INSERT INTO tf1 values(1, 1, 2, 3);"], + privileged=True, + user="root", + ) + node1.query( + "create table odbc_tf as odbc('DSN={}', '{}')".format( + sqlite_setup["DSN"], "tf1" + ) + ) assert node1.query("select * from odbc_tf") == "1\t1\t2\t3\n" assert node1.query("select y from odbc_tf") == "2\n" @@ -240,16 +393,23 @@ def test_sqlite_table_function(started_cluster): assert node1.query("select z, x, y from odbc_tf") == "3\t1\t2\n" assert node1.query("select count(), sum(x) from odbc_tf group by x") == "1\t1\n" + def test_sqlite_simple_select_storage_works(started_cluster): skip_test_msan(node1) sqlite_setup = node1.odbc_drivers["SQLite3"] sqlite_db = sqlite_setup["Database"] - node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t4 values(1, 1, 2, 3);"], - privileged=True, user='root') - node1.query("create table SqliteODBC (x Int32, y String, z String) engine = ODBC('DSN={}', '', 't4')".format( - sqlite_setup["DSN"])) + node1.exec_in_container( + ["sqlite3", sqlite_db, "INSERT INTO t4 values(1, 1, 2, 3);"], + privileged=True, + user="root", + ) + node1.query( + "create table SqliteODBC (x Int32, y String, z String) engine = ODBC('DSN={}', '', 't4')".format( + sqlite_setup["DSN"] + ) + ) assert node1.query("select * from SqliteODBC") == "1\t2\t3\n" assert node1.query("select y from SqliteODBC") == "2\n" @@ -264,70 +424,118 @@ def test_sqlite_odbc_hashed_dictionary(started_cluster): skip_test_msan(node1) sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] - node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t2 values(1, 1, 2, 3);"], - privileged=True, user='root') + node1.exec_in_container( + ["sqlite3", sqlite_db, "INSERT INTO t2 values(1, 1, 2, 3);"], + privileged=True, + user="root", + ) node1.query("SYSTEM RELOAD DICTIONARY sqlite3_odbc_hashed") - first_update_time = node1.query("SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'") + first_update_time = node1.query( + "SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'" + ) logging.debug(f"First update time 
{first_update_time}") - assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))", "3") - assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "1") # default + assert_eq_with_retry( + node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))", "3" + ) + assert_eq_with_retry( + node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "1" + ) # default - second_update_time = node1.query("SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'") + second_update_time = node1.query( + "SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'" + ) # Reloaded with new data logging.debug(f"Second update time {second_update_time}") while first_update_time == second_update_time: - second_update_time = node1.query("SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'") + second_update_time = node1.query( + "SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'" + ) logging.debug("Waiting dictionary to update for the second time") time.sleep(0.1) - node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t2 values(200, 200, 2, 7);"], - privileged=True, user='root') + node1.exec_in_container( + ["sqlite3", sqlite_db, "INSERT INTO t2 values(200, 200, 2, 7);"], + privileged=True, + user="root", + ) # No reload because of invalidate query - third_update_time = node1.query("SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'") + third_update_time = node1.query( + "SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'" + ) logging.debug(f"Third update time {second_update_time}") counter = 0 while third_update_time == second_update_time: - third_update_time = node1.query("SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'") + third_update_time = node1.query( + "SELECT last_successful_update_time FROM system.dictionaries WHERE name = 'sqlite3_odbc_hashed'" + ) time.sleep(0.1) if counter > 50: break counter += 1 - assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))", "3") - assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "1") # still default + assert_eq_with_retry( + node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))", "3" + ) + assert_eq_with_retry( + node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "1" + ) # still default - node1.exec_in_container(["sqlite3", sqlite_db, "REPLACE INTO t2 values(1, 1, 2, 5);"], - privileged=True, user='root') + node1.exec_in_container( + ["sqlite3", sqlite_db, "REPLACE INTO t2 values(1, 1, 2, 5);"], + privileged=True, + user="root", + ) - assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))", "5") - assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "7") + assert_eq_with_retry( + node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(1))", "5" + ) + assert_eq_with_retry( + node1, "select dictGetUInt8('sqlite3_odbc_hashed', 'Z', toUInt64(200))", "7" + ) def test_sqlite_odbc_cached_dictionary(started_cluster): skip_test_msan(node1) sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] - node1.exec_in_container(["sqlite3", sqlite_db, "INSERT INTO t3 values(1, 1, 2, 3);"], - 
privileged=True, user='root') + node1.exec_in_container( + ["sqlite3", sqlite_db, "INSERT INTO t3 values(1, 1, 2, 3);"], + privileged=True, + user="root", + ) - assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))") == "3\n" + assert ( + node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))") + == "3\n" + ) # Allow insert - node1.exec_in_container(["chmod", "a+rw", "/tmp"], privileged=True, user='root') - node1.exec_in_container(["chmod", "a+rw", sqlite_db], privileged=True, user='root') + node1.exec_in_container(["chmod", "a+rw", "/tmp"], privileged=True, user="root") + node1.exec_in_container(["chmod", "a+rw", sqlite_db], privileged=True, user="root") - node1.query("insert into table function odbc('DSN={};ReadOnly=0', '', 't3') values (200, 200, 2, 7)".format( - node1.odbc_drivers["SQLite3"]["DSN"])) + node1.query( + "insert into table function odbc('DSN={};ReadOnly=0', '', 't3') values (200, 200, 2, 7)".format( + node1.odbc_drivers["SQLite3"]["DSN"] + ) + ) - assert node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(200))") == "7\n" # new value + assert ( + node1.query("select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(200))") + == "7\n" + ) # new value - node1.exec_in_container(["sqlite3", sqlite_db, "REPLACE INTO t3 values(1, 1, 2, 12);"], - privileged=True, user='root') + node1.exec_in_container( + ["sqlite3", sqlite_db, "REPLACE INTO t3 values(1, 1, 2, 12);"], + privileged=True, + user="root", + ) - assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))", "12") + assert_eq_with_retry( + node1, "select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))", "12" + ) def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster): @@ -336,12 +544,24 @@ def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster): conn = get_postgres_conn(started_cluster) cursor = conn.cursor() cursor.execute("truncate table clickhouse.test_table") - cursor.execute("insert into clickhouse.test_table values(1, 1, 'hello'),(2, 2, 'world')") + cursor.execute( + "insert into clickhouse.test_table values(1, 1, 'hello'),(2, 2, 'world')" + ) node1.query("SYSTEM RELOAD DICTIONARY postgres_odbc_hashed") - node1.exec_in_container(["ss", "-K", "dport", "postgresql"], privileged=True, user='root') + node1.exec_in_container( + ["ss", "-K", "dport", "postgresql"], privileged=True, user="root" + ) node1.query("SYSTEM RELOAD DICTIONARY postgres_odbc_hashed") - assert_eq_with_retry(node1, "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))", "hello") - assert_eq_with_retry(node1, "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))", "world") + assert_eq_with_retry( + node1, + "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))", + "hello", + ) + assert_eq_with_retry( + node1, + "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))", + "world", + ) def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster): @@ -357,7 +577,11 @@ def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster): except Exception as ex: assert False, "Exception occured -- odbc-bridge hangs: " + str(ex) - assert_eq_with_retry(node1, "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(3))", "xxx") + assert_eq_with_retry( + node1, + "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(3))", + "xxx", + ) def test_postgres_insert(started_cluster): @@ -371,16 +595,26 @@ def 
test_postgres_insert(started_cluster): # reconstruction of connection string. node1.query( - "create table pg_insert (id UInt64, column1 UInt8, column2 String) engine=ODBC('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')") + "create table pg_insert (id UInt64, column1 UInt8, column2 String) engine=ODBC('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')" + ) node1.query("insert into pg_insert values (1, 1, 'hello'), (2, 2, 'world')") - assert node1.query("select * from pg_insert") == '1\t1\thello\n2\t2\tworld\n' - node1.query("insert into table function odbc('DSN=postgresql_odbc', 'clickhouse', 'test_table') format CSV 3,3,test") + assert node1.query("select * from pg_insert") == "1\t1\thello\n2\t2\tworld\n" node1.query( - "insert into table function odbc('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')" \ - " select number, number, 's' || toString(number) from numbers (4, 7)") - assert node1.query("select sum(column1), count(column1) from pg_insert") == "55\t10\n" - assert node1.query( - "select sum(n), count(n) from (select (*,).1 as n from (select * from odbc('DSN=postgresql_odbc', 'clickhouse', 'test_table')))") == "55\t10\n" + "insert into table function odbc('DSN=postgresql_odbc', 'clickhouse', 'test_table') format CSV 3,3,test" + ) + node1.query( + "insert into table function odbc('DSN=postgresql_odbc;Servername=postgre-sql.local', 'clickhouse', 'test_table')" + " select number, number, 's' || toString(number) from numbers (4, 7)" + ) + assert ( + node1.query("select sum(column1), count(column1) from pg_insert") == "55\t10\n" + ) + assert ( + node1.query( + "select sum(n), count(n) from (select (*,).1 as n from (select * from odbc('DSN=postgresql_odbc', 'clickhouse', 'test_table')))" + ) + == "55\t10\n" + ) def test_bridge_dies_with_parent(started_cluster): @@ -390,7 +624,9 @@ def test_bridge_dies_with_parent(started_cluster): # TODO: Leak sanitizer falsely reports about a leak of 16 bytes in clickhouse-odbc-bridge in this test and # that's linked somehow with that we have replaced getauxval() in glibc-compatibility. # The leak sanitizer calls getauxval() for its own purposes, and our replaced version doesn't seem to be equivalent in that case. 
- pytest.skip("Leak sanitizer falsely reports about a leak of 16 bytes in clickhouse-odbc-bridge") + pytest.skip( + "Leak sanitizer falsely reports about a leak of 16 bytes in clickhouse-odbc-bridge" + ) node1.query("select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))") @@ -401,7 +637,9 @@ def test_bridge_dies_with_parent(started_cluster): while clickhouse_pid is not None: try: - node1.exec_in_container(["kill", str(clickhouse_pid)], privileged=True, user='root') + node1.exec_in_container( + ["kill", str(clickhouse_pid)], privileged=True, user="root" + ) except: pass clickhouse_pid = node1.get_process_pid("clickhouse server") @@ -414,8 +652,11 @@ def test_bridge_dies_with_parent(started_cluster): break if bridge_pid: - out = node1.exec_in_container(["gdb", "-p", str(bridge_pid), "--ex", "thread apply all bt", "--ex", "q"], - privileged=True, user='root') + out = node1.exec_in_container( + ["gdb", "-p", str(bridge_pid), "--ex", "thread apply all bt", "--ex", "q"], + privileged=True, + user="root", + ) logging.debug(f"Bridge is running, gdb output:\n{out}") assert clickhouse_pid is None @@ -426,9 +667,11 @@ def test_bridge_dies_with_parent(started_cluster): def test_odbc_postgres_date_data_type(started_cluster): skip_test_msan(node1) - conn = get_postgres_conn(started_cluster); + conn = get_postgres_conn(started_cluster) cursor = conn.cursor() - cursor.execute("CREATE TABLE IF NOT EXISTS clickhouse.test_date (id integer, column1 integer, column2 date)") + cursor.execute( + "CREATE TABLE IF NOT EXISTS clickhouse.test_date (id integer, column1 integer, column2 date)" + ) cursor.execute("INSERT INTO clickhouse.test_date VALUES (1, 1, '2020-12-01')") cursor.execute("INSERT INTO clickhouse.test_date VALUES (2, 2, '2020-12-02')") @@ -436,13 +679,14 @@ def test_odbc_postgres_date_data_type(started_cluster): conn.commit() node1.query( - ''' + """ CREATE TABLE test_date (id UInt64, column1 UInt64, column2 Date) - ENGINE=ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_date')''') + ENGINE=ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_date')""" + ) - expected = '1\t1\t2020-12-01\n2\t2\t2020-12-02\n3\t3\t2020-12-03\n' - result = node1.query('SELECT * FROM test_date'); - assert(result == expected) + expected = "1\t1\t2020-12-01\n2\t2\t2020-12-02\n3\t3\t2020-12-03\n" + result = node1.query("SELECT * FROM test_date") + assert result == expected cursor.execute("DROP TABLE IF EXISTS clickhouse.test_date") node1.query("DROP TABLE IF EXISTS test_date") @@ -454,39 +698,53 @@ def test_odbc_postgres_conversions(started_cluster): cursor = conn.cursor() cursor.execute( - '''CREATE TABLE IF NOT EXISTS clickhouse.test_types ( + """CREATE TABLE IF NOT EXISTS clickhouse.test_types ( a smallint, b integer, c bigint, d real, e double precision, f serial, g bigserial, - h timestamp)''') - - node1.query(''' - INSERT INTO TABLE FUNCTION - odbc('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_types') - VALUES (-32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 2147483647, 9223372036854775807, '2000-05-12 12:12:12')''') - - result = node1.query(''' - SELECT a, b, c, d, e, f, g, h - FROM odbc('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_types') - ''') - - assert(result == '-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12\n') - cursor.execute("DROP TABLE IF EXISTS clickhouse.test_types") - - cursor.execute("""CREATE 
TABLE IF NOT EXISTS clickhouse.test_types (column1 Timestamp, column2 Numeric)""") + h timestamp)""" + ) node1.query( - ''' + """ + INSERT INTO TABLE FUNCTION + odbc('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_types') + VALUES (-32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 2147483647, 9223372036854775807, '2000-05-12 12:12:12')""" + ) + + result = node1.query( + """ + SELECT a, b, c, d, e, f, g, h + FROM odbc('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_types') + """ + ) + + assert ( + result + == "-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12\n" + ) + cursor.execute("DROP TABLE IF EXISTS clickhouse.test_types") + + cursor.execute( + """CREATE TABLE IF NOT EXISTS clickhouse.test_types (column1 Timestamp, column2 Numeric)""" + ) + + node1.query( + """ CREATE TABLE test_types (column1 DateTime64, column2 Decimal(5, 1)) - ENGINE=ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_types')''') + ENGINE=ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_types')""" + ) node1.query( """INSERT INTO test_types - SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Etc/UTC'), toDecimal32(1.1, 1)""") + SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Etc/UTC'), toDecimal32(1.1, 1)""" + ) - expected = node1.query("SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Etc/UTC'), toDecimal32(1.1, 1)") + expected = node1.query( + "SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Etc/UTC'), toDecimal32(1.1, 1)" + ) result = node1.query("SELECT * FROM test_types") logging.debug(result) cursor.execute("DROP TABLE IF EXISTS clickhouse.test_types") - assert(result == expected) + assert result == expected def test_odbc_cyrillic_with_varchar(started_cluster): @@ -498,17 +756,21 @@ def test_odbc_cyrillic_with_varchar(started_cluster): cursor.execute("DROP TABLE IF EXISTS clickhouse.test_cyrillic") cursor.execute("CREATE TABLE clickhouse.test_cyrillic (name varchar(11))") - node1.query(''' + node1.query( + """ CREATE TABLE test_cyrillic (name String) - ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_cyrillic')''') + ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_cyrillic')""" + ) cursor.execute("INSERT INTO clickhouse.test_cyrillic VALUES ('A-nice-word')") cursor.execute("INSERT INTO clickhouse.test_cyrillic VALUES ('Красивенько')") - result = node1.query(''' SELECT * FROM test_cyrillic ORDER BY name''') - assert(result == 'A-nice-word\nКрасивенько\n') - result = node1.query(''' SELECT name FROM odbc('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_cyrillic') ''') - assert(result == 'A-nice-word\nКрасивенько\n') + result = node1.query(""" SELECT * FROM test_cyrillic ORDER BY name""") + assert result == "A-nice-word\nКрасивенько\n" + result = node1.query( + """ SELECT name FROM odbc('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_cyrillic') """ + ) + assert result == "A-nice-word\nКрасивенько\n" def test_many_connections(started_cluster): @@ -517,22 +779,24 @@ def test_many_connections(started_cluster): conn = get_postgres_conn(started_cluster) cursor = conn.cursor() - cursor.execute('DROP TABLE IF EXISTS clickhouse.test_pg_table') - cursor.execute('CREATE TABLE clickhouse.test_pg_table (key integer, value integer)') + cursor.execute("DROP TABLE IF EXISTS clickhouse.test_pg_table") + cursor.execute("CREATE 
TABLE clickhouse.test_pg_table (key integer, value integer)") - node1.query(''' + node1.query( + """ DROP TABLE IF EXISTS test_pg_table; CREATE TABLE test_pg_table (key UInt32, value UInt32) - ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_pg_table')''') + ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_pg_table')""" + ) node1.query("INSERT INTO test_pg_table SELECT number, number FROM numbers(10)") query = "SELECT count() FROM (" - for i in range (24): + for i in range(24): query += "SELECT key FROM {t} UNION ALL " query += "SELECT key FROM {t})" - assert node1.query(query.format(t='test_pg_table')) == '250\n' + assert node1.query(query.format(t="test_pg_table")) == "250\n" def test_concurrent_queries(started_cluster): @@ -541,41 +805,58 @@ def test_concurrent_queries(started_cluster): conn = get_postgres_conn(started_cluster) cursor = conn.cursor() - node1.query(''' + node1.query( + """ DROP TABLE IF EXISTS test_pg_table; CREATE TABLE test_pg_table (key UInt32, value UInt32) - ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_pg_table')''') + ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_pg_table')""" + ) - cursor.execute('DROP TABLE IF EXISTS clickhouse.test_pg_table') - cursor.execute('CREATE TABLE clickhouse.test_pg_table (key integer, value integer)') + cursor.execute("DROP TABLE IF EXISTS clickhouse.test_pg_table") + cursor.execute("CREATE TABLE clickhouse.test_pg_table (key integer, value integer)") def node_insert(_): for i in range(5): - node1.query("INSERT INTO test_pg_table SELECT number, number FROM numbers(1000)", user='default') + node1.query( + "INSERT INTO test_pg_table SELECT number, number FROM numbers(1000)", + user="default", + ) busy_pool = Pool(5) p = busy_pool.map_async(node_insert, range(5)) p.wait() - assert_eq_with_retry(node1, "SELECT count() FROM test_pg_table", str(5*5*1000), retry_count=100) + assert_eq_with_retry( + node1, "SELECT count() FROM test_pg_table", str(5 * 5 * 1000), retry_count=100 + ) def node_insert_select(_): for i in range(5): - result = node1.query("INSERT INTO test_pg_table SELECT number, number FROM numbers(1000)", user='default') - result = node1.query("SELECT * FROM test_pg_table LIMIT 100", user='default') + result = node1.query( + "INSERT INTO test_pg_table SELECT number, number FROM numbers(1000)", + user="default", + ) + result = node1.query( + "SELECT * FROM test_pg_table LIMIT 100", user="default" + ) busy_pool = Pool(5) p = busy_pool.map_async(node_insert_select, range(5)) p.wait() - assert_eq_with_retry(node1, "SELECT count() FROM test_pg_table", str(5*5*1000*2), retry_count=100) + assert_eq_with_retry( + node1, + "SELECT count() FROM test_pg_table", + str(5 * 5 * 1000 * 2), + retry_count=100, + ) - node1.query('DROP TABLE test_pg_table;') - cursor.execute('DROP TABLE clickhouse.test_pg_table;') + node1.query("DROP TABLE test_pg_table;") + cursor.execute("DROP TABLE clickhouse.test_pg_table;") def test_odbc_long_column_names(started_cluster): skip_test_msan(node1) - conn = get_postgres_conn(started_cluster); + conn = get_postgres_conn(started_cluster) cursor = conn.cursor() column_name = "column" * 8 @@ -586,7 +867,11 @@ def test_odbc_long_column_names(started_cluster): create_table += "{} integer".format(column_name + str(i)) create_table += ")" cursor.execute(create_table) - insert = "INSERT INTO clickhouse.test_long_column_names SELECT i" + ", i" * 999 + " FROM 
generate_series(0, 99) as t(i)" + insert = ( + "INSERT INTO clickhouse.test_long_column_names SELECT i" + + ", i" * 999 + + " FROM generate_series(0, 99) as t(i)" + ) cursor.execute(insert) conn.commit() @@ -596,11 +881,11 @@ def test_odbc_long_column_names(started_cluster): create_table += ", " create_table += "{} UInt32".format(column_name + str(i)) create_table += ") ENGINE=ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_long_column_names')" - result = node1.query(create_table); + result = node1.query(create_table) - result = node1.query('SELECT * FROM test_long_column_names'); + result = node1.query("SELECT * FROM test_long_column_names") expected = node1.query("SELECT number" + ", number" * 999 + " FROM numbers(100)") - assert(result == expected) + assert result == expected cursor.execute("DROP TABLE IF EXISTS clickhouse.test_long_column_names") node1.query("DROP TABLE IF EXISTS test_long_column_names") @@ -612,20 +897,30 @@ def test_odbc_long_text(started_cluster): conn = get_postgres_conn(started_cluster) cursor = conn.cursor() cursor.execute("drop table if exists clickhouse.test_long_text") - cursor.execute("create table clickhouse.test_long_text(flen int, field1 text)"); + cursor.execute("create table clickhouse.test_long_text(flen int, field1 text)") # sample test from issue 9363 text_from_issue = """BEGIN These examples only show the order that data is arranged in. The values from different columns are stored separately, and data from the same column is stored together. Examples of a column-oriented DBMS: Vertica, Paraccel (Actian Matrix and Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise and Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, and kdb+. Different orders for storing data are better suited to different scenarios. The data access scenario refers to what queries are made, how often, and in what proportion; how much data is read for each type of query – rows, columns, and bytes; the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used, and how isolated they are; requirements for data replication and logical integrity; requirements for latency and throughput for each type of query, and so on. The higher the load on the system, the more important it is to customize the system set up to match the requirements of the usage scenario, and the more fine grained this customization becomes. There is no system that is equally well-suited to significantly different scenarios. If a system is adaptable to a wide set of scenarios, under a high load, the system will handle all the scenarios equally poorly, or will work well for just one or few of possible scenarios. Key Properties of OLAP Scenario¶ The vast majority of requests are for read access. Data is updated in fairly large batches (> 1000 rows), not by single rows; or it is not updated at all. Data is added to the DB but is not modified. For reads, quite a large number of rows are extracted from the DB, but only a small subset of columns. Tables are "wide," meaning they contain a large number of columns. Queries are relatively rare (usually hundreds of queries per server or less per second). For simple queries, latencies around 50 ms are allowed. Column values are fairly small: numbers and short strings (for example, 60 bytes per URL). Requires high throughput when processing a single query (up to billions of rows per second per server). 
Transactions are not necessary. Low requirements for data consistency. There is one large table per query. All tables are small, except for one. A query result is significantly smaller than the source data. In other words, data is filtered or aggregated, so the result fits in a single server"s RAM. It is easy to see that the OLAP scenario is very different from other popular scenarios (such as OLTP or Key-Value access). So it doesn"t make sense to try to use OLTP or a Key-Value DB for processing analytical queries if you want to get decent performance. For example, if you try to use MongoDB or Redis for analytics, you will get very poor performance compared to OLAP databases. Why Column-Oriented Databases Work Better in the OLAP Scenario¶ Column-oriented databases are better suited to OLAP scenarios: they are at least 100 times faster in processing most queries. The reasons are explained in detail below, but the fact is easier to demonstrate visually. END""" - cursor.execute("""insert into clickhouse.test_long_text (flen, field1) values (3248, '{}')""".format(text_from_issue)); + cursor.execute( + """insert into clickhouse.test_long_text (flen, field1) values (3248, '{}')""".format( + text_from_issue + ) + ) - node1.query(''' + node1.query( + """ DROP TABLE IF EXISTS test_long_test; CREATE TABLE test_long_text (flen UInt32, field1 String) - ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_long_text')''') + ENGINE = ODBC('DSN=postgresql_odbc; Servername=postgre-sql.local', 'clickhouse', 'test_long_text')""" + ) result = node1.query("select field1 from test_long_text;") - assert(result.strip() == text_from_issue) + assert result.strip() == text_from_issue long_text = "text" * 1000000 - cursor.execute("""insert into clickhouse.test_long_text (flen, field1) values (400000, '{}')""".format(long_text)); + cursor.execute( + """insert into clickhouse.test_long_text (flen, field1) values (400000, '{}')""".format( + long_text + ) + ) result = node1.query("select field1 from test_long_text where flen=400000;") - assert(result.strip() == long_text) + assert result.strip() == long_text diff --git a/tests/integration/test_old_versions/test.py b/tests/integration/test_old_versions/test.py index 1870ecf4c9d..beef294b792 100644 --- a/tests/integration/test_old_versions/test.py +++ b/tests/integration/test_old_versions/test.py @@ -4,27 +4,69 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node18_14 = cluster.add_instance('node18_14', image='yandex/clickhouse-server', tag='18.14.19', - with_installed_binary=True, main_configs=["configs/config.d/test_cluster.xml"]) -node19_1 = cluster.add_instance('node19_1', image='yandex/clickhouse-server', tag='19.1.16', with_installed_binary=True, - main_configs=["configs/config.d/test_cluster.xml"]) -node19_4 = cluster.add_instance('node19_4', image='yandex/clickhouse-server', tag='19.4.5.35', - with_installed_binary=True, main_configs=["configs/config.d/test_cluster.xml"]) -node19_8 = cluster.add_instance('node19_8', image='yandex/clickhouse-server', tag='19.8.3.8', - with_installed_binary=True, main_configs=["configs/config.d/test_cluster.xml"]) -node19_11 = cluster.add_instance('node19_11', image='yandex/clickhouse-server', tag='19.11.13.74', - with_installed_binary=True, main_configs=["configs/config.d/test_cluster.xml"]) -node19_13 = cluster.add_instance('node19_13', image='yandex/clickhouse-server', tag='19.13.7.57', - 
with_installed_binary=True, main_configs=["configs/config.d/test_cluster.xml"]) -node19_16 = cluster.add_instance('node19_16', image='yandex/clickhouse-server', tag='19.16.2.2', - with_installed_binary=True, main_configs=["configs/config.d/test_cluster.xml"]) +node18_14 = cluster.add_instance( + "node18_14", + image="yandex/clickhouse-server", + tag="18.14.19", + with_installed_binary=True, + main_configs=["configs/config.d/test_cluster.xml"], +) +node19_1 = cluster.add_instance( + "node19_1", + image="yandex/clickhouse-server", + tag="19.1.16", + with_installed_binary=True, + main_configs=["configs/config.d/test_cluster.xml"], +) +node19_4 = cluster.add_instance( + "node19_4", + image="yandex/clickhouse-server", + tag="19.4.5.35", + with_installed_binary=True, + main_configs=["configs/config.d/test_cluster.xml"], +) +node19_8 = cluster.add_instance( + "node19_8", + image="yandex/clickhouse-server", + tag="19.8.3.8", + with_installed_binary=True, + main_configs=["configs/config.d/test_cluster.xml"], +) +node19_11 = cluster.add_instance( + "node19_11", + image="yandex/clickhouse-server", + tag="19.11.13.74", + with_installed_binary=True, + main_configs=["configs/config.d/test_cluster.xml"], +) +node19_13 = cluster.add_instance( + "node19_13", + image="yandex/clickhouse-server", + tag="19.13.7.57", + with_installed_binary=True, + main_configs=["configs/config.d/test_cluster.xml"], +) +node19_16 = cluster.add_instance( + "node19_16", + image="yandex/clickhouse-server", + tag="19.16.2.2", + with_installed_binary=True, + main_configs=["configs/config.d/test_cluster.xml"], +) old_nodes = [node18_14, node19_1, node19_4, node19_8, node19_11, node19_13, node19_16] -new_node = cluster.add_instance('node_new') +new_node = cluster.add_instance("node_new") def query_from_one_node_to_another(client_node, server_node, query): client_node.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --host {} --query {!r}".format(server_node.name, query)]) + [ + "bash", + "-c", + "/usr/bin/clickhouse client --host {} --query {!r}".format( + server_node.name, query + ), + ] + ) @pytest.fixture(scope="module") @@ -33,11 +75,14 @@ def setup_nodes(): cluster.start() for n in old_nodes + [new_node]: - n.query('''CREATE TABLE test_table (id UInt32, value UInt64) ENGINE = MergeTree() ORDER BY tuple()''') + n.query( + """CREATE TABLE test_table (id UInt32, value UInt64) ENGINE = MergeTree() ORDER BY tuple()""" + ) for n in old_nodes: n.query( - '''CREATE TABLE dist_table AS test_table ENGINE = Distributed('test_cluster', 'default', 'test_table')''') + """CREATE TABLE dist_table AS test_table ENGINE = Distributed('test_cluster', 'default', 'test_table')""" + ) yield cluster finally: @@ -47,18 +92,25 @@ def setup_nodes(): def test_client_is_older_than_server(setup_nodes): server = new_node for i, client in enumerate(old_nodes): - query_from_one_node_to_another(client, server, "INSERT INTO test_table VALUES (1, {})".format(i)) + query_from_one_node_to_another( + client, server, "INSERT INTO test_table VALUES (1, {})".format(i) + ) for client in old_nodes: query_from_one_node_to_another(client, server, "SELECT COUNT() FROM test_table") - assert server.query("SELECT COUNT() FROM test_table WHERE id=1") == str(len(old_nodes)) + "\n" + assert ( + server.query("SELECT COUNT() FROM test_table WHERE id=1") + == str(len(old_nodes)) + "\n" + ) def test_server_is_older_than_client(setup_nodes): client = new_node for i, server in enumerate(old_nodes): - query_from_one_node_to_another(client, server, "INSERT INTO test_table 
VALUES (2, {})".format(i)) + query_from_one_node_to_another( + client, server, "INSERT INTO test_table VALUES (2, {})".format(i) + ) for server in old_nodes: query_from_one_node_to_another(client, server, "SELECT COUNT() FROM test_table") @@ -73,7 +125,13 @@ def test_distributed_query_initiator_is_older_than_shard(setup_nodes): for i, initiator in enumerate(distributed_query_initiator_old_nodes): initiator.query("INSERT INTO dist_table VALUES (3, {})".format(i)) - assert_eq_with_retry(shard, "SELECT COUNT() FROM test_table WHERE id=3", - str(len(distributed_query_initiator_old_nodes))) - assert_eq_with_retry(initiator, "SELECT COUNT() FROM dist_table WHERE id=3", - str(len(distributed_query_initiator_old_nodes))) + assert_eq_with_retry( + shard, + "SELECT COUNT() FROM test_table WHERE id=3", + str(len(distributed_query_initiator_old_nodes)), + ) + assert_eq_with_retry( + initiator, + "SELECT COUNT() FROM dist_table WHERE id=3", + str(len(distributed_query_initiator_old_nodes)), + ) diff --git a/tests/integration/test_on_cluster_timeouts/test.py b/tests/integration/test_on_cluster_timeouts/test.py index 544153d0d00..06f19fabd68 100644 --- a/tests/integration/test_on_cluster_timeouts/test.py +++ b/tests/integration/test_on_cluster_timeouts/test.py @@ -4,14 +4,30 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], - user_configs=['configs/users_config.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], - user_configs=['configs/users_config.xml'], with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], - user_configs=['configs/users_config.xml'], with_zookeeper=True) -node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml'], - user_configs=['configs/users_config.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/users_config.xml"], + with_zookeeper=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/users_config.xml"], + with_zookeeper=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/users_config.xml"], + with_zookeeper=True, +) +node4 = cluster.add_instance( + "node4", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/users_config.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -26,23 +42,33 @@ def started_cluster(): def test_long_query(started_cluster): node1.query( - "CREATE TABLE cluster_table (key UInt64, value String) ENGINE = ReplicatedMergeTree('/test/1/cluster_table', '1') ORDER BY tuple()") + "CREATE TABLE cluster_table (key UInt64, value String) ENGINE = ReplicatedMergeTree('/test/1/cluster_table', '1') ORDER BY tuple()" + ) node2.query( - "CREATE TABLE cluster_table (key UInt64, value String) ENGINE = ReplicatedMergeTree('/test/1/cluster_table', '2') ORDER BY tuple()") + "CREATE TABLE cluster_table (key UInt64, value String) ENGINE = ReplicatedMergeTree('/test/1/cluster_table', '2') ORDER BY tuple()" + ) - node1.query("INSERT INTO cluster_table SELECT number, toString(number) FROM numbers(20)") + node1.query( + "INSERT INTO cluster_table SELECT number, toString(number) FROM numbers(20)" + ) node2.query("SYSTEM SYNC REPLICA cluster_table") node3.query( - "CREATE 
TABLE cluster_table (key UInt64, value String) ENGINE = ReplicatedMergeTree('/test/2/cluster_table', '1') ORDER BY tuple()") + "CREATE TABLE cluster_table (key UInt64, value String) ENGINE = ReplicatedMergeTree('/test/2/cluster_table', '1') ORDER BY tuple()" + ) node4.query( - "CREATE TABLE cluster_table (key UInt64, value String) ENGINE = ReplicatedMergeTree('/test/2/cluster_table', '2') ORDER BY tuple()") - node3.query("INSERT INTO cluster_table SELECT number, toString(number) FROM numbers(20)") + "CREATE TABLE cluster_table (key UInt64, value String) ENGINE = ReplicatedMergeTree('/test/2/cluster_table', '2') ORDER BY tuple()" + ) + node3.query( + "INSERT INTO cluster_table SELECT number, toString(number) FROM numbers(20)" + ) node4.query("SYSTEM SYNC REPLICA cluster_table") - node1.query("ALTER TABLE cluster_table ON CLUSTER 'test_cluster' UPDATE key = 1 WHERE sleepEachRow(1) == 0", - settings={"mutations_sync": "2"}) + node1.query( + "ALTER TABLE cluster_table ON CLUSTER 'test_cluster' UPDATE key = 1 WHERE sleepEachRow(1) == 0", + settings={"mutations_sync": "2"}, + ) assert node1.query("SELECT SUM(key) FROM cluster_table") == "20\n" assert node2.query("SELECT SUM(key) FROM cluster_table") == "20\n" diff --git a/tests/integration/test_optimize_on_insert/test.py b/tests/integration/test_optimize_on_insert/test.py index da4e20edf0c..0dfec53cf9c 100644 --- a/tests/integration/test_optimize_on_insert/test.py +++ b/tests/integration/test_optimize_on_insert/test.py @@ -5,8 +5,9 @@ from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) +node2 = cluster.add_instance("node2", with_zookeeper=True) + @pytest.fixture(scope="module") def start_cluster(): @@ -20,18 +21,27 @@ def start_cluster(): def get_data_files_for_table(node, table_name): - raw_output = node.exec_in_container(["bash", "-c", "ls /var/lib/clickhouse/data/default/{}".format(table_name)]) + raw_output = node.exec_in_container( + ["bash", "-c", "ls /var/lib/clickhouse/data/default/{}".format(table_name)] + ) return raw_output.strip().split("\n") + def test_empty_parts_optimize(start_cluster): for n, node in enumerate([node1, node2]): - node.query(""" + node.query( + """ CREATE TABLE empty (key UInt32, val UInt32, date Datetime) ENGINE=ReplicatedSummingMergeTree('/clickhouse/01560_optimize_on_insert', '{}', val) PARTITION BY date ORDER BY key; - """.format(n+1)) + """.format( + n + 1 + ) + ) - node1.query("INSERT INTO empty VALUES (1, 1, '2020-01-01'), (1, 1, '2020-01-01'), (1, -2, '2020-01-01')") + node1.query( + "INSERT INTO empty VALUES (1, 1, '2020-01-01'), (1, 1, '2020-01-01'), (1, -2, '2020-01-01')" + ) node2.query("SYSTEM SYNC REPLICA empty", timeout=15) @@ -39,10 +49,19 @@ def test_empty_parts_optimize(start_cluster): assert node2.query("SELECT * FROM empty") == "" # No other tmp files exists - assert set(get_data_files_for_table(node1, "empty")) == {"detached", "format_version.txt"} - assert set(get_data_files_for_table(node2, "empty")) == {"detached", "format_version.txt"} + assert set(get_data_files_for_table(node1, "empty")) == { + "detached", + "format_version.txt", + } + assert set(get_data_files_for_table(node2, "empty")) == { + "detached", + "format_version.txt", + } - node1.query("INSERT INTO empty VALUES (1, 1, '2020-02-01'), (1, 1, '2020-02-01'), (1, -2, 
'2020-02-01')", settings={"insert_quorum": 2}) + node1.query( + "INSERT INTO empty VALUES (1, 1, '2020-02-01'), (1, 1, '2020-02-01'), (1, -2, '2020-02-01')", + settings={"insert_quorum": 2}, + ) assert node1.query("SELECT * FROM empty") == "" assert node2.query("SELECT * FROM empty") == "" diff --git a/tests/integration/test_part_log_table/test.py b/tests/integration/test_part_log_table/test.py index eba909acf4a..d81990a9d47 100644 --- a/tests/integration/test_part_log_table/test.py +++ b/tests/integration/test_part_log_table/test.py @@ -3,10 +3,18 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance("node1", main_configs=["configs/config_without_standard_part_log.xml"]) -node2 = cluster.add_instance("node2", main_configs=["configs/config_with_standard_part_log.xml"]) -node3 = cluster.add_instance("node3", main_configs=["configs/config_with_non_standard_part_log.xml"]) -node4 = cluster.add_instance("node4", main_configs=["configs/config_disk_name_test.xml"]) +node1 = cluster.add_instance( + "node1", main_configs=["configs/config_without_standard_part_log.xml"] +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/config_with_standard_part_log.xml"] +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/config_with_non_standard_part_log.xml"] +) +node4 = cluster.add_instance( + "node4", main_configs=["configs/config_disk_name_test.xml"] +) @pytest.fixture(scope="module") @@ -19,33 +27,54 @@ def start_cluster(): def test_config_without_part_log(start_cluster): - assert "Table system.part_log doesn't exist" in node1.query_and_get_error("SELECT * FROM system.part_log") - node1.query("CREATE TABLE test_table(word String, value UInt64) ENGINE=MergeTree() ORDER BY value") - assert "Table system.part_log doesn't exist" in node1.query_and_get_error("SELECT * FROM system.part_log") + assert "Table system.part_log doesn't exist" in node1.query_and_get_error( + "SELECT * FROM system.part_log" + ) + node1.query( + "CREATE TABLE test_table(word String, value UInt64) ENGINE=MergeTree() ORDER BY value" + ) + assert "Table system.part_log doesn't exist" in node1.query_and_get_error( + "SELECT * FROM system.part_log" + ) node1.query("INSERT INTO test_table VALUES ('name', 1)") node1.query("SYSTEM FLUSH LOGS") - assert "Table system.part_log doesn't exist" in node1.query_and_get_error("SELECT * FROM system.part_log") + assert "Table system.part_log doesn't exist" in node1.query_and_get_error( + "SELECT * FROM system.part_log" + ) # Note: if part_log is defined, we cannot say when the table will be created - because of metric_log, trace_log, text_log, query_log... 
+ def test_config_with_standard_part_log(start_cluster): - node2.query("CREATE TABLE test_table(word String, value UInt64) ENGINE=MergeTree() Order by value") + node2.query( + "CREATE TABLE test_table(word String, value UInt64) ENGINE=MergeTree() Order by value" + ) node2.query("INSERT INTO test_table VALUES ('name', 1)") node2.query("SYSTEM FLUSH LOGS") assert node2.query("SELECT * FROM system.part_log") != "" def test_config_with_non_standard_part_log(start_cluster): - node3.query("CREATE TABLE test_table(word String, value UInt64) ENGINE=MergeTree() Order by value") + node3.query( + "CREATE TABLE test_table(word String, value UInt64) ENGINE=MergeTree() Order by value" + ) node3.query("INSERT INTO test_table VALUES ('name', 1)") node3.query("SYSTEM FLUSH LOGS") assert node3.query("SELECT * FROM system.own_part_log") != "" + def test_config_disk_name_test(start_cluster): - node4.query("CREATE TABLE test_table1(word String, value UInt64) ENGINE = MergeTree() ORDER BY word SETTINGS storage_policy = 'test1'") + node4.query( + "CREATE TABLE test_table1(word String, value UInt64) ENGINE = MergeTree() ORDER BY word SETTINGS storage_policy = 'test1'" + ) node4.query("INSERT INTO test_table1(*) VALUES ('test1', 2)") - node4.query("CREATE TABLE test_table2(word String, value UInt64) ENGINE = MergeTree() ORDER BY word SETTINGS storage_policy = 'test2'") + node4.query( + "CREATE TABLE test_table2(word String, value UInt64) ENGINE = MergeTree() ORDER BY word SETTINGS storage_policy = 'test2'" + ) node4.query("INSERT INTO test_table2(*) VALUES ('test2', 3)") node4.query("SYSTEM FLUSH LOGS") - assert node4.query("SELECT DISTINCT disk_name FROM system.part_log ORDER by disk_name") == "test1\ntest2\n" + assert ( + node4.query("SELECT DISTINCT disk_name FROM system.part_log ORDER by disk_name") + == "test1\ntest2\n" + ) diff --git a/tests/integration/test_part_moves_between_shards/test.py b/tests/integration/test_part_moves_between_shards/test.py index 6009a9d2a44..1dbe5324124 100644 --- a/tests/integration/test_part_moves_between_shards/test.py +++ b/tests/integration/test_part_moves_between_shards/test.py @@ -12,28 +12,32 @@ transient_ch_errors = [23, 32, 210] cluster = ClickHouseCluster(__file__) s0r0 = cluster.add_instance( - 's0r0', - main_configs=['configs/remote_servers.xml', 'configs/merge_tree.xml'], + "s0r0", + main_configs=["configs/remote_servers.xml", "configs/merge_tree.xml"], stay_alive=True, - with_zookeeper=True) + with_zookeeper=True, +) s0r1 = cluster.add_instance( - 's0r1', - main_configs=['configs/remote_servers.xml', 'configs/merge_tree.xml'], + "s0r1", + main_configs=["configs/remote_servers.xml", "configs/merge_tree.xml"], stay_alive=True, - with_zookeeper=True) + with_zookeeper=True, +) s1r0 = cluster.add_instance( - 's1r0', - main_configs=['configs/remote_servers.xml', 'configs/merge_tree.xml'], + "s1r0", + main_configs=["configs/remote_servers.xml", "configs/merge_tree.xml"], stay_alive=True, - with_zookeeper=True) + with_zookeeper=True, +) s1r1 = cluster.add_instance( - 's1r1', - main_configs=['configs/remote_servers.xml', 'configs/merge_tree.xml'], + "s1r1", + main_configs=["configs/remote_servers.xml", "configs/merge_tree.xml"], stay_alive=True, - with_zookeeper=True) + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -48,12 +52,16 @@ def started_cluster(): def test_move(started_cluster): for shard_ix, rs in enumerate([[s0r0, s0r1], [s1r0, s1r1]]): for replica_ix, r in enumerate(rs): - r.query(""" + r.query( + """ DROP TABLE IF EXISTS test_move; CREATE TABLE 
test_move(v UInt64) ENGINE ReplicatedMergeTree('/clickhouse/shard_{}/tables/test_move', '{}') ORDER BY tuple() - """.format(shard_ix, r.name)) + """.format( + shard_ix, r.name + ) + ) s0r0.query("SYSTEM STOP MERGES test_move") s0r1.query("SYSTEM STOP MERGES test_move") @@ -64,7 +72,9 @@ def test_move(started_cluster): assert "2" == s0r0.query("SELECT count() FROM test_move").strip() assert "0" == s1r0.query("SELECT count() FROM test_move").strip() - s0r0.query("ALTER TABLE test_move MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_move'") + s0r0.query( + "ALTER TABLE test_move MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_move'" + ) print(s0r0.query("SELECT * FROM system.part_moves_between_shards")) @@ -80,7 +90,9 @@ def test_move(started_cluster): assert "1" == n.query("SELECT count() FROM test_move").strip() # Move part back - s1r0.query("ALTER TABLE test_move MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_0/tables/test_move'") + s1r0.query( + "ALTER TABLE test_move MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_0/tables/test_move'" + ) wait_for_state("DONE", s1r0, "test_move") @@ -94,18 +106,24 @@ def test_move(started_cluster): def test_deduplication_while_move(started_cluster): for shard_ix, rs in enumerate([[s0r0, s0r1], [s1r0, s1r1]]): for replica_ix, r in enumerate(rs): - r.query(""" + r.query( + """ DROP TABLE IF EXISTS test_deduplication; CREATE TABLE test_deduplication(v UInt64) ENGINE ReplicatedMergeTree('/clickhouse/shard_{}/tables/test_deduplication', '{}') ORDER BY tuple() - """.format(shard_ix, r.name)) + """.format( + shard_ix, r.name + ) + ) - r.query(""" + r.query( + """ DROP TABLE IF EXISTS test_deduplication_d; CREATE TABLE test_deduplication_d AS test_deduplication ENGINE Distributed('test_cluster', '', test_deduplication) - """) + """ + ) s0r0.query("SYSTEM STOP MERGES test_deduplication") s0r1.query("SYSTEM STOP MERGES test_deduplication") @@ -118,7 +136,8 @@ def test_deduplication_while_move(started_cluster): assert "0" == s1r0.query("SELECT count() FROM test_deduplication").strip() s0r0.query( - "ALTER TABLE test_deduplication MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_deduplication'") + "ALTER TABLE test_deduplication MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_deduplication'" + ) s0r0.query("SYSTEM START MERGES test_deduplication") expected = """ @@ -129,19 +148,28 @@ def test_deduplication_while_move(started_cluster): def deduplication_invariant_test(): n = random.choice(list(started_cluster.instances.values())) assert TSV( - n.query("SELECT * FROM test_deduplication_d ORDER BY v", - settings={"allow_experimental_query_deduplication": 1}) + n.query( + "SELECT * FROM test_deduplication_d ORDER BY v", + settings={"allow_experimental_query_deduplication": 1}, + ) ) == TSV(expected) # https://github.com/ClickHouse/ClickHouse/issues/34089 assert TSV( - n.query("SELECT count() FROM test_deduplication_d", - settings={"allow_experimental_query_deduplication": 1}) + n.query( + "SELECT count() FROM test_deduplication_d", + settings={"allow_experimental_query_deduplication": 1}, + ) ) == TSV("2") assert TSV( - n.query("SELECT count() FROM test_deduplication_d", - settings={"allow_experimental_query_deduplication": 1, "allow_experimental_projection_optimization": 1}) + n.query( + "SELECT count() FROM test_deduplication_d", + settings={ + "allow_experimental_query_deduplication": 1, + "allow_experimental_projection_optimization": 1, + }, + ) ) == TSV("2") deduplication_invariant = 
ConcurrentInvariant(deduplication_invariant_test) @@ -155,18 +183,24 @@ def test_deduplication_while_move(started_cluster): def test_part_move_step_by_step(started_cluster): for shard_ix, rs in enumerate([[s0r0, s0r1], [s1r0, s1r1]]): for replica_ix, r in enumerate(rs): - r.query(""" + r.query( + """ DROP TABLE IF EXISTS test_part_move_step_by_step; CREATE TABLE test_part_move_step_by_step(v UInt64) ENGINE ReplicatedMergeTree('/clickhouse/shard_{}/tables/test_part_move_step_by_step', '{}') ORDER BY tuple() - """.format(shard_ix, r.name)) + """.format( + shard_ix, r.name + ) + ) - r.query(""" + r.query( + """ DROP TABLE IF EXISTS test_part_move_step_by_step_d; CREATE TABLE test_part_move_step_by_step_d AS test_part_move_step_by_step ENGINE Distributed('test_cluster', currentDatabase(), test_part_move_step_by_step) - """) + """ + ) s0r0.query("SYSTEM STOP MERGES test_part_move_step_by_step") s0r1.query("SYSTEM STOP MERGES test_part_move_step_by_step") @@ -187,8 +221,10 @@ def test_part_move_step_by_step(started_cluster): n = random.choice(list(started_cluster.instances.values())) try: assert TSV( - n.query("SELECT * FROM test_part_move_step_by_step_d ORDER BY v", - settings={"allow_experimental_query_deduplication": 1}) + n.query( + "SELECT * FROM test_part_move_step_by_step_d ORDER BY v", + settings={"allow_experimental_query_deduplication": 1}, + ) ) == TSV(expected) except QueryRuntimeException as e: # ignore transient errors that are caused by us restarting nodes @@ -202,10 +238,16 @@ def test_part_move_step_by_step(started_cluster): s0r1.stop_clickhouse() s0r0.query( - "ALTER TABLE test_part_move_step_by_step MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_part_move_step_by_step'") + "ALTER TABLE test_part_move_step_by_step MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_part_move_step_by_step'" + ) # Should hang on SYNC_SOURCE until all source replicas acknowledge new pinned UUIDs. - wait_for_state("SYNC_SOURCE", s0r0, "test_part_move_step_by_step", "Some replicas haven\\'t processed event") + wait_for_state( + "SYNC_SOURCE", + s0r0, + "test_part_move_step_by_step", + "Some replicas haven\\'t processed event", + ) deduplication_invariant.assert_no_exception() # Start all replicas in source shard but stop a replica in destination shard @@ -214,10 +256,19 @@ def test_part_move_step_by_step(started_cluster): s0r1.start_clickhouse() # After SYNC_SOURCE step no merges will be assigned. - s0r0.query("SYSTEM START MERGES test_part_move_step_by_step; OPTIMIZE TABLE test_part_move_step_by_step;") - s0r1.query("SYSTEM START MERGES test_part_move_step_by_step; OPTIMIZE TABLE test_part_move_step_by_step;") + s0r0.query( + "SYSTEM START MERGES test_part_move_step_by_step; OPTIMIZE TABLE test_part_move_step_by_step;" + ) + s0r1.query( + "SYSTEM START MERGES test_part_move_step_by_step; OPTIMIZE TABLE test_part_move_step_by_step;" + ) - wait_for_state("SYNC_DESTINATION", s0r0, "test_part_move_step_by_step", "Some replicas haven\\'t processed event") + wait_for_state( + "SYNC_DESTINATION", + s0r0, + "test_part_move_step_by_step", + "Some replicas haven\\'t processed event", + ) deduplication_invariant.assert_no_exception() # Start previously stopped replica in destination shard to let SYNC_DESTINATION @@ -225,7 +276,12 @@ def test_part_move_step_by_step(started_cluster): # Stop the other replica in destination shard to prevent DESTINATION_FETCH succeed. 
s1r0.stop_clickhouse() s1r1.start_clickhouse() - wait_for_state("DESTINATION_FETCH", s0r0, "test_part_move_step_by_step", "Some replicas haven\\'t processed event") + wait_for_state( + "DESTINATION_FETCH", + s0r0, + "test_part_move_step_by_step", + "Some replicas haven\\'t processed event", + ) deduplication_invariant.assert_no_exception() # Start previously stopped replica in destination shard to let DESTINATION_FETCH @@ -233,14 +289,24 @@ def test_part_move_step_by_step(started_cluster): # Stop the other replica in destination shard to prevent DESTINATION_ATTACH succeed. s1r1.stop_clickhouse() s1r0.start_clickhouse() - wait_for_state("DESTINATION_ATTACH", s0r0, "test_part_move_step_by_step", "Some replicas haven\\'t processed event") + wait_for_state( + "DESTINATION_ATTACH", + s0r0, + "test_part_move_step_by_step", + "Some replicas haven\\'t processed event", + ) deduplication_invariant.assert_no_exception() # Start all replicas in destination shard to let DESTINATION_ATTACH succeed. # Stop a source replica to prevent SOURCE_DROP succeeding. s0r0.stop_clickhouse() s1r1.start_clickhouse() - wait_for_state("SOURCE_DROP", s0r1, "test_part_move_step_by_step", "Some replicas haven\\'t processed event") + wait_for_state( + "SOURCE_DROP", + s0r1, + "test_part_move_step_by_step", + "Some replicas haven\\'t processed event", + ) deduplication_invariant.assert_no_exception() s0r0.start_clickhouse() @@ -260,18 +326,24 @@ def test_part_move_step_by_step(started_cluster): def test_part_move_step_by_step_kill(started_cluster): for shard_ix, rs in enumerate([[s0r0, s0r1], [s1r0, s1r1]]): for replica_ix, r in enumerate(rs): - r.query(""" + r.query( + """ DROP TABLE IF EXISTS test_part_move_step_by_step_kill; CREATE TABLE test_part_move_step_by_step_kill(v UInt64) ENGINE ReplicatedMergeTree('/clickhouse/shard_{}/tables/test_part_move_step_by_step_kill', '{}') ORDER BY tuple() - """.format(shard_ix, r.name)) + """.format( + shard_ix, r.name + ) + ) - r.query(""" + r.query( + """ DROP TABLE IF EXISTS test_part_move_step_by_step_kill_d; CREATE TABLE test_part_move_step_by_step_kill_d AS test_part_move_step_by_step_kill ENGINE Distributed('test_cluster', currentDatabase(), test_part_move_step_by_step_kill) - """) + """ + ) s0r0.query("SYSTEM STOP MERGES test_part_move_step_by_step_kill") s0r1.query("SYSTEM STOP MERGES test_part_move_step_by_step_kill") @@ -280,8 +352,14 @@ def test_part_move_step_by_step_kill(started_cluster): s0r0.query("INSERT INTO test_part_move_step_by_step_kill VALUES (2)") s0r1.query("SYSTEM SYNC REPLICA test_part_move_step_by_step_kill", timeout=20) - assert "2" == s0r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip() - assert "0" == s1r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip() + assert ( + "2" + == s0r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip() + ) + assert ( + "0" + == s1r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip() + ) expected = """ 1 @@ -292,10 +370,10 @@ def test_part_move_step_by_step_kill(started_cluster): n = random.choice(list(started_cluster.instances.values())) try: assert TSV( - n.query("SELECT * FROM test_part_move_step_by_step_kill_d ORDER BY v", - settings={ - "allow_experimental_query_deduplication": 1 - }) + n.query( + "SELECT * FROM test_part_move_step_by_step_kill_d ORDER BY v", + settings={"allow_experimental_query_deduplication": 1}, + ) ) == TSV(expected) except QueryRuntimeException as e: # ignore transient errors that are caused by us restarting nodes @@ 
-309,10 +387,16 @@ def test_part_move_step_by_step_kill(started_cluster): s0r1.stop_clickhouse() s0r0.query( - "ALTER TABLE test_part_move_step_by_step_kill MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_part_move_step_by_step_kill'") + "ALTER TABLE test_part_move_step_by_step_kill MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_part_move_step_by_step_kill'" + ) # Should hang on SYNC_SOURCE until all source replicas acknowledge new pinned UUIDs. - wait_for_state("SYNC_SOURCE", s0r0, "test_part_move_step_by_step_kill", "Some replicas haven\\'t processed event") + wait_for_state( + "SYNC_SOURCE", + s0r0, + "test_part_move_step_by_step_kill", + "Some replicas haven\\'t processed event", + ) deduplication_invariant.assert_no_exception() # Start all replicas in source shard but stop a replica in destination shard @@ -321,11 +405,19 @@ def test_part_move_step_by_step_kill(started_cluster): s0r1.start_clickhouse() # After SYNC_SOURCE step no merges will be assigned. - s0r0.query("SYSTEM START MERGES test_part_move_step_by_step_kill; OPTIMIZE TABLE test_part_move_step_by_step_kill;") - s0r1.query("SYSTEM START MERGES test_part_move_step_by_step_kill; OPTIMIZE TABLE test_part_move_step_by_step_kill;") + s0r0.query( + "SYSTEM START MERGES test_part_move_step_by_step_kill; OPTIMIZE TABLE test_part_move_step_by_step_kill;" + ) + s0r1.query( + "SYSTEM START MERGES test_part_move_step_by_step_kill; OPTIMIZE TABLE test_part_move_step_by_step_kill;" + ) - wait_for_state("SYNC_DESTINATION", s0r0, "test_part_move_step_by_step_kill", - "Some replicas haven\\'t processed event") + wait_for_state( + "SYNC_DESTINATION", + s0r0, + "test_part_move_step_by_step_kill", + "Some replicas haven\\'t processed event", + ) deduplication_invariant.assert_no_exception() # Start previously stopped replica in destination shard to let SYNC_DESTINATION @@ -333,39 +425,61 @@ def test_part_move_step_by_step_kill(started_cluster): # Stop the other replica in destination shard to prevent DESTINATION_FETCH succeed. s1r0.stop_clickhouse() s1r1.start_clickhouse() - wait_for_state("DESTINATION_FETCH", s0r0, "test_part_move_step_by_step_kill", - "Some replicas haven\\'t processed event") + wait_for_state( + "DESTINATION_FETCH", + s0r0, + "test_part_move_step_by_step_kill", + "Some replicas haven\\'t processed event", + ) # Start previously stopped replica in destination shard to let DESTINATION_FETCH # succeed. # Stop the other replica in destination shard to prevent DESTINATION_ATTACH succeed. s1r1.stop_clickhouse() s1r0.start_clickhouse() - wait_for_state("DESTINATION_ATTACH", s0r0, "test_part_move_step_by_step_kill", - "Some replicas haven\\'t processed event") + wait_for_state( + "DESTINATION_ATTACH", + s0r0, + "test_part_move_step_by_step_kill", + "Some replicas haven\\'t processed event", + ) deduplication_invariant.assert_no_exception() # Rollback here. 
- s0r0.query(""" + s0r0.query( + """ KILL PART_MOVE_TO_SHARD WHERE task_uuid = (SELECT task_uuid FROM system.part_moves_between_shards WHERE table = 'test_part_move_step_by_step_kill') - """) + """ + ) - wait_for_state("DESTINATION_ATTACH", s0r0, "test_part_move_step_by_step_kill", - assert_exception_msg="Some replicas haven\\'t processed event", - assert_rollback=True) + wait_for_state( + "DESTINATION_ATTACH", + s0r0, + "test_part_move_step_by_step_kill", + assert_exception_msg="Some replicas haven\\'t processed event", + assert_rollback=True, + ) s1r1.start_clickhouse() - wait_for_state("CANCELLED", s0r0, "test_part_move_step_by_step_kill", assert_rollback=True) + wait_for_state( + "CANCELLED", s0r0, "test_part_move_step_by_step_kill", assert_rollback=True + ) deduplication_invariant.assert_no_exception() # No hung tasks in replication queue. Would timeout otherwise. for instance in started_cluster.instances.values(): instance.query("SYSTEM SYNC REPLICA test_part_move_step_by_step_kill") - assert "2" == s0r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip() - assert "0" == s1r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip() + assert ( + "2" + == s0r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip() + ) + assert ( + "0" + == s1r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip() + ) deduplication_invariant.stop_and_assert_no_exception() @@ -379,40 +493,69 @@ def test_move_not_permitted(started_cluster): s1r0.start_clickhouse() for ix, n in enumerate([s0r0, s1r0]): - n.query(""" + n.query( + """ DROP TABLE IF EXISTS not_permitted_columns; CREATE TABLE not_permitted_columns(v_{ix} UInt64) ENGINE ReplicatedMergeTree('/clickhouse/shard_{ix}/tables/not_permitted_columns', 'r') ORDER BY tuple(); - """.format(ix=ix)) + """.format( + ix=ix + ) + ) partition = "date" if ix > 0: partition = "v" - n.query(""" + n.query( + """ DROP TABLE IF EXISTS not_permitted_partition; CREATE TABLE not_permitted_partition(date Date, v UInt64) ENGINE ReplicatedMergeTree('/clickhouse/shard_{ix}/tables/not_permitted_partition', 'r') PARTITION BY ({partition}) ORDER BY tuple(); - """.format(ix=ix, partition=partition)) + """.format( + ix=ix, partition=partition + ) + ) s0r0.query("INSERT INTO not_permitted_columns VALUES (1)") s0r0.query("INSERT INTO not_permitted_partition VALUES ('2021-09-03', 1)") - with pytest.raises(QueryRuntimeException, match="DB::Exception: Source and destination are the same"): - s0r0.query("ALTER TABLE not_permitted_columns MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_0/tables/not_permitted_columns'") + with pytest.raises( + QueryRuntimeException, + match="DB::Exception: Source and destination are the same", + ): + s0r0.query( + "ALTER TABLE not_permitted_columns MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_0/tables/not_permitted_columns'" + ) - with pytest.raises(QueryRuntimeException, match="DB::Exception: Table columns structure in ZooKeeper is different from local table structure."): - s0r0.query("ALTER TABLE not_permitted_columns MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/not_permitted_columns'") + with pytest.raises( + QueryRuntimeException, + match="DB::Exception: Table columns structure in ZooKeeper is different from local table structure.", + ): + s0r0.query( + "ALTER TABLE not_permitted_columns MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/not_permitted_columns'" + ) - with pytest.raises(QueryRuntimeException, match="DB::Exception: Existing table metadata in 
ZooKeeper differs in partition key expression."): - s0r0.query("ALTER TABLE not_permitted_partition MOVE PART '20210903_0_0_0' TO SHARD '/clickhouse/shard_1/tables/not_permitted_partition'") + with pytest.raises( + QueryRuntimeException, + match="DB::Exception: Existing table metadata in ZooKeeper differs in partition key expression.", + ): + s0r0.query( + "ALTER TABLE not_permitted_partition MOVE PART '20210903_0_0_0' TO SHARD '/clickhouse/shard_1/tables/not_permitted_partition'" + ) -def wait_for_state(desired_state, instance, test_table, assert_exception_msg=None, assert_rollback=False): +def wait_for_state( + desired_state, + instance, + test_table, + assert_exception_msg=None, + assert_rollback=False, +): last_debug_print_time = time.time() print("Waiting to reach state: {}".format(desired_state)) @@ -422,9 +565,13 @@ def wait_for_state(desired_state, instance, test_table, assert_exception_msg=Non print(" and rollback: {}".format(assert_rollback)) while True: - tasks = TSV.toMat(instance.query( - "SELECT state, num_tries, last_exception, rollback FROM system.part_moves_between_shards WHERE table = '{}'".format( - test_table))) + tasks = TSV.toMat( + instance.query( + "SELECT state, num_tries, last_exception, rollback FROM system.part_moves_between_shards WHERE table = '{}'".format( + test_table + ) + ) + ) assert len(tasks) == 1, "only one task expected in this test" if time.time() - last_debug_print_time > 30: @@ -448,7 +595,11 @@ def wait_for_state(desired_state, instance, test_table, assert_exception_msg=Non break elif state in ["DONE", "CANCELLED"]: - raise Exception("Reached terminal state {}, but was waiting for {}".format(state, desired_state)) + raise Exception( + "Reached terminal state {}, but was waiting for {}".format( + state, desired_state + ) + ) time.sleep(0.1) @@ -465,7 +616,7 @@ class ConcurrentInvariant: def start(self): if self.started: - raise Exception('invariant thread already started') + raise Exception("invariant thread already started") self.started = True self.thread.start() @@ -496,4 +647,4 @@ class ConcurrentInvariant: def _assert_started(self): if not self.started: - raise Exception('invariant thread not started, forgot to call start?') + raise Exception("invariant thread not started, forgot to call start?") diff --git a/tests/integration/test_part_uuid/test.py b/tests/integration/test_part_uuid/test.py index 0353bf9266d..b30dd884427 100644 --- a/tests/integration/test_part_uuid/test.py +++ b/tests/integration/test_part_uuid/test.py @@ -7,14 +7,20 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( - 'node1', - main_configs=['configs/remote_servers.xml', 'configs/merge_tree_uuids.xml'], - with_zookeeper=True) + "node1", + main_configs=["configs/remote_servers.xml", "configs/merge_tree_uuids.xml"], + with_zookeeper=True, +) node2 = cluster.add_instance( - 'node2', - main_configs=['configs/remote_servers.xml', 'configs/merge_tree_uuids.xml', 'configs/merge_tree_in_memory.xml'], - with_zookeeper=True) + "node2", + main_configs=[ + "configs/remote_servers.xml", + "configs/merge_tree_uuids.xml", + "configs/merge_tree_in_memory.xml", + ], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -30,11 +36,15 @@ def test_part_uuid(started_cluster): uuid_zero = uuid.UUID(bytes=b"\x00" * 16) for ix, n in enumerate([node1, node2]): - n.query(""" + n.query( + """ CREATE TABLE t(key UInt64, value UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t', '{}') ORDER BY tuple() - """.format(ix)) + 
""".format( + ix + ) + ) # Test insert assigns uuid to part. node1.query("INSERT INTO t VALUES (1, 1)") @@ -42,28 +52,46 @@ def test_part_uuid(started_cluster): uuids = set() for node in [node1, node2]: node.query("SYSTEM SYNC REPLICA t") - part_initial_uuid = uuid.UUID(node.query("SELECT uuid FROM system.parts WHERE table = 't' AND active ORDER BY name").strip()) + part_initial_uuid = uuid.UUID( + node.query( + "SELECT uuid FROM system.parts WHERE table = 't' AND active ORDER BY name" + ).strip() + ) uuids.add(part_initial_uuid) assert uuid_zero != part_initial_uuid assert len(uuids) == 1, "expect the same uuid on all the replicas" # Test detach / attach. - node1.query("ALTER TABLE t DETACH PARTITION tuple(); ALTER TABLE t ATTACH PARTITION tuple()") + node1.query( + "ALTER TABLE t DETACH PARTITION tuple(); ALTER TABLE t ATTACH PARTITION tuple()" + ) for node in [node1, node2]: node.query("SYSTEM SYNC REPLICA t") - part_reattach_uuid = uuid.UUID(node.query( - "SELECT uuid FROM system.parts WHERE table = 't' AND active ORDER BY name").strip()) + part_reattach_uuid = uuid.UUID( + node.query( + "SELECT uuid FROM system.parts WHERE table = 't' AND active ORDER BY name" + ).strip() + ) assert part_initial_uuid == part_reattach_uuid # Test mutation assigns new non-zero uuids. - node1.query("ALTER TABLE t UPDATE value = 1 WHERE key = 1 SETTINGS mutations_sync = 2") - part_mutate_uuid = uuid.UUID(node1.query("SELECT uuid FROM system.parts WHERE table = 't' AND active ORDER BY name").strip()) + node1.query( + "ALTER TABLE t UPDATE value = 1 WHERE key = 1 SETTINGS mutations_sync = 2" + ) + part_mutate_uuid = uuid.UUID( + node1.query( + "SELECT uuid FROM system.parts WHERE table = 't' AND active ORDER BY name" + ).strip() + ) assert part_mutate_uuid not in [uuid_zero, part_initial_uuid] node2.query("SYSTEM SYNC REPLICA t") - assert part_mutate_uuid == uuid.UUID(node2.query( - "SELECT uuid FROM system.parts WHERE table = 't' AND active ORDER BY name").strip()) + assert part_mutate_uuid == uuid.UUID( + node2.query( + "SELECT uuid FROM system.parts WHERE table = 't' AND active ORDER BY name" + ).strip() + ) # Test merge assigns new non-zero uuids. 
node2.query("INSERT INTO t VALUES (1, 1)") @@ -72,8 +100,11 @@ def test_part_uuid(started_cluster): uuids = set() for node in [node1, node2]: node.query("SYSTEM SYNC REPLICA t") - part_merge_uuid = uuid.UUID(node.query( - "SELECT uuid FROM system.parts WHERE table = 't' AND active ORDER BY name").strip()) + part_merge_uuid = uuid.UUID( + node.query( + "SELECT uuid FROM system.parts WHERE table = 't' AND active ORDER BY name" + ).strip() + ) uuids.add(part_merge_uuid) assert part_mutate_uuid not in [uuid_zero, part_merge_uuid] assert len(uuids) == 1, "expect the same uuid on all the replicas" @@ -83,19 +114,32 @@ def test_part_uuid_wal(started_cluster): uuid_zero = uuid.UUID(bytes=b"\x00" * 16) for ix, n in enumerate([node1, node2]): - n.query(""" + n.query( + """ CREATE TABLE t_wal(key UInt64, value UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t_wal', '{}') ORDER BY tuple() - """.format(ix)) + """.format( + ix + ) + ) node2.query("INSERT INTO t_wal VALUES (1, 1)") uuids = set() for node in [node1, node2]: node.query("SYSTEM SYNC REPLICA t_wal") - part_initial_uuid = uuid.UUID(node.query("SELECT uuid FROM system.parts WHERE table = 't_wal' AND active ORDER BY name").strip()) - assert "InMemory" == node.query("SELECT part_type FROM system.parts WHERE table = 't_wal' AND active ORDER BY name").strip() + part_initial_uuid = uuid.UUID( + node.query( + "SELECT uuid FROM system.parts WHERE table = 't_wal' AND active ORDER BY name" + ).strip() + ) + assert ( + "InMemory" + == node.query( + "SELECT part_type FROM system.parts WHERE table = 't_wal' AND active ORDER BY name" + ).strip() + ) uuids.add(part_initial_uuid) assert uuid_zero != part_initial_uuid assert len(uuids) == 1, "expect the same uuid on all the replicas" @@ -103,6 +147,9 @@ def test_part_uuid_wal(started_cluster): # Test detach / attach table to trigger WAL processing. 
for node in [node1, node2]: node.query("DETACH TABLE t_wal; ATTACH TABLE t_wal") - part_reattach_uuid = uuid.UUID(node.query( - "SELECT uuid FROM system.parts WHERE table = 't_wal' AND active ORDER BY name").strip()) + part_reattach_uuid = uuid.UUID( + node.query( + "SELECT uuid FROM system.parts WHERE table = 't_wal' AND active ORDER BY name" + ).strip() + ) assert part_initial_uuid == part_reattach_uuid diff --git a/tests/integration/test_partition/test.py b/tests/integration/test_partition/test.py index 0a44ae332c2..b396b58df10 100644 --- a/tests/integration/test_partition/test.py +++ b/tests/integration/test_partition/test.py @@ -4,16 +4,18 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance("instance") q = instance.query -path_to_data = '/var/lib/clickhouse/' +path_to_data = "/var/lib/clickhouse/" @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - q('CREATE DATABASE test ENGINE = Ordinary') # Different path in shadow/ with Atomic + q( + "CREATE DATABASE test ENGINE = Ordinary" + ) # Different path in shadow/ with Atomic yield cluster @@ -24,15 +26,17 @@ def started_cluster(): @pytest.fixture def partition_table_simple(started_cluster): q("DROP TABLE IF EXISTS test.partition_simple") - q("CREATE TABLE test.partition_simple (date MATERIALIZED toDate(0), x UInt64, sample_key MATERIALIZED intHash64(x)) " - "ENGINE=MergeTree PARTITION BY date SAMPLE BY sample_key ORDER BY (date,x,sample_key) " - "SETTINGS index_granularity=8192, index_granularity_bytes=0") + q( + "CREATE TABLE test.partition_simple (date MATERIALIZED toDate(0), x UInt64, sample_key MATERIALIZED intHash64(x)) " + "ENGINE=MergeTree PARTITION BY date SAMPLE BY sample_key ORDER BY (date,x,sample_key) " + "SETTINGS index_granularity=8192, index_granularity_bytes=0" + ) q("INSERT INTO test.partition_simple ( x ) VALUES ( now() )") q("INSERT INTO test.partition_simple ( x ) VALUES ( now()+1 )") yield - q('DROP TABLE test.partition_simple') + q("DROP TABLE test.partition_simple") def test_partition_simple(partition_table_simple): @@ -42,55 +46,70 @@ def test_partition_simple(partition_table_simple): def partition_complex_assert_columns_txt(): - path_to_parts = path_to_data + 'data/test/partition_complex/' - parts = TSV(q("SELECT name FROM system.parts WHERE database='test' AND table='partition_complex'")) + path_to_parts = path_to_data + "data/test/partition_complex/" + parts = TSV( + q( + "SELECT name FROM system.parts WHERE database='test' AND table='partition_complex'" + ) + ) assert len(parts) > 0 for part_name in parts.lines: - path_to_columns = path_to_parts + part_name + '/columns.txt' + path_to_columns = path_to_parts + part_name + "/columns.txt" # 2 header lines + 3 columns - assert instance.exec_in_container(['wc', '-l', path_to_columns]).split()[0] == '5' + assert ( + instance.exec_in_container(["wc", "-l", path_to_columns]).split()[0] == "5" + ) def partition_complex_assert_checksums(): # Do not check increment.txt - it can be changed by other tests with FREEZE - cmd = ["bash", "-c", f"cd {path_to_data} && find shadow -type f -exec" + " md5sum {} \\; | grep partition_complex" \ - " | sed 's shadow/[0-9]*/data/[a-z0-9_-]*/ shadow/1/data/test/ g' | sort | uniq"] + cmd = [ + "bash", + "-c", + f"cd {path_to_data} && find shadow -type f -exec" + + " md5sum {} \\; | grep partition_complex" + " | sed 's shadow/[0-9]*/data/[a-z0-9_-]*/ shadow/1/data/test/ 
g' | sort | uniq", + ] - checksums = "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.bin\n" \ - "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.bin\n" \ - "13cae8e658e0ca4f75c56b1fc424e150\tshadow/1/data/test/partition_complex/19700102_2_2_0/minmax_p.idx\n" \ - "25daad3d9e60b45043a70c4ab7d3b1c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/partition.dat\n" \ - "3726312af62aec86b64a7708d5751787\tshadow/1/data/test/partition_complex/19700201_1_1_0/partition.dat\n" \ - "37855b06a39b79a67ea4e86e4a3299aa\tshadow/1/data/test/partition_complex/19700102_2_2_0/checksums.txt\n" \ - "38e62ff37e1e5064e9a3f605dfe09d13\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.bin\n" \ - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.mrk\n" \ - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.mrk\n" \ - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.mrk\n" \ - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.mrk\n" \ - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.mrk\n" \ - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.mrk\n" \ - "55a54008ad1ba589aa210d2629c1df41\tshadow/1/data/test/partition_complex/19700201_1_1_0/primary.idx\n" \ - "5f087cb3e7071bf9407e095821e2af8f\tshadow/1/data/test/partition_complex/19700201_1_1_0/checksums.txt\n" \ - "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700102_2_2_0/columns.txt\n" \ - "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700201_1_1_0/columns.txt\n" \ - "88cdc31ded355e7572d68d8cde525d3a\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.bin\n" \ - "9e688c58a5487b8eaf69c9e1005ad0bf\tshadow/1/data/test/partition_complex/19700102_2_2_0/primary.idx\n" \ - "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700102_2_2_0/default_compression_codec.txt\n" \ - "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700201_1_1_0/default_compression_codec.txt\n" \ - "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700102_2_2_0/count.txt\n" \ - "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700201_1_1_0/count.txt\n" \ - "cfcb770c3ecd0990dcceb1bde129e6c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.bin\n" \ - "e2af3bef1fd129aea73a890ede1e7a30\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.bin\n" \ - "f2312862cc01adf34a93151377be2ddf\tshadow/1/data/test/partition_complex/19700201_1_1_0/minmax_p.idx\n" + checksums = ( + "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.bin\n" + "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.bin\n" + "13cae8e658e0ca4f75c56b1fc424e150\tshadow/1/data/test/partition_complex/19700102_2_2_0/minmax_p.idx\n" + "25daad3d9e60b45043a70c4ab7d3b1c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/partition.dat\n" + "3726312af62aec86b64a7708d5751787\tshadow/1/data/test/partition_complex/19700201_1_1_0/partition.dat\n" + "37855b06a39b79a67ea4e86e4a3299aa\tshadow/1/data/test/partition_complex/19700102_2_2_0/checksums.txt\n" + "38e62ff37e1e5064e9a3f605dfe09d13\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.bin\n" + 
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.mrk\n" + "55a54008ad1ba589aa210d2629c1df41\tshadow/1/data/test/partition_complex/19700201_1_1_0/primary.idx\n" + "5f087cb3e7071bf9407e095821e2af8f\tshadow/1/data/test/partition_complex/19700201_1_1_0/checksums.txt\n" + "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700102_2_2_0/columns.txt\n" + "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700201_1_1_0/columns.txt\n" + "88cdc31ded355e7572d68d8cde525d3a\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.bin\n" + "9e688c58a5487b8eaf69c9e1005ad0bf\tshadow/1/data/test/partition_complex/19700102_2_2_0/primary.idx\n" + "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700102_2_2_0/default_compression_codec.txt\n" + "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700201_1_1_0/default_compression_codec.txt\n" + "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700102_2_2_0/count.txt\n" + "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700201_1_1_0/count.txt\n" + "cfcb770c3ecd0990dcceb1bde129e6c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.bin\n" + "e2af3bef1fd129aea73a890ede1e7a30\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.bin\n" + "f2312862cc01adf34a93151377be2ddf\tshadow/1/data/test/partition_complex/19700201_1_1_0/minmax_p.idx\n" + ) - assert TSV(instance.exec_in_container(cmd).replace(' ', '\t')) == TSV(checksums) + assert TSV(instance.exec_in_container(cmd).replace(" ", "\t")) == TSV(checksums) @pytest.fixture def partition_table_complex(started_cluster): q("DROP TABLE IF EXISTS test.partition_complex") - q("CREATE TABLE test.partition_complex (p Date, k Int8, v1 Int8 MATERIALIZED k + 1) " - "ENGINE = MergeTree PARTITION BY p ORDER BY k SETTINGS index_granularity=1, index_granularity_bytes=0") + q( + "CREATE TABLE test.partition_complex (p Date, k Int8, v1 Int8 MATERIALIZED k + 1) " + "ENGINE = MergeTree PARTITION BY p ORDER BY k SETTINGS index_granularity=1, index_granularity_bytes=0" + ) q("INSERT INTO test.partition_complex (p, k) VALUES(toDate(31), 1)") q("INSERT INTO test.partition_complex (p, k) VALUES(toDate(1), 2)") @@ -118,16 +137,17 @@ def test_partition_complex(partition_table_complex): q("OPTIMIZE TABLE test.partition_complex") - expected = TSV('31\t1\t2\n' - '1\t2\t3') + expected = TSV("31\t1\t2\n" "1\t2\t3") res = q("SELECT toUInt16(p), k, v1 FROM test.partition_complex ORDER BY k") - assert (TSV(res) == expected) + assert TSV(res) == expected @pytest.fixture def cannot_attach_active_part_table(started_cluster): q("DROP TABLE IF EXISTS test.attach_active") - q("CREATE TABLE test.attach_active (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 4) ORDER BY n") + q( + "CREATE TABLE test.attach_active (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 4) ORDER BY n" + ) q("INSERT INTO test.attach_active SELECT number FROM system.numbers LIMIT 16") yield @@ -136,22 +156,32 @@ 
def cannot_attach_active_part_table(started_cluster): def test_cannot_attach_active_part(cannot_attach_active_part_table): - error = instance.client.query_and_get_error("ALTER TABLE test.attach_active ATTACH PART '../1_2_2_0'") + error = instance.client.query_and_get_error( + "ALTER TABLE test.attach_active ATTACH PART '../1_2_2_0'" + ) print(error) - assert 0 <= error.find('Invalid part name') + assert 0 <= error.find("Invalid part name") - res = q("SElECT name FROM system.parts WHERE table='attach_active' AND database='test' ORDER BY name") - assert TSV(res) == TSV('0_1_1_0\n1_2_2_0\n2_3_3_0\n3_4_4_0') - assert TSV(q("SElECT count(), sum(n) FROM test.attach_active")) == TSV('16\t120') + res = q( + "SElECT name FROM system.parts WHERE table='attach_active' AND database='test' ORDER BY name" + ) + assert TSV(res) == TSV("0_1_1_0\n1_2_2_0\n2_3_3_0\n3_4_4_0") + assert TSV(q("SElECT count(), sum(n) FROM test.attach_active")) == TSV("16\t120") @pytest.fixture def attach_check_all_parts_table(started_cluster): q("SYSTEM STOP MERGES") q("DROP TABLE IF EXISTS test.attach_partition") - q("CREATE TABLE test.attach_partition (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n") - q("INSERT INTO test.attach_partition SELECT number FROM system.numbers WHERE number % 2 = 0 LIMIT 8") - q("INSERT INTO test.attach_partition SELECT number FROM system.numbers WHERE number % 2 = 1 LIMIT 8") + q( + "CREATE TABLE test.attach_partition (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n" + ) + q( + "INSERT INTO test.attach_partition SELECT number FROM system.numbers WHERE number % 2 = 0 LIMIT 8" + ) + q( + "INSERT INTO test.attach_partition SELECT number FROM system.numbers WHERE number % 2 = 1 LIMIT 8" + ) yield @@ -162,40 +192,74 @@ def attach_check_all_parts_table(started_cluster): def test_attach_check_all_parts(attach_check_all_parts_table): q("ALTER TABLE test.attach_partition DETACH PARTITION 0") - path_to_detached = path_to_data + 'data/test/attach_partition/detached/' - instance.exec_in_container(['mkdir', '{}'.format(path_to_detached + '0_5_5_0')]) - instance.exec_in_container(['cp', '-pr', path_to_detached + '0_1_1_0', path_to_detached + 'attaching_0_6_6_0']) - instance.exec_in_container(['cp', '-pr', path_to_detached + '0_3_3_0', path_to_detached + 'deleting_0_7_7_0']) + path_to_detached = path_to_data + "data/test/attach_partition/detached/" + instance.exec_in_container(["mkdir", "{}".format(path_to_detached + "0_5_5_0")]) + instance.exec_in_container( + [ + "cp", + "-pr", + path_to_detached + "0_1_1_0", + path_to_detached + "attaching_0_6_6_0", + ] + ) + instance.exec_in_container( + [ + "cp", + "-pr", + path_to_detached + "0_3_3_0", + path_to_detached + "deleting_0_7_7_0", + ] + ) - error = instance.client.query_and_get_error("ALTER TABLE test.attach_partition ATTACH PARTITION 0") - assert 0 <= error.find('No columns in part 0_5_5_0') or 0 <= error.find('No columns.txt in part 0_5_5_0') + error = instance.client.query_and_get_error( + "ALTER TABLE test.attach_partition ATTACH PARTITION 0" + ) + assert 0 <= error.find("No columns in part 0_5_5_0") or 0 <= error.find( + "No columns.txt in part 0_5_5_0" + ) - parts = q("SElECT name FROM system.parts WHERE table='attach_partition' AND database='test' ORDER BY name") - assert TSV(parts) == TSV('1_2_2_0\n1_4_4_0') - detached = q("SELECT name FROM system.detached_parts " - "WHERE table='attach_partition' AND database='test' ORDER BY name") - assert TSV(detached) == 
TSV('0_1_1_0\n0_3_3_0\n0_5_5_0\nattaching_0_6_6_0\ndeleting_0_7_7_0') + parts = q( + "SElECT name FROM system.parts WHERE table='attach_partition' AND database='test' ORDER BY name" + ) + assert TSV(parts) == TSV("1_2_2_0\n1_4_4_0") + detached = q( + "SELECT name FROM system.detached_parts " + "WHERE table='attach_partition' AND database='test' ORDER BY name" + ) + assert TSV(detached) == TSV( + "0_1_1_0\n0_3_3_0\n0_5_5_0\nattaching_0_6_6_0\ndeleting_0_7_7_0" + ) - instance.exec_in_container(['rm', '-r', path_to_detached + '0_5_5_0']) + instance.exec_in_container(["rm", "-r", path_to_detached + "0_5_5_0"]) q("ALTER TABLE test.attach_partition ATTACH PARTITION 0") - parts = q("SElECT name FROM system.parts WHERE table='attach_partition' AND database='test' ORDER BY name") - expected = '0_5_5_0\n0_6_6_0\n1_2_2_0\n1_4_4_0' + parts = q( + "SElECT name FROM system.parts WHERE table='attach_partition' AND database='test' ORDER BY name" + ) + expected = "0_5_5_0\n0_6_6_0\n1_2_2_0\n1_4_4_0" assert TSV(parts) == TSV(expected) - assert TSV(q("SElECT count(), sum(n) FROM test.attach_partition")) == TSV('16\t120') + assert TSV(q("SElECT count(), sum(n) FROM test.attach_partition")) == TSV("16\t120") - detached = q("SELECT name FROM system.detached_parts " - "WHERE table='attach_partition' AND database='test' ORDER BY name") - assert TSV(detached) == TSV('attaching_0_6_6_0\ndeleting_0_7_7_0') + detached = q( + "SELECT name FROM system.detached_parts " + "WHERE table='attach_partition' AND database='test' ORDER BY name" + ) + assert TSV(detached) == TSV("attaching_0_6_6_0\ndeleting_0_7_7_0") @pytest.fixture def drop_detached_parts_table(started_cluster): q("SYSTEM STOP MERGES") q("DROP TABLE IF EXISTS test.drop_detached") - q("CREATE TABLE test.drop_detached (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n") - q("INSERT INTO test.drop_detached SELECT number FROM system.numbers WHERE number % 2 = 0 LIMIT 8") - q("INSERT INTO test.drop_detached SELECT number FROM system.numbers WHERE number % 2 = 1 LIMIT 8") + q( + "CREATE TABLE test.drop_detached (n UInt64) ENGINE = MergeTree() PARTITION BY intDiv(n, 8) ORDER BY n" + ) + q( + "INSERT INTO test.drop_detached SELECT number FROM system.numbers WHERE number % 2 = 0 LIMIT 8" + ) + q( + "INSERT INTO test.drop_detached SELECT number FROM system.numbers WHERE number % 2 = 1 LIMIT 8" + ) yield @@ -208,126 +272,208 @@ def test_drop_detached_parts(drop_detached_parts_table): q("ALTER TABLE test.drop_detached DETACH PARTITION 0") q("ALTER TABLE test.drop_detached DETACH PARTITION 1") - path_to_detached = path_to_data + 'data/test/drop_detached/detached/' - instance.exec_in_container(['mkdir', '{}'.format(path_to_detached + 'attaching_0_6_6_0')]) - instance.exec_in_container(['mkdir', '{}'.format(path_to_detached + 'deleting_0_7_7_0')]) - instance.exec_in_container(['mkdir', '{}'.format(path_to_detached + 'any_other_name')]) - instance.exec_in_container(['mkdir', '{}'.format(path_to_detached + 'prefix_1_2_2_0_0')]) + path_to_detached = path_to_data + "data/test/drop_detached/detached/" + instance.exec_in_container( + ["mkdir", "{}".format(path_to_detached + "attaching_0_6_6_0")] + ) + instance.exec_in_container( + ["mkdir", "{}".format(path_to_detached + "deleting_0_7_7_0")] + ) + instance.exec_in_container( + ["mkdir", "{}".format(path_to_detached + "any_other_name")] + ) + instance.exec_in_container( + ["mkdir", "{}".format(path_to_detached + "prefix_1_2_2_0_0")] + ) - error = instance.client.query_and_get_error("ALTER TABLE test.drop_detached 
DROP DETACHED PART '../1_2_2_0'", - settings=s) - assert 0 <= error.find('Invalid part name') + error = instance.client.query_and_get_error( + "ALTER TABLE test.drop_detached DROP DETACHED PART '../1_2_2_0'", settings=s + ) + assert 0 <= error.find("Invalid part name") q("ALTER TABLE test.drop_detached DROP DETACHED PART '0_1_1_0'", settings=s) - error = instance.client.query_and_get_error("ALTER TABLE test.drop_detached DROP DETACHED PART 'attaching_0_6_6_0'", - settings=s) - assert 0 <= error.find('Cannot drop part') + error = instance.client.query_and_get_error( + "ALTER TABLE test.drop_detached DROP DETACHED PART 'attaching_0_6_6_0'", + settings=s, + ) + assert 0 <= error.find("Cannot drop part") - error = instance.client.query_and_get_error("ALTER TABLE test.drop_detached DROP DETACHED PART 'deleting_0_7_7_0'", - settings=s) - assert 0 <= error.find('Cannot drop part') + error = instance.client.query_and_get_error( + "ALTER TABLE test.drop_detached DROP DETACHED PART 'deleting_0_7_7_0'", + settings=s, + ) + assert 0 <= error.find("Cannot drop part") q("ALTER TABLE test.drop_detached DROP DETACHED PART 'any_other_name'", settings=s) - detached = q("SElECT name FROM system.detached_parts WHERE table='drop_detached' AND database='test' ORDER BY name") - assert TSV(detached) == TSV('0_3_3_0\n1_2_2_0\n1_4_4_0\nattaching_0_6_6_0\ndeleting_0_7_7_0\nprefix_1_2_2_0_0') + detached = q( + "SElECT name FROM system.detached_parts WHERE table='drop_detached' AND database='test' ORDER BY name" + ) + assert TSV(detached) == TSV( + "0_3_3_0\n1_2_2_0\n1_4_4_0\nattaching_0_6_6_0\ndeleting_0_7_7_0\nprefix_1_2_2_0_0" + ) q("ALTER TABLE test.drop_detached DROP DETACHED PARTITION 1", settings=s) - detached = q("SElECT name FROM system.detached_parts WHERE table='drop_detached' AND database='test' ORDER BY name") - assert TSV(detached) == TSV('0_3_3_0\nattaching_0_6_6_0\ndeleting_0_7_7_0') + detached = q( + "SElECT name FROM system.detached_parts WHERE table='drop_detached' AND database='test' ORDER BY name" + ) + assert TSV(detached) == TSV("0_3_3_0\nattaching_0_6_6_0\ndeleting_0_7_7_0") + def test_system_detached_parts(drop_detached_parts_table): q("create table sdp_0 (n int, x int) engine=MergeTree order by n") q("create table sdp_1 (n int, x int) engine=MergeTree order by n partition by x") q("create table sdp_2 (n int, x String) engine=MergeTree order by n partition by x") - q("create table sdp_3 (n int, x Enum('broken' = 0, 'all' = 1)) engine=MergeTree order by n partition by x") + q( + "create table sdp_3 (n int, x Enum('broken' = 0, 'all' = 1)) engine=MergeTree order by n partition by x" + ) for i in range(0, 4): q("system stop merges sdp_{}".format(i)) q("insert into sdp_{} values (0, 0)".format(i)) q("insert into sdp_{} values (1, 1)".format(i)) - for p in q("select distinct partition_id from system.parts where table='sdp_{}'".format(i))[:-1].split('\n'): + for p in q( + "select distinct partition_id from system.parts where table='sdp_{}'".format( + i + ) + )[:-1].split("\n"): q("alter table sdp_{} detach partition id '{}'".format(i, p)) - path_to_detached = path_to_data + 'data/default/sdp_{}/detached/{}' + path_to_detached = path_to_data + "data/default/sdp_{}/detached/{}" for i in range(0, 4): - instance.exec_in_container(['mkdir', path_to_detached.format(i, 'attaching_0_6_6_0')]) - instance.exec_in_container(['mkdir', path_to_detached.format(i, 'deleting_0_7_7_0')]) - instance.exec_in_container(['mkdir', path_to_detached.format(i, 'any_other_name')]) - instance.exec_in_container(['mkdir', 
path_to_detached.format(i, 'prefix_1_2_2_0_0')]) + instance.exec_in_container( + ["mkdir", path_to_detached.format(i, "attaching_0_6_6_0")] + ) + instance.exec_in_container( + ["mkdir", path_to_detached.format(i, "deleting_0_7_7_0")] + ) + instance.exec_in_container( + ["mkdir", path_to_detached.format(i, "any_other_name")] + ) + instance.exec_in_container( + ["mkdir", path_to_detached.format(i, "prefix_1_2_2_0_0")] + ) - instance.exec_in_container(['mkdir', path_to_detached.format(i, 'ignored_202107_714380_714380_0')]) - instance.exec_in_container(['mkdir', path_to_detached.format(i, 'broken_202107_714380_714380_123')]) - instance.exec_in_container(['mkdir', path_to_detached.format(i, 'clone_all_714380_714380_42')]) - instance.exec_in_container(['mkdir', path_to_detached.format(i, 'clone_all_714380_714380_42_123')]) - instance.exec_in_container(['mkdir', path_to_detached.format(i, 'broken-on-start_6711e2b2592d86d18fc0f260cf33ef2b_714380_714380_42_123')]) + instance.exec_in_container( + ["mkdir", path_to_detached.format(i, "ignored_202107_714380_714380_0")] + ) + instance.exec_in_container( + ["mkdir", path_to_detached.format(i, "broken_202107_714380_714380_123")] + ) + instance.exec_in_container( + ["mkdir", path_to_detached.format(i, "clone_all_714380_714380_42")] + ) + instance.exec_in_container( + ["mkdir", path_to_detached.format(i, "clone_all_714380_714380_42_123")] + ) + instance.exec_in_container( + [ + "mkdir", + path_to_detached.format( + i, + "broken-on-start_6711e2b2592d86d18fc0f260cf33ef2b_714380_714380_42_123", + ), + ] + ) - res = q("select * from system.detached_parts where table like 'sdp_%' order by table, name") - assert res == \ - "default\tsdp_0\tall\tall_1_1_0\tdefault\t\t1\t1\t0\n" \ - "default\tsdp_0\tall\tall_2_2_0\tdefault\t\t2\t2\t0\n" \ - "default\tsdp_0\t\\N\tany_other_name\tdefault\t\\N\t\\N\t\\N\t\\N\n" \ - "default\tsdp_0\t0\tattaching_0_6_6_0\tdefault\tattaching\t6\t6\t0\n" \ - "default\tsdp_0\t6711e2b2592d86d18fc0f260cf33ef2b\tbroken-on-start_6711e2b2592d86d18fc0f260cf33ef2b_714380_714380_42_123\tdefault\tbroken-on-start\t714380\t714380\t42\n" \ - "default\tsdp_0\t202107\tbroken_202107_714380_714380_123\tdefault\tbroken\t714380\t714380\t123\n" \ - "default\tsdp_0\tall\tclone_all_714380_714380_42\tdefault\tclone\t714380\t714380\t42\n" \ - "default\tsdp_0\tall\tclone_all_714380_714380_42_123\tdefault\tclone\t714380\t714380\t42\n" \ - "default\tsdp_0\t0\tdeleting_0_7_7_0\tdefault\tdeleting\t7\t7\t0\n" \ - "default\tsdp_0\t202107\tignored_202107_714380_714380_0\tdefault\tignored\t714380\t714380\t0\n" \ - "default\tsdp_0\t1\tprefix_1_2_2_0_0\tdefault\tprefix\t2\t2\t0\n" \ - "default\tsdp_1\t0\t0_1_1_0\tdefault\t\t1\t1\t0\n" \ - "default\tsdp_1\t1\t1_2_2_0\tdefault\t\t2\t2\t0\n" \ - "default\tsdp_1\t\\N\tany_other_name\tdefault\t\\N\t\\N\t\\N\t\\N\n" \ - "default\tsdp_1\t0\tattaching_0_6_6_0\tdefault\tattaching\t6\t6\t0\n" \ - "default\tsdp_1\t6711e2b2592d86d18fc0f260cf33ef2b\tbroken-on-start_6711e2b2592d86d18fc0f260cf33ef2b_714380_714380_42_123\tdefault\tbroken-on-start\t714380\t714380\t42\n" \ - "default\tsdp_1\t202107\tbroken_202107_714380_714380_123\tdefault\tbroken\t714380\t714380\t123\n" \ - "default\tsdp_1\tall\tclone_all_714380_714380_42\tdefault\tclone\t714380\t714380\t42\n" \ - "default\tsdp_1\tall\tclone_all_714380_714380_42_123\tdefault\tclone\t714380\t714380\t42\n" \ - "default\tsdp_1\t0\tdeleting_0_7_7_0\tdefault\tdeleting\t7\t7\t0\n" \ - "default\tsdp_1\t202107\tignored_202107_714380_714380_0\tdefault\tignored\t714380\t714380\t0\n" \ - 
"default\tsdp_1\t1\tprefix_1_2_2_0_0\tdefault\tprefix\t2\t2\t0\n" \ - "default\tsdp_2\t58ed7160db50ea45e1c6aa694c8cbfd1\t58ed7160db50ea45e1c6aa694c8cbfd1_1_1_0\tdefault\t\t1\t1\t0\n" \ - "default\tsdp_2\t6711e2b2592d86d18fc0f260cf33ef2b\t6711e2b2592d86d18fc0f260cf33ef2b_2_2_0\tdefault\t\t2\t2\t0\n" \ - "default\tsdp_2\t\\N\tany_other_name\tdefault\t\\N\t\\N\t\\N\t\\N\n" \ - "default\tsdp_2\t0\tattaching_0_6_6_0\tdefault\tattaching\t6\t6\t0\n" \ - "default\tsdp_2\t6711e2b2592d86d18fc0f260cf33ef2b\tbroken-on-start_6711e2b2592d86d18fc0f260cf33ef2b_714380_714380_42_123\tdefault\tbroken-on-start\t714380\t714380\t42\n" \ - "default\tsdp_2\t202107\tbroken_202107_714380_714380_123\tdefault\tbroken\t714380\t714380\t123\n" \ - "default\tsdp_2\tall\tclone_all_714380_714380_42\tdefault\tclone\t714380\t714380\t42\n" \ - "default\tsdp_2\tall\tclone_all_714380_714380_42_123\tdefault\tclone\t714380\t714380\t42\n" \ - "default\tsdp_2\t0\tdeleting_0_7_7_0\tdefault\tdeleting\t7\t7\t0\n" \ - "default\tsdp_2\t202107\tignored_202107_714380_714380_0\tdefault\tignored\t714380\t714380\t0\n" \ - "default\tsdp_2\t1\tprefix_1_2_2_0_0\tdefault\tprefix\t2\t2\t0\n" \ - "default\tsdp_3\t0\t0_1_1_0\tdefault\t\t1\t1\t0\n" \ - "default\tsdp_3\t1\t1_2_2_0\tdefault\t\t2\t2\t0\n" \ - "default\tsdp_3\t\\N\tany_other_name\tdefault\t\\N\t\\N\t\\N\t\\N\n" \ - "default\tsdp_3\t0\tattaching_0_6_6_0\tdefault\tattaching\t6\t6\t0\n" \ - "default\tsdp_3\t6711e2b2592d86d18fc0f260cf33ef2b\tbroken-on-start_6711e2b2592d86d18fc0f260cf33ef2b_714380_714380_42_123\tdefault\tbroken-on-start\t714380\t714380\t42\n" \ - "default\tsdp_3\t202107\tbroken_202107_714380_714380_123\tdefault\tbroken\t714380\t714380\t123\n" \ - "default\tsdp_3\tall\tclone_all_714380_714380_42\tdefault\tclone\t714380\t714380\t42\n" \ - "default\tsdp_3\tall\tclone_all_714380_714380_42_123\tdefault\tclone\t714380\t714380\t42\n" \ - "default\tsdp_3\t0\tdeleting_0_7_7_0\tdefault\tdeleting\t7\t7\t0\n" \ - "default\tsdp_3\t202107\tignored_202107_714380_714380_0\tdefault\tignored\t714380\t714380\t0\n" \ + res = q( + "select * from system.detached_parts where table like 'sdp_%' order by table, name" + ) + assert ( + res == "default\tsdp_0\tall\tall_1_1_0\tdefault\t\t1\t1\t0\n" + "default\tsdp_0\tall\tall_2_2_0\tdefault\t\t2\t2\t0\n" + "default\tsdp_0\t\\N\tany_other_name\tdefault\t\\N\t\\N\t\\N\t\\N\n" + "default\tsdp_0\t0\tattaching_0_6_6_0\tdefault\tattaching\t6\t6\t0\n" + "default\tsdp_0\t6711e2b2592d86d18fc0f260cf33ef2b\tbroken-on-start_6711e2b2592d86d18fc0f260cf33ef2b_714380_714380_42_123\tdefault\tbroken-on-start\t714380\t714380\t42\n" + "default\tsdp_0\t202107\tbroken_202107_714380_714380_123\tdefault\tbroken\t714380\t714380\t123\n" + "default\tsdp_0\tall\tclone_all_714380_714380_42\tdefault\tclone\t714380\t714380\t42\n" + "default\tsdp_0\tall\tclone_all_714380_714380_42_123\tdefault\tclone\t714380\t714380\t42\n" + "default\tsdp_0\t0\tdeleting_0_7_7_0\tdefault\tdeleting\t7\t7\t0\n" + "default\tsdp_0\t202107\tignored_202107_714380_714380_0\tdefault\tignored\t714380\t714380\t0\n" + "default\tsdp_0\t1\tprefix_1_2_2_0_0\tdefault\tprefix\t2\t2\t0\n" + "default\tsdp_1\t0\t0_1_1_0\tdefault\t\t1\t1\t0\n" + "default\tsdp_1\t1\t1_2_2_0\tdefault\t\t2\t2\t0\n" + "default\tsdp_1\t\\N\tany_other_name\tdefault\t\\N\t\\N\t\\N\t\\N\n" + "default\tsdp_1\t0\tattaching_0_6_6_0\tdefault\tattaching\t6\t6\t0\n" + "default\tsdp_1\t6711e2b2592d86d18fc0f260cf33ef2b\tbroken-on-start_6711e2b2592d86d18fc0f260cf33ef2b_714380_714380_42_123\tdefault\tbroken-on-start\t714380\t714380\t42\n" + 
"default\tsdp_1\t202107\tbroken_202107_714380_714380_123\tdefault\tbroken\t714380\t714380\t123\n" + "default\tsdp_1\tall\tclone_all_714380_714380_42\tdefault\tclone\t714380\t714380\t42\n" + "default\tsdp_1\tall\tclone_all_714380_714380_42_123\tdefault\tclone\t714380\t714380\t42\n" + "default\tsdp_1\t0\tdeleting_0_7_7_0\tdefault\tdeleting\t7\t7\t0\n" + "default\tsdp_1\t202107\tignored_202107_714380_714380_0\tdefault\tignored\t714380\t714380\t0\n" + "default\tsdp_1\t1\tprefix_1_2_2_0_0\tdefault\tprefix\t2\t2\t0\n" + "default\tsdp_2\t58ed7160db50ea45e1c6aa694c8cbfd1\t58ed7160db50ea45e1c6aa694c8cbfd1_1_1_0\tdefault\t\t1\t1\t0\n" + "default\tsdp_2\t6711e2b2592d86d18fc0f260cf33ef2b\t6711e2b2592d86d18fc0f260cf33ef2b_2_2_0\tdefault\t\t2\t2\t0\n" + "default\tsdp_2\t\\N\tany_other_name\tdefault\t\\N\t\\N\t\\N\t\\N\n" + "default\tsdp_2\t0\tattaching_0_6_6_0\tdefault\tattaching\t6\t6\t0\n" + "default\tsdp_2\t6711e2b2592d86d18fc0f260cf33ef2b\tbroken-on-start_6711e2b2592d86d18fc0f260cf33ef2b_714380_714380_42_123\tdefault\tbroken-on-start\t714380\t714380\t42\n" + "default\tsdp_2\t202107\tbroken_202107_714380_714380_123\tdefault\tbroken\t714380\t714380\t123\n" + "default\tsdp_2\tall\tclone_all_714380_714380_42\tdefault\tclone\t714380\t714380\t42\n" + "default\tsdp_2\tall\tclone_all_714380_714380_42_123\tdefault\tclone\t714380\t714380\t42\n" + "default\tsdp_2\t0\tdeleting_0_7_7_0\tdefault\tdeleting\t7\t7\t0\n" + "default\tsdp_2\t202107\tignored_202107_714380_714380_0\tdefault\tignored\t714380\t714380\t0\n" + "default\tsdp_2\t1\tprefix_1_2_2_0_0\tdefault\tprefix\t2\t2\t0\n" + "default\tsdp_3\t0\t0_1_1_0\tdefault\t\t1\t1\t0\n" + "default\tsdp_3\t1\t1_2_2_0\tdefault\t\t2\t2\t0\n" + "default\tsdp_3\t\\N\tany_other_name\tdefault\t\\N\t\\N\t\\N\t\\N\n" + "default\tsdp_3\t0\tattaching_0_6_6_0\tdefault\tattaching\t6\t6\t0\n" + "default\tsdp_3\t6711e2b2592d86d18fc0f260cf33ef2b\tbroken-on-start_6711e2b2592d86d18fc0f260cf33ef2b_714380_714380_42_123\tdefault\tbroken-on-start\t714380\t714380\t42\n" + "default\tsdp_3\t202107\tbroken_202107_714380_714380_123\tdefault\tbroken\t714380\t714380\t123\n" + "default\tsdp_3\tall\tclone_all_714380_714380_42\tdefault\tclone\t714380\t714380\t42\n" + "default\tsdp_3\tall\tclone_all_714380_714380_42_123\tdefault\tclone\t714380\t714380\t42\n" + "default\tsdp_3\t0\tdeleting_0_7_7_0\tdefault\tdeleting\t7\t7\t0\n" + "default\tsdp_3\t202107\tignored_202107_714380_714380_0\tdefault\tignored\t714380\t714380\t0\n" "default\tsdp_3\t1\tprefix_1_2_2_0_0\tdefault\tprefix\t2\t2\t0\n" + ) for i in range(0, 4): - for p in q("select distinct partition_id from system.detached_parts where table='sdp_{}' and partition_id is not null".format(i))[:-1].split('\n'): + for p in q( + "select distinct partition_id from system.detached_parts where table='sdp_{}' and partition_id is not null".format( + i + ) + )[:-1].split("\n"): q("alter table sdp_{} attach partition id '{}'".format(i, p)) - assert q("select n, x, count() from merge('default', 'sdp_') group by n, x") == "0\t0\t4\n1\t1\t4\n" + assert ( + q("select n, x, count() from merge('default', 'sdp_') group by n, x") + == "0\t0\t4\n1\t1\t4\n" + ) def test_detached_part_dir_exists(started_cluster): q("create table detached_part_dir_exists (n int) engine=MergeTree order by n") q("insert into detached_part_dir_exists select 1") # will create all_1_1_0 - q("alter table detached_part_dir_exists detach partition id 'all'") # will move all_1_1_0 to detached/all_1_1_0 + q( + "alter table detached_part_dir_exists detach partition id 'all'" + ) # will move 
all_1_1_0 to detached/all_1_1_0 q("detach table detached_part_dir_exists") q("attach table detached_part_dir_exists") q("insert into detached_part_dir_exists select 1") # will create all_1_1_0 q("insert into detached_part_dir_exists select 1") # will create all_2_2_0 - instance.exec_in_container(['bash', '-c', 'mkdir /var/lib/clickhouse/data/default/detached_part_dir_exists/detached/all_2_2_0'], privileged=True) - instance.exec_in_container(['bash', '-c', 'touch /var/lib/clickhouse/data/default/detached_part_dir_exists/detached/all_2_2_0/file'], privileged=True) - q("alter table detached_part_dir_exists detach partition id 'all'") # directories already exist, but it's ok - assert q("select name from system.detached_parts where table='detached_part_dir_exists' order by name") == \ - "all_1_1_0\nall_1_1_0_try1\nall_2_2_0\nall_2_2_0_try1\n" + instance.exec_in_container( + [ + "bash", + "-c", + "mkdir /var/lib/clickhouse/data/default/detached_part_dir_exists/detached/all_2_2_0", + ], + privileged=True, + ) + instance.exec_in_container( + [ + "bash", + "-c", + "touch /var/lib/clickhouse/data/default/detached_part_dir_exists/detached/all_2_2_0/file", + ], + privileged=True, + ) + q( + "alter table detached_part_dir_exists detach partition id 'all'" + ) # directories already exist, but it's ok + assert ( + q( + "select name from system.detached_parts where table='detached_part_dir_exists' order by name" + ) + == "all_1_1_0\nall_1_1_0_try1\nall_2_2_0\nall_2_2_0_try1\n" + ) q("drop table detached_part_dir_exists") diff --git a/tests/integration/test_parts_delete_zookeeper/test.py b/tests/integration/test_parts_delete_zookeeper/test.py index 62e14b68bd1..956f7ab21e2 100644 --- a/tests/integration/test_parts_delete_zookeeper/test.py +++ b/tests/integration/test_parts_delete_zookeeper/test.py @@ -6,7 +6,9 @@ from helpers.network import PartitionManager from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -15,12 +17,12 @@ def start_cluster(): cluster.start() node1.query( - ''' + """ CREATE DATABASE test; CREATE TABLE test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/replicated', 'node1') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS old_parts_lifetime=4, cleanup_delay_period=1; - ''' + """ ) yield cluster @@ -34,31 +36,69 @@ def start_cluster(): # Test that outdated parts are not removed when they cannot be removed from zookeeper def test_merge_doesnt_work_without_zookeeper(start_cluster): - node1.query("INSERT INTO test_table VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)") - node1.query("INSERT INTO test_table VALUES ('2018-10-01', 4), ('2018-10-02', 5), ('2018-10-03', 6)") - assert node1.query("SELECT count(*) from system.parts where table = 'test_table'") == "2\n" + node1.query( + "INSERT INTO test_table VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)" + ) + node1.query( + "INSERT INTO test_table VALUES ('2018-10-01', 4), ('2018-10-02', 5), ('2018-10-03', 6)" + ) + assert ( + node1.query("SELECT count(*) from system.parts where table = 'test_table'") + == "2\n" + ) node1.query("OPTIMIZE TABLE test_table FINAL") - assert node1.query("SELECT count(*) from system.parts where table = 'test_table'") == "3\n" + assert ( + node1.query("SELECT count(*) 
from system.parts where table = 'test_table'") + == "3\n" + ) - assert_eq_with_retry(node1, "SELECT count(*) from system.parts where table = 'test_table' and active = 1", "1") + assert_eq_with_retry( + node1, + "SELECT count(*) from system.parts where table = 'test_table' and active = 1", + "1", + ) node1.query("TRUNCATE TABLE test_table") - assert node1.query("SELECT count(*) from system.parts where table = 'test_table'") == "0\n" + assert ( + node1.query("SELECT count(*) from system.parts where table = 'test_table'") + == "0\n" + ) - node1.query("INSERT INTO test_table VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)") - node1.query("INSERT INTO test_table VALUES ('2018-10-01', 4), ('2018-10-02', 5), ('2018-10-03', 6)") - assert node1.query("SELECT count(*) from system.parts where table = 'test_table' and active") == "2\n" + node1.query( + "INSERT INTO test_table VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)" + ) + node1.query( + "INSERT INTO test_table VALUES ('2018-10-01', 4), ('2018-10-02', 5), ('2018-10-03', 6)" + ) + assert ( + node1.query( + "SELECT count(*) from system.parts where table = 'test_table' and active" + ) + == "2\n" + ) with PartitionManager() as pm: node1.query("OPTIMIZE TABLE test_table FINAL") pm.drop_instance_zk_connections(node1) # unfortunately we can be too fast and delete node before partition with ZK - if node1.query("SELECT count(*) from system.parts where table = 'test_table'") == "1\n": + if ( + node1.query("SELECT count(*) from system.parts where table = 'test_table'") + == "1\n" + ): print("We were too fast and deleted parts before partition with ZK") else: time.sleep(10) # > old_parts_lifetime - assert node1.query("SELECT count(*) from system.parts where table = 'test_table'") == "3\n" + assert ( + node1.query( + "SELECT count(*) from system.parts where table = 'test_table'" + ) + == "3\n" + ) - assert_eq_with_retry(node1, "SELECT count(*) from system.parts where table = 'test_table' and active = 1", "1") + assert_eq_with_retry( + node1, + "SELECT count(*) from system.parts where table = 'test_table' and active = 1", + "1", + ) diff --git a/tests/integration/test_passing_max_partitions_to_read_remotely/test.py b/tests/integration/test_passing_max_partitions_to_read_remotely/test.py index 45b3dd00b2a..e64ca7ece33 100644 --- a/tests/integration/test_passing_max_partitions_to_read_remotely/test.py +++ b/tests/integration/test_passing_max_partitions_to_read_remotely/test.py @@ -23,6 +23,9 @@ def test_default_database_on_cluster(started_cluster): sql="CREATE TABLE test_local_table ENGINE MergeTree PARTITION BY i ORDER BY tuple() SETTINGS max_partitions_to_read = 1 AS SELECT arrayJoin([1, 2]) i;", ) - assert ch2.query( - sql="SELECT * FROM remote('ch1:9000', test_default_database, test_local_table) ORDER BY i FORMAT TSV SETTINGS max_partitions_to_read = 0;", - ) == "1\n2\n" + assert ( + ch2.query( + sql="SELECT * FROM remote('ch1:9000', test_default_database, test_local_table) ORDER BY i FORMAT TSV SETTINGS max_partitions_to_read = 0;", + ) + == "1\n2\n" + ) diff --git a/tests/integration/test_polymorphic_parts/test.py b/tests/integration/test_polymorphic_parts/test.py index 9fe3ef77da8..ba40b46c586 100644 --- a/tests/integration/test_polymorphic_parts/test.py +++ b/tests/integration/test_polymorphic_parts/test.py @@ -18,26 +18,33 @@ def get_random_array(): def get_random_string(): length = random.randint(0, 1000) - return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length)) + return "".join( + 
random.choice(string.ascii_uppercase + string.digits) for _ in range(length) + ) def insert_random_data(table, node, size): data = [ - '(' + ','.join(( - "'2019-10-11'", - str(i), - "'" + get_random_string() + "'", - str(get_random_array()))) + - ')' for i in range(size) + "(" + + ",".join( + ( + "'2019-10-11'", + str(i), + "'" + get_random_string() + "'", + str(get_random_array()), + ) + ) + + ")" + for i in range(size) ] - node.query("INSERT INTO {} VALUES {}".format(table, ','.join(data))) + node.query("INSERT INTO {} VALUES {}".format(table, ",".join(data))) def create_tables(name, nodes, node_settings, shard): for i, (node, settings) in enumerate(zip(nodes, node_settings)): node.query( - ''' + """ CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{shard}/{name}', '{repl}') PARTITION BY toYYYYMM(date) @@ -46,46 +53,105 @@ def create_tables(name, nodes, node_settings, shard): min_rows_for_wide_part = {min_rows_for_wide_part}, min_rows_for_compact_part = {min_rows_for_compact_part}, min_bytes_for_wide_part = 0, min_bytes_for_compact_part = 0, in_memory_parts_enable_wal = 1 - '''.format(name=name, shard=shard, repl=i, **settings)) + """.format( + name=name, shard=shard, repl=i, **settings + ) + ) + def create_tables_old_format(name, nodes, shard): for i, node in enumerate(nodes): node.query( - ''' + """ CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{shard}/{name}', '{repl}', date, id, 64) - '''.format(name=name, shard=shard, repl=i)) + """.format( + name=name, shard=shard, repl=i + ) + ) -node1 = cluster.add_instance('node1', main_configs=[], user_configs=["configs/users.d/not_optimize_count.xml"], - with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=[], user_configs=["configs/users.d/not_optimize_count.xml"], - with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + main_configs=[], + user_configs=["configs/users.d/not_optimize_count.xml"], + with_zookeeper=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=[], + user_configs=["configs/users.d/not_optimize_count.xml"], + with_zookeeper=True, +) -settings_default = {'index_granularity_bytes': 10485760, 'min_rows_for_wide_part': 512, 'min_rows_for_compact_part': 0} -settings_compact_only = {'index_granularity_bytes': 10485760, 'min_rows_for_wide_part': 1000000, - 'min_rows_for_compact_part': 0} -settings_not_adaptive = {'index_granularity_bytes': 0, 'min_rows_for_wide_part': 512, 'min_rows_for_compact_part': 0} +settings_default = { + "index_granularity_bytes": 10485760, + "min_rows_for_wide_part": 512, + "min_rows_for_compact_part": 0, +} +settings_compact_only = { + "index_granularity_bytes": 10485760, + "min_rows_for_wide_part": 1000000, + "min_rows_for_compact_part": 0, +} +settings_not_adaptive = { + "index_granularity_bytes": 0, + "min_rows_for_wide_part": 512, + "min_rows_for_compact_part": 0, +} -node3 = cluster.add_instance('node3', main_configs=[], user_configs=["configs/users.d/not_optimize_count.xml"], - with_zookeeper=True) -node4 = cluster.add_instance('node4', user_configs=["configs/users.d/not_optimize_count.xml"], - main_configs=['configs/no_leader.xml'], with_zookeeper=True) +node3 = cluster.add_instance( + "node3", + main_configs=[], + user_configs=["configs/users.d/not_optimize_count.xml"], + with_zookeeper=True, +) +node4 = cluster.add_instance( + "node4", + user_configs=["configs/users.d/not_optimize_count.xml"], + 
main_configs=["configs/no_leader.xml"], + with_zookeeper=True, +) -settings_compact = {'index_granularity_bytes': 10485760, 'min_rows_for_wide_part': 512, 'min_rows_for_compact_part': 0} -settings_wide = {'index_granularity_bytes': 10485760, 'min_rows_for_wide_part': 0, 'min_rows_for_compact_part': 0} +settings_compact = { + "index_granularity_bytes": 10485760, + "min_rows_for_wide_part": 512, + "min_rows_for_compact_part": 0, +} +settings_wide = { + "index_granularity_bytes": 10485760, + "min_rows_for_wide_part": 0, + "min_rows_for_compact_part": 0, +} -node5 = cluster.add_instance('node5', main_configs=['configs/compact_parts.xml'], with_zookeeper=True) -node6 = cluster.add_instance('node6', main_configs=['configs/compact_parts.xml'], with_zookeeper=True) +node5 = cluster.add_instance( + "node5", main_configs=["configs/compact_parts.xml"], with_zookeeper=True +) +node6 = cluster.add_instance( + "node6", main_configs=["configs/compact_parts.xml"], with_zookeeper=True +) -settings_in_memory = {'index_granularity_bytes': 10485760, 'min_rows_for_wide_part': 512, - 'min_rows_for_compact_part': 256} +settings_in_memory = { + "index_granularity_bytes": 10485760, + "min_rows_for_wide_part": 512, + "min_rows_for_compact_part": 256, +} -node9 = cluster.add_instance('node9', with_zookeeper=True, stay_alive=True) -node10 = cluster.add_instance('node10', with_zookeeper=True) +node9 = cluster.add_instance("node9", with_zookeeper=True, stay_alive=True) +node10 = cluster.add_instance("node10", with_zookeeper=True) -node11 = cluster.add_instance('node11', main_configs=['configs/do_not_merge.xml'], with_zookeeper=True, stay_alive=True) -node12 = cluster.add_instance('node12', main_configs=['configs/do_not_merge.xml'], with_zookeeper=True, stay_alive=True) +node11 = cluster.add_instance( + "node11", + main_configs=["configs/do_not_merge.xml"], + with_zookeeper=True, + stay_alive=True, +) +node12 = cluster.add_instance( + "node12", + main_configs=["configs/do_not_merge.xml"], + with_zookeeper=True, + stay_alive=True, +) @pytest.fixture(scope="module") @@ -93,18 +159,73 @@ def start_cluster(): try: cluster.start() - create_tables('polymorphic_table', [node1, node2], [settings_default, settings_default], "shard1") - create_tables('compact_parts_only', [node1, node2], [settings_compact_only, settings_compact_only], "shard1") - create_tables('non_adaptive_table', [node1, node2], [settings_not_adaptive, settings_not_adaptive], "shard1") - create_tables('polymorphic_table_compact', [node3, node4], [settings_compact, settings_wide], "shard2") - create_tables('polymorphic_table_wide', [node3, node4], [settings_wide, settings_compact], "shard2") - create_tables_old_format('polymorphic_table', [node5, node6], "shard3") - create_tables('in_memory_table', [node9, node10], [settings_in_memory, settings_in_memory], "shard4") - create_tables('wal_table', [node11, node12], [settings_in_memory, settings_in_memory], "shard4") - create_tables('restore_table', [node11, node12], [settings_in_memory, settings_in_memory], "shard5") - create_tables('deduplication_table', [node9, node10], [settings_in_memory, settings_in_memory], "shard5") - create_tables('sync_table', [node9, node10], [settings_in_memory, settings_in_memory], "shard5") - create_tables('alters_table', [node9, node10], [settings_in_memory, settings_in_memory], "shard5") + create_tables( + "polymorphic_table", + [node1, node2], + [settings_default, settings_default], + "shard1", + ) + create_tables( + "compact_parts_only", + [node1, node2], + [settings_compact_only, 
settings_compact_only], + "shard1", + ) + create_tables( + "non_adaptive_table", + [node1, node2], + [settings_not_adaptive, settings_not_adaptive], + "shard1", + ) + create_tables( + "polymorphic_table_compact", + [node3, node4], + [settings_compact, settings_wide], + "shard2", + ) + create_tables( + "polymorphic_table_wide", + [node3, node4], + [settings_wide, settings_compact], + "shard2", + ) + create_tables_old_format("polymorphic_table", [node5, node6], "shard3") + create_tables( + "in_memory_table", + [node9, node10], + [settings_in_memory, settings_in_memory], + "shard4", + ) + create_tables( + "wal_table", + [node11, node12], + [settings_in_memory, settings_in_memory], + "shard4", + ) + create_tables( + "restore_table", + [node11, node12], + [settings_in_memory, settings_in_memory], + "shard5", + ) + create_tables( + "deduplication_table", + [node9, node10], + [settings_in_memory, settings_in_memory], + "shard5", + ) + create_tables( + "sync_table", + [node9, node10], + [settings_in_memory, settings_in_memory], + "shard5", + ) + create_tables( + "alters_table", + [node9, node10], + [settings_in_memory, settings_in_memory], + "shard5", + ) yield cluster @@ -113,18 +234,18 @@ def start_cluster(): @pytest.mark.parametrize( - ('first_node', 'second_node'), + ("first_node", "second_node"), [ (node1, node2), # compact parts (node5, node6), # compact parts, old-format - ] + ], ) def test_polymorphic_parts_basics(start_cluster, first_node, second_node): first_node.query("SYSTEM STOP MERGES") second_node.query("SYSTEM STOP MERGES") for size in [300, 300, 600]: - insert_random_data('polymorphic_table', first_node, size) + insert_random_data("polymorphic_table", first_node, size) second_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20) assert first_node.query("SELECT count() FROM polymorphic_table") == "1200\n" @@ -132,19 +253,25 @@ def test_polymorphic_parts_basics(start_cluster, first_node, second_node): expected = "Compact\t2\nWide\t1\n" - assert TSV(first_node.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = 'polymorphic_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV( - expected) - assert TSV(second_node.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = 'polymorphic_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV( - expected) + assert TSV( + first_node.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = 'polymorphic_table' AND active GROUP BY part_type ORDER BY part_type" + ) + ) == TSV(expected) + assert TSV( + second_node.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = 'polymorphic_table' AND active GROUP BY part_type ORDER BY part_type" + ) + ) == TSV(expected) first_node.query("SYSTEM START MERGES") second_node.query("SYSTEM START MERGES") for _ in range(40): - insert_random_data('polymorphic_table', first_node, 10) - insert_random_data('polymorphic_table', second_node, 10) + insert_random_data("polymorphic_table", first_node, 10) + insert_random_data("polymorphic_table", second_node, 10) first_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20) second_node.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20) @@ -158,10 +285,18 @@ def test_polymorphic_parts_basics(start_cluster, first_node, second_node): assert first_node.query("SELECT count() FROM polymorphic_table") == "2000\n" assert second_node.query("SELECT count() FROM polymorphic_table") == "2000\n" - assert first_node.query( - "SELECT DISTINCT part_type FROM 
system.parts WHERE table = 'polymorphic_table' AND active") == "Wide\n" - assert second_node.query( - "SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table' AND active") == "Wide\n" + assert ( + first_node.query( + "SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table' AND active" + ) + == "Wide\n" + ) + assert ( + second_node.query( + "SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table' AND active" + ) + == "Wide\n" + ) # Check alters and mutations also work first_node.query("ALTER TABLE polymorphic_table ADD COLUMN ss String") @@ -179,8 +314,8 @@ def test_polymorphic_parts_basics(start_cluster, first_node, second_node): # Checks mostly that merge from compact part to compact part works. def test_compact_parts_only(start_cluster): for i in range(20): - insert_random_data('compact_parts_only', node1, 100) - insert_random_data('compact_parts_only', node2, 100) + insert_random_data("compact_parts_only", node1, 100) + insert_random_data("compact_parts_only", node2, 100) node1.query("SYSTEM SYNC REPLICA compact_parts_only", timeout=20) node2.query("SYSTEM SYNC REPLICA compact_parts_only", timeout=20) @@ -188,38 +323,59 @@ def test_compact_parts_only(start_cluster): assert node1.query("SELECT count() FROM compact_parts_only") == "4000\n" assert node2.query("SELECT count() FROM compact_parts_only") == "4000\n" - assert node1.query( - "SELECT DISTINCT part_type FROM system.parts WHERE table = 'compact_parts_only' AND active") == "Compact\n" - assert node2.query( - "SELECT DISTINCT part_type FROM system.parts WHERE table = 'compact_parts_only' AND active") == "Compact\n" + assert ( + node1.query( + "SELECT DISTINCT part_type FROM system.parts WHERE table = 'compact_parts_only' AND active" + ) + == "Compact\n" + ) + assert ( + node2.query( + "SELECT DISTINCT part_type FROM system.parts WHERE table = 'compact_parts_only' AND active" + ) + == "Compact\n" + ) node1.query("OPTIMIZE TABLE compact_parts_only FINAL") node2.query("SYSTEM SYNC REPLICA compact_parts_only", timeout=20) assert node2.query("SELECT count() FROM compact_parts_only") == "4000\n" expected = "Compact\t1\n" - assert TSV(node1.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = 'compact_parts_only' AND active GROUP BY part_type ORDER BY part_type")) == TSV( - expected) - assert TSV(node2.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = 'compact_parts_only' AND active GROUP BY part_type ORDER BY part_type")) == TSV( - expected) + assert TSV( + node1.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = 'compact_parts_only' AND active GROUP BY part_type ORDER BY part_type" + ) + ) == TSV(expected) + assert TSV( + node2.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = 'compact_parts_only' AND active GROUP BY part_type ORDER BY part_type" + ) + ) == TSV(expected) # Check that follower replicas create parts of the same type, which leader has chosen at merge. 
@pytest.mark.parametrize( - ('table', 'part_type'), - [ - ('polymorphic_table_compact', 'Compact'), - ('polymorphic_table_wide', 'Wide') - ] + ("table", "part_type"), + [("polymorphic_table_compact", "Compact"), ("polymorphic_table_wide", "Wide")], ) def test_different_part_types_on_replicas(start_cluster, table, part_type): leader = node3 follower = node4 - assert leader.query("SELECT is_leader FROM system.replicas WHERE table = '{}'".format(table)) == "1\n" - assert node4.query("SELECT is_leader FROM system.replicas WHERE table = '{}'".format(table)) == "0\n" + assert ( + leader.query( + "SELECT is_leader FROM system.replicas WHERE table = '{}'".format(table) + ) + == "1\n" + ) + assert ( + node4.query( + "SELECT is_leader FROM system.replicas WHERE table = '{}'".format(table) + ) + == "0\n" + ) for _ in range(3): insert_random_data(table, leader, 100) @@ -229,47 +385,75 @@ def test_different_part_types_on_replicas(start_cluster, table, part_type): expected = "{}\t1\n".format(part_type) - assert TSV(leader.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = '{}' AND active GROUP BY part_type ORDER BY part_type".format( - table))) == TSV(expected) - assert TSV(follower.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = '{}' AND active GROUP BY part_type ORDER BY part_type".format( - table))) == TSV(expected) + assert TSV( + leader.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = '{}' AND active GROUP BY part_type ORDER BY part_type".format( + table + ) + ) + ) == TSV(expected) + assert TSV( + follower.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = '{}' AND active GROUP BY part_type ORDER BY part_type".format( + table + ) + ) + ) == TSV(expected) -node7 = cluster.add_instance('node7', user_configs=["configs_old/users.d/not_optimize_count.xml"], with_zookeeper=True, - image='yandex/clickhouse-server', tag='19.17.8.54', stay_alive=True, - with_installed_binary=True) -node8 = cluster.add_instance('node8', user_configs=["configs/users.d/not_optimize_count.xml"], with_zookeeper=True) +node7 = cluster.add_instance( + "node7", + user_configs=["configs_old/users.d/not_optimize_count.xml"], + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="19.17.8.54", + stay_alive=True, + with_installed_binary=True, +) +node8 = cluster.add_instance( + "node8", + user_configs=["configs/users.d/not_optimize_count.xml"], + with_zookeeper=True, +) -settings7 = {'index_granularity_bytes': 10485760} -settings8 = {'index_granularity_bytes': 10485760, 'min_rows_for_wide_part': 512, 'min_rows_for_compact_part': 0} +settings7 = {"index_granularity_bytes": 10485760} +settings8 = { + "index_granularity_bytes": 10485760, + "min_rows_for_wide_part": 512, + "min_rows_for_compact_part": 0, +} @pytest.fixture(scope="module") def start_cluster_diff_versions(): try: - for name in ['polymorphic_table', 'polymorphic_table_2']: + for name in ["polymorphic_table", "polymorphic_table_2"]: cluster.start() node7.query( - ''' + """ CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/shard5/{name}', '1') PARTITION BY toYYYYMM(date) ORDER BY id SETTINGS index_granularity = 64, index_granularity_bytes = {index_granularity_bytes} - '''.format(name=name, **settings7) + """.format( + name=name, **settings7 + ) ) node8.query( - ''' + """ CREATE TABLE {name}(date Date, id UInt32, s String, arr Array(Int32)) ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/test/shard5/{name}', '2') PARTITION BY toYYYYMM(date) ORDER BY id SETTINGS index_granularity = 64, index_granularity_bytes = {index_granularity_bytes}, min_rows_for_wide_part = {min_rows_for_wide_part}, min_bytes_for_wide_part = {min_bytes_for_wide_part} - '''.format(name=name, **settings8) + """.format( + name=name, **settings8 + ) ) yield cluster @@ -285,12 +469,16 @@ def test_polymorphic_parts_diff_versions(start_cluster_diff_versions): node_old = node7 node_new = node8 - insert_random_data('polymorphic_table', node7, 100) + insert_random_data("polymorphic_table", node7, 100) node8.query("SYSTEM SYNC REPLICA polymorphic_table", timeout=20) assert node8.query("SELECT count() FROM polymorphic_table") == "100\n" - assert node8.query( - "SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table' and active") == "Wide\n" + assert ( + node8.query( + "SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table' and active" + ) + == "Wide\n" + ) @pytest.mark.skip(reason="compatability is temporary broken") @@ -301,7 +489,7 @@ def test_polymorphic_parts_diff_versions_2(start_cluster_diff_versions): node_old = node7 node_new = node8 - insert_random_data('polymorphic_table_2', node_new, 100) + insert_random_data("polymorphic_table_2", node_new, 100) assert node_new.query("SELECT count() FROM polymorphic_table_2") == "100\n" assert node_old.query("SELECT count() FROM polymorphic_table_2") == "0\n" @@ -314,29 +502,40 @@ def test_polymorphic_parts_diff_versions_2(start_cluster_diff_versions): # Works after update assert node_old.query("SELECT count() FROM polymorphic_table_2") == "100\n" - assert node_old.query( - "SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table_2' and active") == "Compact\n" + assert ( + node_old.query( + "SELECT DISTINCT part_type FROM system.parts WHERE table = 'polymorphic_table_2' and active" + ) + == "Compact\n" + ) def test_polymorphic_parts_non_adaptive(start_cluster): node1.query("SYSTEM STOP MERGES") node2.query("SYSTEM STOP MERGES") - insert_random_data('non_adaptive_table', node1, 100) + insert_random_data("non_adaptive_table", node1, 100) node2.query("SYSTEM SYNC REPLICA non_adaptive_table", timeout=20) - insert_random_data('non_adaptive_table', node2, 100) + insert_random_data("non_adaptive_table", node2, 100) node1.query("SYSTEM SYNC REPLICA non_adaptive_table", timeout=20) - assert TSV(node1.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = 'non_adaptive_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV( - "Wide\t2\n") - assert TSV(node2.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = 'non_adaptive_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV( - "Wide\t2\n") + assert TSV( + node1.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = 'non_adaptive_table' AND active GROUP BY part_type ORDER BY part_type" + ) + ) == TSV("Wide\t2\n") + assert TSV( + node2.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = 'non_adaptive_table' AND active GROUP BY part_type ORDER BY part_type" + ) + ) == TSV("Wide\t2\n") assert node1.contains_in_log( - " default.non_adaptive_table ([0-9a-f-]*): Table can't create parts with adaptive granularity") + " default.non_adaptive_table ([0-9a-f-]*): Table can't create parts with adaptive granularity" + ) def test_in_memory(start_cluster): @@ -344,7 +543,7 @@ def test_in_memory(start_cluster): node10.query("SYSTEM STOP 
MERGES") for size in [200, 200, 300, 600]: - insert_random_data('in_memory_table', node9, size) + insert_random_data("in_memory_table", node9, size) node10.query("SYSTEM SYNC REPLICA in_memory_table", timeout=20) assert node9.query("SELECT count() FROM in_memory_table") == "1300\n" @@ -352,81 +551,148 @@ def test_in_memory(start_cluster): expected = "Compact\t1\nInMemory\t2\nWide\t1\n" - assert TSV(node9.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV( - expected) - assert TSV(node10.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV( - expected) + assert TSV( + node9.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type" + ) + ) == TSV(expected) + assert TSV( + node10.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type" + ) + ) == TSV(expected) node9.query("SYSTEM START MERGES") node10.query("SYSTEM START MERGES") - assert_eq_with_retry(node9, "OPTIMIZE TABLE in_memory_table FINAL SETTINGS optimize_throw_if_noop = 1", "") + assert_eq_with_retry( + node9, + "OPTIMIZE TABLE in_memory_table FINAL SETTINGS optimize_throw_if_noop = 1", + "", + ) node10.query("SYSTEM SYNC REPLICA in_memory_table", timeout=20) assert node9.query("SELECT count() FROM in_memory_table") == "1300\n" assert node10.query("SELECT count() FROM in_memory_table") == "1300\n" - assert TSV(node9.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV( - "Wide\t1\n") - assert TSV(node10.query("SELECT part_type, count() FROM system.parts " \ - "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type")) == TSV( - "Wide\t1\n") + assert TSV( + node9.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type" + ) + ) == TSV("Wide\t1\n") + assert TSV( + node10.query( + "SELECT part_type, count() FROM system.parts " + "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type" + ) + ) == TSV("Wide\t1\n") def test_in_memory_wal_rotate(start_cluster): # Write every part to single wal - node11.query("ALTER TABLE restore_table MODIFY SETTING write_ahead_log_max_bytes = 10") + node11.query( + "ALTER TABLE restore_table MODIFY SETTING write_ahead_log_max_bytes = 10" + ) for i in range(5): - insert_random_data('restore_table', node11, 50) + insert_random_data("restore_table", node11, 50) for i in range(5): # Check file exists - node11.exec_in_container(['bash', '-c', 'test -f /var/lib/clickhouse/data/default/restore_table/wal_{0}_{0}.bin'.format(i)]) + node11.exec_in_container( + [ + "bash", + "-c", + "test -f /var/lib/clickhouse/data/default/restore_table/wal_{0}_{0}.bin".format( + i + ), + ] + ) for node in [node11, node12]: node.query( - "ALTER TABLE restore_table MODIFY SETTING number_of_free_entries_in_pool_to_lower_max_size_of_merge = 0") - node.query("ALTER TABLE restore_table MODIFY SETTING max_bytes_to_merge_at_max_space_in_pool = 10000000") + "ALTER TABLE restore_table MODIFY SETTING number_of_free_entries_in_pool_to_lower_max_size_of_merge = 0" + ) + node.query( + "ALTER TABLE restore_table MODIFY SETTING max_bytes_to_merge_at_max_space_in_pool 
= 10000000" + ) - assert_eq_with_retry(node11, "OPTIMIZE TABLE restore_table FINAL SETTINGS optimize_throw_if_noop = 1", "") + assert_eq_with_retry( + node11, + "OPTIMIZE TABLE restore_table FINAL SETTINGS optimize_throw_if_noop = 1", + "", + ) # Restart to be sure, that clearing stale logs task was ran node11.restart_clickhouse(kill=True) for i in range(5): # check file doesn't exist - node11.exec_in_container(['bash', '-c', 'test ! -e /var/lib/clickhouse/data/default/restore_table/wal_{0}_{0}.bin'.format(i)]) + node11.exec_in_container( + [ + "bash", + "-c", + "test ! -e /var/lib/clickhouse/data/default/restore_table/wal_{0}_{0}.bin".format( + i + ), + ] + ) # New wal file was created and ready to write part to it # Check file exists - node11.exec_in_container(['bash', '-c', 'test -f /var/lib/clickhouse/data/default/restore_table/wal.bin']) + node11.exec_in_container( + ["bash", "-c", "test -f /var/lib/clickhouse/data/default/restore_table/wal.bin"] + ) # Chech file empty - node11.exec_in_container(['bash', '-c', 'test ! -s /var/lib/clickhouse/data/default/restore_table/wal.bin']) + node11.exec_in_container( + [ + "bash", + "-c", + "test ! -s /var/lib/clickhouse/data/default/restore_table/wal.bin", + ] + ) def test_in_memory_deduplication(start_cluster): for i in range(3): # table can be in readonly node - exec_query_with_retry(node9, "INSERT INTO deduplication_table (date, id, s) VALUES (toDate('2020-03-03'), 1, 'foo')") - exec_query_with_retry(node10, "INSERT INTO deduplication_table (date, id, s) VALUES (toDate('2020-03-03'), 1, 'foo')") + exec_query_with_retry( + node9, + "INSERT INTO deduplication_table (date, id, s) VALUES (toDate('2020-03-03'), 1, 'foo')", + ) + exec_query_with_retry( + node10, + "INSERT INTO deduplication_table (date, id, s) VALUES (toDate('2020-03-03'), 1, 'foo')", + ) node9.query("SYSTEM SYNC REPLICA deduplication_table", timeout=20) node10.query("SYSTEM SYNC REPLICA deduplication_table", timeout=20) - assert node9.query("SELECT date, id, s FROM deduplication_table") == "2020-03-03\t1\tfoo\n" - assert node10.query("SELECT date, id, s FROM deduplication_table") == "2020-03-03\t1\tfoo\n" + assert ( + node9.query("SELECT date, id, s FROM deduplication_table") + == "2020-03-03\t1\tfoo\n" + ) + assert ( + node10.query("SELECT date, id, s FROM deduplication_table") + == "2020-03-03\t1\tfoo\n" + ) # Checks that restoring from WAL works after table schema changed def test_in_memory_alters(start_cluster): def check_parts_type(parts_num): - assert node9.query("SELECT part_type, count() FROM system.parts WHERE table = 'alters_table' \ - AND active GROUP BY part_type") == "InMemory\t{}\n".format(parts_num) + assert ( + node9.query( + "SELECT part_type, count() FROM system.parts WHERE table = 'alters_table' \ + AND active GROUP BY part_type" + ) + == "InMemory\t{}\n".format(parts_num) + ) node9.query( - "INSERT INTO alters_table (date, id, s) VALUES (toDate('2020-10-10'), 1, 'ab'), (toDate('2020-10-10'), 2, 'cd')") + "INSERT INTO alters_table (date, id, s) VALUES (toDate('2020-10-10'), 1, 'ab'), (toDate('2020-10-10'), 2, 'cd')" + ) node9.query("ALTER TABLE alters_table ADD COLUMN col1 UInt32") node9.restart_clickhouse(kill=True) @@ -434,7 +700,10 @@ def test_in_memory_alters(start_cluster): assert node9.query("SELECT id, s, col1 FROM alters_table ORDER BY id") == expected check_parts_type(1) # After hard restart table can be in readonly mode - exec_query_with_retry(node9, "INSERT INTO alters_table (date, id, col1) VALUES (toDate('2020-10-10'), 3, 100)") + 
exec_query_with_retry( + node9, + "INSERT INTO alters_table (date, id, col1) VALUES (toDate('2020-10-10'), 3, 100)", + ) node9.query("ALTER TABLE alters_table MODIFY COLUMN col1 String") node9.query("ALTER TABLE alters_table DROP COLUMN s") node9.restart_clickhouse(kill=True) @@ -446,26 +715,49 @@ def test_in_memory_alters(start_cluster): # Values of col1 was not materialized as integers, so they have # default string values after alter expected = "1\t_foo\n2\t_foo\n3\t100_foo\n" - assert node9.query("SELECT id, col1 || '_foo' FROM alters_table ORDER BY id") == expected + assert ( + node9.query("SELECT id, col1 || '_foo' FROM alters_table ORDER BY id") + == expected + ) def test_polymorphic_parts_index(start_cluster): - node1.query('CREATE DATABASE test_index ENGINE=Ordinary') # Different paths with Atomic - node1.query(''' + node1.query( + "CREATE DATABASE test_index ENGINE=Ordinary" + ) # Different paths with Atomic + node1.query( + """ CREATE TABLE test_index.index_compact(a UInt32, s String) ENGINE = MergeTree ORDER BY a - SETTINGS min_rows_for_wide_part = 1000, index_granularity = 128, merge_max_block_size = 100''') + SETTINGS min_rows_for_wide_part = 1000, index_granularity = 128, merge_max_block_size = 100""" + ) - node1.query("INSERT INTO test_index.index_compact SELECT number, toString(number) FROM numbers(100)") - node1.query("INSERT INTO test_index.index_compact SELECT number, toString(number) FROM numbers(30)") + node1.query( + "INSERT INTO test_index.index_compact SELECT number, toString(number) FROM numbers(100)" + ) + node1.query( + "INSERT INTO test_index.index_compact SELECT number, toString(number) FROM numbers(30)" + ) node1.query("OPTIMIZE TABLE test_index.index_compact FINAL") - assert node1.query("SELECT part_type FROM system.parts WHERE table = 'index_compact' AND active") == "Compact\n" - assert node1.query("SELECT marks FROM system.parts WHERE table = 'index_compact' AND active") == "2\n" + assert ( + node1.query( + "SELECT part_type FROM system.parts WHERE table = 'index_compact' AND active" + ) + == "Compact\n" + ) + assert ( + node1.query( + "SELECT marks FROM system.parts WHERE table = 'index_compact' AND active" + ) + == "2\n" + ) - index_path = os.path.join(node1.path, "database/data/test_index/index_compact/all_1_2_1/primary.idx") - f = open(index_path, 'rb') + index_path = os.path.join( + node1.path, "database/data/test_index/index_compact/all_1_2_1/primary.idx" + ) + f = open(index_path, "rb") assert os.path.getsize(index_path) == 8 - assert struct.unpack('I', f.read(4))[0] == 0 - assert struct.unpack('I', f.read(4))[0] == 99 + assert struct.unpack("I", f.read(4))[0] == 0 + assert struct.unpack("I", f.read(4))[0] == 99 diff --git a/tests/integration/test_postgresql_database_engine/test.py b/tests/integration/test_postgresql_database_engine/test.py index 855f365a438..dd5b3a09ca5 100644 --- a/tests/integration/test_postgresql_database_engine/test.py +++ b/tests/integration/test_postgresql_database_engine/test.py @@ -6,7 +6,9 @@ from helpers.test_tools import assert_eq_with_retry from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=["configs/named_collections.xml"], with_postgres=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/named_collections.xml"], with_postgres=True +) postgres_table_template = """ CREATE TABLE {} ( @@ -17,6 +19,7 @@ postgres_drop_table_template = """ DROP TABLE {} """ + def get_postgres_conn(cluster, database=False): if 
database == True: conn_string = f"host={cluster.postgres_ip} port={cluster.postgres_port} dbname='test_database' user='postgres' password='mysecretpassword'" @@ -27,24 +30,28 @@ def get_postgres_conn(cluster, database=False): conn.autocommit = True return conn + def create_postgres_db(cursor, name): cursor.execute("CREATE DATABASE {}".format(name)) + def create_postgres_table(cursor, table_name): # database was specified in connection string cursor.execute(postgres_table_template.format(table_name)) + def drop_postgres_table(cursor, table_name): # database was specified in connection string cursor.execute(postgres_drop_table_template.format(table_name)) + @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() conn = get_postgres_conn(cluster) cursor = conn.cursor() - create_postgres_db(cursor, 'test_database') + create_postgres_db(cursor, "test_database") yield cluster finally: @@ -57,22 +64,27 @@ def test_postgres_database_engine_with_postgres_ddl(started_cluster): cursor = conn.cursor() node1.query( - "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword')") - assert 'test_database' in node1.query('SHOW DATABASES') + "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword')" + ) + assert "test_database" in node1.query("SHOW DATABASES") - create_postgres_table(cursor, 'test_table') - assert 'test_table' in node1.query('SHOW TABLES FROM test_database') + create_postgres_table(cursor, "test_table") + assert "test_table" in node1.query("SHOW TABLES FROM test_database") - cursor.execute('ALTER TABLE test_table ADD COLUMN data Text') - assert 'data' in node1.query("SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") + cursor.execute("ALTER TABLE test_table ADD COLUMN data Text") + assert "data" in node1.query( + "SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'" + ) - cursor.execute('ALTER TABLE test_table DROP COLUMN data') - assert 'data' not in node1.query("SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'") + cursor.execute("ALTER TABLE test_table DROP COLUMN data") + assert "data" not in node1.query( + "SELECT name FROM system.columns WHERE table = 'test_table' AND database = 'test_database'" + ) node1.query("DROP DATABASE test_database") - assert 'test_database' not in node1.query('SHOW DATABASES') + assert "test_database" not in node1.query("SHOW DATABASES") - drop_postgres_table(cursor, 'test_table') + drop_postgres_table(cursor, "test_table") def test_postgresql_database_engine_with_clickhouse_ddl(started_cluster): @@ -80,27 +92,28 @@ def test_postgresql_database_engine_with_clickhouse_ddl(started_cluster): cursor = conn.cursor() node1.query( - "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword')") + "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword')" + ) - create_postgres_table(cursor, 'test_table') - assert 'test_table' in node1.query('SHOW TABLES FROM test_database') + create_postgres_table(cursor, "test_table") + assert "test_table" in node1.query("SHOW TABLES FROM test_database") node1.query("DROP TABLE test_database.test_table") - assert 'test_table' not in node1.query('SHOW TABLES FROM test_database') + assert "test_table" not in node1.query("SHOW TABLES FROM test_database") 
node1.query("ATTACH TABLE test_database.test_table") - assert 'test_table' in node1.query('SHOW TABLES FROM test_database') + assert "test_table" in node1.query("SHOW TABLES FROM test_database") node1.query("DETACH TABLE test_database.test_table") - assert 'test_table' not in node1.query('SHOW TABLES FROM test_database') + assert "test_table" not in node1.query("SHOW TABLES FROM test_database") node1.query("ATTACH TABLE test_database.test_table") - assert 'test_table' in node1.query('SHOW TABLES FROM test_database') + assert "test_table" in node1.query("SHOW TABLES FROM test_database") node1.query("DROP DATABASE test_database") - assert 'test_database' not in node1.query('SHOW DATABASES') + assert "test_database" not in node1.query("SHOW DATABASES") - drop_postgres_table(cursor, 'test_table') + drop_postgres_table(cursor, "test_table") def test_postgresql_database_engine_queries(started_cluster): @@ -108,19 +121,24 @@ def test_postgresql_database_engine_queries(started_cluster): cursor = conn.cursor() node1.query( - "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword')") + "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword')" + ) - create_postgres_table(cursor, 'test_table') - assert node1.query("SELECT count() FROM test_database.test_table").rstrip() == '0' + create_postgres_table(cursor, "test_table") + assert node1.query("SELECT count() FROM test_database.test_table").rstrip() == "0" - node1.query("INSERT INTO test_database.test_table SELECT number, number from numbers(10000)") - assert node1.query("SELECT count() FROM test_database.test_table").rstrip() == '10000' + node1.query( + "INSERT INTO test_database.test_table SELECT number, number from numbers(10000)" + ) + assert ( + node1.query("SELECT count() FROM test_database.test_table").rstrip() == "10000" + ) - drop_postgres_table(cursor, 'test_table') - assert 'test_table' not in node1.query('SHOW TABLES FROM test_database') + drop_postgres_table(cursor, "test_table") + assert "test_table" not in node1.query("SHOW TABLES FROM test_database") node1.query("DROP DATABASE test_database") - assert 'test_database' not in node1.query('SHOW DATABASES') + assert "test_database" not in node1.query("SHOW DATABASES") def test_get_create_table_query_with_multidim_arrays(started_cluster): @@ -128,33 +146,40 @@ def test_get_create_table_query_with_multidim_arrays(started_cluster): cursor = conn.cursor() node1.query( - "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword')") + "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword')" + ) - cursor.execute(""" + cursor.execute( + """ CREATE TABLE array_columns ( b Integer[][][] NOT NULL, c Integer[][][] - )""") + )""" + ) node1.query("DETACH TABLE test_database.array_columns") node1.query("ATTACH TABLE test_database.array_columns") - node1.query("INSERT INTO test_database.array_columns " + node1.query( + "INSERT INTO test_database.array_columns " "VALUES (" "[[[1, 1], [1, 1]], [[3, 3], [3, 3]], [[4, 4], [5, 5]]], " "[[[1, NULL], [NULL, 1]], [[NULL, NULL], [NULL, NULL]], [[4, 4], [5, 5]]] " - ")") - result = node1.query(''' - SELECT * FROM test_database.array_columns''') + ")" + ) + result = node1.query( + """ + SELECT * FROM test_database.array_columns""" + ) expected = ( "[[[1,1],[1,1]],[[3,3],[3,3]],[[4,4],[5,5]]]\t" 
"[[[1,NULL],[NULL,1]],[[NULL,NULL],[NULL,NULL]],[[4,4],[5,5]]]\n" - ) - assert(result == expected) + ) + assert result == expected node1.query("DROP DATABASE test_database") - assert 'test_database' not in node1.query('SHOW DATABASES') - drop_postgres_table(cursor, 'array_columns') + assert "test_database" not in node1.query("SHOW DATABASES") + drop_postgres_table(cursor, "array_columns") def test_postgresql_database_engine_table_cache(started_cluster): @@ -162,99 +187,140 @@ def test_postgresql_database_engine_table_cache(started_cluster): cursor = conn.cursor() node1.query( - "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', '', 1)") + "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', '', 1)" + ) - create_postgres_table(cursor, 'test_table') - assert node1.query('DESCRIBE TABLE test_database.test_table').rstrip() == 'id\tInt32\t\t\t\t\t\nvalue\tNullable(Int32)' + create_postgres_table(cursor, "test_table") + assert ( + node1.query("DESCRIBE TABLE test_database.test_table").rstrip() + == "id\tInt32\t\t\t\t\t\nvalue\tNullable(Int32)" + ) - cursor.execute('ALTER TABLE test_table ADD COLUMN data Text') - assert node1.query('DESCRIBE TABLE test_database.test_table').rstrip() == 'id\tInt32\t\t\t\t\t\nvalue\tNullable(Int32)' + cursor.execute("ALTER TABLE test_table ADD COLUMN data Text") + assert ( + node1.query("DESCRIBE TABLE test_database.test_table").rstrip() + == "id\tInt32\t\t\t\t\t\nvalue\tNullable(Int32)" + ) node1.query("DETACH TABLE test_database.test_table") - assert 'test_table' not in node1.query('SHOW TABLES FROM test_database') + assert "test_table" not in node1.query("SHOW TABLES FROM test_database") node1.query("ATTACH TABLE test_database.test_table") - assert 'test_table' in node1.query('SHOW TABLES FROM test_database') + assert "test_table" in node1.query("SHOW TABLES FROM test_database") - assert node1.query('DESCRIBE TABLE test_database.test_table').rstrip() == 'id\tInt32\t\t\t\t\t\nvalue\tNullable(Int32)\t\t\t\t\t\ndata\tNullable(String)' + assert ( + node1.query("DESCRIBE TABLE test_database.test_table").rstrip() + == "id\tInt32\t\t\t\t\t\nvalue\tNullable(Int32)\t\t\t\t\t\ndata\tNullable(String)" + ) node1.query("DROP TABLE test_database.test_table") - assert 'test_table' not in node1.query('SHOW TABLES FROM test_database') + assert "test_table" not in node1.query("SHOW TABLES FROM test_database") node1.query("ATTACH TABLE test_database.test_table") - assert 'test_table' in node1.query('SHOW TABLES FROM test_database') + assert "test_table" in node1.query("SHOW TABLES FROM test_database") - node1.query("INSERT INTO test_database.test_table SELECT number, number, toString(number) from numbers(10000)") - assert node1.query("SELECT count() FROM test_database.test_table").rstrip() == '10000' + node1.query( + "INSERT INTO test_database.test_table SELECT number, number, toString(number) from numbers(10000)" + ) + assert ( + node1.query("SELECT count() FROM test_database.test_table").rstrip() == "10000" + ) - cursor.execute('DROP TABLE test_table;') - assert 'test_table' not in node1.query('SHOW TABLES FROM test_database') + cursor.execute("DROP TABLE test_table;") + assert "test_table" not in node1.query("SHOW TABLES FROM test_database") node1.query("DROP DATABASE test_database") - assert 'test_database' not in node1.query('SHOW DATABASES') + assert "test_database" not in node1.query("SHOW DATABASES") def 
test_postgresql_database_with_schema(started_cluster): conn = get_postgres_conn(started_cluster, True) cursor = conn.cursor() - cursor.execute('CREATE SCHEMA test_schema') - cursor.execute('CREATE TABLE test_schema.table1 (a integer)') - cursor.execute('CREATE TABLE test_schema.table2 (a integer)') - cursor.execute('CREATE TABLE table3 (a integer)') + cursor.execute("CREATE SCHEMA test_schema") + cursor.execute("CREATE TABLE test_schema.table1 (a integer)") + cursor.execute("CREATE TABLE test_schema.table2 (a integer)") + cursor.execute("CREATE TABLE table3 (a integer)") node1.query( - "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 'test_schema')") + "CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 'test_schema')" + ) - assert(node1.query('SHOW TABLES FROM test_database') == 'table1\ntable2\n') + assert node1.query("SHOW TABLES FROM test_database") == "table1\ntable2\n" node1.query("INSERT INTO test_database.table1 SELECT number from numbers(10000)") - assert node1.query("SELECT count() FROM test_database.table1").rstrip() == '10000' + assert node1.query("SELECT count() FROM test_database.table1").rstrip() == "10000" node1.query("DETACH TABLE test_database.table1") node1.query("ATTACH TABLE test_database.table1") - assert node1.query("SELECT count() FROM test_database.table1").rstrip() == '10000' + assert node1.query("SELECT count() FROM test_database.table1").rstrip() == "10000" node1.query("DROP DATABASE test_database") - cursor.execute('DROP SCHEMA test_schema CASCADE') - cursor.execute('DROP TABLE table3') + cursor.execute("DROP SCHEMA test_schema CASCADE") + cursor.execute("DROP TABLE table3") def test_predefined_connection_configuration(started_cluster): cursor = started_cluster.postgres_conn.cursor() - cursor.execute(f'DROP TABLE IF EXISTS test_table') - cursor.execute(f'CREATE TABLE test_table (a integer PRIMARY KEY, b integer)') + cursor.execute(f"DROP TABLE IF EXISTS test_table") + cursor.execute(f"CREATE TABLE test_table (a integer PRIMARY KEY, b integer)") node1.query("DROP DATABASE IF EXISTS postgres_database") node1.query("CREATE DATABASE postgres_database ENGINE = PostgreSQL(postgres1)") - result = node1.query("select create_table_query from system.tables where database ='postgres_database'") - assert(result.strip().endswith("ENGINE = PostgreSQL(postgres1, table = \\'test_table\\')")) + result = node1.query( + "select create_table_query from system.tables where database ='postgres_database'" + ) + assert result.strip().endswith( + "ENGINE = PostgreSQL(postgres1, table = \\'test_table\\')" + ) - node1.query("INSERT INTO postgres_database.test_table SELECT number, number from numbers(100)") - assert (node1.query(f"SELECT count() FROM postgres_database.test_table").rstrip() == '100') + node1.query( + "INSERT INTO postgres_database.test_table SELECT number, number from numbers(100)" + ) + assert ( + node1.query(f"SELECT count() FROM postgres_database.test_table").rstrip() + == "100" + ) - cursor.execute('CREATE SCHEMA test_schema') - cursor.execute('CREATE TABLE test_schema.test_table (a integer)') + cursor.execute("CREATE SCHEMA test_schema") + cursor.execute("CREATE TABLE test_schema.test_table (a integer)") node1.query("DROP DATABASE IF EXISTS postgres_database") - node1.query("CREATE DATABASE postgres_database ENGINE = PostgreSQL(postgres1, schema='test_schema')") - node1.query("INSERT INTO postgres_database.test_table SELECT number from 
numbers(200)") - assert (node1.query(f"SELECT count() FROM postgres_database.test_table").rstrip() == '200') + node1.query( + "CREATE DATABASE postgres_database ENGINE = PostgreSQL(postgres1, schema='test_schema')" + ) + node1.query( + "INSERT INTO postgres_database.test_table SELECT number from numbers(200)" + ) + assert ( + node1.query(f"SELECT count() FROM postgres_database.test_table").rstrip() + == "200" + ) node1.query("DROP DATABASE IF EXISTS postgres_database") - node1.query_and_get_error("CREATE DATABASE postgres_database ENGINE = PostgreSQL(postgres1, 'test_schema')") - node1.query_and_get_error("CREATE DATABASE postgres_database ENGINE = PostgreSQL(postgres2)") - node1.query_and_get_error("CREATE DATABASE postgres_database ENGINE = PostgreSQL(unknown_collection)") - node1.query("CREATE DATABASE postgres_database ENGINE = PostgreSQL(postgres3, port=5432)") - assert (node1.query(f"SELECT count() FROM postgres_database.test_table").rstrip() == '100') + node1.query_and_get_error( + "CREATE DATABASE postgres_database ENGINE = PostgreSQL(postgres1, 'test_schema')" + ) + node1.query_and_get_error( + "CREATE DATABASE postgres_database ENGINE = PostgreSQL(postgres2)" + ) + node1.query_and_get_error( + "CREATE DATABASE postgres_database ENGINE = PostgreSQL(unknown_collection)" + ) + node1.query( + "CREATE DATABASE postgres_database ENGINE = PostgreSQL(postgres3, port=5432)" + ) + assert ( + node1.query(f"SELECT count() FROM postgres_database.test_table").rstrip() + == "100" + ) node1.query("DROP DATABASE postgres_database") - cursor.execute(f'DROP TABLE test_table ') - cursor.execute('DROP SCHEMA IF EXISTS test_schema CASCADE') + cursor.execute(f"DROP TABLE test_table ") + cursor.execute("DROP SCHEMA IF EXISTS test_schema CASCADE") - -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_postgresql_protocol/test.py b/tests/integration/test_postgresql_protocol/test.py index 7bea9569880..5c270fd9ca7 100644 --- a/tests/integration/test_postgresql_protocol/test.py +++ b/tests/integration/test_postgresql_protocol/test.py @@ -19,10 +19,19 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) DOCKER_COMPOSE_PATH = get_docker_compose_path() cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=["configs/postresql.xml", "configs/log.xml", "configs/ssl_conf.xml", - "configs/dhparam.pem", "configs/server.crt", "configs/server.key"], - user_configs=["configs/default_passwd.xml"], - env_variables={'UBSAN_OPTIONS': 'print_stacktrace=1'}) +node = cluster.add_instance( + "node", + main_configs=[ + "configs/postresql.xml", + "configs/log.xml", + "configs/ssl_conf.xml", + "configs/dhparam.pem", + "configs/server.crt", + "configs/server.key", + ], + user_configs=["configs/default_passwd.xml"], + env_variables={"UBSAN_OPTIONS": "print_stacktrace=1"}, +) server_port = 5433 @@ -31,41 +40,75 @@ server_port = 5433 def server_address(): cluster.start() try: - yield cluster.get_instance_ip('node') + yield cluster.get_instance_ip("node") finally: cluster.shutdown() -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def psql_client(): - docker_compose = os.path.join(DOCKER_COMPOSE_PATH, 'docker_compose_postgresql.yml') + docker_compose = os.path.join(DOCKER_COMPOSE_PATH, "docker_compose_postgresql.yml") run_and_check( - ['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) - yield 
docker.DockerClient(base_url='unix:///var/run/docker.sock', version=cluster.docker_api_version, timeout=600).containers.get(cluster.project_name + '_psql_1') + [ + "docker-compose", + "-p", + cluster.project_name, + "-f", + docker_compose, + "up", + "--no-recreate", + "-d", + "--build", + ] + ) + yield docker.DockerClient( + base_url="unix:///var/run/docker.sock", + version=cluster.docker_api_version, + timeout=600, + ).containers.get(cluster.project_name + "_psql_1") -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def psql_server(psql_client): """Return PostgreSQL container when it is healthy.""" retries = 30 for i in range(retries): info = psql_client.client.api.inspect_container(psql_client.name) - if info['State']['Health']['Status'] == 'healthy': + if info["State"]["Health"]["Status"] == "healthy": break time.sleep(1) else: - print(info['State']) - raise Exception('PostgreSQL server has not started after {} retries.'.format(retries)) + print(info["State"]) + raise Exception( + "PostgreSQL server has not started after {} retries.".format(retries) + ) return psql_client -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def java_container(): - docker_compose = os.path.join(DOCKER_COMPOSE_PATH, 'docker_compose_postgresql_java_client.yml') + docker_compose = os.path.join( + DOCKER_COMPOSE_PATH, "docker_compose_postgresql_java_client.yml" + ) run_and_check( - ['docker-compose', '-p', cluster.project_name, '-f', docker_compose, 'up', '--no-recreate', '-d', '--build']) - yield docker.DockerClient(base_url='unix:///var/run/docker.sock', version=cluster.docker_api_version, timeout=600).containers.get(cluster.project_name + '_java_1') + [ + "docker-compose", + "-p", + cluster.project_name, + "-f", + docker_compose, + "up", + "--no-recreate", + "-d", + "--build", + ] + ) + yield docker.DockerClient( + base_url="unix:///var/run/docker.sock", + version=cluster.docker_api_version, + timeout=600, + ).containers.get(cluster.project_name + "_java_1") def test_psql_is_ready(psql_server): @@ -73,85 +116,119 @@ def test_psql_is_ready(psql_server): def test_psql_client(psql_client, server_address): - cmd_prefix = 'psql "sslmode=require host={server_address} port={server_port} user=default dbname=default password=123" ' \ - .format(server_address=server_address, server_port=server_port) + cmd_prefix = 'psql "sslmode=require host={server_address} port={server_port} user=default dbname=default password=123" '.format( + server_address=server_address, server_port=server_port + ) cmd_prefix += "--no-align --field-separator=' ' " - code, (stdout, stderr) = psql_client.exec_run(cmd_prefix + '-c "SELECT 1 as a"', demux=True) - assert stdout.decode() == '\n'.join(['a', '1', '(1 row)', '']) - - code, (stdout, stderr) = psql_client.exec_run(cmd_prefix + '''-c "SELECT 'колонка' as a"''', demux=True) - assert stdout.decode() == '\n'.join(['a', 'колонка', '(1 row)', '']) + code, (stdout, stderr) = psql_client.exec_run( + cmd_prefix + '-c "SELECT 1 as a"', demux=True + ) + assert stdout.decode() == "\n".join(["a", "1", "(1 row)", ""]) code, (stdout, stderr) = psql_client.exec_run( - cmd_prefix + '-c ' + - ''' + cmd_prefix + '''-c "SELECT 'колонка' as a"''', demux=True + ) + assert stdout.decode() == "\n".join(["a", "колонка", "(1 row)", ""]) + + code, (stdout, stderr) = psql_client.exec_run( + cmd_prefix + + "-c " + + """ "CREATE DATABASE x; USE x; CREATE TABLE table1 (column UInt32) ENGINE = Memory; INSERT INTO table1 VALUES (0), (1), (5); INSERT INTO table1 VALUES (0), (1), (5); SELECT 
* FROM table1 ORDER BY column;" - ''', - demux=True + """, + demux=True, + ) + assert stdout.decode() == "\n".join( + ["column", "0", "0", "1", "1", "5", "5", "(6 rows)", ""] ) - assert stdout.decode() == '\n'.join(['column', '0', '0', '1', '1', '5', '5', '(6 rows)', '']) code, (stdout, stderr) = psql_client.exec_run( - cmd_prefix + '-c ' + - ''' + cmd_prefix + + "-c " + + """ "DROP DATABASE x; CREATE TEMPORARY TABLE tmp (tmp_column UInt32); INSERT INTO tmp VALUES (0), (1); SELECT * FROM tmp ORDER BY tmp_column;" - ''', - demux=True + """, + demux=True, ) - assert stdout.decode() == '\n'.join(['tmp_column', '0', '1', '(2 rows)', '']) + assert stdout.decode() == "\n".join(["tmp_column", "0", "1", "(2 rows)", ""]) def test_python_client(server_address): with pytest.raises(py_psql.InternalError) as exc_info: - ch = py_psql.connect(host=server_address, port=server_port, user='default', password='123', database='') + ch = py_psql.connect( + host=server_address, + port=server_port, + user="default", + password="123", + database="", + ) cur = ch.cursor() - cur.execute('select name from tables;') + cur.execute("select name from tables;") assert exc_info.value.args == ( - "Query execution failed.\nDB::Exception: Table default.tables doesn't exist\nSSL connection has been closed unexpectedly\n",) + "Query execution failed.\nDB::Exception: Table default.tables doesn't exist\nSSL connection has been closed unexpectedly\n", + ) - ch = py_psql.connect(host=server_address, port=server_port, user='default', password='123', database='') + ch = py_psql.connect( + host=server_address, + port=server_port, + user="default", + password="123", + database="", + ) cur = ch.cursor() - cur.execute('select 1 as a, 2 as b') - assert (cur.description[0].name, cur.description[1].name) == ('a', 'b') + cur.execute("select 1 as a, 2 as b") + assert (cur.description[0].name, cur.description[1].name) == ("a", "b") assert cur.fetchall() == [(1, 2)] - cur.execute('CREATE DATABASE x') - cur.execute('USE x') + cur.execute("CREATE DATABASE x") + cur.execute("USE x") cur.execute( - 'CREATE TEMPORARY TABLE tmp2 (ch Int8, i64 Int64, f64 Float64, str String, date Date, dec Decimal(19, 10), uuid UUID) ENGINE = Memory') + "CREATE TEMPORARY TABLE tmp2 (ch Int8, i64 Int64, f64 Float64, str String, date Date, dec Decimal(19, 10), uuid UUID) ENGINE = Memory" + ) cur.execute( - "insert into tmp2 (ch, i64, f64, str, date, dec, uuid) values (44, 534324234, 0.32423423, 'hello', '2019-01-23', 0.333333, '61f0c404-5cb3-11e7-907b-a6006ad3dba0')") - cur.execute('select * from tmp2') + "insert into tmp2 (ch, i64, f64, str, date, dec, uuid) values (44, 534324234, 0.32423423, 'hello', '2019-01-23', 0.333333, '61f0c404-5cb3-11e7-907b-a6006ad3dba0')" + ) + cur.execute("select * from tmp2") assert cur.fetchall()[0] == ( - '44', 534324234, 0.32423423, 'hello', datetime.date(2019, 1, 23), decimal.Decimal('0.3333330000'), - uuid.UUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0')) + "44", + 534324234, + 0.32423423, + "hello", + datetime.date(2019, 1, 23), + decimal.Decimal("0.3333330000"), + uuid.UUID("61f0c404-5cb3-11e7-907b-a6006ad3dba0"), + ) def test_java_client(server_address, java_container): - with open(os.path.join(SCRIPT_DIR, 'java.reference')) as fp: + with open(os.path.join(SCRIPT_DIR, "java.reference")) as fp: reference = fp.read() # database not exists exception. 
code, (stdout, stderr) = java_container.exec_run( - 'java JavaConnectorTest --host {host} --port {port} --user default --database ' - 'abc'.format(host=server_address, port=server_port), demux=True) + "java JavaConnectorTest --host {host} --port {port} --user default --database " + "abc".format(host=server_address, port=server_port), + demux=True, + ) assert code == 1 # non-empty password passed. code, (stdout, stderr) = java_container.exec_run( - 'java JavaConnectorTest --host {host} --port {port} --user default --password 123 --database ' - 'default'.format(host=server_address, port=server_port), demux=True) + "java JavaConnectorTest --host {host} --port {port} --user default --password 123 --database " + "default".format(host=server_address, port=server_port), + demux=True, + ) print(stdout, stderr, file=sys.stderr) assert code == 0 assert stdout.decode() == reference diff --git a/tests/integration/test_postgresql_replica_database_engine_1/test.py b/tests/integration/test_postgresql_replica_database_engine_1/test.py index 8b5d7f5f7b2..45aa59001ef 100644 --- a/tests/integration/test_postgresql_replica_database_engine_1/test.py +++ b/tests/integration/test_postgresql_replica_database_engine_1/test.py @@ -20,15 +20,23 @@ from helpers.postgres_utility import check_tables_are_synchronized from helpers.postgres_utility import check_several_tables_are_synchronized from helpers.postgres_utility import assert_nested_table_is_created from helpers.postgres_utility import assert_number_of_columns -from helpers.postgres_utility import postgres_table_template, postgres_table_template_2, postgres_table_template_3, postgres_table_template_4 +from helpers.postgres_utility import ( + postgres_table_template, + postgres_table_template_2, + postgres_table_template_3, + postgres_table_template_4, +) from helpers.postgres_utility import queries cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', - main_configs = ['configs/log_conf.xml'], - user_configs = ['configs/users.xml'], - with_postgres=True, stay_alive=True) +instance = cluster.add_instance( + "instance", + main_configs=["configs/log_conf.xml"], + user_configs=["configs/users.xml"], + with_postgres=True, + stay_alive=True, +) pg_manager = PostgresManager() @@ -54,54 +62,93 @@ def setup_teardown(): def test_load_and_sync_all_database_tables(started_cluster): NUM_TABLES = 5 pg_manager.create_and_fill_postgres_tables(NUM_TABLES) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) check_several_tables_are_synchronized(instance, NUM_TABLES) - result = instance.query("SELECT count() FROM system.tables WHERE database = 'test_database';") - assert(int(result) == NUM_TABLES) + result = instance.query( + "SELECT count() FROM system.tables WHERE database = 'test_database';" + ) + assert int(result) == NUM_TABLES def test_replicating_dml(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() NUM_TABLES = 5 for i in range(NUM_TABLES): - create_postgres_table(cursor, 'postgresql_replica_{}'.format(i)); - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50)".format(i, i)) + create_postgres_table(cursor, 
"postgresql_replica_{}".format(i)) + instance.query( + "INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {} from numbers(50)".format( + i, i + ) + ) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) for i in range(NUM_TABLES): - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(1000)".format(i, i)) + instance.query( + "INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(1000)".format( + i, i + ) + ) check_several_tables_are_synchronized(instance, NUM_TABLES) for i in range(NUM_TABLES): - cursor.execute('UPDATE postgresql_replica_{} SET value = {} * {} WHERE key < 50;'.format(i, i, i)) - cursor.execute('UPDATE postgresql_replica_{} SET value = {} * {} * {} WHERE key >= 50;'.format(i, i, i, i)) + cursor.execute( + "UPDATE postgresql_replica_{} SET value = {} * {} WHERE key < 50;".format( + i, i, i + ) + ) + cursor.execute( + "UPDATE postgresql_replica_{} SET value = {} * {} * {} WHERE key >= 50;".format( + i, i, i, i + ) + ) check_several_tables_are_synchronized(instance, NUM_TABLES) for i in range(NUM_TABLES): - cursor.execute('DELETE FROM postgresql_replica_{} WHERE (value*value + {}) % 2 = 0;'.format(i, i)) - cursor.execute('UPDATE postgresql_replica_{} SET value = value - (value % 7) WHERE key > 128 AND key < 512;'.format(i)) - cursor.execute('DELETE FROM postgresql_replica_{} WHERE key % 7 = 1;'.format(i, i)) + cursor.execute( + "DELETE FROM postgresql_replica_{} WHERE (value*value + {}) % 2 = 0;".format( + i, i + ) + ) + cursor.execute( + "UPDATE postgresql_replica_{} SET value = value - (value % 7) WHERE key > 128 AND key < 512;".format( + i + ) + ) + cursor.execute( + "DELETE FROM postgresql_replica_{} WHERE key % 7 = 1;".format(i, i) + ) check_several_tables_are_synchronized(instance, NUM_TABLES) def test_different_data_types(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - cursor.execute('drop table if exists test_data_types;') - cursor.execute('drop table if exists test_array_data_type;') + cursor.execute("drop table if exists test_data_types;") + cursor.execute("drop table if exists test_array_data_type;") cursor.execute( - '''CREATE TABLE test_data_types ( + """CREATE TABLE test_data_types ( id integer PRIMARY KEY, a smallint, b integer, c bigint, d real, e double precision, f serial, g bigserial, - h timestamp, i date, j decimal(5, 5), k numeric(5, 5))''') + h timestamp, i date, j decimal(5, 5), k numeric(5, 5))""" + ) cursor.execute( - '''CREATE TABLE test_array_data_type + """CREATE TABLE test_array_data_type ( key Integer NOT NULL PRIMARY KEY, a Date[] NOT NULL, -- Date @@ -114,27 +161,42 @@ def test_different_data_types(started_cluster): h Integer[][][], -- Nullable(Int32) i Char(2)[][][][], -- Nullable(String) k Char(2)[] -- Nullable(String) - )''') + )""" + ) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) for i in range(10): - instance.query(''' + instance.query( + """ INSERT INTO postgres_database.test_data_types 
VALUES - ({}, -32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 2147483647, 9223372036854775807, '2000-05-12 12:12:12.012345', '2000-05-12', 0.2, 0.2)'''.format(i)) + ({}, -32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 2147483647, 9223372036854775807, '2000-05-12 12:12:12.012345', '2000-05-12', 0.2, 0.2)""".format( + i + ) + ) - check_tables_are_synchronized(instance, 'test_data_types', 'id'); - result = instance.query('SELECT * FROM test_database.test_data_types ORDER BY id LIMIT 1;') - assert(result == '0\t-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12.012345\t2000-05-12\t0.2\t0.2\n') + check_tables_are_synchronized(instance, "test_data_types", "id") + result = instance.query( + "SELECT * FROM test_database.test_data_types ORDER BY id LIMIT 1;" + ) + assert ( + result + == "0\t-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12.012345\t2000-05-12\t0.2\t0.2\n" + ) for i in range(10): - col = random.choice(['a', 'b', 'c']) - cursor.execute('UPDATE test_data_types SET {} = {};'.format(col, i)) - cursor.execute('''UPDATE test_data_types SET i = '2020-12-12';'''.format(col, i)) + col = random.choice(["a", "b", "c"]) + cursor.execute("UPDATE test_data_types SET {} = {};".format(col, i)) + cursor.execute( + """UPDATE test_data_types SET i = '2020-12-12';""".format(col, i) + ) - check_tables_are_synchronized(instance, 'test_data_types', 'id'); + check_tables_are_synchronized(instance, "test_data_types", "id") - instance.query("INSERT INTO postgres_database.test_array_data_type " + instance.query( + "INSERT INTO postgres_database.test_array_data_type " "VALUES (" "0, " "['2000-05-12', '2000-05-12'], " @@ -147,144 +209,203 @@ def test_different_data_types(started_cluster): "[[[1, NULL], [NULL, 1]], [[NULL, NULL], [NULL, NULL]], [[4, 4], [5, 5]]], " "[[[[NULL]]]], " "[]" - ")") + ")" + ) expected = ( - "0\t" + - "['2000-05-12','2000-05-12']\t" + - "['2000-05-12 12:12:12.012345','2000-05-12 12:12:12.012345']\t" + - "[[1.12345],[1.12345],[1.12345]]\t" + - "[[1.1234567891],[1.1234567891],[1.1234567891]]\t" + - "[[[0.11111,0.11111]],[[0.22222,0.22222]],[[0.33333,0.33333]]]\t" + "0\t" + + "['2000-05-12','2000-05-12']\t" + + "['2000-05-12 12:12:12.012345','2000-05-12 12:12:12.012345']\t" + + "[[1.12345],[1.12345],[1.12345]]\t" + + "[[1.1234567891],[1.1234567891],[1.1234567891]]\t" + + "[[[0.11111,0.11111]],[[0.22222,0.22222]],[[0.33333,0.33333]]]\t" "[[[1,1],[1,1]],[[3,3],[3,3]],[[4,4],[5,5]]]\t" "[[[[['winx','winx','winx']]]]]\t" "[[[1,NULL],[NULL,1]],[[NULL,NULL],[NULL,NULL]],[[4,4],[5,5]]]\t" "[[[[NULL]]]]\t" "[]\n" - ) + ) - check_tables_are_synchronized(instance, 'test_array_data_type'); - result = instance.query('SELECT * FROM test_database.test_array_data_type ORDER BY key;') - assert(result == expected) + check_tables_are_synchronized(instance, "test_array_data_type") + result = instance.query( + "SELECT * FROM test_database.test_array_data_type ORDER BY key;" + ) + assert result == expected pg_manager.drop_materialized_db() - cursor.execute('drop table if exists test_data_types;') - cursor.execute('drop table if exists test_array_data_type;') + cursor.execute("drop table if exists test_data_types;") + cursor.execute("drop table if exists test_array_data_type;") def test_load_and_sync_subset_of_database_tables(started_cluster): NUM_TABLES = 10 pg_manager.create_and_fill_postgres_tables(NUM_TABLES) - publication_tables = '' + 
publication_tables = "" for i in range(NUM_TABLES): - if i < int(NUM_TABLES/2): - if publication_tables != '': - publication_tables += ', ' - publication_tables += f'postgresql_replica_{i}' + if i < int(NUM_TABLES / 2): + if publication_tables != "": + publication_tables += ", " + publication_tables += f"postgresql_replica_{i}" pg_manager.create_materialized_db( - ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - settings=["materialized_postgresql_tables_list = '{}'".format(publication_tables)]) + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[ + "materialized_postgresql_tables_list = '{}'".format(publication_tables) + ], + ) time.sleep(1) - for i in range(int(NUM_TABLES/2)): - table_name = f'postgresql_replica_{i}' + for i in range(int(NUM_TABLES / 2)): + table_name = f"postgresql_replica_{i}" assert_nested_table_is_created(instance, table_name) - result = instance.query('''SELECT count() FROM system.tables WHERE database = 'test_database';''') - assert(int(result) == int(NUM_TABLES/2)) + result = instance.query( + """SELECT count() FROM system.tables WHERE database = 'test_database';""" + ) + assert int(result) == int(NUM_TABLES / 2) - database_tables = instance.query('SHOW TABLES FROM test_database') + database_tables = instance.query("SHOW TABLES FROM test_database") for i in range(NUM_TABLES): - table_name = 'postgresql_replica_{}'.format(i) - if i < int(NUM_TABLES/2): + table_name = "postgresql_replica_{}".format(i) + if i < int(NUM_TABLES / 2): assert table_name in database_tables else: assert table_name not in database_tables - instance.query("INSERT INTO postgres_database.{} SELECT 50 + number, {} from numbers(100)".format(table_name, i)) + instance.query( + "INSERT INTO postgres_database.{} SELECT 50 + number, {} from numbers(100)".format( + table_name, i + ) + ) for i in range(NUM_TABLES): - table_name = f'postgresql_replica_{i}' - if i < int(NUM_TABLES/2): - check_tables_are_synchronized(instance, table_name); + table_name = f"postgresql_replica_{i}" + if i < int(NUM_TABLES / 2): + check_tables_are_synchronized(instance, table_name) def test_changing_replica_identity_value(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, number from numbers(50)") + create_postgres_table(cursor, "postgresql_replica") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, number from numbers(50)" + ) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 100 + number, number from numbers(50)") - check_tables_are_synchronized(instance, 'postgresql_replica'); + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT 100 + number, number from numbers(50)" + ) + check_tables_are_synchronized(instance, "postgresql_replica") cursor.execute("UPDATE postgresql_replica SET key=key-25 WHERE key<100 ") - check_tables_are_synchronized(instance, 'postgresql_replica'); + check_tables_are_synchronized(instance, 
"postgresql_replica") def test_clickhouse_restart(started_cluster): NUM_TABLES = 5 pg_manager.create_and_fill_postgres_tables(NUM_TABLES) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) check_several_tables_are_synchronized(instance, NUM_TABLES) for i in range(NUM_TABLES): - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(50000)".format(i, i)) + instance.query( + "INSERT INTO postgres_database.postgresql_replica_{} SELECT 50 + number, {} from numbers(50000)".format( + i, i + ) + ) instance.restart_clickhouse() check_several_tables_are_synchronized(instance, NUM_TABLES) def test_replica_identity_index(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica', template=postgres_table_template_3); + create_postgres_table( + cursor, "postgresql_replica", template=postgres_table_template_3 + ) cursor.execute("CREATE unique INDEX idx on postgresql_replica(key1, key2);") cursor.execute("ALTER TABLE postgresql_replica REPLICA IDENTITY USING INDEX idx") - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(50, 10)") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(50, 10)" + ) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(100, 10)") - check_tables_are_synchronized(instance, 'postgresql_replica', order_by='key1'); + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number, number, number from numbers(100, 10)" + ) + check_tables_are_synchronized(instance, "postgresql_replica", order_by="key1") cursor.execute("UPDATE postgresql_replica SET key1=key1-25 WHERE key1<100 ") cursor.execute("UPDATE postgresql_replica SET key2=key2-25 WHERE key2>100 ") cursor.execute("UPDATE postgresql_replica SET value1=value1+100 WHERE key1<100 ") cursor.execute("UPDATE postgresql_replica SET value2=value2+200 WHERE key2>100 ") - check_tables_are_synchronized(instance, 'postgresql_replica', order_by='key1'); + check_tables_are_synchronized(instance, "postgresql_replica", order_by="key1") - cursor.execute('DELETE FROM postgresql_replica WHERE key2<75;') - check_tables_are_synchronized(instance, 'postgresql_replica', order_by='key1'); + cursor.execute("DELETE FROM postgresql_replica WHERE key2<75;") + check_tables_are_synchronized(instance, "postgresql_replica", order_by="key1") def test_table_schema_changes(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() NUM_TABLES = 5 for i in range(NUM_TABLES): - create_postgres_table(cursor, 'postgresql_replica_{}'.format(i), 
template=postgres_table_template_2); - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format(i, i, i, i)) + create_postgres_table( + cursor, + "postgresql_replica_{}".format(i), + template=postgres_table_template_2, + ) + instance.query( + "INSERT INTO postgres_database.postgresql_replica_{} SELECT number, {}, {}, {} from numbers(25)".format( + i, i, i, i + ) + ) pg_manager.create_materialized_db( - ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - settings=["materialized_postgresql_allow_automatic_update = 1"]) + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=["materialized_postgresql_allow_automatic_update = 1"], + ) for i in range(NUM_TABLES): - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format(i, i, i, i)) + instance.query( + "INSERT INTO postgres_database.postgresql_replica_{} SELECT 25 + number, {}, {}, {} from numbers(25)".format( + i, i, i, i + ) + ) check_several_tables_are_synchronized(instance, NUM_TABLES) - expected = instance.query("SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key"); + expected = instance.query( + "SELECT key, value1, value3 FROM test_database.postgresql_replica_3 ORDER BY key" + ) altered_idx = random.randint(0, 4) - altered_table = f'postgresql_replica_{altered_idx}' + altered_table = f"postgresql_replica_{altered_idx}" cursor.execute(f"ALTER TABLE {altered_table} DROP COLUMN value2") for i in range(NUM_TABLES): @@ -295,47 +416,62 @@ def test_table_schema_changes(started_cluster): assert_nested_table_is_created(instance, altered_table) assert_number_of_columns(instance, 3, altered_table) check_tables_are_synchronized(instance, altered_table) - print('check1 OK') + print("check1 OK") check_several_tables_are_synchronized(instance, NUM_TABLES) for i in range(NUM_TABLES): if i != altered_idx: - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {}, {} from numbers(49)".format(i, i, i, i)) + instance.query( + "INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {}, {} from numbers(49)".format( + i, i, i, i + ) + ) else: - instance.query("INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {} from numbers(49)".format(i, i, i)) + instance.query( + "INSERT INTO postgres_database.postgresql_replica_{} SELECT 51 + number, {}, {} from numbers(49)".format( + i, i, i + ) + ) - check_tables_are_synchronized(instance, altered_table); - print('check2 OK') + check_tables_are_synchronized(instance, altered_table) + print("check2 OK") check_several_tables_are_synchronized(instance, NUM_TABLES) def test_many_concurrent_queries(started_cluster): - query_pool = ['DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;', - 'UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;', - 'DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;', - 'UPDATE postgresql_replica_{} SET value = value*5 WHERE key % 2 = 1;', - 'DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;', - 'UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;', - 'DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;', - 'UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;', - 'DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;', - 'UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;', - 'DELETE FROM postgresql_replica_{} 
WHERE value%5 = 0;'] + query_pool = [ + "DELETE FROM postgresql_replica_{} WHERE (value*value) % 3 = 0;", + "UPDATE postgresql_replica_{} SET value = value - 125 WHERE key % 2 = 0;", + "DELETE FROM postgresql_replica_{} WHERE key % 10 = 0;", + "UPDATE postgresql_replica_{} SET value = value*5 WHERE key % 2 = 1;", + "DELETE FROM postgresql_replica_{} WHERE value % 2 = 0;", + "UPDATE postgresql_replica_{} SET value = value + 2000 WHERE key % 5 = 0;", + "DELETE FROM postgresql_replica_{} WHERE value % 3 = 0;", + "UPDATE postgresql_replica_{} SET value = value * 2 WHERE key % 3 = 0;", + "DELETE FROM postgresql_replica_{} WHERE value % 9 = 2;", + "UPDATE postgresql_replica_{} SET value = value + 2 WHERE key % 3 = 1;", + "DELETE FROM postgresql_replica_{} WHERE value%5 = 0;", + ] NUM_TABLES = 5 - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - pg_manager.create_and_fill_postgres_tables_from_cursor(cursor, NUM_TABLES, numbers=10000) + pg_manager.create_and_fill_postgres_tables_from_cursor( + cursor, NUM_TABLES, numbers=10000 + ) def attack(thread_id): - print('thread {}'.format(thread_id)) + print("thread {}".format(thread_id)) k = 10000 for i in range(20): - query_id = random.randrange(0, len(query_pool)-1) - table_id = random.randrange(0, 5) # num tables + query_id = random.randrange(0, len(query_pool) - 1) + table_id = random.randrange(0, 5) # num tables # random update / delete query cursor.execute(query_pool[query_id].format(table_id)) @@ -344,14 +480,22 @@ def test_many_concurrent_queries(started_cluster): # allow some thread to do inserts (not to violate key constraints) if thread_id < 5: print("try insert table {}".format(thread_id)) - instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT {}*10000*({} + number), number from numbers(1000)'.format(i, thread_id, k)) + instance.query( + "INSERT INTO postgres_database.postgresql_replica_{} SELECT {}*10000*({} + number), number from numbers(1000)".format( + i, thread_id, k + ) + ) k += 1 print("insert table {} ok".format(thread_id)) if i == 5: # also change primary key value print("try update primary key {}".format(thread_id)) - cursor.execute("UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(thread_id, i+1, i+1)) + cursor.execute( + "UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format( + thread_id, i + 1, i + 1 + ) + ) print("update primary key {} ok".format(thread_id)) n = [10000] @@ -361,7 +505,9 @@ def test_many_concurrent_queries(started_cluster): for i in range(threads_num): threads.append(threading.Thread(target=attack, args=(i,))) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) for thread in threads: time.sleep(random.uniform(0, 1)) @@ -370,135 +516,222 @@ def test_many_concurrent_queries(started_cluster): n[0] = 50000 for table_id in range(NUM_TABLES): n[0] += 1 - instance.query('INSERT INTO postgres_database.postgresql_replica_{} SELECT {} + number, number from numbers(5000)'.format(table_id, n[0])) - #cursor.execute("UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(table_id, table_id+1, table_id+1)) + instance.query( + "INSERT INTO 
postgres_database.postgresql_replica_{} SELECT {} + number, number from numbers(5000)".format( + table_id, n[0] + ) + ) + # cursor.execute("UPDATE postgresql_replica_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(table_id, table_id+1, table_id+1)) for thread in threads: thread.join() for i in range(NUM_TABLES): - check_tables_are_synchronized(instance, 'postgresql_replica_{}'.format(i)); - count1 = instance.query('SELECT count() FROM postgres_database.postgresql_replica_{}'.format(i)) - count2 = instance.query('SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})'.format(i)) - assert(int(count1) == int(count2)) + check_tables_are_synchronized(instance, "postgresql_replica_{}".format(i)) + count1 = instance.query( + "SELECT count() FROM postgres_database.postgresql_replica_{}".format(i) + ) + count2 = instance.query( + "SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{})".format( + i + ) + ) + assert int(count1) == int(count2) print(count1, count2) def test_single_transaction(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True, auto_commit=False) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + auto_commit=False, + ) cursor = conn.cursor() - table_name = 'postgresql_replica_0' - create_postgres_table(cursor, table_name); + table_name = "postgresql_replica_0" + create_postgres_table(cursor, table_name) conn.commit() - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) assert_nested_table_is_created(instance, table_name) for query in queries: - print('query {}'.format(query)) + print("query {}".format(query)) cursor.execute(query.format(0)) time.sleep(5) result = instance.query(f"select count() from test_database.{table_name}") # no commit yet - assert(int(result) == 0) + assert int(result) == 0 conn.commit() - check_tables_are_synchronized(instance, table_name); + check_tables_are_synchronized(instance, table_name) def test_virtual_columns(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - table_name = 'postgresql_replica_0' - create_postgres_table(cursor, table_name); + table_name = "postgresql_replica_0" + create_postgres_table(cursor, table_name) pg_manager.create_materialized_db( - ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - settings=["materialized_postgresql_allow_automatic_update = 1"]) + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=["materialized_postgresql_allow_automatic_update = 1"], + ) assert_nested_table_is_created(instance, table_name) - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(10)") - check_tables_are_synchronized(instance, table_name); + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(10)" + ) + check_tables_are_synchronized(instance, table_name) # just check that it works, no check with `expected` because _version is taken as LSN, which will be different each time. 
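
The comment above pins down the only stable thing to assert about the `_sign`/`_version` virtual columns: their presence, not their values, since `_version` is derived from the WAL LSN and differs on every run. As a rough illustration (not part of the diff), assuming the same `instance` fixture used in these tests and that `instance.query` returns tab-separated text, a shape-only check could look like this; the helper name and the `expected_rows` argument are assumptions for illustration only:

    def check_virtual_columns_shape(instance, table_name, expected_rows):
        # Hypothetical helper, not part of the test suite: assert only on shape.
        result = instance.query(
            f"SELECT key, value, _sign, _version FROM test_database.{table_name}"
        )
        rows = [line.split("\t") for line in result.strip().splitlines()]
        assert len(rows) == expected_rows
        # Each row carries the two virtual columns next to key/value;
        # deliberately no assertion on _version itself, which changes per run.
        assert all(len(row) == 4 for row in rows)
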
- result = instance.query(f'SELECT key, value, _sign, _version FROM test_database.{table_name};') + result = instance.query( + f"SELECT key, value, _sign, _version FROM test_database.{table_name};" + ) print(result) cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN value2 integer") - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, number, number from numbers(10, 10)") + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, number, number from numbers(10, 10)" + ) assert_number_of_columns(instance, 3, table_name) - check_tables_are_synchronized(instance, table_name); + check_tables_are_synchronized(instance, table_name) - result = instance.query('SELECT key, value, value2, _sign, _version FROM test_database.postgresql_replica_0;') + result = instance.query( + "SELECT key, value, value2, _sign, _version FROM test_database.postgresql_replica_0;" + ) print(result) - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, number, number from numbers(20, 10)") - check_tables_are_synchronized(instance, table_name); + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, number, number from numbers(20, 10)" + ) + check_tables_are_synchronized(instance, table_name) - result = instance.query(f'SELECT key, value, value2, _sign, _version FROM test_database.{table_name};') + result = instance.query( + f"SELECT key, value, value2, _sign, _version FROM test_database.{table_name};" + ) print(result) def test_multiple_databases(started_cluster): NUM_TABLES = 5 - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=False) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=False, + ) cursor = conn.cursor() - pg_manager.create_postgres_db(cursor, 'postgres_database_1') - pg_manager.create_postgres_db(cursor, 'postgres_database_2') + pg_manager.create_postgres_db(cursor, "postgres_database_1") + pg_manager.create_postgres_db(cursor, "postgres_database_2") - conn1 = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True, database_name='postgres_database_1') - conn2 = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True, database_name='postgres_database_2') + conn1 = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + database_name="postgres_database_1", + ) + conn2 = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + database_name="postgres_database_2", + ) cursor1 = conn1.cursor() cursor2 = conn2.cursor() - pg_manager.create_clickhouse_postgres_db(cluster.postgres_ip, cluster.postgres_port, 'postgres_database_1', 'postgres_database_1') - pg_manager.create_clickhouse_postgres_db(cluster.postgres_ip, cluster.postgres_port, 'postgres_database_2', 'postgres_database_2') + pg_manager.create_clickhouse_postgres_db( + cluster.postgres_ip, + cluster.postgres_port, + "postgres_database_1", + "postgres_database_1", + ) + pg_manager.create_clickhouse_postgres_db( + cluster.postgres_ip, + cluster.postgres_port, + "postgres_database_2", + "postgres_database_2", + ) cursors = [cursor1, cursor2] for cursor_id in range(len(cursors)): for i in range(NUM_TABLES): - table_name = 'postgresql_replica_{}'.format(i) - create_postgres_table(cursors[cursor_id], table_name); - instance.query("INSERT INTO 
postgres_database_{}.{} SELECT number, number from numbers(50)".format(cursor_id + 1, table_name)) - print('database 1 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_1';''')) - print('database 2 tables: ', instance.query('''SELECT name FROM system.tables WHERE database = 'postgres_database_2';''')) + table_name = "postgresql_replica_{}".format(i) + create_postgres_table(cursors[cursor_id], table_name) + instance.query( + "INSERT INTO postgres_database_{}.{} SELECT number, number from numbers(50)".format( + cursor_id + 1, table_name + ) + ) + print( + "database 1 tables: ", + instance.query( + """SELECT name FROM system.tables WHERE database = 'postgres_database_1';""" + ), + ) + print( + "database 2 tables: ", + instance.query( + """SELECT name FROM system.tables WHERE database = 'postgres_database_2';""" + ), + ) - pg_manager.create_materialized_db(started_cluster.postgres_ip, started_cluster.postgres_port, - 'test_database_1', 'postgres_database_1') - pg_manager.create_materialized_db(started_cluster.postgres_ip, started_cluster.postgres_port, - 'test_database_2', 'postgres_database_2') + pg_manager.create_materialized_db( + started_cluster.postgres_ip, + started_cluster.postgres_port, + "test_database_1", + "postgres_database_1", + ) + pg_manager.create_materialized_db( + started_cluster.postgres_ip, + started_cluster.postgres_port, + "test_database_2", + "postgres_database_2", + ) cursors = [cursor1, cursor2] for cursor_id in range(len(cursors)): for i in range(NUM_TABLES): - table_name = 'postgresql_replica_{}'.format(i) - instance.query("INSERT INTO postgres_database_{}.{} SELECT 50 + number, number from numbers(50)".format(cursor_id + 1, table_name)) + table_name = "postgresql_replica_{}".format(i) + instance.query( + "INSERT INTO postgres_database_{}.{} SELECT 50 + number, number from numbers(50)".format( + cursor_id + 1, table_name + ) + ) for cursor_id in range(len(cursors)): for i in range(NUM_TABLES): - table_name = 'postgresql_replica_{}'.format(i) - check_tables_are_synchronized(instance, - table_name, 'key', 'postgres_database_{}'.format(cursor_id + 1), 'test_database_{}'.format(cursor_id + 1)); + table_name = "postgresql_replica_{}".format(i) + check_tables_are_synchronized( + instance, + table_name, + "key", + "postgres_database_{}".format(cursor_id + 1), + "test_database_{}".format(cursor_id + 1), + ) def test_concurrent_transactions(started_cluster): def transaction(thread_id): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True, auto_commit=False) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + auto_commit=False, + ) cursor = conn.cursor() for query in queries: cursor.execute(query.format(thread_id)) - print('thread {}, query {}'.format(thread_id, query)) + print("thread {}, query {}".format(thread_id, query)) conn.commit() NUM_TABLES = 6 @@ -509,7 +742,9 @@ def test_concurrent_transactions(started_cluster): for i in range(threads_num): threads.append(threading.Thread(target=transaction, args=(i,))) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) for thread in threads: time.sleep(random.uniform(0, 0.5)) @@ -519,25 +754,37 @@ def test_concurrent_transactions(started_cluster): thread.join() for i in range(NUM_TABLES): - 
check_tables_are_synchronized(instance, f'postgresql_replica_{i}'); - count1 = instance.query(f'SELECT count() FROM postgres_database.postgresql_replica_{i}') - count2 = instance.query(f'SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{i})') - print(int(count1), int(count2), sep=' ') - assert(int(count1) == int(count2)) + check_tables_are_synchronized(instance, f"postgresql_replica_{i}") + count1 = instance.query( + f"SELECT count() FROM postgres_database.postgresql_replica_{i}" + ) + count2 = instance.query( + f"SELECT count() FROM (SELECT * FROM test_database.postgresql_replica_{i})" + ) + print(int(count1), int(count2), sep=" ") + assert int(count1) == int(count2) def test_abrupt_connection_loss_while_heavy_replication(started_cluster): def transaction(thread_id): if thread_id % 2: - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True, auto_commit=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + auto_commit=True, + ) else: - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True, auto_commit=False) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + auto_commit=False, + ) cursor = conn.cursor() for query in queries: cursor.execute(query.format(thread_id)) - print('thread {}, query {}'.format(thread_id, query)) + print("thread {}, query {}".format(thread_id, query)) if thread_id % 2 == 0: conn.commit() @@ -549,23 +796,25 @@ def test_abrupt_connection_loss_while_heavy_replication(started_cluster): for i in range(threads_num): threads.append(threading.Thread(target=transaction, args=(i,))) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) for thread in threads: time.sleep(random.uniform(0, 0.5)) thread.start() for thread in threads: - thread.join() # Join here because it takes time for data to reach wal + thread.join() # Join here because it takes time for data to reach wal time.sleep(2) - started_cluster.pause_container('postgres1') + started_cluster.pause_container("postgres1") # for i in range(NUM_TABLES): # result = instance.query(f"SELECT count() FROM test_database.postgresql_replica_{i}") # print(result) # Just debug - started_cluster.unpause_container('postgres1') + started_cluster.unpause_container("postgres1") check_several_tables_are_synchronized(instance, NUM_TABLES) @@ -573,7 +822,9 @@ def test_drop_database_while_replication_startup_not_finished(started_cluster): NUM_TABLES = 5 pg_manager.create_and_fill_postgres_tables(NUM_TABLES, 100000) for i in range(6): - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) time.sleep(0.5 * i) pg_manager.drop_materialized_db() @@ -581,7 +832,9 @@ def test_drop_database_while_replication_startup_not_finished(started_cluster): def test_restart_server_while_replication_startup_not_finished(started_cluster): NUM_TABLES = 5 pg_manager.create_and_fill_postgres_tables(NUM_TABLES, 100000) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, 
port=started_cluster.postgres_port + ) time.sleep(1) instance.restart_clickhouse() check_several_tables_are_synchronized(instance, NUM_TABLES) @@ -590,15 +843,23 @@ def test_restart_server_while_replication_startup_not_finished(started_cluster): def test_abrupt_server_restart_while_heavy_replication(started_cluster): def transaction(thread_id): if thread_id % 2: - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True, auto_commit=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + auto_commit=True, + ) else: - conn = get_postgres_conn(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True, auto_commit=False) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + auto_commit=False, + ) cursor = conn.cursor() for query in queries: cursor.execute(query.format(thread_id)) - print('thread {}, query {}'.format(thread_id, query)) + print("thread {}, query {}".format(thread_id, query)) if thread_id % 2 == 0: conn.commit() @@ -610,65 +871,87 @@ def test_abrupt_server_restart_while_heavy_replication(started_cluster): for i in range(threads_num): threads.append(threading.Thread(target=transaction, args=(i,))) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) for thread in threads: time.sleep(random.uniform(0, 0.5)) thread.start() for thread in threads: - thread.join() # Join here because it takes time for data to reach wal + thread.join() # Join here because it takes time for data to reach wal instance.restart_clickhouse() check_several_tables_are_synchronized(instance, NUM_TABLES) def test_quoting_1(started_cluster): - table_name = 'user' + table_name = "user" pg_manager.create_and_fill_postgres_table(table_name) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) - check_tables_are_synchronized(instance, table_name); + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) + check_tables_are_synchronized(instance, table_name) def test_quoting_2(started_cluster): - table_name = 'user' + table_name = "user" pg_manager.create_and_fill_postgres_table(table_name) pg_manager.create_materialized_db( - ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - settings=[f"materialized_postgresql_tables_list = '{table_name}'"]) - check_tables_are_synchronized(instance, table_name); + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[f"materialized_postgresql_tables_list = '{table_name}'"], + ) + check_tables_are_synchronized(instance, table_name) def test_user_managed_slots(started_cluster): - slot_name = 'user_slot' - table_name = 'test_table' + slot_name = "user_slot" + table_name = "test_table" pg_manager.create_and_fill_postgres_table(table_name) replication_connection = get_postgres_conn( - ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - database=True, replication=True, auto_commit=True) + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + replication=True, + auto_commit=True, + ) snapshot = create_replication_slot(replication_connection, slot_name=slot_name) pg_manager.create_materialized_db( - 
ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - settings=[f"materialized_postgresql_replication_slot = '{slot_name}'", - f"materialized_postgresql_snapshot = '{snapshot}'"]) - check_tables_are_synchronized(instance, table_name); + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[ + f"materialized_postgresql_replication_slot = '{slot_name}'", + f"materialized_postgresql_snapshot = '{snapshot}'", + ], + ) + check_tables_are_synchronized(instance, table_name) - instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000, 10000)".format(table_name)) - check_tables_are_synchronized(instance, table_name); + instance.query( + "INSERT INTO postgres_database.{} SELECT number, number from numbers(10000, 10000)".format( + table_name + ) + ) + check_tables_are_synchronized(instance, table_name) instance.restart_clickhouse() - instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(20000, 10000)".format(table_name)) - check_tables_are_synchronized(instance, table_name); + instance.query( + "INSERT INTO postgres_database.{} SELECT number, number from numbers(20000, 10000)".format( + table_name + ) + ) + check_tables_are_synchronized(instance, table_name) pg_manager.drop_materialized_db() drop_replication_slot(replication_connection, slot_name) replication_connection.close() -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_postgresql_replica_database_engine_2/test.py b/tests/integration/test_postgresql_replica_database_engine_2/test.py index 0115988222c..2fcd0374fa9 100644 --- a/tests/integration/test_postgresql_replica_database_engine_2/test.py +++ b/tests/integration/test_postgresql_replica_database_engine_2/test.py @@ -18,20 +18,32 @@ from helpers.postgres_utility import PostgresManager from helpers.postgres_utility import create_replication_slot, drop_replication_slot from helpers.postgres_utility import create_postgres_schema, drop_postgres_schema from helpers.postgres_utility import create_postgres_table, drop_postgres_table -from helpers.postgres_utility import create_postgres_table_with_schema, drop_postgres_table_with_schema +from helpers.postgres_utility import ( + create_postgres_table_with_schema, + drop_postgres_table_with_schema, +) from helpers.postgres_utility import check_tables_are_synchronized from helpers.postgres_utility import check_several_tables_are_synchronized from helpers.postgres_utility import assert_nested_table_is_created from helpers.postgres_utility import assert_number_of_columns -from helpers.postgres_utility import postgres_table_template, postgres_table_template_2, postgres_table_template_3, postgres_table_template_4, postgres_table_template_5 +from helpers.postgres_utility import ( + postgres_table_template, + postgres_table_template_2, + postgres_table_template_3, + postgres_table_template_4, + postgres_table_template_5, +) from helpers.postgres_utility import queries cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', - main_configs = ['configs/log_conf.xml'], - user_configs = ['configs/users.xml'], - with_postgres=True, stay_alive=True) +instance = cluster.add_instance( + "instance", + main_configs=["configs/log_conf.xml"], + user_configs=["configs/users.xml"], + with_postgres=True, + stay_alive=True, +) pg_manager = PostgresManager() @@ -56,363 +68,586 @@ def setup_teardown(): def 
test_add_new_table_to_replication(started_cluster): cursor = pg_manager.get_db_cursor() - cursor.execute('DROP TABLE IF EXISTS test_table') + cursor.execute("DROP TABLE IF EXISTS test_table") NUM_TABLES = 5 pg_manager.create_and_fill_postgres_tables_from_cursor(cursor, NUM_TABLES, 10000) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) check_several_tables_are_synchronized(instance, NUM_TABLES) result = instance.query("SHOW TABLES FROM test_database") - assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n") + assert ( + result + == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n" + ) - table_name = 'postgresql_replica_5' + table_name = "postgresql_replica_5" pg_manager.create_and_fill_postgres_table_from_cursor(cursor, table_name) - result = instance.query('SHOW CREATE DATABASE test_database') - assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") # Check without ip - assert(result[-59:] == "\\'postgres_database\\', \\'postgres\\', \\'mysecretpassword\\')\n") + result = instance.query("SHOW CREATE DATABASE test_database") + assert ( + result[:63] + == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(" + ) # Check without ip + assert ( + result[-59:] + == "\\'postgres_database\\', \\'postgres\\', \\'mysecretpassword\\')\n" + ) - result = instance.query_and_get_error("ALTER DATABASE test_database MODIFY SETTING materialized_postgresql_tables_list='tabl1'") - assert('Changing setting `materialized_postgresql_tables_list` is not allowed' in result) + result = instance.query_and_get_error( + "ALTER DATABASE test_database MODIFY SETTING materialized_postgresql_tables_list='tabl1'" + ) + assert ( + "Changing setting `materialized_postgresql_tables_list` is not allowed" + in result + ) - result = instance.query_and_get_error("ALTER DATABASE test_database MODIFY SETTING materialized_postgresql_tables='tabl1'") - assert('Database engine MaterializedPostgreSQL does not support setting' in result) + result = instance.query_and_get_error( + "ALTER DATABASE test_database MODIFY SETTING materialized_postgresql_tables='tabl1'" + ) + assert "Database engine MaterializedPostgreSQL does not support setting" in result - instance.query(f"ATTACH TABLE test_database.{table_name}"); + instance.query(f"ATTACH TABLE test_database.{table_name}") result = instance.query("SHOW TABLES FROM test_database") - assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\npostgresql_replica_5\n") + assert ( + result + == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\npostgresql_replica_5\n" + ) - check_tables_are_synchronized(instance, table_name); - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(10000, 10000)") - check_tables_are_synchronized(instance, table_name); + check_tables_are_synchronized(instance, table_name) + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(10000, 10000)" + ) + check_tables_are_synchronized(instance, table_name) - result = instance.query_and_get_error(f"ATTACH TABLE test_database.{table_name}"); - assert('Table 
test_database.postgresql_replica_5 already exists' in result) + result = instance.query_and_get_error(f"ATTACH TABLE test_database.{table_name}") + assert "Table test_database.postgresql_replica_5 already exists" in result - result = instance.query_and_get_error("ATTACH TABLE test_database.unknown_table"); - assert('PostgreSQL table unknown_table does not exist' in result) + result = instance.query_and_get_error("ATTACH TABLE test_database.unknown_table") + assert "PostgreSQL table unknown_table does not exist" in result - result = instance.query('SHOW CREATE DATABASE test_database') - assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") - assert(result[-180:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4,postgresql_replica_5\\'\n") + result = instance.query("SHOW CREATE DATABASE test_database") + assert ( + result[:63] + == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(" + ) + assert ( + result[-180:] + == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4,postgresql_replica_5\\'\n" + ) - table_name = 'postgresql_replica_6' + table_name = "postgresql_replica_6" create_postgres_table(cursor, table_name) - instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name)) - instance.query(f"ATTACH TABLE test_database.{table_name}"); + instance.query( + "INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format( + table_name + ) + ) + instance.query(f"ATTACH TABLE test_database.{table_name}") instance.restart_clickhouse() - table_name = 'postgresql_replica_7' + table_name = "postgresql_replica_7" create_postgres_table(cursor, table_name) - instance.query("INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format(table_name)) - instance.query(f"ATTACH TABLE test_database.{table_name}"); + instance.query( + "INSERT INTO postgres_database.{} SELECT number, number from numbers(10000)".format( + table_name + ) + ) + instance.query(f"ATTACH TABLE test_database.{table_name}") - result = instance.query('SHOW CREATE DATABASE test_database') - assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") - assert(result[-222:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4,postgresql_replica_5,postgresql_replica_6,postgresql_replica_7\\'\n") + result = instance.query("SHOW CREATE DATABASE test_database") + assert ( + result[:63] + == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(" + ) + assert ( + result[-222:] + == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4,postgresql_replica_5,postgresql_replica_6,postgresql_replica_7\\'\n" + ) - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(10000, 10000)") + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(10000, 10000)" + ) result = instance.query("SHOW TABLES FROM test_database") - assert(result == 
"postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\npostgresql_replica_5\npostgresql_replica_6\npostgresql_replica_7\n") + assert ( + result + == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\npostgresql_replica_5\npostgresql_replica_6\npostgresql_replica_7\n" + ) check_several_tables_are_synchronized(instance, NUM_TABLES + 3) def test_remove_table_from_replication(started_cluster): NUM_TABLES = 5 pg_manager.create_and_fill_postgres_tables(NUM_TABLES, 10000) - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) check_several_tables_are_synchronized(instance, NUM_TABLES) result = instance.query("SHOW TABLES FROM test_database") - assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n") + assert ( + result + == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n" + ) - result = instance.query('SHOW CREATE DATABASE test_database') - assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") - assert(result[-59:] == "\\'postgres_database\\', \\'postgres\\', \\'mysecretpassword\\')\n") + result = instance.query("SHOW CREATE DATABASE test_database") + assert ( + result[:63] + == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(" + ) + assert ( + result[-59:] + == "\\'postgres_database\\', \\'postgres\\', \\'mysecretpassword\\')\n" + ) - table_name = 'postgresql_replica_4' - instance.query(f'DETACH TABLE test_database.{table_name} PERMANENTLY'); - result = instance.query_and_get_error(f'SELECT * FROM test_database.{table_name}') - assert("doesn't exist" in result) + table_name = "postgresql_replica_4" + instance.query(f"DETACH TABLE test_database.{table_name} PERMANENTLY") + result = instance.query_and_get_error(f"SELECT * FROM test_database.{table_name}") + assert "doesn't exist" in result result = instance.query("SHOW TABLES FROM test_database") - assert(result == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\n") + assert ( + result + == "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\n" + ) - result = instance.query('SHOW CREATE DATABASE test_database') - assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") - assert(result[-138:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3\\'\n") + result = instance.query("SHOW CREATE DATABASE test_database") + assert ( + result[:63] + == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(" + ) + assert ( + result[-138:] + == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3\\'\n" + ) - instance.query(f'ATTACH TABLE test_database.{table_name}'); - check_tables_are_synchronized(instance, table_name); + instance.query(f"ATTACH TABLE test_database.{table_name}") + check_tables_are_synchronized(instance, table_name) check_several_tables_are_synchronized(instance, NUM_TABLES) - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(10000, 10000)") - 
check_tables_are_synchronized(instance, table_name); + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, number from numbers(10000, 10000)" + ) + check_tables_are_synchronized(instance, table_name) - result = instance.query('SHOW CREATE DATABASE test_database') - assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") - assert(result[-159:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4\\'\n") + result = instance.query("SHOW CREATE DATABASE test_database") + assert ( + result[:63] + == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(" + ) + assert ( + result[-159:] + == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_1,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4\\'\n" + ) - table_name = 'postgresql_replica_1' - instance.query(f'DETACH TABLE test_database.{table_name} PERMANENTLY'); - result = instance.query('SHOW CREATE DATABASE test_database') - assert(result[:63] == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(") - assert(result[-138:] == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4\\'\n") + table_name = "postgresql_replica_1" + instance.query(f"DETACH TABLE test_database.{table_name} PERMANENTLY") + result = instance.query("SHOW CREATE DATABASE test_database") + assert ( + result[:63] + == "CREATE DATABASE test_database\\nENGINE = MaterializedPostgreSQL(" + ) + assert ( + result[-138:] + == ")\\nSETTINGS materialized_postgresql_tables_list = \\'postgresql_replica_0,postgresql_replica_2,postgresql_replica_3,postgresql_replica_4\\'\n" + ) cursor = pg_manager.get_db_cursor() - cursor.execute(f'drop table if exists postgresql_replica_0;') + cursor.execute(f"drop table if exists postgresql_replica_0;") # Removing from replication table which does not exist in PostgreSQL must be ok. 
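
For context, this is the behaviour the next few lines exercise: detaching a table that has already been dropped on the PostgreSQL side must succeed, and the engine only records in the log that the table is gone from the publication. A minimal sketch of that pattern (illustrative only, not part of the diff; `instance` and the `test_database` name stand in for the fixtures used in these tests):

    def detach_table_missing_in_postgres(instance, table_name):
        # DETACH ... PERMANENTLY removes the table from replication even though
        # it no longer exists in PostgreSQL; this should not raise.
        instance.query(f"DETACH TABLE test_database.{table_name} PERMANENTLY")
        # The only trace is a log line about removing it from the publication.
        assert instance.contains_in_log(
            "from publication, because table does not exist in PostgreSQL"
        )
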
- instance.query('DETACH TABLE test_database.postgresql_replica_0 PERMANENTLY'); - assert instance.contains_in_log("from publication, because table does not exist in PostgreSQL") + instance.query("DETACH TABLE test_database.postgresql_replica_0 PERMANENTLY") + assert instance.contains_in_log( + "from publication, because table does not exist in PostgreSQL" + ) def test_predefined_connection_configuration(started_cluster): cursor = pg_manager.get_db_cursor() - cursor.execute(f'DROP TABLE IF EXISTS test_table') - cursor.execute(f'CREATE TABLE test_table (key integer PRIMARY KEY, value integer)') - cursor.execute(f'INSERT INTO test_table SELECT 1, 2') - instance.query("CREATE DATABASE test_database ENGINE = MaterializedPostgreSQL(postgres1) SETTINGS materialized_postgresql_tables_list='test_table'") - check_tables_are_synchronized(instance, "test_table"); + cursor.execute(f"DROP TABLE IF EXISTS test_table") + cursor.execute(f"CREATE TABLE test_table (key integer PRIMARY KEY, value integer)") + cursor.execute(f"INSERT INTO test_table SELECT 1, 2") + instance.query( + "CREATE DATABASE test_database ENGINE = MaterializedPostgreSQL(postgres1) SETTINGS materialized_postgresql_tables_list='test_table'" + ) + check_tables_are_synchronized(instance, "test_table") pg_manager.drop_materialized_db() insert_counter = 0 + def test_database_with_single_non_default_schema(started_cluster): cursor = pg_manager.get_db_cursor() - NUM_TABLES=5 - schema_name = 'test_schema' - materialized_db = 'test_database' - clickhouse_postgres_db = 'postgres_database_with_schema' + NUM_TABLES = 5 + schema_name = "test_schema" + materialized_db = "test_database" + clickhouse_postgres_db = "postgres_database_with_schema" global insert_counter insert_counter = 0 def insert_into_tables(): global insert_counter - clickhouse_postgres_db = 'postgres_database_with_schema' + clickhouse_postgres_db = "postgres_database_with_schema" for i in range(NUM_TABLES): - table_name = f'postgresql_replica_{i}' - instance.query(f"INSERT INTO {clickhouse_postgres_db}.{table_name} SELECT number, number from numbers(1000 * {insert_counter}, 1000)") + table_name = f"postgresql_replica_{i}" + instance.query( + f"INSERT INTO {clickhouse_postgres_db}.{table_name} SELECT number, number from numbers(1000 * {insert_counter}, 1000)" + ) insert_counter += 1 def assert_show_tables(expected): - result = instance.query('SHOW TABLES FROM test_database') - assert(result == expected) - print('assert show tables Ok') + result = instance.query("SHOW TABLES FROM test_database") + assert result == expected + print("assert show tables Ok") def check_all_tables_are_synchronized(): for i in range(NUM_TABLES): - print('checking table', i) - check_tables_are_synchronized(instance, f"postgresql_replica_{i}", postgres_database=clickhouse_postgres_db); - print('synchronization Ok') + print("checking table", i) + check_tables_are_synchronized( + instance, + f"postgresql_replica_{i}", + postgres_database=clickhouse_postgres_db, + ) + print("synchronization Ok") create_postgres_schema(cursor, schema_name) - pg_manager.create_clickhouse_postgres_db(ip=cluster.postgres_ip, port=cluster.postgres_port, name=clickhouse_postgres_db, schema_name=schema_name) + pg_manager.create_clickhouse_postgres_db( + ip=cluster.postgres_ip, + port=cluster.postgres_port, + name=clickhouse_postgres_db, + schema_name=schema_name, + ) for i in range(NUM_TABLES): - create_postgres_table_with_schema(cursor, schema_name, f'postgresql_replica_{i}'); + create_postgres_table_with_schema( + cursor, 
schema_name, f"postgresql_replica_{i}" + ) insert_into_tables() - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - settings=[f"materialized_postgresql_schema = '{schema_name}'", "materialized_postgresql_allow_automatic_update = 1"]) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[ + f"materialized_postgresql_schema = '{schema_name}'", + "materialized_postgresql_allow_automatic_update = 1", + ], + ) insert_into_tables() check_all_tables_are_synchronized() - assert_show_tables("postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n") + assert_show_tables( + "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n" + ) instance.restart_clickhouse() check_all_tables_are_synchronized() - assert_show_tables("postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n") + assert_show_tables( + "postgresql_replica_0\npostgresql_replica_1\npostgresql_replica_2\npostgresql_replica_3\npostgresql_replica_4\n" + ) insert_into_tables() check_all_tables_are_synchronized() - print('ALTER') - altered_table = random.randint(0, NUM_TABLES-1) - cursor.execute("ALTER TABLE test_schema.postgresql_replica_{} ADD COLUMN value2 integer".format(altered_table)) + print("ALTER") + altered_table = random.randint(0, NUM_TABLES - 1) + cursor.execute( + "ALTER TABLE test_schema.postgresql_replica_{} ADD COLUMN value2 integer".format( + altered_table + ) + ) - instance.query(f"INSERT INTO {clickhouse_postgres_db}.postgresql_replica_{altered_table} SELECT number, number, number from numbers(5000, 1000)") - assert_number_of_columns(instance, 3, f'postgresql_replica_{altered_table}') - check_tables_are_synchronized(instance, f"postgresql_replica_{altered_table}", postgres_database=clickhouse_postgres_db); + instance.query( + f"INSERT INTO {clickhouse_postgres_db}.postgresql_replica_{altered_table} SELECT number, number, number from numbers(5000, 1000)" + ) + assert_number_of_columns(instance, 3, f"postgresql_replica_{altered_table}") + check_tables_are_synchronized( + instance, + f"postgresql_replica_{altered_table}", + postgres_database=clickhouse_postgres_db, + ) - print('DETACH-ATTACH') + print("DETACH-ATTACH") detached_table_name = "postgresql_replica_1" instance.query(f"DETACH TABLE {materialized_db}.{detached_table_name} PERMANENTLY") - assert not instance.contains_in_log("from publication, because table does not exist in PostgreSQL") + assert not instance.contains_in_log( + "from publication, because table does not exist in PostgreSQL" + ) instance.query(f"ATTACH TABLE {materialized_db}.{detached_table_name}") - check_tables_are_synchronized(instance, detached_table_name, postgres_database=clickhouse_postgres_db); + check_tables_are_synchronized( + instance, detached_table_name, postgres_database=clickhouse_postgres_db + ) def test_database_with_multiple_non_default_schemas_1(started_cluster): cursor = pg_manager.get_db_cursor() NUM_TABLES = 5 - schema_name = 'test_schema' - clickhouse_postgres_db = 'postgres_database_with_schema' - materialized_db = 'test_database' - publication_tables = '' + schema_name = "test_schema" + clickhouse_postgres_db = "postgres_database_with_schema" + materialized_db = "test_database" + publication_tables = "" global insert_counter insert_counter = 0 def insert_into_tables(): global insert_counter - 
clickhouse_postgres_db = 'postgres_database_with_schema' + clickhouse_postgres_db = "postgres_database_with_schema" for i in range(NUM_TABLES): - table_name = f'postgresql_replica_{i}' - instance.query(f"INSERT INTO {clickhouse_postgres_db}.{table_name} SELECT number, number from numbers(1000 * {insert_counter}, 1000)") + table_name = f"postgresql_replica_{i}" + instance.query( + f"INSERT INTO {clickhouse_postgres_db}.{table_name} SELECT number, number from numbers(1000 * {insert_counter}, 1000)" + ) insert_counter += 1 def assert_show_tables(expected): - result = instance.query('SHOW TABLES FROM test_database') - assert(result == expected) - print('assert show tables Ok') + result = instance.query("SHOW TABLES FROM test_database") + assert result == expected + print("assert show tables Ok") def check_all_tables_are_synchronized(): for i in range(NUM_TABLES): - print('checking table', i) - check_tables_are_synchronized(instance, "postgresql_replica_{}".format(i), schema_name=schema_name, postgres_database=clickhouse_postgres_db); - print('synchronization Ok') + print("checking table", i) + check_tables_are_synchronized( + instance, + "postgresql_replica_{}".format(i), + schema_name=schema_name, + postgres_database=clickhouse_postgres_db, + ) + print("synchronization Ok") create_postgres_schema(cursor, schema_name) - pg_manager.create_clickhouse_postgres_db(ip=cluster.postgres_ip, port=cluster.postgres_port, name=clickhouse_postgres_db, schema_name=schema_name) + pg_manager.create_clickhouse_postgres_db( + ip=cluster.postgres_ip, + port=cluster.postgres_port, + name=clickhouse_postgres_db, + schema_name=schema_name, + ) for i in range(NUM_TABLES): - table_name = 'postgresql_replica_{}'.format(i) - create_postgres_table_with_schema(cursor, schema_name, table_name); - if publication_tables != '': - publication_tables += ', ' - publication_tables += schema_name + '.' + table_name + table_name = "postgresql_replica_{}".format(i) + create_postgres_table_with_schema(cursor, schema_name, table_name) + if publication_tables != "": + publication_tables += ", " + publication_tables += schema_name + "." 
+ table_name insert_into_tables() - pg_manager.create_materialized_db(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - settings=[f"materialized_postgresql_tables_list = '{publication_tables}'", "materialized_postgresql_tables_list_with_schema=1", "materialized_postgresql_allow_automatic_update = 1"]) + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[ + f"materialized_postgresql_tables_list = '{publication_tables}'", + "materialized_postgresql_tables_list_with_schema=1", + "materialized_postgresql_allow_automatic_update = 1", + ], + ) check_all_tables_are_synchronized() - assert_show_tables("test_schema.postgresql_replica_0\ntest_schema.postgresql_replica_1\ntest_schema.postgresql_replica_2\ntest_schema.postgresql_replica_3\ntest_schema.postgresql_replica_4\n") + assert_show_tables( + "test_schema.postgresql_replica_0\ntest_schema.postgresql_replica_1\ntest_schema.postgresql_replica_2\ntest_schema.postgresql_replica_3\ntest_schema.postgresql_replica_4\n" + ) instance.restart_clickhouse() check_all_tables_are_synchronized() - assert_show_tables("test_schema.postgresql_replica_0\ntest_schema.postgresql_replica_1\ntest_schema.postgresql_replica_2\ntest_schema.postgresql_replica_3\ntest_schema.postgresql_replica_4\n") + assert_show_tables( + "test_schema.postgresql_replica_0\ntest_schema.postgresql_replica_1\ntest_schema.postgresql_replica_2\ntest_schema.postgresql_replica_3\ntest_schema.postgresql_replica_4\n" + ) insert_into_tables() check_all_tables_are_synchronized() - print('ALTER') - altered_table = random.randint(0, NUM_TABLES-1) - cursor.execute("ALTER TABLE test_schema.postgresql_replica_{} ADD COLUMN value2 integer".format(altered_table)) + print("ALTER") + altered_table = random.randint(0, NUM_TABLES - 1) + cursor.execute( + "ALTER TABLE test_schema.postgresql_replica_{} ADD COLUMN value2 integer".format( + altered_table + ) + ) - instance.query(f"INSERT INTO {clickhouse_postgres_db}.postgresql_replica_{altered_table} SELECT number, number, number from numbers(5000, 1000)") - assert_number_of_columns(instance, 3, f'{schema_name}.postgresql_replica_{altered_table}') - check_tables_are_synchronized(instance, f"postgresql_replica_{altered_table}", schema_name=schema_name, postgres_database=clickhouse_postgres_db); + instance.query( + f"INSERT INTO {clickhouse_postgres_db}.postgresql_replica_{altered_table} SELECT number, number, number from numbers(5000, 1000)" + ) + assert_number_of_columns( + instance, 3, f"{schema_name}.postgresql_replica_{altered_table}" + ) + check_tables_are_synchronized( + instance, + f"postgresql_replica_{altered_table}", + schema_name=schema_name, + postgres_database=clickhouse_postgres_db, + ) - print('DETACH-ATTACH') + print("DETACH-ATTACH") detached_table_name = "postgresql_replica_1" - instance.query(f"DETACH TABLE {materialized_db}.`{schema_name}.{detached_table_name}` PERMANENTLY") - assert not instance.contains_in_log("from publication, because table does not exist in PostgreSQL") - instance.query(f"ATTACH TABLE {materialized_db}.`{schema_name}.{detached_table_name}`") - assert_show_tables("test_schema.postgresql_replica_0\ntest_schema.postgresql_replica_1\ntest_schema.postgresql_replica_2\ntest_schema.postgresql_replica_3\ntest_schema.postgresql_replica_4\n") - check_tables_are_synchronized(instance, detached_table_name, schema_name=schema_name, postgres_database=clickhouse_postgres_db); + instance.query( + f"DETACH TABLE 
{materialized_db}.`{schema_name}.{detached_table_name}` PERMANENTLY" + ) + assert not instance.contains_in_log( + "from publication, because table does not exist in PostgreSQL" + ) + instance.query( + f"ATTACH TABLE {materialized_db}.`{schema_name}.{detached_table_name}`" + ) + assert_show_tables( + "test_schema.postgresql_replica_0\ntest_schema.postgresql_replica_1\ntest_schema.postgresql_replica_2\ntest_schema.postgresql_replica_3\ntest_schema.postgresql_replica_4\n" + ) + check_tables_are_synchronized( + instance, + detached_table_name, + schema_name=schema_name, + postgres_database=clickhouse_postgres_db, + ) def test_database_with_multiple_non_default_schemas_2(started_cluster): cursor = pg_manager.get_db_cursor() NUM_TABLES = 2 schemas_num = 2 - schema_list = 'schema0, schema1' - materialized_db = 'test_database' + schema_list = "schema0, schema1" + materialized_db = "test_database" global insert_counter insert_counter = 0 def check_all_tables_are_synchronized(): for i in range(schemas_num): - schema_name = f'schema{i}' - clickhouse_postgres_db = f'clickhouse_postgres_db{i}' + schema_name = f"schema{i}" + clickhouse_postgres_db = f"clickhouse_postgres_db{i}" for ti in range(NUM_TABLES): - table_name = f'postgresql_replica_{ti}' - print(f'checking table {schema_name}.{table_name}') - check_tables_are_synchronized(instance, f'{table_name}', schema_name=schema_name, postgres_database=clickhouse_postgres_db); - print('synchronized Ok') + table_name = f"postgresql_replica_{ti}" + print(f"checking table {schema_name}.{table_name}") + check_tables_are_synchronized( + instance, + f"{table_name}", + schema_name=schema_name, + postgres_database=clickhouse_postgres_db, + ) + print("synchronized Ok") def insert_into_tables(): global insert_counter for i in range(schemas_num): - clickhouse_postgres_db = f'clickhouse_postgres_db{i}' + clickhouse_postgres_db = f"clickhouse_postgres_db{i}" for ti in range(NUM_TABLES): - table_name = f'postgresql_replica_{ti}' - instance.query(f'INSERT INTO {clickhouse_postgres_db}.{table_name} SELECT number, number from numbers(1000 * {insert_counter}, 1000)') + table_name = f"postgresql_replica_{ti}" + instance.query( + f"INSERT INTO {clickhouse_postgres_db}.{table_name} SELECT number, number from numbers(1000 * {insert_counter}, 1000)" + ) insert_counter += 1 def assert_show_tables(expected): - result = instance.query('SHOW TABLES FROM test_database') - assert(result == expected) - print('assert show tables Ok') + result = instance.query("SHOW TABLES FROM test_database") + assert result == expected + print("assert show tables Ok") for i in range(schemas_num): - schema_name = f'schema{i}' - clickhouse_postgres_db = f'clickhouse_postgres_db{i}' + schema_name = f"schema{i}" + clickhouse_postgres_db = f"clickhouse_postgres_db{i}" create_postgres_schema(cursor, schema_name) - pg_manager.create_clickhouse_postgres_db(ip=cluster.postgres_ip, port=cluster.postgres_port, name=clickhouse_postgres_db, schema_name=schema_name) + pg_manager.create_clickhouse_postgres_db( + ip=cluster.postgres_ip, + port=cluster.postgres_port, + name=clickhouse_postgres_db, + schema_name=schema_name, + ) for ti in range(NUM_TABLES): - table_name = f'postgresql_replica_{ti}' - create_postgres_table_with_schema(cursor, schema_name, table_name); + table_name = f"postgresql_replica_{ti}" + create_postgres_table_with_schema(cursor, schema_name, table_name) insert_into_tables() pg_manager.create_materialized_db( - ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - 
settings=[f"materialized_postgresql_schema_list = '{schema_list}'", - "materialized_postgresql_allow_automatic_update = 1"]) + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[ + f"materialized_postgresql_schema_list = '{schema_list}'", + "materialized_postgresql_allow_automatic_update = 1", + ], + ) check_all_tables_are_synchronized() insert_into_tables() - assert_show_tables("schema0.postgresql_replica_0\nschema0.postgresql_replica_1\nschema1.postgresql_replica_0\nschema1.postgresql_replica_1\n") + assert_show_tables( + "schema0.postgresql_replica_0\nschema0.postgresql_replica_1\nschema1.postgresql_replica_0\nschema1.postgresql_replica_1\n" + ) instance.restart_clickhouse() - assert_show_tables("schema0.postgresql_replica_0\nschema0.postgresql_replica_1\nschema1.postgresql_replica_0\nschema1.postgresql_replica_1\n") + assert_show_tables( + "schema0.postgresql_replica_0\nschema0.postgresql_replica_1\nschema1.postgresql_replica_0\nschema1.postgresql_replica_1\n" + ) check_all_tables_are_synchronized() insert_into_tables() check_all_tables_are_synchronized() - print('ALTER') - altered_schema = random.randint(0, schemas_num-1) - altered_table = random.randint(0, NUM_TABLES-1) - clickhouse_postgres_db = f'clickhouse_postgres_db{altered_schema}' - cursor.execute(f"ALTER TABLE schema{altered_schema}.postgresql_replica_{altered_table} ADD COLUMN value2 integer") + print("ALTER") + altered_schema = random.randint(0, schemas_num - 1) + altered_table = random.randint(0, NUM_TABLES - 1) + clickhouse_postgres_db = f"clickhouse_postgres_db{altered_schema}" + cursor.execute( + f"ALTER TABLE schema{altered_schema}.postgresql_replica_{altered_table} ADD COLUMN value2 integer" + ) - instance.query(f"INSERT INTO clickhouse_postgres_db{altered_schema}.postgresql_replica_{altered_table} SELECT number, number, number from numbers(1000 * {insert_counter}, 1000)") - assert_number_of_columns(instance, 3, f'schema{altered_schema}.postgresql_replica_{altered_table}') - check_tables_are_synchronized(instance, f"postgresql_replica_{altered_table}", schema_name=f"schema{altered_schema}", postgres_database=clickhouse_postgres_db); + instance.query( + f"INSERT INTO clickhouse_postgres_db{altered_schema}.postgresql_replica_{altered_table} SELECT number, number, number from numbers(1000 * {insert_counter}, 1000)" + ) + assert_number_of_columns( + instance, 3, f"schema{altered_schema}.postgresql_replica_{altered_table}" + ) + check_tables_are_synchronized( + instance, + f"postgresql_replica_{altered_table}", + schema_name=f"schema{altered_schema}", + postgres_database=clickhouse_postgres_db, + ) - print('DETACH-ATTACH') + print("DETACH-ATTACH") detached_table_name = "postgresql_replica_1" detached_table_schema = "schema0" - clickhouse_postgres_db = f'clickhouse_postgres_db0' - instance.query(f"DETACH TABLE {materialized_db}.`{detached_table_schema}.{detached_table_name}` PERMANENTLY") - assert not instance.contains_in_log("from publication, because table does not exist in PostgreSQL") - instance.query(f"ATTACH TABLE {materialized_db}.`{detached_table_schema}.{detached_table_name}`") - assert_show_tables("schema0.postgresql_replica_0\nschema0.postgresql_replica_1\nschema1.postgresql_replica_0\nschema1.postgresql_replica_1\n") - check_tables_are_synchronized(instance, f"postgresql_replica_{altered_table}", schema_name=detached_table_schema, postgres_database=clickhouse_postgres_db); + clickhouse_postgres_db = f"clickhouse_postgres_db0" + instance.query( + f"DETACH TABLE 
{materialized_db}.`{detached_table_schema}.{detached_table_name}` PERMANENTLY" + ) + assert not instance.contains_in_log( + "from publication, because table does not exist in PostgreSQL" + ) + instance.query( + f"ATTACH TABLE {materialized_db}.`{detached_table_schema}.{detached_table_name}`" + ) + assert_show_tables( + "schema0.postgresql_replica_0\nschema0.postgresql_replica_1\nschema1.postgresql_replica_0\nschema1.postgresql_replica_1\n" + ) + check_tables_are_synchronized( + instance, + f"postgresql_replica_{altered_table}", + schema_name=detached_table_schema, + postgres_database=clickhouse_postgres_db, + ) def test_table_override(started_cluster): cursor = pg_manager.get_db_cursor() - table_name = 'table_override' - materialized_database = 'test_database' - create_postgres_table(cursor, table_name, template=postgres_table_template_5); - instance.query(f"create table {table_name}(key Int32, value UUID) engine = PostgreSQL (postgres1, table={table_name})") - instance.query(f"insert into {table_name} select number, generateUUIDv4() from numbers(10)") + table_name = "table_override" + materialized_database = "test_database" + create_postgres_table(cursor, table_name, template=postgres_table_template_5) + instance.query( + f"create table {table_name}(key Int32, value UUID) engine = PostgreSQL (postgres1, table={table_name})" + ) + instance.query( + f"insert into {table_name} select number, generateUUIDv4() from numbers(10)" + ) table_overrides = f" TABLE OVERRIDE {table_name} (COLUMNS (key Int32, value UUID) PARTITION BY key)" pg_manager.create_materialized_db( - ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, settings=[f"materialized_postgresql_tables_list = '{table_name}'"], - table_overrides=table_overrides) + table_overrides=table_overrides, + ) assert_nested_table_is_created(instance, table_name, materialized_database) result = instance.query(f"show create table {materialized_database}.{table_name}") print(result) expected = "CREATE TABLE test_database.table_override\\n(\\n `key` Int32,\\n `value` UUID,\\n `_sign` Int8() MATERIALIZED 1,\\n `_version` UInt64() MATERIALIZED 1\\n)\\nENGINE = ReplacingMergeTree(_version)\\nPARTITION BY key\\nORDER BY tuple(key)" - assert(result.strip() == expected) + assert result.strip() == expected time.sleep(5) query = f"select * from {materialized_database}.{table_name} order by key" expected = instance.query(f"select * from {table_name} order by key") @@ -424,15 +659,23 @@ def test_table_schema_changes_2(started_cluster): cursor = pg_manager.get_db_cursor() table_name = "test_table" - create_postgres_table(cursor, table_name, template=postgres_table_template_2); - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, number, number, number from numbers(25)") + create_postgres_table(cursor, table_name, template=postgres_table_template_2) + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, number, number, number from numbers(25)" + ) pg_manager.create_materialized_db( - ip=started_cluster.postgres_ip, port=started_cluster.postgres_port, - settings=["materialized_postgresql_allow_automatic_update = 1, materialized_postgresql_tables_list='test_table'"]) + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[ + "materialized_postgresql_allow_automatic_update = 1, materialized_postgresql_tables_list='test_table'" + ], + ) - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT 
number, number, number, number from numbers(25, 25)") - check_tables_are_synchronized(instance, table_name); + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, number, number, number from numbers(25, 25)" + ) + check_tables_are_synchronized(instance, table_name) cursor.execute(f"ALTER TABLE {table_name} DROP COLUMN value1") cursor.execute(f"ALTER TABLE {table_name} DROP COLUMN value2") @@ -442,27 +685,35 @@ def test_table_schema_changes_2(started_cluster): cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN value3 Text") cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN value4 Text") cursor.execute(f"UPDATE {table_name} SET value3 = 'kek' WHERE key%2=0") - check_tables_are_synchronized(instance, table_name); - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, toString(number), toString(number), toString(number), toString(number) from numbers(50, 25)") + check_tables_are_synchronized(instance, table_name) + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, toString(number), toString(number), toString(number), toString(number) from numbers(50, 25)" + ) cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN value5 Integer") cursor.execute(f"ALTER TABLE {table_name} DROP COLUMN value2") - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, toString(number), toString(number), toString(number), number from numbers(75, 25)") - check_tables_are_synchronized(instance, table_name); + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, toString(number), toString(number), toString(number), number from numbers(75, 25)" + ) + check_tables_are_synchronized(instance, table_name) instance.restart_clickhouse() - check_tables_are_synchronized(instance, table_name); + check_tables_are_synchronized(instance, table_name) cursor.execute(f"ALTER TABLE {table_name} DROP COLUMN value5") cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN value5 Text") - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, toString(number), toString(number), toString(number), toString(number) from numbers(100, 25)") - check_tables_are_synchronized(instance, table_name); + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, toString(number), toString(number), toString(number), toString(number) from numbers(100, 25)" + ) + check_tables_are_synchronized(instance, table_name) cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN value6 Text") cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN value7 Integer") cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN value8 Integer") cursor.execute(f"ALTER TABLE {table_name} DROP COLUMN value5") - instance.query(f"INSERT INTO postgres_database.{table_name} SELECT number, toString(number), toString(number), toString(number), toString(number), number, number from numbers(125, 25)") - check_tables_are_synchronized(instance, table_name); + instance.query( + f"INSERT INTO postgres_database.{table_name} SELECT number, toString(number), toString(number), toString(number), toString(number), number, number from numbers(125, 25)" + ) + check_tables_are_synchronized(instance, table_name) -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_profile_events_s3/test.py b/tests/integration/test_profile_events_s3/test.py index 15e2ff97d10..5171ea4ac0e 100644 --- 
a/tests/integration/test_profile_events_s3/test.py +++ b/tests/integration/test_profile_events_s3/test.py @@ -11,8 +11,16 @@ def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", main_configs=["configs/config.d/storage_conf.xml", "configs/log.xml", - "configs/query_log.xml", "configs/ssl_conf.xml"], with_minio=True) + cluster.add_instance( + "node", + main_configs=[ + "configs/config.d/storage_conf.xml", + "configs/log.xml", + "configs/query_log.xml", + "configs/ssl_conf.xml", + ], + with_minio=True, + ) logging.info("Starting cluster...") cluster.start() @@ -41,7 +49,9 @@ init_list = { def get_s3_events(instance): result = init_list.copy() - events = instance.query("SELECT event,value FROM system.events WHERE event LIKE 'S3%'").split("\n") + events = instance.query( + "SELECT event,value FROM system.events WHERE event LIKE 'S3%'" + ).split("\n") for event in events: ev = event.split("\t") if len(ev) == 2: @@ -57,12 +67,15 @@ def get_minio_stat(cluster): "rx_bytes": 0, "tx_bytes": 0, } - stat = requests.get(url="http://{}:{}/minio/prometheus/metrics".format(cluster.minio_ip, cluster.minio_port)).text.split( - "\n") + stat = requests.get( + url="http://{}:{}/minio/prometheus/metrics".format( + cluster.minio_ip, cluster.minio_port + ) + ).text.split("\n") for line in stat: x = re.search("s3_requests_total(\{.*\})?\s(\d+)(\s.*)?", line) if x != None: - y = re.search(".*api=\"(get|list|head|select).*", x.group(1)) + y = re.search('.*api="(get|list|head|select).*', x.group(1)) if y != None: result["get_requests"] += int(x.group(2)) else: @@ -82,12 +95,16 @@ def get_minio_stat(cluster): def get_query_stat(instance, hint): result = init_list.copy() instance.query("SYSTEM FLUSH LOGS") - events = instance.query(''' + events = instance.query( + """ SELECT ProfileEvents.keys, ProfileEvents.values FROM system.query_log ARRAY JOIN ProfileEvents WHERE type != 1 AND query LIKE '%{}%' - '''.format(hint.replace("'", "\\'"))).split("\n") + """.format( + hint.replace("'", "\\'") + ) + ).split("\n") for event in events: ev = event.split("\t") if len(ev) == 2: @@ -99,7 +116,7 @@ def get_query_stat(instance, hint): def get_minio_size(cluster): minio = cluster.minio_client size = 0 - for obj in minio.list_objects(cluster.minio_bucket, 'data/'): + for obj in minio.list_objects(cluster.minio_bucket, "data/"): size += obj.size return size @@ -123,10 +140,14 @@ def test_profile_events(cluster): metrics1 = get_s3_events(instance) minio1 = get_minio_stat(cluster) - assert metrics1["S3ReadRequestsCount"] - metrics0["S3ReadRequestsCount"] == minio1["get_requests"] - minio0[ - "get_requests"] - 1 # 1 from get_minio_size - assert metrics1["S3WriteRequestsCount"] - metrics0["S3WriteRequestsCount"] == minio1["set_requests"] - minio0[ - "set_requests"] + assert ( + metrics1["S3ReadRequestsCount"] - metrics0["S3ReadRequestsCount"] + == minio1["get_requests"] - minio0["get_requests"] - 1 + ) # 1 from get_minio_size + assert ( + metrics1["S3WriteRequestsCount"] - metrics0["S3WriteRequestsCount"] + == minio1["set_requests"] - minio0["set_requests"] + ) stat1 = get_query_stat(instance, query1) for metric in stat1: assert stat1[metric] == metrics1[metric] - metrics0[metric] @@ -139,10 +160,14 @@ def test_profile_events(cluster): metrics2 = get_s3_events(instance) minio2 = get_minio_stat(cluster) - assert metrics2["S3ReadRequestsCount"] - metrics1["S3ReadRequestsCount"] == minio2["get_requests"] - minio1[ - "get_requests"] - 1 # 1 from get_minio_size - assert metrics2["S3WriteRequestsCount"] - 
metrics1["S3WriteRequestsCount"] == minio2["set_requests"] - minio1[ - "set_requests"] + assert ( + metrics2["S3ReadRequestsCount"] - metrics1["S3ReadRequestsCount"] + == minio2["get_requests"] - minio1["get_requests"] - 1 + ) # 1 from get_minio_size + assert ( + metrics2["S3WriteRequestsCount"] - metrics1["S3WriteRequestsCount"] + == minio2["set_requests"] - minio1["set_requests"] + ) stat2 = get_query_stat(instance, query2) for metric in stat2: assert stat2[metric] == metrics2[metric] - metrics1[metric] @@ -154,12 +179,16 @@ def test_profile_events(cluster): metrics3 = get_s3_events(instance) minio3 = get_minio_stat(cluster) - assert metrics3["S3ReadRequestsCount"] - metrics2["S3ReadRequestsCount"] == minio3["get_requests"] - minio2[ - "get_requests"] - assert metrics3["S3WriteRequestsCount"] - metrics2["S3WriteRequestsCount"] == minio3["set_requests"] - minio2[ - "set_requests"] + assert ( + metrics3["S3ReadRequestsCount"] - metrics2["S3ReadRequestsCount"] + == minio3["get_requests"] - minio2["get_requests"] + ) + assert ( + metrics3["S3WriteRequestsCount"] - metrics2["S3WriteRequestsCount"] + == minio3["set_requests"] - minio2["set_requests"] + ) stat3 = get_query_stat(instance, query3) # With async reads profile events are not updated fully because reads are done in a separate thread. - #for metric in stat3: + # for metric in stat3: # print(metric) # assert stat3[metric] == metrics3[metric] - metrics2[metric] diff --git a/tests/integration/test_prometheus_endpoint/test.py b/tests/integration/test_prometheus_endpoint/test.py index 60d9164acd2..cf3d2ff2d98 100644 --- a/tests/integration/test_prometheus_endpoint/test.py +++ b/tests/integration/test_prometheus_endpoint/test.py @@ -1,5 +1,3 @@ - - import re import time @@ -8,7 +6,7 @@ import requests from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/prom_conf.xml']) +node = cluster.add_instance("node", main_configs=["configs/prom_conf.xml"]) @pytest.fixture(scope="module") @@ -30,7 +28,7 @@ def parse_response_line(line): if line.startswith("#"): return {} - match = re.match('^([a-zA-Z_:][a-zA-Z0-9_:]+)(\{.*\})? -?(\d)', line) + match = re.match("^([a-zA-Z_:][a-zA-Z0-9_:]+)(\{.*\})? 
-?(\d)", line) assert match, line name, _, val = match.groups() return {name: int(val)} @@ -39,8 +37,10 @@ def parse_response_line(line): def get_and_check_metrics(retries): while True: try: - response = requests.get("http://{host}:{port}/metrics".format( - host=node.ip_address, port=8001), allow_redirects=False) + response = requests.get( + "http://{host}:{port}/metrics".format(host=node.ip_address, port=8001), + allow_redirects=False, + ) if response.status_code != 200: response.raise_for_status() @@ -54,10 +54,10 @@ def get_and_check_metrics(retries): else: raise - assert response.headers['content-type'].startswith('text/plain') + assert response.headers["content-type"].startswith("text/plain") results = {} - for resp_line in response.text.split('\n'): + for resp_line in response.text.split("\n"): resp_line = resp_line.rstrip() if not resp_line: continue @@ -68,12 +68,12 @@ def get_and_check_metrics(retries): def test_prometheus_endpoint(start_cluster): metrics_dict = get_and_check_metrics(10) - assert metrics_dict['ClickHouseProfileEvents_Query'] >= 0 - prev_query_count = metrics_dict['ClickHouseProfileEvents_Query'] + assert metrics_dict["ClickHouseProfileEvents_Query"] >= 0 + prev_query_count = metrics_dict["ClickHouseProfileEvents_Query"] node.query("SELECT 1") node.query("SELECT 2") node.query("SELECT 3") metrics_dict = get_and_check_metrics(10) - assert metrics_dict['ClickHouseProfileEvents_Query'] >= prev_query_count + 3 + assert metrics_dict["ClickHouseProfileEvents_Query"] >= prev_query_count + 3 diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index 2211333bb26..779d1a69dcc 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -6,20 +6,29 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -zero = cluster.add_instance("zero", user_configs=["configs/users.d/settings.xml"], - main_configs=["configs/config.d/remote_servers.xml"], - macros={"cluster": "anime", "shard": "0", "replica": "zero"}, - with_zookeeper=True) +zero = cluster.add_instance( + "zero", + user_configs=["configs/users.d/settings.xml"], + main_configs=["configs/config.d/remote_servers.xml"], + macros={"cluster": "anime", "shard": "0", "replica": "zero"}, + with_zookeeper=True, +) -first = cluster.add_instance("first", user_configs=["configs/users.d/settings.xml"], - main_configs=["configs/config.d/remote_servers.xml"], - macros={"cluster": "anime", "shard": "0", "replica": "first"}, - with_zookeeper=True) +first = cluster.add_instance( + "first", + user_configs=["configs/users.d/settings.xml"], + main_configs=["configs/config.d/remote_servers.xml"], + macros={"cluster": "anime", "shard": "0", "replica": "first"}, + with_zookeeper=True, +) -second = cluster.add_instance("second", user_configs=["configs/users.d/settings.xml"], - main_configs=["configs/config.d/remote_servers.xml"], - macros={"cluster": "anime", "shard": "0", "replica": "second"}, - with_zookeeper=True) +second = cluster.add_instance( + "second", + user_configs=["configs/users.d/settings.xml"], + main_configs=["configs/config.d/remote_servers.xml"], + macros={"cluster": "anime", "shard": "0", "replica": "second"}, + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -36,45 +45,54 @@ def started_cluster(): def test_simple_add_replica(started_cluster): zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER cluster") - create_query = "CREATE TABLE test_simple " \ - "(a Int8, d Date) " \ - "Engine = 
ReplicatedMergeTree('/clickhouse/tables/{shard}/{table}', '{replica}') " \ - "PARTITION BY d ORDER BY a" + create_query = ( + "CREATE TABLE test_simple " + "(a Int8, d Date) " + "Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{table}', '{replica}') " + "PARTITION BY d ORDER BY a" + ) zero.query(create_query) first.query(create_query) first.query("SYSTEM STOP FETCHES test_simple") - zero.query("INSERT INTO test_simple VALUES (1, '2011-01-01')", settings={'insert_quorum': 1}) + zero.query( + "INSERT INTO test_simple VALUES (1, '2011-01-01')", + settings={"insert_quorum": 1}, + ) - assert '1\t2011-01-01\n' == zero.query("SELECT * from test_simple") - assert '' == first.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") + assert "" == first.query("SELECT * from test_simple") first.query("SYSTEM START FETCHES test_simple") first.query("SYSTEM SYNC REPLICA test_simple", timeout=20) - assert '1\t2011-01-01\n' == zero.query("SELECT * from test_simple") - assert '1\t2011-01-01\n' == first.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == first.query("SELECT * from test_simple") second.query(create_query) second.query("SYSTEM SYNC REPLICA test_simple", timeout=20) - assert '1\t2011-01-01\n' == zero.query("SELECT * from test_simple") - assert '1\t2011-01-01\n' == first.query("SELECT * from test_simple") - assert '1\t2011-01-01\n' == second.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == first.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == second.query("SELECT * from test_simple") zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER cluster") def test_drop_replica_and_achieve_quorum(started_cluster): - zero.query("DROP TABLE IF EXISTS test_drop_replica_and_achieve_quorum ON CLUSTER cluster") + zero.query( + "DROP TABLE IF EXISTS test_drop_replica_and_achieve_quorum ON CLUSTER cluster" + ) - create_query = "CREATE TABLE test_drop_replica_and_achieve_quorum " \ - "(a Int8, d Date) " \ - "Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{table}', '{replica}') " \ - "PARTITION BY d ORDER BY a" + create_query = ( + "CREATE TABLE test_drop_replica_and_achieve_quorum " + "(a Int8, d Date) " + "Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{table}', '{replica}') " + "PARTITION BY d ORDER BY a" + ) print("Create Replicated table with two replicas") zero.query(create_query) @@ -86,14 +104,23 @@ def test_drop_replica_and_achieve_quorum(started_cluster): print("Insert to other replica. This query will fail.") quorum_timeout = zero.query_and_get_error( "INSERT INTO test_drop_replica_and_achieve_quorum(a,d) VALUES (1, '2011-01-01')", - settings={'insert_quorum_timeout': 5000}) + settings={"insert_quorum_timeout": 5000}, + ) assert "Timeout while waiting for quorum" in quorum_timeout, "Query must fail." 
- assert TSV("1\t2011-01-01\n") == TSV(zero.query("SELECT * FROM test_drop_replica_and_achieve_quorum", - settings={'select_sequential_consistency': 0})) + assert TSV("1\t2011-01-01\n") == TSV( + zero.query( + "SELECT * FROM test_drop_replica_and_achieve_quorum", + settings={"select_sequential_consistency": 0}, + ) + ) - assert TSV("") == TSV(zero.query("SELECT * FROM test_drop_replica_and_achieve_quorum", - settings={'select_sequential_consistency': 1})) + assert TSV("") == TSV( + zero.query( + "SELECT * FROM test_drop_replica_and_achieve_quorum", + settings={"select_sequential_consistency": 1}, + ) + ) # TODO:(Mikhaylov) begin; maybe delete this lines. I want clickhouse to fetch parts and update quorum. print("START FETCHES first replica") @@ -110,36 +137,45 @@ def test_drop_replica_and_achieve_quorum(started_cluster): second.query("SYSTEM SYNC REPLICA test_drop_replica_and_achieve_quorum", timeout=20) print("Quorum for previous insert achieved.") - assert TSV("1\t2011-01-01\n") == TSV(second.query("SELECT * FROM test_drop_replica_and_achieve_quorum", - settings={'select_sequential_consistency': 1})) + assert TSV("1\t2011-01-01\n") == TSV( + second.query( + "SELECT * FROM test_drop_replica_and_achieve_quorum", + settings={"select_sequential_consistency": 1}, + ) + ) print("Now we can insert some other data.") - zero.query("INSERT INTO test_drop_replica_and_achieve_quorum(a,d) VALUES (2, '2012-02-02')") + zero.query( + "INSERT INTO test_drop_replica_and_achieve_quorum(a,d) VALUES (2, '2012-02-02')" + ) assert TSV("1\t2011-01-01\n2\t2012-02-02\n") == TSV( - zero.query("SELECT * FROM test_drop_replica_and_achieve_quorum ORDER BY a")) + zero.query("SELECT * FROM test_drop_replica_and_achieve_quorum ORDER BY a") + ) assert TSV("1\t2011-01-01\n2\t2012-02-02\n") == TSV( - first.query("SELECT * FROM test_drop_replica_and_achieve_quorum ORDER BY a")) + first.query("SELECT * FROM test_drop_replica_and_achieve_quorum ORDER BY a") + ) assert TSV("1\t2011-01-01\n2\t2012-02-02\n") == TSV( - second.query("SELECT * FROM test_drop_replica_and_achieve_quorum ORDER BY a")) + second.query("SELECT * FROM test_drop_replica_and_achieve_quorum ORDER BY a") + ) - zero.query("DROP TABLE IF EXISTS test_drop_replica_and_achieve_quorum ON CLUSTER cluster") + zero.query( + "DROP TABLE IF EXISTS test_drop_replica_and_achieve_quorum ON CLUSTER cluster" + ) -@pytest.mark.parametrize( - ('add_new_data'), - [ - False, - True - ] -) +@pytest.mark.parametrize(("add_new_data"), [False, True]) def test_insert_quorum_with_drop_partition(started_cluster, add_new_data): - zero.query("DROP TABLE IF EXISTS test_quorum_insert_with_drop_partition ON CLUSTER cluster") + zero.query( + "DROP TABLE IF EXISTS test_quorum_insert_with_drop_partition ON CLUSTER cluster" + ) - create_query = "CREATE TABLE test_quorum_insert_with_drop_partition ON CLUSTER cluster " \ - "(a Int8, d Date) " \ - "Engine = ReplicatedMergeTree " \ - "PARTITION BY d ORDER BY a " + create_query = ( + "CREATE TABLE test_quorum_insert_with_drop_partition ON CLUSTER cluster " + "(a Int8, d Date) " + "Engine = ReplicatedMergeTree " + "PARTITION BY d ORDER BY a " + ) print("Create Replicated table with three replicas") zero.query(create_query) @@ -148,14 +184,20 @@ def test_insert_quorum_with_drop_partition(started_cluster, add_new_data): first.query("SYSTEM STOP FETCHES test_quorum_insert_with_drop_partition") print("Insert with quorum. 
(zero and second)") - zero.query("INSERT INTO test_quorum_insert_with_drop_partition(a,d) VALUES(1, '2011-01-01')") + zero.query( + "INSERT INTO test_quorum_insert_with_drop_partition(a,d) VALUES(1, '2011-01-01')" + ) print("Drop partition.") - zero.query("ALTER TABLE test_quorum_insert_with_drop_partition DROP PARTITION '2011-01-01'") + zero.query( + "ALTER TABLE test_quorum_insert_with_drop_partition DROP PARTITION '2011-01-01'" + ) - if (add_new_data): + if add_new_data: print("Insert to deleted partition") - zero.query("INSERT INTO test_quorum_insert_with_drop_partition(a,d) VALUES(2, '2011-01-01')") + zero.query( + "INSERT INTO test_quorum_insert_with_drop_partition(a,d) VALUES(2, '2011-01-01')" + ) print("Resume fetches for test_quorum_insert_with_drop_partition at first replica.") first.query("SYSTEM START FETCHES test_quorum_insert_with_drop_partition") @@ -163,43 +205,57 @@ def test_insert_quorum_with_drop_partition(started_cluster, add_new_data): print("Sync first replica with others.") first.query("SYSTEM SYNC REPLICA test_quorum_insert_with_drop_partition") - assert "20110101" not in first.query(""" + assert "20110101" not in first.query( + """ WITH (SELECT toString(uuid) FROM system.tables WHERE name = 'test_quorum_insert_with_drop_partition') AS uuid, '/clickhouse/tables/' || uuid || '/0/quorum/last_part' AS p SELECT * FROM system.zookeeper WHERE path = p FORMAT Vertical - """) + """ + ) print("Select from updated partition.") - if (add_new_data): - assert TSV("2\t2011-01-01\n") == TSV(zero.query("SELECT * FROM test_quorum_insert_with_drop_partition")) - assert TSV("2\t2011-01-01\n") == TSV(second.query("SELECT * FROM test_quorum_insert_with_drop_partition")) + if add_new_data: + assert TSV("2\t2011-01-01\n") == TSV( + zero.query("SELECT * FROM test_quorum_insert_with_drop_partition") + ) + assert TSV("2\t2011-01-01\n") == TSV( + second.query("SELECT * FROM test_quorum_insert_with_drop_partition") + ) else: - assert TSV("") == TSV(zero.query("SELECT * FROM test_quorum_insert_with_drop_partition")) - assert TSV("") == TSV(second.query("SELECT * FROM test_quorum_insert_with_drop_partition")) + assert TSV("") == TSV( + zero.query("SELECT * FROM test_quorum_insert_with_drop_partition") + ) + assert TSV("") == TSV( + second.query("SELECT * FROM test_quorum_insert_with_drop_partition") + ) - zero.query("DROP TABLE IF EXISTS test_quorum_insert_with_drop_partition ON CLUSTER cluster") + zero.query( + "DROP TABLE IF EXISTS test_quorum_insert_with_drop_partition ON CLUSTER cluster" + ) -@pytest.mark.parametrize( - ('add_new_data'), - [ - False, - True - ] -) +@pytest.mark.parametrize(("add_new_data"), [False, True]) def test_insert_quorum_with_move_partition(started_cluster, add_new_data): - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_move_partition_source ON CLUSTER cluster") - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_move_partition_destination ON CLUSTER cluster") + zero.query( + "DROP TABLE IF EXISTS test_insert_quorum_with_move_partition_source ON CLUSTER cluster" + ) + zero.query( + "DROP TABLE IF EXISTS test_insert_quorum_with_move_partition_destination ON CLUSTER cluster" + ) - create_source = "CREATE TABLE test_insert_quorum_with_move_partition_source ON CLUSTER cluster " \ - "(a Int8, d Date) " \ - "Engine = ReplicatedMergeTree " \ - "PARTITION BY d ORDER BY a " + create_source = ( + "CREATE TABLE test_insert_quorum_with_move_partition_source ON CLUSTER cluster " + "(a Int8, d Date) " + "Engine = ReplicatedMergeTree " + "PARTITION BY d ORDER BY a " 
+ ) - create_destination = "CREATE TABLE test_insert_quorum_with_move_partition_destination ON CLUSTER cluster " \ - "(a Int8, d Date) " \ - "Engine = ReplicatedMergeTree " \ - "PARTITION BY d ORDER BY a " + create_destination = ( + "CREATE TABLE test_insert_quorum_with_move_partition_destination ON CLUSTER cluster " + "(a Int8, d Date) " + "Engine = ReplicatedMergeTree " + "PARTITION BY d ORDER BY a " + ) print("Create source Replicated table with three replicas") zero.query(create_source) @@ -207,54 +263,78 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): print("Create destination Replicated table with three replicas") zero.query(create_destination) - print("Stop fetches for test_insert_quorum_with_move_partition_source at first replica.") + print( + "Stop fetches for test_insert_quorum_with_move_partition_source at first replica." + ) first.query("SYSTEM STOP FETCHES test_insert_quorum_with_move_partition_source") print("Insert with quorum. (zero and second)") - zero.query("INSERT INTO test_insert_quorum_with_move_partition_source(a,d) VALUES(1, '2011-01-01')") + zero.query( + "INSERT INTO test_insert_quorum_with_move_partition_source(a,d) VALUES(1, '2011-01-01')" + ) print("Drop partition.") zero.query( - "ALTER TABLE test_insert_quorum_with_move_partition_source MOVE PARTITION '2011-01-01' TO TABLE test_insert_quorum_with_move_partition_destination") + "ALTER TABLE test_insert_quorum_with_move_partition_source MOVE PARTITION '2011-01-01' TO TABLE test_insert_quorum_with_move_partition_destination" + ) - if (add_new_data): + if add_new_data: print("Insert to deleted partition") - zero.query("INSERT INTO test_insert_quorum_with_move_partition_source(a,d) VALUES(2, '2011-01-01')") + zero.query( + "INSERT INTO test_insert_quorum_with_move_partition_source(a,d) VALUES(2, '2011-01-01')" + ) - print("Resume fetches for test_insert_quorum_with_move_partition_source at first replica.") + print( + "Resume fetches for test_insert_quorum_with_move_partition_source at first replica." 
+ ) first.query("SYSTEM START FETCHES test_insert_quorum_with_move_partition_source") print("Sync first replica with others.") first.query("SYSTEM SYNC REPLICA test_insert_quorum_with_move_partition_source") - assert "20110101" not in first.query(""" + assert "20110101" not in first.query( + """ WITH (SELECT toString(uuid) FROM system.tables WHERE name = 'test_insert_quorum_with_move_partition_source') AS uuid, '/clickhouse/tables/' || uuid || '/0/quorum/last_part' AS p SELECT * FROM system.zookeeper WHERE path = p FORMAT Vertical - """) + """ + ) print("Select from updated partition.") - if (add_new_data): - assert TSV("2\t2011-01-01\n") == TSV(zero.query("SELECT * FROM test_insert_quorum_with_move_partition_source")) + if add_new_data: assert TSV("2\t2011-01-01\n") == TSV( - second.query("SELECT * FROM test_insert_quorum_with_move_partition_source")) + zero.query("SELECT * FROM test_insert_quorum_with_move_partition_source") + ) + assert TSV("2\t2011-01-01\n") == TSV( + second.query("SELECT * FROM test_insert_quorum_with_move_partition_source") + ) else: - assert TSV("") == TSV(zero.query("SELECT * FROM test_insert_quorum_with_move_partition_source")) - assert TSV("") == TSV(second.query("SELECT * FROM test_insert_quorum_with_move_partition_source")) + assert TSV("") == TSV( + zero.query("SELECT * FROM test_insert_quorum_with_move_partition_source") + ) + assert TSV("") == TSV( + second.query("SELECT * FROM test_insert_quorum_with_move_partition_source") + ) - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_move_partition_source ON CLUSTER cluster") - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_move_partition_destination ON CLUSTER cluster") + zero.query( + "DROP TABLE IF EXISTS test_insert_quorum_with_move_partition_source ON CLUSTER cluster" + ) + zero.query( + "DROP TABLE IF EXISTS test_insert_quorum_with_move_partition_destination ON CLUSTER cluster" + ) def test_insert_quorum_with_ttl(started_cluster): zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") - create_query = "CREATE TABLE test_insert_quorum_with_ttl " \ - "(a Int8, d Date) " \ - "Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}') " \ - "PARTITION BY d ORDER BY a " \ - "TTL d + INTERVAL 5 second DELETE WHERE toYear(d) = 2011 " \ - "SETTINGS merge_with_ttl_timeout=2 " + create_query = ( + "CREATE TABLE test_insert_quorum_with_ttl " + "(a Int8, d Date) " + "Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}') " + "PARTITION BY d ORDER BY a " + "TTL d + INTERVAL 5 second DELETE WHERE toYear(d) = 2011 " + "SETTINGS merge_with_ttl_timeout=2 " + ) print("Create Replicated table with two replicas") zero.query(create_query) @@ -264,14 +344,22 @@ def test_insert_quorum_with_ttl(started_cluster): first.query("SYSTEM STOP FETCHES test_insert_quorum_with_ttl") print("Insert should fail since it can not reach the quorum.") - quorum_timeout = zero.query_and_get_error("INSERT INTO test_insert_quorum_with_ttl(a,d) VALUES(1, '2011-01-01')", - settings={'insert_quorum_timeout': 5000}) + quorum_timeout = zero.query_and_get_error( + "INSERT INTO test_insert_quorum_with_ttl(a,d) VALUES(1, '2011-01-01')", + settings={"insert_quorum_timeout": 5000}, + ) assert "Timeout while waiting for quorum" in quorum_timeout, "Query must fail." - print("Wait 10 seconds and TTL merge have to be executed. But it won't delete data.") + print( + "Wait 10 seconds and TTL merge have to be executed. But it won't delete data." 
+ ) time.sleep(10) assert TSV("1\t2011-01-01\n") == TSV( - zero.query("SELECT * FROM test_insert_quorum_with_ttl", settings={'select_sequential_consistency': 0})) + zero.query( + "SELECT * FROM test_insert_quorum_with_ttl", + settings={"select_sequential_consistency": 0}, + ) + ) print("Resume fetches for test_insert_quorum_with_ttl at first replica.") first.query("SYSTEM START FETCHES test_insert_quorum_with_ttl") @@ -279,8 +367,10 @@ def test_insert_quorum_with_ttl(started_cluster): print("Sync first replica.") first.query("SYSTEM SYNC REPLICA test_insert_quorum_with_ttl") - zero.query("INSERT INTO test_insert_quorum_with_ttl(a,d) VALUES(1, '2011-01-01')", - settings={'insert_quorum_timeout': 5000}) + zero.query( + "INSERT INTO test_insert_quorum_with_ttl(a,d) VALUES(1, '2011-01-01')", + settings={"insert_quorum_timeout": 5000}, + ) print("Inserts should resume.") zero.query("INSERT INTO test_insert_quorum_with_ttl(a, d) VALUES(2, '2012-02-02')") @@ -290,8 +380,16 @@ def test_insert_quorum_with_ttl(started_cluster): zero.query("SYSTEM SYNC REPLICA test_insert_quorum_with_ttl") assert TSV("2\t2012-02-02\n") == TSV( - first.query("SELECT * FROM test_insert_quorum_with_ttl", settings={'select_sequential_consistency': 0})) + first.query( + "SELECT * FROM test_insert_quorum_with_ttl", + settings={"select_sequential_consistency": 0}, + ) + ) assert TSV("2\t2012-02-02\n") == TSV( - first.query("SELECT * FROM test_insert_quorum_with_ttl", settings={'select_sequential_consistency': 1})) + first.query( + "SELECT * FROM test_insert_quorum_with_ttl", + settings={"select_sequential_consistency": 1}, + ) + ) zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") diff --git a/tests/integration/test_quorum_inserts_parallel/test.py b/tests/integration/test_quorum_inserts_parallel/test.py index c89f1a03df7..99548e37a54 100644 --- a/tests/integration/test_quorum_inserts_parallel/test.py +++ b/tests/integration/test_quorum_inserts_parallel/test.py @@ -14,6 +14,7 @@ node1 = cluster.add_instance("node1", with_zookeeper=True) node2 = cluster.add_instance("node2", with_zookeeper=True) node3 = cluster.add_instance("node3", with_zookeeper=True) + @pytest.fixture(scope="module") def started_cluster(): global cluster @@ -28,12 +29,19 @@ def started_cluster(): def test_parallel_quorum_actually_parallel(started_cluster): settings = {"insert_quorum": "3", "insert_quorum_parallel": "1"} for i, node in enumerate([node1, node2, node3]): - node.query("CREATE TABLE r (a UInt64, b String) ENGINE=ReplicatedMergeTree('/test/r', '{num}') ORDER BY tuple()".format(num=i)) + node.query( + "CREATE TABLE r (a UInt64, b String) ENGINE=ReplicatedMergeTree('/test/r', '{num}') ORDER BY tuple()".format( + num=i + ) + ) p = Pool(10) def long_insert(node): - node.query("INSERT INTO r SELECT number, toString(number) FROM numbers(5) where sleepEachRow(1) == 0", settings=settings) + node.query( + "INSERT INTO r SELECT number, toString(number) FROM numbers(5) where sleepEachRow(1) == 0", + settings=settings, + ) job = p.apply_async(long_insert, (node1,)) @@ -58,19 +66,37 @@ def test_parallel_quorum_actually_parallel(started_cluster): def test_parallel_quorum_actually_quorum(started_cluster): for i, node in enumerate([node1, node2, node3]): - node.query("CREATE TABLE q (a UInt64, b String) ENGINE=ReplicatedMergeTree('/test/q', '{num}') ORDER BY tuple()".format(num=i)) + node.query( + "CREATE TABLE q (a UInt64, b String) ENGINE=ReplicatedMergeTree('/test/q', '{num}') ORDER BY tuple()".format( + num=i + ) + ) with 
PartitionManager() as pm: pm.partition_instances(node2, node1, port=9009) pm.partition_instances(node2, node3, port=9009) with pytest.raises(QueryRuntimeException): - node1.query("INSERT INTO q VALUES(1, 'Hello')", settings={"insert_quorum": "3", "insert_quorum_parallel": "1", "insert_quorum_timeout": "3000"}) + node1.query( + "INSERT INTO q VALUES(1, 'Hello')", + settings={ + "insert_quorum": "3", + "insert_quorum_parallel": "1", + "insert_quorum_timeout": "3000", + }, + ) assert_eq_with_retry(node1, "SELECT COUNT() FROM q", "1") assert_eq_with_retry(node2, "SELECT COUNT() FROM q", "0") assert_eq_with_retry(node3, "SELECT COUNT() FROM q", "1") - node1.query("INSERT INTO q VALUES(2, 'wlrd')", settings={"insert_quorum": "2", "insert_quorum_parallel": "1", "insert_quorum_timeout": "3000"}) + node1.query( + "INSERT INTO q VALUES(2, 'wlrd')", + settings={ + "insert_quorum": "2", + "insert_quorum_parallel": "1", + "insert_quorum_timeout": "3000", + }, + ) assert_eq_with_retry(node1, "SELECT COUNT() FROM q", "2") assert_eq_with_retry(node2, "SELECT COUNT() FROM q", "0") @@ -80,14 +106,38 @@ def test_parallel_quorum_actually_quorum(started_cluster): node.query("INSERT INTO q VALUES(3, 'Hi')", settings=settings) p = Pool(2) - res = p.apply_async(insert_value_to_node, (node1, {"insert_quorum": "3", "insert_quorum_parallel": "1", "insert_quorum_timeout": "60000"})) + res = p.apply_async( + insert_value_to_node, + ( + node1, + { + "insert_quorum": "3", + "insert_quorum_parallel": "1", + "insert_quorum_timeout": "60000", + }, + ), + ) - assert_eq_with_retry(node1, "SELECT COUNT() FROM system.parts WHERE table == 'q' and active == 1", "3") - assert_eq_with_retry(node3, "SELECT COUNT() FROM system.parts WHERE table == 'q' and active == 1", "3") - assert_eq_with_retry(node2, "SELECT COUNT() FROM system.parts WHERE table == 'q' and active == 1", "0") + assert_eq_with_retry( + node1, + "SELECT COUNT() FROM system.parts WHERE table == 'q' and active == 1", + "3", + ) + assert_eq_with_retry( + node3, + "SELECT COUNT() FROM system.parts WHERE table == 'q' and active == 1", + "3", + ) + assert_eq_with_retry( + node2, + "SELECT COUNT() FROM system.parts WHERE table == 'q' and active == 1", + "0", + ) # Insert to the second to satisfy quorum - insert_value_to_node(node2, {"insert_quorum": "3", "insert_quorum_parallel": "1"}) + insert_value_to_node( + node2, {"insert_quorum": "3", "insert_quorum_parallel": "1"} + ) res.get() diff --git a/tests/integration/test_quota/test.py b/tests/integration/test_quota/test.py index 83ee32bd7dd..651726f30c0 100644 --- a/tests/integration/test_quota/test.py +++ b/tests/integration/test_quota/test.py @@ -7,10 +7,15 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry, TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', user_configs=["configs/users.d/assign_myquota_to_default_user.xml", - "configs/users.d/drop_default_quota.xml", - "configs/users.d/myquota.xml", - "configs/users.d/user_with_no_quota.xml"]) +instance = cluster.add_instance( + "instance", + user_configs=[ + "configs/users.d/assign_myquota_to_default_user.xml", + "configs/users.d/drop_default_quota.xml", + "configs/users.d/myquota.xml", + "configs/users.d/user_with_no_quota.xml", + ], +) def check_system_quotas(canonical): @@ -22,16 +27,22 @@ def check_system_quotas(canonical): def system_quota_limits(canonical): canonical_tsv = TSV(canonical) - r = TSV(instance.query("SELECT * FROM system.quota_limits ORDER BY quota_name, duration")) 
+ r = TSV( + instance.query( + "SELECT * FROM system.quota_limits ORDER BY quota_name, duration" + ) + ) print(("system_quota_limits: {},\ncanonical: {}".format(r, TSV(canonical_tsv)))) assert r == canonical_tsv def system_quota_usage(canonical): canonical_tsv = TSV(canonical) - query = "SELECT quota_name, quota_key, duration, queries, max_queries, query_selects, max_query_selects, query_inserts, max_query_inserts, errors, max_errors, result_rows, max_result_rows," \ - "result_bytes, max_result_bytes, read_rows, max_read_rows, read_bytes, max_read_bytes, max_execution_time " \ - "FROM system.quota_usage ORDER BY duration" + query = ( + "SELECT quota_name, quota_key, duration, queries, max_queries, query_selects, max_query_selects, query_inserts, max_query_inserts, errors, max_errors, result_rows, max_result_rows," + "result_bytes, max_result_bytes, read_rows, max_read_rows, read_bytes, max_read_bytes, max_execution_time " + "FROM system.quota_usage ORDER BY duration" + ) r = TSV(instance.query(query)) print(("system_quota_usage: {},\ncanonical: {}".format(r, TSV(canonical_tsv)))) assert r == canonical_tsv @@ -39,9 +50,11 @@ def system_quota_usage(canonical): def system_quotas_usage(canonical): canonical_tsv = TSV(canonical) - query = "SELECT quota_name, quota_key, is_current, duration, queries, max_queries, query_selects, max_query_selects, query_inserts, max_query_inserts, errors, max_errors, result_rows, max_result_rows, " \ - "result_bytes, max_result_bytes, read_rows, max_read_rows, read_bytes, max_read_bytes, max_execution_time " \ - "FROM system.quotas_usage ORDER BY quota_name, quota_key, duration" + query = ( + "SELECT quota_name, quota_key, is_current, duration, queries, max_queries, query_selects, max_query_selects, query_inserts, max_query_inserts, errors, max_errors, result_rows, max_result_rows, " + "result_bytes, max_result_bytes, read_rows, max_read_rows, read_bytes, max_read_bytes, max_execution_time " + "FROM system.quotas_usage ORDER BY quota_name, quota_key, duration" + ) r = TSV(instance.query(query)) print(("system_quotas_usage: {},\ncanonical: {}".format(r, TSV(canonical_tsv)))) assert r == canonical_tsv @@ -49,12 +62,14 @@ def system_quotas_usage(canonical): def copy_quota_xml(local_file_name, reload_immediately=True): script_dir = os.path.dirname(os.path.realpath(__file__)) - instance.copy_file_to_container(os.path.join(script_dir, local_file_name), - '/etc/clickhouse-server/users.d/myquota.xml') + instance.copy_file_to_container( + os.path.join(script_dir, local_file_name), + "/etc/clickhouse-server/users.d/myquota.xml", + ) if reload_immediately: - # We use the special user 'user_with_no_quota' here because - # we don't want SYSTEM RELOAD CONFIG to mess our quota consuming checks. - instance.query("SYSTEM RELOAD CONFIG", user='user_with_no_quota') + # We use the special user 'user_with_no_quota' here because + # we don't want SYSTEM RELOAD CONFIG to mess our quota consuming checks. 
+ instance.query("SYSTEM RELOAD CONFIG", user="user_with_no_quota") @pytest.fixture(scope="module", autouse=True) @@ -62,7 +77,9 @@ def started_cluster(): try: cluster.start() instance.query("DROP TABLE IF EXISTS test_table") - instance.query("CREATE TABLE test_table(x UInt32) ENGINE = MergeTree ORDER BY tuple()") + instance.query( + "CREATE TABLE test_table(x UInt32) ENGINE = MergeTree ORDER BY tuple()" + ) instance.query("INSERT INTO test_table SELECT number FROM numbers(50)") yield cluster @@ -75,320 +92,1592 @@ def started_cluster(): def reset_quotas_and_usage_info(): try: instance.query("DROP QUOTA IF EXISTS qA, qB") - copy_quota_xml('simpliest.xml') # To reset usage info. - copy_quota_xml('normal_limits.xml') + copy_quota_xml("simpliest.xml") # To reset usage info. + copy_quota_xml("normal_limits.xml") yield finally: pass def test_quota_from_users_xml(): - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", [31556952], - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, 500, 500, "\\N", "\\N", "\\N", 1000, "\\N", "\\N"]]) - system_quota_usage([["myQuota", "default", 31556952, 0, 1000, 0, 500, 0, 500, 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + [31556952], + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + 500, + 500, + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + "\\N", + ] + ] + ) + system_quota_usage( + [ + [ + "myQuota", + "default", + 31556952, + 0, + 1000, + 0, + 500, + 0, + 500, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) system_quotas_usage( - [["myQuota", "default", 1, 31556952, 0, 1000, 0, 500, 0, 500, 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 1, + 31556952, + 0, + 1000, + 0, + 500, + 0, + 500, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) instance.query("SELECT * from test_table") system_quota_usage( - [["myQuota", "default", 31556952, 1, 1000, 1, 500, 0, 500, 0, "\\N", 50, "\\N", 200, "\\N", 50, 1000, 200, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 31556952, + 1, + 1000, + 1, + 500, + 0, + 500, + 0, + "\\N", + 50, + "\\N", + 200, + "\\N", + 50, + 1000, + 200, + "\\N", + "\\N", + ] + ] + ) instance.query("SELECT SUM(x) from test_table") system_quota_usage( - [["myQuota", "default", 31556952, 2, 1000, 2, 500, 0, 500, 0, "\\N", 51, "\\N", 208, "\\N", 100, 1000, 400, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 31556952, + 2, + 1000, + 2, + 500, + 0, + 500, + 0, + "\\N", + 51, + "\\N", + 208, + "\\N", + 100, + 1000, + 400, + "\\N", + "\\N", + ] + ] + ) def test_simpliest_quota(): # Simpliest quota doesn't even track usage. 
- copy_quota_xml('simpliest.xml') - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", "[]", 0, - "['default']", "[]"]]) + copy_quota_xml("simpliest.xml") + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + "[]", + 0, + "['default']", + "[]", + ] + ] + ) system_quota_limits("") system_quota_usage( - [["myQuota", "default", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + ] + ] + ) instance.query("SELECT * from test_table") system_quota_usage( - [["myQuota", "default", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + ] + ] + ) def test_tracking_quota(): # Now we're tracking usage. - copy_quota_xml('tracking.xml') - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", "[31556952]", - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N"]]) - system_quota_usage([["myQuota", "default", 31556952, 0, "\\N", 0, "\\N", 0, "\\N", 0, "\\N", 0, "\\N", 0, "\\N", 0, "\\N", 0, "\\N", "\\N"]]) + copy_quota_xml("tracking.xml") + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + "[31556952]", + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + ] + ] + ) + system_quota_usage( + [ + [ + "myQuota", + "default", + 31556952, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + "\\N", + ] + ] + ) instance.query("SELECT * from test_table") system_quota_usage( - [["myQuota", "default", 31556952, 1, "\\N", 1, "\\N", 0, "\\N", 0, "\\N", 50, "\\N", 200, "\\N", 50, "\\N", 200, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 31556952, + 1, + "\\N", + 1, + "\\N", + 0, + "\\N", + 0, + "\\N", + 50, + "\\N", + 200, + "\\N", + 50, + "\\N", + 200, + "\\N", + "\\N", + ] + ] + ) instance.query("SELECT SUM(x) from test_table") system_quota_usage( - [["myQuota", "default", 31556952, 2, "\\N", 2, "\\N", 0, "\\N", 0, "\\N", 51, "\\N", 208, "\\N", 100, "\\N", 400, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 31556952, + 2, + "\\N", + 2, + "\\N", + 0, + "\\N", + 0, + "\\N", + 51, + "\\N", + 208, + "\\N", + 100, + "\\N", + 400, + "\\N", + "\\N", + ] + ] + ) def test_exceed_quota(): # Change quota, now the limits are tiny so we will exceed the quota. 
- copy_quota_xml('tiny_limits.xml') - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", "[31556952]", - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N"]]) - system_quota_usage([["myQuota", "default", 31556952, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, "\\N", 0, 1, 0, "\\N", "\\N"]]) + copy_quota_xml("tiny_limits.xml") + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + "[31556952]", + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N"]] + ) + system_quota_usage( + [ + [ + "myQuota", + "default", + 31556952, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + "\\N", + 0, + 1, + 0, + "\\N", + "\\N", + ] + ] + ) - assert re.search("Quota.*has\ been\ exceeded", instance.query_and_get_error("SELECT * from test_table")) - system_quota_usage([["myQuota", "default", 31556952, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, "\\N", 50, 1, 0, "\\N", "\\N"]]) + assert re.search( + "Quota.*has\ been\ exceeded", + instance.query_and_get_error("SELECT * from test_table"), + ) + system_quota_usage( + [ + [ + "myQuota", + "default", + 31556952, + 1, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 0, + 1, + 0, + "\\N", + 50, + 1, + 0, + "\\N", + "\\N", + ] + ] + ) # Change quota, now the limits are enough to execute queries. - copy_quota_xml('normal_limits.xml') - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", "[31556952]", - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, 500, 500, "\\N", "\\N", "\\N", 1000, "\\N", "\\N"]]) - system_quota_usage([["myQuota", "default", 31556952, 1, 1000, 1, 500, 0, 500, 1, "\\N", 0, "\\N", 0, "\\N", 50, 1000, 0, "\\N", "\\N"]]) + copy_quota_xml("normal_limits.xml") + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + "[31556952]", + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + 500, + 500, + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + "\\N", + ] + ] + ) + system_quota_usage( + [ + [ + "myQuota", + "default", + 31556952, + 1, + 1000, + 1, + 500, + 0, + 500, + 1, + "\\N", + 0, + "\\N", + 0, + "\\N", + 50, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) instance.query("SELECT * from test_table") system_quota_usage( - [["myQuota", "default", 31556952, 2, 1000, 2, 500, 0, 500, 1, "\\N", 50, "\\N", 200, "\\N", 100, 1000, 200, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 31556952, + 2, + 1000, + 2, + 500, + 0, + 500, + 1, + "\\N", + 50, + "\\N", + 200, + "\\N", + 100, + 1000, + 200, + "\\N", + "\\N", + ] + ] + ) def test_add_remove_interval(): - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", [31556952], - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, 500, 500, "\\N", "\\N", "\\N", 1000, "\\N", "\\N"]]) - system_quota_usage([["myQuota", "default", 31556952, 0, 1000, 0, 500, 0, 500, 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + [31556952], + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + 500, + 500, + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + 
"\\N", + ] + ] + ) + system_quota_usage( + [ + [ + "myQuota", + "default", + 31556952, + 0, + 1000, + 0, + 500, + 0, + 500, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) # Add interval. - copy_quota_xml('two_intervals.xml') - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", - "[31556952,63113904]", 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, "\\N", "\\N", "\\N", "\\N", "\\N", 1000, "\\N", "\\N"], - ["myQuota", 63113904, 1, "\\N", "\\N", "\\N", "\\N", "\\N", 30000, "\\N", 20000, 120]]) - system_quota_usage([["myQuota", "default", 31556952, 0, 1000, 0, "\\N", 0, "\\N", 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"], - ["myQuota", "default", 63113904, 0, "\\N", 0, "\\N", 0, "\\N", 0, "\\N", 0, "\\N", 0, 30000, 0, "\\N", 0, 20000, 120]]) + copy_quota_xml("two_intervals.xml") + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + "[31556952,63113904]", + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + "\\N", + ], + [ + "myQuota", + 63113904, + 1, + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + 30000, + "\\N", + 20000, + 120, + ], + ] + ) + system_quota_usage( + [ + [ + "myQuota", + "default", + 31556952, + 0, + 1000, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ], + [ + "myQuota", + "default", + 63113904, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 30000, + 0, + "\\N", + 0, + 20000, + 120, + ], + ] + ) instance.query("SELECT * from test_table") system_quota_usage( - [["myQuota", "default", 31556952, 1, 1000, 1, "\\N", 0, "\\N", 0, "\\N", 50, "\\N", 200, "\\N", 50, 1000, 200, "\\N", "\\N"], - ["myQuota", "default", 63113904, 1, "\\N", 1, "\\N", 0, "\\N", 0, "\\N", 50, "\\N", 200, 30000, 50, "\\N", 200, 20000, 120]]) + [ + [ + "myQuota", + "default", + 31556952, + 1, + 1000, + 1, + "\\N", + 0, + "\\N", + 0, + "\\N", + 50, + "\\N", + 200, + "\\N", + 50, + 1000, + 200, + "\\N", + "\\N", + ], + [ + "myQuota", + "default", + 63113904, + 1, + "\\N", + 1, + "\\N", + 0, + "\\N", + 0, + "\\N", + 50, + "\\N", + 200, + 30000, + 50, + "\\N", + 200, + 20000, + 120, + ], + ] + ) # Remove interval. 
- copy_quota_xml('normal_limits.xml') - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", [31556952], - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, 500, 500, "\\N", "\\N", "\\N", 1000, "\\N", "\\N"]]) + copy_quota_xml("normal_limits.xml") + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + [31556952], + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + 500, + 500, + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + "\\N", + ] + ] + ) system_quota_usage( - [["myQuota", "default", 31556952, 1, 1000, 1, 500, 0, 500, 0, "\\N", 50, "\\N", 200, "\\N", 50, 1000, 200, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 31556952, + 1, + 1000, + 1, + 500, + 0, + 500, + 0, + "\\N", + 50, + "\\N", + 200, + "\\N", + 50, + 1000, + 200, + "\\N", + "\\N", + ] + ] + ) instance.query("SELECT * from test_table") system_quota_usage( - [["myQuota", "default", 31556952, 2, 1000, 2, 500, 0, 500, 0, "\\N", 100, "\\N", 400, "\\N", 100, 1000, 400, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 31556952, + 2, + 1000, + 2, + 500, + 0, + 500, + 0, + "\\N", + 100, + "\\N", + 400, + "\\N", + 100, + 1000, + 400, + "\\N", + "\\N", + ] + ] + ) # Remove all intervals. - copy_quota_xml('simpliest.xml') - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", "[]", 0, - "['default']", "[]"]]) + copy_quota_xml("simpliest.xml") + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + "[]", + 0, + "['default']", + "[]", + ] + ] + ) system_quota_limits("") system_quota_usage( - [["myQuota", "default", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + ] + ] + ) instance.query("SELECT * from test_table") system_quota_usage( - [["myQuota", "default", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + ] + ] + ) # Add one interval back. 
- copy_quota_xml('normal_limits.xml') - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", [31556952], - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, 500, 500, "\\N", "\\N", "\\N", 1000, "\\N", "\\N"]]) - system_quota_usage([["myQuota", "default", 31556952, 0, 1000, 0, 500, 0, 500, 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) + copy_quota_xml("normal_limits.xml") + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + [31556952], + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + 500, + 500, + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + "\\N", + ] + ] + ) + system_quota_usage( + [ + [ + "myQuota", + "default", + 31556952, + 0, + 1000, + 0, + 500, + 0, + 500, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) def test_add_remove_quota(): - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", [31556952], - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, 500, 500, "\\N", "\\N", "\\N", 1000, "\\N", "\\N"]]) + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + [31556952], + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + 500, + 500, + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + "\\N", + ] + ] + ) system_quotas_usage( - [["myQuota", "default", 1, 31556952, 0, 1000, 0, 500, 0, 500, 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 1, + 31556952, + 0, + 1000, + 0, + 500, + 0, + 500, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) # Add quota. 
- copy_quota_xml('two_quotas.xml') - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", "[31556952]", - 0, "['default']", "[]"], - ["myQuota2", "4590510c-4d13-bf21-ec8a-c2187b092e73", "users.xml", "['client_key','user_name']", - "[3600,2629746]", 0, "[]", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, "\\N", "\\N", "\\N", "\\N", "\\N", 1000, "\\N", "\\N"], - ["myQuota2", 3600, 1, "\\N", "\\N", "\\N", "\\N", 4000, 400000, 4000, 400000, 60], - ["myQuota2", 2629746, 0, "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", "\\N", 1800]]) + copy_quota_xml("two_quotas.xml") + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + "[31556952]", + 0, + "['default']", + "[]", + ], + [ + "myQuota2", + "4590510c-4d13-bf21-ec8a-c2187b092e73", + "users.xml", + "['client_key','user_name']", + "[3600,2629746]", + 0, + "[]", + "[]", + ], + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + "\\N", + ], + [ + "myQuota2", + 3600, + 1, + "\\N", + "\\N", + "\\N", + "\\N", + 4000, + 400000, + 4000, + 400000, + 60, + ], + [ + "myQuota2", + 2629746, + 0, + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + "\\N", + 1800, + ], + ] + ) system_quotas_usage( - [["myQuota", "default", 1, 31556952, 0, 1000, 0, "\\N", 0, "\\N", 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 1, + 31556952, + 0, + 1000, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) # Drop quota. - copy_quota_xml('normal_limits.xml') - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", "[31556952]", - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, 500, 500, "\\N", "\\N", "\\N", 1000, "\\N", "\\N"]]) + copy_quota_xml("normal_limits.xml") + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + "[31556952]", + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + 500, + 500, + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + "\\N", + ] + ] + ) system_quotas_usage( - [["myQuota", "default", 1, 31556952, 0, 1000, 0, 500, 0, 500, 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 1, + 31556952, + 0, + 1000, + 0, + 500, + 0, + 500, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) # Drop all quotas. - copy_quota_xml('no_quotas.xml') + copy_quota_xml("no_quotas.xml") check_system_quotas("") system_quota_limits("") system_quotas_usage("") # Add one quota back. 
- copy_quota_xml('normal_limits.xml') - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", "[31556952]", - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, 500, 500, "\\N", "\\N", "\\N", 1000, "\\N", "\\N"]]) + copy_quota_xml("normal_limits.xml") + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + "[31556952]", + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + 500, + 500, + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + "\\N", + ] + ] + ) system_quotas_usage( - [["myQuota", "default", 1, 31556952, 0, 1000, 0, 500, 0, 500, 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 1, + 31556952, + 0, + 1000, + 0, + 500, + 0, + 500, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) def test_reload_users_xml_by_timer(): - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", "[31556952]", - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, 500, 500, "\\N", "\\N", "\\N", 1000, "\\N", "\\N"]]) + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + "[31556952]", + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + 500, + 500, + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + "\\N", + ] + ] + ) time.sleep(1) # The modification time of the 'quota.xml' file should be different, # because config files are reload by timer only when the modification time is changed. - copy_quota_xml('tiny_limits.xml', reload_immediately=False) - assert_eq_with_retry(instance, "SELECT * FROM system.quotas", [ - ["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", ['user_name'], "[31556952]", 0, "['default']", - "[]"]]) - assert_eq_with_retry(instance, "SELECT * FROM system.quota_limits", - [["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N"]]) + copy_quota_xml("tiny_limits.xml", reload_immediately=False) + assert_eq_with_retry( + instance, + "SELECT * FROM system.quotas", + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + ["user_name"], + "[31556952]", + 0, + "['default']", + "[]", + ] + ], + ) + assert_eq_with_retry( + instance, + "SELECT * FROM system.quota_limits", + [["myQuota", 31556952, 0, 1, 1, 1, 1, 1, "\\N", 1, "\\N", "\\N"]], + ) def test_dcl_introspection(): assert instance.query("SHOW QUOTAS") == "myQuota\n" - assert instance.query( - "SHOW CREATE QUOTA") == "CREATE QUOTA myQuota KEYED BY user_name FOR INTERVAL 1 year MAX queries = 1000, query_selects = 500, query_inserts = 500, read_rows = 1000 TO default\n" - assert instance.query( - "SHOW CREATE QUOTAS") == "CREATE QUOTA myQuota KEYED BY user_name FOR INTERVAL 1 year MAX queries = 1000, query_selects = 500, query_inserts = 500, read_rows = 1000 TO default\n" + assert ( + instance.query("SHOW CREATE QUOTA") + == "CREATE QUOTA myQuota KEYED BY user_name FOR INTERVAL 1 year MAX queries = 1000, query_selects = 500, query_inserts = 500, read_rows = 1000 TO default\n" + ) + assert ( + instance.query("SHOW CREATE QUOTAS") + == "CREATE QUOTA myQuota KEYED BY user_name FOR INTERVAL 1 year MAX queries = 1000, query_selects = 500, query_inserts = 500, read_rows = 1000 TO default\n" + ) assert re.match( 
"myQuota\\tdefault\\t.*\\t31556952\\t0\\t1000\\t0\\t500\\t0\\t500\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t1000\\t0\\t\\\\N\\t.*\\t\\\\N\n", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) instance.query("SELECT * from test_table") assert re.match( "myQuota\\tdefault\\t.*\\t31556952\\t1\\t1000\\t1\\t500\\t0\\t500\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t1000\\t200\\t\\\\N\\t.*\\t\\\\N\n", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) expected_access = "CREATE QUOTA myQuota KEYED BY user_name FOR INTERVAL 1 year MAX queries = 1000, query_selects = 500, query_inserts = 500, read_rows = 1000 TO default\n" assert expected_access in instance.query("SHOW ACCESS") # Add interval. - copy_quota_xml('two_intervals.xml') + copy_quota_xml("two_intervals.xml") assert instance.query("SHOW QUOTAS") == "myQuota\n" - assert instance.query( - "SHOW CREATE QUOTA") == "CREATE QUOTA myQuota KEYED BY user_name FOR INTERVAL 1 year MAX queries = 1000, read_rows = 1000, FOR RANDOMIZED INTERVAL 2 year MAX result_bytes = 30000, read_bytes = 20000, execution_time = 120 TO default\n" + assert ( + instance.query("SHOW CREATE QUOTA") + == "CREATE QUOTA myQuota KEYED BY user_name FOR INTERVAL 1 year MAX queries = 1000, read_rows = 1000, FOR RANDOMIZED INTERVAL 2 year MAX result_bytes = 30000, read_bytes = 20000, execution_time = 120 TO default\n" + ) assert re.match( "myQuota\\tdefault\\t.*\\t31556952\\t1\\t1000\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t1000\\t200\\t\\\\N\\t.*\\t\\\\N\n" "myQuota\\tdefault\\t.*\\t63113904\\t0\\t\\\\N\t0\\t\\\\N\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t30000\\t0\\t\\\\N\\t0\\t20000\\t.*\\t120", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) # Drop interval, add quota. 
- copy_quota_xml('two_quotas.xml') + copy_quota_xml("two_quotas.xml") assert instance.query("SHOW QUOTAS") == "myQuota\nmyQuota2\n" - assert instance.query( - "SHOW CREATE QUOTA myQuota") == "CREATE QUOTA myQuota KEYED BY user_name FOR INTERVAL 1 year MAX queries = 1000, read_rows = 1000 TO default\n" - assert instance.query( - "SHOW CREATE QUOTA myQuota2") == "CREATE QUOTA myQuota2 KEYED BY client_key, user_name FOR RANDOMIZED INTERVAL 1 hour MAX result_rows = 4000, result_bytes = 400000, read_rows = 4000, read_bytes = 400000, execution_time = 60, FOR INTERVAL 1 month MAX execution_time = 1800\n" - assert instance.query( - "SHOW CREATE QUOTAS") == "CREATE QUOTA myQuota KEYED BY user_name FOR INTERVAL 1 year MAX queries = 1000, read_rows = 1000 TO default\n" \ - "CREATE QUOTA myQuota2 KEYED BY client_key, user_name FOR RANDOMIZED INTERVAL 1 hour MAX result_rows = 4000, result_bytes = 400000, read_rows = 4000, read_bytes = 400000, execution_time = 60, FOR INTERVAL 1 month MAX execution_time = 1800\n" + assert ( + instance.query("SHOW CREATE QUOTA myQuota") + == "CREATE QUOTA myQuota KEYED BY user_name FOR INTERVAL 1 year MAX queries = 1000, read_rows = 1000 TO default\n" + ) + assert ( + instance.query("SHOW CREATE QUOTA myQuota2") + == "CREATE QUOTA myQuota2 KEYED BY client_key, user_name FOR RANDOMIZED INTERVAL 1 hour MAX result_rows = 4000, result_bytes = 400000, read_rows = 4000, read_bytes = 400000, execution_time = 60, FOR INTERVAL 1 month MAX execution_time = 1800\n" + ) + assert ( + instance.query("SHOW CREATE QUOTAS") + == "CREATE QUOTA myQuota KEYED BY user_name FOR INTERVAL 1 year MAX queries = 1000, read_rows = 1000 TO default\n" + "CREATE QUOTA myQuota2 KEYED BY client_key, user_name FOR RANDOMIZED INTERVAL 1 hour MAX result_rows = 4000, result_bytes = 400000, read_rows = 4000, read_bytes = 400000, execution_time = 60, FOR INTERVAL 1 month MAX execution_time = 1800\n" + ) assert re.match( "myQuota\\tdefault\\t.*\\t31556952\\t1\\t1000\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t1000\\t200\\t\\\\N\\t.*\\t\\\\N\n", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) # Drop all quotas. 
- copy_quota_xml('no_quotas.xml') + copy_quota_xml("no_quotas.xml") assert instance.query("SHOW QUOTAS") == "" assert instance.query("SHOW CREATE QUOTA") == "" assert instance.query("SHOW QUOTA") == "" def test_dcl_management(): - copy_quota_xml('no_quotas.xml') + copy_quota_xml("no_quotas.xml") assert instance.query("SHOW QUOTA") == "" - instance.query("CREATE QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 123 TO CURRENT_USER") - assert instance.query( - "SHOW CREATE QUOTA qA") == "CREATE QUOTA qA FOR INTERVAL 5 quarter MAX queries = 123 TO default\n" + instance.query( + "CREATE QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 123 TO CURRENT_USER" + ) + assert ( + instance.query("SHOW CREATE QUOTA qA") + == "CREATE QUOTA qA FOR INTERVAL 5 quarter MAX queries = 123 TO default\n" + ) assert re.match( "qA\\t\\t.*\\t39446190\\t0\\t123\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t.*\\t\\\\N\n", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) instance.query("SELECT * from test_table") assert re.match( "qA\\t\\t.*\\t39446190\\t1\\t123\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\n", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) instance.query( - "ALTER QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 321, MAX ERRORS 10, FOR INTERVAL 0.5 HOUR MAX EXECUTION TIME 0.5") - assert instance.query( - "SHOW CREATE QUOTA qA") == "CREATE QUOTA qA FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default\n" + "ALTER QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 321, MAX ERRORS 10, FOR INTERVAL 0.5 HOUR MAX EXECUTION TIME 0.5" + ) + assert ( + instance.query("SHOW CREATE QUOTA qA") + == "CREATE QUOTA qA FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default\n" + ) assert re.match( "qA\\t\\t.*\\t1800\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t.*\\t0.5\n" "qA\\t\\t.*\\t39446190\\t1\\t321\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t10\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\n", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) instance.query("SELECT * from test_table") assert re.match( "qA\\t\\t.*\\t1800\\t1\\t\\\\N\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t0.5\n" "qA\\t\\t.*\\t39446190\\t2\\t321\\t2\\t\\\\N\\t0\\t\\\\N\\t0\\t10\\t100\\t\\\\N\\t400\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t.*\\t\\\\N\n", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) instance.query( - "ALTER QUOTA qA FOR INTERVAL 15 MONTH NO LIMITS, FOR RANDOMIZED INTERVAL 16 MONTH TRACKING ONLY, FOR INTERVAL 1800 SECOND NO LIMITS") + "ALTER QUOTA qA FOR INTERVAL 15 MONTH NO LIMITS, FOR RANDOMIZED INTERVAL 16 MONTH TRACKING ONLY, FOR INTERVAL 1800 SECOND NO LIMITS" + ) assert re.match( "qA\\t\\t.*\\t42075936\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t.*\\t\\\\N\n", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) instance.query("SELECT * from test_table") assert re.match( "qA\\t\\t.*\\t42075936\\t1\\t\\\\N\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\n", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) instance.query("ALTER QUOTA qA RENAME TO qB") - assert instance.query( - "SHOW CREATE QUOTA qB") == "CREATE 
QUOTA qB FOR RANDOMIZED INTERVAL 16 month TRACKING ONLY TO default\n" + assert ( + instance.query("SHOW CREATE QUOTA qB") + == "CREATE QUOTA qB FOR RANDOMIZED INTERVAL 16 month TRACKING ONLY TO default\n" + ) assert re.match( "qB\\t\\t.*\\t42075936\\t1\\t\\\\N\\t1\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t50\\t\\\\N\\t200\\t\\\\N\\t.*\\t\\\\N\n", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) instance.query("SELECT * from test_table") assert re.match( "qB\\t\\t.*\\t42075936\\t2\\t\\\\N\\t2\\t\\\\N\\t0\\t\\\\N\\t0\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t100\\t\\\\N\\t400\\t\\\\N\\t.*\\t\\\\N\n", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) instance.query("DROP QUOTA qB") assert instance.query("SHOW QUOTA") == "" def test_users_xml_is_readonly(): - assert re.search("storage is readonly", instance.query_and_get_error("DROP QUOTA myQuota")) + assert re.search( + "storage is readonly", instance.query_and_get_error("DROP QUOTA myQuota") + ) def test_query_inserts(): - check_system_quotas([["myQuota", "e651da9c-a748-8703-061a-7e5e5096dae7", "users.xml", "['user_name']", [31556952], - 0, "['default']", "[]"]]) - system_quota_limits([["myQuota", 31556952, 0, 1000, 500, 500, "\\N", "\\N", "\\N", 1000, "\\N", "\\N"]]) - system_quota_usage([["myQuota", "default", 31556952, 0, 1000, 0, 500, 0, 500, 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) + check_system_quotas( + [ + [ + "myQuota", + "e651da9c-a748-8703-061a-7e5e5096dae7", + "users.xml", + "['user_name']", + [31556952], + 0, + "['default']", + "[]", + ] + ] + ) + system_quota_limits( + [ + [ + "myQuota", + 31556952, + 0, + 1000, + 500, + 500, + "\\N", + "\\N", + "\\N", + 1000, + "\\N", + "\\N", + ] + ] + ) + system_quota_usage( + [ + [ + "myQuota", + "default", + 31556952, + 0, + 1000, + 0, + 500, + 0, + 500, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) system_quotas_usage( - [["myQuota", "default", 1, 31556952, 0, 1000, 0, 500, 0, 500, 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 1, + 31556952, + 0, + 1000, + 0, + 500, + 0, + 500, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) instance.query("DROP TABLE IF EXISTS test_table_ins") - instance.query("CREATE TABLE test_table_ins(x UInt32) ENGINE = MergeTree ORDER BY tuple()") + instance.query( + "CREATE TABLE test_table_ins(x UInt32) ENGINE = MergeTree ORDER BY tuple()" + ) system_quota_usage( - [["myQuota", "default", 31556952, 2, 1000, 0, 500, 0, 500, 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) - + [ + [ + "myQuota", + "default", + 31556952, + 2, + 1000, + 0, + 500, + 0, + 500, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) + instance.query("INSERT INTO test_table_ins values(1)") system_quota_usage( - [["myQuota", "default", 31556952, 3, 1000, 0, 500, 1, 500, 0, "\\N", 0, "\\N", 0, "\\N", 0, 1000, 0, "\\N", "\\N"]]) + [ + [ + "myQuota", + "default", + 31556952, + 3, + 1000, + 0, + 500, + 1, + 500, + 0, + "\\N", + 0, + "\\N", + 0, + "\\N", + 0, + 1000, + 0, + "\\N", + "\\N", + ] + ] + ) instance.query("DROP TABLE test_table_ins") @@ -396,28 +1685,40 @@ def test_consumption_of_show_tables(): assert instance.query("SHOW TABLES") == "test_table\n" assert re.match( "myQuota\\tdefault\\t.*\\t31556952\\t1\\t1000\\t1\\t500\\t0\\t500\\t0\\t\\\\N\\t1\\t\\\\N.*", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) + def 
test_consumption_of_show_databases(): - assert instance.query("SHOW DATABASES") == "INFORMATION_SCHEMA\ndefault\ninformation_schema\nsystem\n" + assert ( + instance.query("SHOW DATABASES") + == "INFORMATION_SCHEMA\ndefault\ninformation_schema\nsystem\n" + ) assert re.match( "myQuota\\tdefault\\t.*\\t31556952\\t1\\t1000\\t1\\t500\\t0\\t500\\t0\\t\\\\N\\t4\\t\\\\N.*", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) + def test_consumption_of_show_clusters(): assert len(instance.query("SHOW CLUSTERS")) > 0 assert re.match( "myQuota\\tdefault\\t.*\\t31556952\\t1\\t1000\\t1\\t500\\t0\\t500\\t0\\t\\\\N.*", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) + def test_consumption_of_show_processlist(): instance.query("SHOW PROCESSLIST") assert re.match( "myQuota\\tdefault\\t.*\\t31556952\\t1\\t1000\\t1\\t500\\t0\\t500\\t0\\t\\\\N\\t0\\t\\\\N.*", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) + def test_consumption_of_show_privileges(): assert len(instance.query("SHOW PRIVILEGES")) > 0 assert re.match( "myQuota\\tdefault\\t.*\\t31556952\\t1\\t1000\\t1\\t500\\t0\\t500\\t0\\t\\\\N.*", - instance.query("SHOW QUOTA")) + instance.query("SHOW QUOTA"), + ) diff --git a/tests/integration/test_random_inserts/test.py b/tests/integration/test_random_inserts/test.py index a06649dba52..4d6aaa9276d 100644 --- a/tests/integration/test_random_inserts/test.py +++ b/tests/integration/test_random_inserts/test.py @@ -11,12 +11,18 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', - main_configs=["configs/conf.d/merge_tree.xml", "configs/conf.d/remote_servers.xml"], - with_zookeeper=True, macros={"layer": 0, "shard": 0, "replica": 1}) -node2 = cluster.add_instance('node2', - main_configs=["configs/conf.d/merge_tree.xml", "configs/conf.d/remote_servers.xml"], - with_zookeeper=True, macros={"layer": 0, "shard": 0, "replica": 2}) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/conf.d/merge_tree.xml", "configs/conf.d/remote_servers.xml"], + with_zookeeper=True, + macros={"layer": 0, "shard": 0, "replica": 1}, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/conf.d/merge_tree.xml", "configs/conf.d/remote_servers.xml"], + with_zookeeper=True, + macros={"layer": 0, "shard": 0, "replica": 2}, +) nodes = [node1, node2] @@ -35,9 +41,11 @@ def test_random_inserts(started_cluster): # Duration of the test, reduce it if don't want to wait DURATION_SECONDS = 10 # * 60 - node1.query(""" + node1.query( + """ CREATE TABLE simple ON CLUSTER test_cluster (date Date, i UInt32, s String) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/simple', '{replica}', date, i, 8192)""") + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/simple', '{replica}', date, i, 8192)""" + ) with PartitionManager() as pm_random_drops: for sacrifice in nodes: @@ -52,21 +60,38 @@ def test_random_inserts(started_cluster): bash_script = os.path.join(os.path.dirname(__file__), "test.sh") inserters = [] for node in nodes: - cmd = ['/bin/bash', bash_script, node.ip_address, str(min_timestamp), str(max_timestamp), - str(cluster.get_client_cmd())] - inserters.append(CommandRequest(cmd, timeout=DURATION_SECONDS * 2, stdin='')) + cmd = [ + "/bin/bash", + bash_script, + node.ip_address, + str(min_timestamp), + str(max_timestamp), + str(cluster.get_client_cmd()), + ] + inserters.append( + CommandRequest(cmd, timeout=DURATION_SECONDS * 2, stdin="") + ) print(node.name, node.ip_address) for inserter 
in inserters: inserter.get_answer() - answer = "{}\t{}\t{}\t{}\n".format(num_timestamps, num_timestamps, min_timestamp, max_timestamp) + answer = "{}\t{}\t{}\t{}\n".format( + num_timestamps, num_timestamps, min_timestamp, max_timestamp + ) for node in nodes: - res = node.query_with_retry("SELECT count(), uniqExact(i), min(i), max(i) FROM simple", - check_callback=lambda res: TSV(res) == TSV(answer)) - assert TSV(res) == TSV(answer), node.name + " : " + node.query( - "SELECT groupArray(_part), i, count() AS c FROM simple GROUP BY i ORDER BY c DESC LIMIT 1") + res = node.query_with_retry( + "SELECT count(), uniqExact(i), min(i), max(i) FROM simple", + check_callback=lambda res: TSV(res) == TSV(answer), + ) + assert TSV(res) == TSV(answer), ( + node.name + + " : " + + node.query( + "SELECT groupArray(_part), i, count() AS c FROM simple GROUP BY i ORDER BY c DESC LIMIT 1" + ) + ) node1.query("""DROP TABLE simple ON CLUSTER test_cluster""") @@ -84,14 +109,16 @@ class Runner: self.stop_ev.wait(random.random()) year = 2000 - month = '01' + month = "01" day = str(thread_num + 1).zfill(2) x = 1 while not self.stop_ev.is_set(): payload = """ {year}-{month}-{day} {x1} {year}-{month}-{day} {x2} -""".format(year=year, month=month, day=day, x1=x, x2=(x + 1)).strip() +""".format( + year=year, month=month, day=day, x1=x, x2=(x + 1) + ).strip() try: random.choice(nodes).query("INSERT INTO repl_test FORMAT TSV", payload) @@ -106,7 +133,7 @@ class Runner: self.mtx.release() except Exception as e: - print('Exception:', e) + print("Exception:", e) x += 2 self.stop_ev.wait(0.1 + random.random() / 10) @@ -120,7 +147,8 @@ def test_insert_multithreaded(started_cluster): for node in nodes: node.query( - "CREATE TABLE repl_test(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/repl_test', '{replica}') ORDER BY x PARTITION BY toYYYYMM(d)") + "CREATE TABLE repl_test(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/repl_test', '{replica}') ORDER BY x PARTITION BY toYYYYMM(d)" + ) runner = Runner() @@ -145,7 +173,11 @@ def test_insert_multithreaded(started_cluster): time.sleep(0.5) def get_delay(node): - return int(node.query("SELECT absolute_delay FROM system.replicas WHERE table = 'repl_test'").rstrip()) + return int( + node.query( + "SELECT absolute_delay FROM system.replicas WHERE table = 'repl_test'" + ).rstrip() + ) if all([get_delay(n) == 0 for n in nodes]): all_replicated = True diff --git a/tests/integration/test_range_hashed_dictionary_types/test.py b/tests/integration/test_range_hashed_dictionary_types/test.py index 198e2e27db8..91b0184c791 100644 --- a/tests/integration/test_range_hashed_dictionary_types/test.py +++ b/tests/integration/test_range_hashed_dictionary_types/test.py @@ -4,7 +4,7 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1') +node1 = cluster.add_instance("node1") @pytest.fixture(scope="module") @@ -19,7 +19,8 @@ def started_cluster(): def test_range_hashed_dict(started_cluster): script = "echo '4990954156238030839\t2018-12-31 21:00:00\t2020-12-30 20:59:59\t0.1\tRU' > /var/lib/clickhouse/user_files/rates.tsv" node1.exec_in_container(["bash", "-c", script]) - node1.query(""" + node1.query( + """ CREATE DICTIONARY rates ( hash_id UInt64, @@ -36,8 +37,13 @@ def test_range_hashed_dict(started_cluster): LAYOUT(RANGE_HASHED()) RANGE(MIN start_date MAX end_date) LIFETIME(60); - """) + """ + ) node1.query("SYSTEM RELOAD DICTIONARY default.rates") - assert node1.query( - "SELECT 
dictGetString('default.rates', 'currency', toUInt64(4990954156238030839), toDateTime('2019-10-01 00:00:00'))") == "RU\n" + assert ( + node1.query( + "SELECT dictGetString('default.rates', 'currency', toUInt64(4990954156238030839), toDateTime('2019-10-01 00:00:00'))" + ) + == "RU\n" + ) diff --git a/tests/integration/test_read_temporary_tables_on_failure/test.py b/tests/integration/test_read_temporary_tables_on_failure/test.py index ae59fb31641..fd1d92eff92 100644 --- a/tests/integration/test_read_temporary_tables_on_failure/test.py +++ b/tests/integration/test_read_temporary_tables_on_failure/test.py @@ -4,7 +4,7 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node') +node = cluster.add_instance("node") @pytest.fixture(scope="module") @@ -21,7 +21,7 @@ def test_different_versions(start_cluster): with pytest.raises(QueryTimeoutExceedException): node.query("SELECT sleepEachRow(3) FROM numbers(10)", timeout=5) with pytest.raises(QueryRuntimeException): - node.query("SELECT 1", settings={'max_concurrent_queries_for_user': 1}) - assert node.contains_in_log('Too many simultaneous queries for user') - assert not node.contains_in_log('Unknown packet') - assert not node.contains_in_log('Unexpected packet') + node.query("SELECT 1", settings={"max_concurrent_queries_for_user": 1}) + assert node.contains_in_log("Too many simultaneous queries for user") + assert not node.contains_in_log("Unknown packet") + assert not node.contains_in_log("Unexpected packet") diff --git a/tests/integration/test_recompression_ttl/test.py b/tests/integration/test_recompression_ttl/test.py index e74ae928b51..851e3bb4eb8 100644 --- a/tests/integration/test_recompression_ttl/test.py +++ b/tests/integration/test_recompression_ttl/test.py @@ -4,8 +4,12 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/background_pool_config.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/background_pool_config.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/background_pool_config.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/background_pool_config.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -24,7 +28,11 @@ def started_cluster(): def wait_part_in_parts(node, table, part_name, retries=40): for i in range(retries): - result = node.query("SELECT name FROM system.parts where name = '{}' and table = '{}'".format(part_name, table)) + result = node.query( + "SELECT name FROM system.parts where name = '{}' and table = '{}'".format( + part_name, table + ) + ) if result: return True time.sleep(0.5) @@ -35,7 +43,10 @@ def wait_part_in_parts(node, table, part_name, retries=40): def optimize_final_table_until_success(node, table_name, retries=40): for i in range(retries): try: - node.query("OPTIMIZE TABLE {} FINAL".format(table_name), settings={"optimize_throw_if_noop": "1"}) + node.query( + "OPTIMIZE TABLE {} FINAL".format(table_name), + settings={"optimize_throw_if_noop": "1"}, + ) return True except: time.sleep(0.5) @@ -46,19 +57,29 @@ def optimize_final_table_until_success(node, table_name, retries=40): def wait_part_and_get_compression_codec(node, table, part_name, retries=40): if wait_part_in_parts(node, table, part_name, retries): return node.query( - "SELECT default_compression_codec FROM system.parts where name = 
'{}' and table = '{}'".format(part_name, - table)).strip() + "SELECT default_compression_codec FROM system.parts where name = '{}' and table = '{}'".format( + part_name, table + ) + ).strip() return None def test_recompression_simple(started_cluster): node1.query( - "CREATE TABLE table_for_recompression (d DateTime, key UInt64, data String) ENGINE MergeTree() ORDER BY tuple() TTL d + INTERVAL 10 SECOND RECOMPRESS CODEC(ZSTD(10)) SETTINGS merge_with_recompression_ttl_timeout = 0") + "CREATE TABLE table_for_recompression (d DateTime, key UInt64, data String) ENGINE MergeTree() ORDER BY tuple() TTL d + INTERVAL 10 SECOND RECOMPRESS CODEC(ZSTD(10)) SETTINGS merge_with_recompression_ttl_timeout = 0" + ) node1.query("INSERT INTO table_for_recompression VALUES (now(), 1, '1')") - assert node1.query("SELECT default_compression_codec FROM system.parts where name = 'all_1_1_0'") == "LZ4\n" + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts where name = 'all_1_1_0'" + ) + == "LZ4\n" + ) - codec = wait_part_and_get_compression_codec(node1, "table_for_recompression", "all_1_1_1") + codec = wait_part_and_get_compression_codec( + node1, "table_for_recompression", "all_1_1_1" + ) if not codec: assert False, "Part all_1_1_1 doesn't appeared in system.parts" @@ -69,32 +90,50 @@ def test_recompression_simple(started_cluster): optimize_final_table_until_success(node1, "table_for_recompression") - assert node1.query("SELECT default_compression_codec FROM system.parts where name = 'all_1_1_2'") == "ZSTD(10)\n" + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts where name = 'all_1_1_2'" + ) + == "ZSTD(10)\n" + ) def test_recompression_multiple_ttls(started_cluster): - node2.query("CREATE TABLE table_for_recompression (d DateTime, key UInt64, data String) ENGINE MergeTree() ORDER BY tuple() \ + node2.query( + "CREATE TABLE table_for_recompression (d DateTime, key UInt64, data String) ENGINE MergeTree() ORDER BY tuple() \ TTL d + INTERVAL 5 SECOND RECOMPRESS CODEC(ZSTD(10)), \ d + INTERVAL 10 SECOND RECOMPRESS CODEC(ZSTD(11)), \ - d + INTERVAL 15 SECOND RECOMPRESS CODEC(ZSTD(12)) SETTINGS merge_with_recompression_ttl_timeout = 0") + d + INTERVAL 15 SECOND RECOMPRESS CODEC(ZSTD(12)) SETTINGS merge_with_recompression_ttl_timeout = 0" + ) node2.query("INSERT INTO table_for_recompression VALUES (now(), 1, '1')") - assert node2.query("SELECT default_compression_codec FROM system.parts where name = 'all_1_1_0'") == "LZ4\n" + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts where name = 'all_1_1_0'" + ) + == "LZ4\n" + ) - codec = wait_part_and_get_compression_codec(node2, "table_for_recompression", "all_1_1_1") + codec = wait_part_and_get_compression_codec( + node2, "table_for_recompression", "all_1_1_1" + ) if not codec: assert False, "Part all_1_1_1 doesn't appeared in system.parts" assert codec == "ZSTD(10)" - codec = wait_part_and_get_compression_codec(node2, "table_for_recompression", "all_1_1_2") + codec = wait_part_and_get_compression_codec( + node2, "table_for_recompression", "all_1_1_2" + ) if not codec: assert False, "Part all_1_1_2 doesn't appeared in system.parts" assert codec == "ZSTD(11)" - codec = wait_part_and_get_compression_codec(node2, "table_for_recompression", "all_1_1_3") + codec = wait_part_and_get_compression_codec( + node2, "table_for_recompression", "all_1_1_3" + ) if not codec: assert False, "Part all_1_1_3 doesn't appeared in system.parts" @@ -105,32 +144,56 @@ def 
test_recompression_multiple_ttls(started_cluster): optimize_final_table_until_success(node2, "table_for_recompression") - assert node2.query("SELECT default_compression_codec FROM system.parts where name = 'all_1_1_4'") == "ZSTD(12)\n" + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts where name = 'all_1_1_4'" + ) + == "ZSTD(12)\n" + ) - assert node2.query( - "SELECT recompression_ttl_info.expression FROM system.parts where name = 'all_1_1_4'") == "['plus(d, toIntervalSecond(10))','plus(d, toIntervalSecond(15))','plus(d, toIntervalSecond(5))']\n" + assert ( + node2.query( + "SELECT recompression_ttl_info.expression FROM system.parts where name = 'all_1_1_4'" + ) + == "['plus(d, toIntervalSecond(10))','plus(d, toIntervalSecond(15))','plus(d, toIntervalSecond(5))']\n" + ) def test_recompression_replicated(started_cluster): for i, node in enumerate([node1, node2]): - node.query("CREATE TABLE recompression_replicated (d DateTime, key UInt64, data String) \ + node.query( + "CREATE TABLE recompression_replicated (d DateTime, key UInt64, data String) \ ENGINE ReplicatedMergeTree('/test/rr', '{}') ORDER BY tuple() \ TTL d + INTERVAL 10 SECOND RECOMPRESS CODEC(ZSTD(13)) SETTINGS merge_with_recompression_ttl_timeout = 0".format( - i + 1)) + i + 1 + ) + ) node1.query("INSERT INTO recompression_replicated VALUES (now(), 1, '1')") node2.query("SYSTEM SYNC REPLICA recompression_replicated", timeout=5) - assert node1.query( - "SELECT default_compression_codec FROM system.parts where name = 'all_0_0_0' and table = 'recompression_replicated'") == "LZ4\n" - assert node2.query( - "SELECT default_compression_codec FROM system.parts where name = 'all_0_0_0' and table = 'recompression_replicated'") == "LZ4\n" + assert ( + node1.query( + "SELECT default_compression_codec FROM system.parts where name = 'all_0_0_0' and table = 'recompression_replicated'" + ) + == "LZ4\n" + ) + assert ( + node2.query( + "SELECT default_compression_codec FROM system.parts where name = 'all_0_0_0' and table = 'recompression_replicated'" + ) + == "LZ4\n" + ) - codec1 = wait_part_and_get_compression_codec(node1, "recompression_replicated", "all_0_0_1") + codec1 = wait_part_and_get_compression_codec( + node1, "recompression_replicated", "all_0_0_1" + ) if not codec1: assert False, "Part all_0_0_1 doesn't appeared in system.parts on node1" - codec2 = wait_part_and_get_compression_codec(node2, "recompression_replicated", "all_0_0_1") + codec2 = wait_part_and_get_compression_codec( + node2, "recompression_replicated", "all_0_0_1" + ) if not codec2: assert False, "Part all_0_0_1 doesn't appeared in system.parts on node2" diff --git a/tests/integration/test_recovery_replica/test.py b/tests/integration/test_recovery_replica/test.py index bf869d0de31..4a1298162da 100644 --- a/tests/integration/test_recovery_replica/test.py +++ b/tests/integration/test_recovery_replica/test.py @@ -6,26 +6,32 @@ from helpers.test_tools import assert_eq_with_retry SETTINGS = "SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0" + def fill_nodes(nodes): for node in nodes: node.query( - ''' + """ CREATE TABLE test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) {settings}; - '''.format(replica=node.name, settings=SETTINGS)) + """.format( + replica=node.name, settings=SETTINGS + ) + ) cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', 
with_zookeeper=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) -node3 = cluster.add_instance('node3', with_zookeeper=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) +node2 = cluster.add_instance("node2", with_zookeeper=True) +node3 = cluster.add_instance("node3", with_zookeeper=True) nodes = [node1, node2, node3] + def sync_replicas(table): for node in nodes: node.query("SYSTEM SYNC REPLICA {}".format(table)) + @pytest.fixture(scope="module") def start_cluster(): try: @@ -50,35 +56,53 @@ def test_recovery(start_cluster): for i in range(1, 11): node1.query("INSERT INTO test_table VALUES (1, {})".format(i)) - node2.query_with_retry("ATTACH TABLE test_table", - check_callback=lambda x: len(node2.query("select * from test_table")) > 0) + node2.query_with_retry( + "ATTACH TABLE test_table", + check_callback=lambda x: len(node2.query("select * from test_table")) > 0, + ) - assert_eq_with_retry(node2, "SELECT count(*) FROM test_table", node1.query("SELECT count(*) FROM test_table")) + assert_eq_with_retry( + node2, + "SELECT count(*) FROM test_table", + node1.query("SELECT count(*) FROM test_table"), + ) lost_marker = "Will mark replica node2 as lost" assert node1.contains_in_log(lost_marker) or node3.contains_in_log(lost_marker) sync_replicas("test_table") for node in nodes: - assert node.query("SELECT count(), sum(id) FROM test_table WHERE date=toDate(1)") == "11\t55\n" + assert ( + node.query("SELECT count(), sum(id) FROM test_table WHERE date=toDate(1)") + == "11\t55\n" + ) + def test_choose_source_replica(start_cluster): node3.query("INSERT INTO test_table VALUES (2, 0)") sync_replicas("test_table") node2.query("DETACH TABLE test_table") - node1.query("SYSTEM STOP FETCHES test_table") # node1 will have many entries in queue, so node2 will clone node3 + node1.query( + "SYSTEM STOP FETCHES test_table" + ) # node1 will have many entries in queue, so node2 will clone node3 for i in range(1, 11): node3.query("INSERT INTO test_table VALUES (2, {})".format(i)) - node2.query_with_retry("ATTACH TABLE test_table", - check_callback=lambda x: len(node2.query("select * from test_table")) > 0) + node2.query_with_retry( + "ATTACH TABLE test_table", + check_callback=lambda x: len(node2.query("select * from test_table")) > 0, + ) node1.query("SYSTEM START FETCHES test_table") node1.query("SYSTEM SYNC REPLICA test_table") node2.query("SYSTEM SYNC REPLICA test_table") - assert node1.query("SELECT count(*) FROM test_table") == node3.query("SELECT count(*) FROM test_table") - assert node2.query("SELECT count(*) FROM test_table") == node3.query("SELECT count(*) FROM test_table") + assert node1.query("SELECT count(*) FROM test_table") == node3.query( + "SELECT count(*) FROM test_table" + ) + assert node2.query("SELECT count(*) FROM test_table") == node3.query( + "SELECT count(*) FROM test_table" + ) lost_marker = "Will mark replica node2 as lost" assert node1.contains_in_log(lost_marker) or node3.contains_in_log(lost_marker) @@ -86,17 +110,23 @@ def test_choose_source_replica(start_cluster): sync_replicas("test_table") for node in nodes: - assert node.query("SELECT count(), sum(id) FROM test_table WHERE date=toDate(2)") == "11\t55\n" + assert ( + node.query("SELECT count(), sum(id) FROM test_table WHERE date=toDate(2)") + == "11\t55\n" + ) def test_update_metadata(start_cluster): for node in nodes: node.query( - ''' + """ CREATE TABLE update_metadata(key UInt32) ENGINE = ReplicatedMergeTree('/test/update_metadata', '{replica}') ORDER BY key PARTITION BY key % 10 {settings}; - 
'''.format(replica=node.name, settings=SETTINGS)) + """.format( + replica=node.name, settings=SETTINGS + ) + ) for i in range(1, 11): node1.query("INSERT INTO update_metadata VALUES ({})".format(i)) @@ -106,17 +136,26 @@ def test_update_metadata(start_cluster): node1.query("ALTER TABLE update_metadata ADD COLUMN col1 UInt32") for i in range(1, 11): - node1.query("INSERT INTO update_metadata VALUES ({}, {})".format(i * 10, i * 10)) + node1.query( + "INSERT INTO update_metadata VALUES ({}, {})".format(i * 10, i * 10) + ) lost_marker = "Will mark replica node2 as lost" assert node1.contains_in_log(lost_marker) or node3.contains_in_log(lost_marker) node2.query("ATTACH TABLE update_metadata") sync_replicas("update_metadata") - assert node1.query("DESC TABLE update_metadata") == node2.query("DESC TABLE update_metadata") - assert node1.query("DESC TABLE update_metadata") == node3.query("DESC TABLE update_metadata") + assert node1.query("DESC TABLE update_metadata") == node2.query( + "DESC TABLE update_metadata" + ) + assert node1.query("DESC TABLE update_metadata") == node3.query( + "DESC TABLE update_metadata" + ) for node in nodes: - assert node.query("SELECT count(), sum(key), sum(col1) FROM update_metadata") == "20\t605\t550\n" + assert ( + node.query("SELECT count(), sum(key), sum(col1) FROM update_metadata") + == "20\t605\t550\n" + ) node2.query("DETACH TABLE update_metadata") # alter with mutation @@ -129,14 +168,21 @@ def test_update_metadata(start_cluster): node2.query("ATTACH TABLE update_metadata") sync_replicas("update_metadata") - assert node1.query("DESC TABLE update_metadata") == node2.query("DESC TABLE update_metadata") - assert node1.query("DESC TABLE update_metadata") == node3.query("DESC TABLE update_metadata") + assert node1.query("DESC TABLE update_metadata") == node2.query( + "DESC TABLE update_metadata" + ) + assert node1.query("DESC TABLE update_metadata") == node3.query( + "DESC TABLE update_metadata" + ) # check that it's possible to execute alter on cloned replica node2.query("ALTER TABLE update_metadata ADD COLUMN col1 UInt32") sync_replicas("update_metadata") for node in nodes: - assert node.query("SELECT count(), sum(key), sum(col1) FROM update_metadata") == "30\t6105\t0\n" + assert ( + node.query("SELECT count(), sum(key), sum(col1) FROM update_metadata") + == "30\t6105\t0\n" + ) # more complex case with multiple alters node2.query("TRUNCATE TABLE update_metadata") @@ -144,21 +190,31 @@ def test_update_metadata(start_cluster): node1.query("INSERT INTO update_metadata VALUES ({}, {})".format(i, i)) # The following alters hang because of "No active replica has part ... 
or covering part" - #node2.query("SYSTEM STOP REPLICATED SENDS update_metadata") - #node2.query("INSERT INTO update_metadata VALUES (42, 42)") # this part will be lost + # node2.query("SYSTEM STOP REPLICATED SENDS update_metadata") + # node2.query("INSERT INTO update_metadata VALUES (42, 42)") # this part will be lost node2.query("DETACH TABLE update_metadata") node1.query("ALTER TABLE update_metadata MODIFY COLUMN col1 String") node1.query("ALTER TABLE update_metadata ADD COLUMN col2 INT") for i in range(1, 11): - node3.query("INSERT INTO update_metadata VALUES ({}, '{}', {})".format(i * 10, i * 10, i * 10)) + node3.query( + "INSERT INTO update_metadata VALUES ({}, '{}', {})".format( + i * 10, i * 10, i * 10 + ) + ) node1.query("ALTER TABLE update_metadata DROP COLUMN col1") node1.query("ALTER TABLE update_metadata ADD COLUMN col3 Date") node2.query("ATTACH TABLE update_metadata") sync_replicas("update_metadata") - assert node1.query("DESC TABLE update_metadata") == node2.query("DESC TABLE update_metadata") - assert node1.query("DESC TABLE update_metadata") == node3.query("DESC TABLE update_metadata") + assert node1.query("DESC TABLE update_metadata") == node2.query( + "DESC TABLE update_metadata" + ) + assert node1.query("DESC TABLE update_metadata") == node3.query( + "DESC TABLE update_metadata" + ) for node in nodes: - assert node.query("SELECT count(), sum(key), sum(col2) FROM update_metadata") == "20\t605\t550\n" - + assert ( + node.query("SELECT count(), sum(key), sum(col2) FROM update_metadata") + == "20\t605\t550\n" + ) diff --git a/tests/integration/test_redirect_url_storage/test.py b/tests/integration/test_redirect_url_storage/test.py index 061920954b6..06ff78707d7 100644 --- a/tests/integration/test_redirect_url_storage/test.py +++ b/tests/integration/test_redirect_url_storage/test.py @@ -6,7 +6,12 @@ import threading import time cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/named_collections.xml'], with_zookeeper=False, with_hdfs=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/named_collections.xml"], + with_zookeeper=False, + with_hdfs=True, +) @pytest.fixture(scope="module") @@ -27,7 +32,8 @@ def test_url_without_redirect(started_cluster): # access datanode port directly node1.query( - "create table WebHDFSStorage (id UInt32, name String, weight Float64) ENGINE = URL('http://hdfs1:50075/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV')") + "create table WebHDFSStorage (id UInt32, name String, weight Float64) ENGINE = URL('http://hdfs1:50075/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV')" + ) assert node1.query("select * from WebHDFSStorage") == "1\tMark\t72.53\n" @@ -42,7 +48,8 @@ def test_url_with_globs(started_cluster): hdfs_api.write_data("/simple_storage_2_3", "6\n") result = node1.query( - "select * from url('http://hdfs1:50075/webhdfs/v1/simple_storage_{1..2}_{1..3}?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV', 'data String') as data order by data") + "select * from url('http://hdfs1:50075/webhdfs/v1/simple_storage_{1..2}_{1..3}?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV', 'data String') as data order by data" + ) assert result == "1\n2\n3\n4\n5\n6\n" @@ -57,7 +64,8 @@ def test_url_with_globs_and_failover(started_cluster): hdfs_api.write_data("/simple_storage_3_3", "6\n") result = node1.query( - "select * from 
url('http://hdfs1:50075/webhdfs/v1/simple_storage_{0|1|2|3}_{1..3}?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV', 'data String') as data order by data") + "select * from url('http://hdfs1:50075/webhdfs/v1/simple_storage_{0|1|2|3}_{1..3}?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV', 'data String') as data order by data" + ) assert result == "1\n2\n3\n" or result == "4\n5\n6\n" @@ -69,9 +77,13 @@ def test_url_with_redirect_not_allowed(started_cluster): # access proxy port without allowing redirects node1.query( - "create table WebHDFSStorageWithoutRedirect (id UInt32, name String, weight Float64) ENGINE = URL('http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV')") + "create table WebHDFSStorageWithoutRedirect (id UInt32, name String, weight Float64) ENGINE = URL('http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV')" + ) with pytest.raises(Exception): - assert node1.query("select * from WebHDFSStorageWithoutRedirect") == "1\tMark\t72.53\n" + assert ( + node1.query("select * from WebHDFSStorageWithoutRedirect") + == "1\tMark\t72.53\n" + ) def test_url_with_redirect_allowed(started_cluster): @@ -83,10 +95,17 @@ def test_url_with_redirect_allowed(started_cluster): # access proxy port with allowing redirects # http://localhost:50070/webhdfs/v1/b?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0 node1.query( - "create table WebHDFSStorageWithRedirect (id UInt32, name String, weight Float64) ENGINE = URL('http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV')") - assert node1.query("SET max_http_get_redirects=1; select * from WebHDFSStorageWithRedirect") == "1\tMark\t72.53\n" + "create table WebHDFSStorageWithRedirect (id UInt32, name String, weight Float64) ENGINE = URL('http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV')" + ) + assert ( + node1.query( + "SET max_http_get_redirects=1; select * from WebHDFSStorageWithRedirect" + ) + == "1\tMark\t72.53\n" + ) node1.query("drop table WebHDFSStorageWithRedirect") + def test_predefined_connection_configuration(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -94,29 +113,45 @@ def test_predefined_connection_configuration(started_cluster): assert hdfs_api.read_data("/simple_storage") == "1\tMark\t72.53\n" node1.query( - "create table WebHDFSStorageWithRedirect (id UInt32, name String, weight Float64) ENGINE = URL(url1, url='http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', format='TSV')") - assert node1.query("SET max_http_get_redirects=1; select * from WebHDFSStorageWithRedirect") == "1\tMark\t72.53\n" - result = node1.query("SET max_http_get_redirects=1; select * from url(url1, url='http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', format='TSV', structure='id UInt32, name String, weight Float64')") - assert(result == "1\tMark\t72.53\n") + "create table WebHDFSStorageWithRedirect (id UInt32, name String, weight Float64) ENGINE = URL(url1, url='http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', format='TSV')" + ) + assert ( + node1.query( + "SET max_http_get_redirects=1; select * from WebHDFSStorageWithRedirect" + ) + == "1\tMark\t72.53\n" + ) + result = node1.query( + "SET max_http_get_redirects=1; select * from url(url1, 
url='http://hdfs1:50070/webhdfs/v1/simple_storage?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', format='TSV', structure='id UInt32, name String, weight Float64')" + ) + assert result == "1\tMark\t72.53\n" node1.query("drop table WebHDFSStorageWithRedirect") -result = '' +result = "" + + def test_url_reconnect(started_cluster): hdfs_api = started_cluster.hdfs_api with PartitionManager() as pm: node1.query( - "insert into table function hdfs('hdfs://hdfs1:9000/storage_big', 'TSV', 'id Int32') select number from numbers(500000)") + "insert into table function hdfs('hdfs://hdfs1:9000/storage_big', 'TSV', 'id Int32') select number from numbers(500000)" + ) - pm_rule = {'destination': node1.ip_address, 'source_port': 50075, 'action': 'REJECT'} + pm_rule = { + "destination": node1.ip_address, + "source_port": 50075, + "action": "REJECT", + } pm._add_rule(pm_rule) def select(): global result result = node1.query( - "select sum(cityHash64(id)) from url('http://hdfs1:50075/webhdfs/v1/storage_big?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV', 'id Int32') settings http_max_tries = 10, http_retry_max_backoff_ms=1000") - assert(int(result), 6581218782194912115) + "select sum(cityHash64(id)) from url('http://hdfs1:50075/webhdfs/v1/storage_big?op=OPEN&namenoderpcaddress=hdfs1:9000&offset=0', 'TSV', 'id Int32') settings http_max_tries = 10, http_retry_max_backoff_ms=1000" + ) + assert (int(result), 6581218782194912115) thread = threading.Thread(target=select) thread.start() @@ -126,5 +161,5 @@ def test_url_reconnect(started_cluster): thread.join() - assert(int(result), 6581218782194912115) - assert node1.contains_in_log("Error: Timeout: connect timed out") + assert (int(result), 6581218782194912115) + assert node1.contains_in_log("Timeout: connect timed out") diff --git a/tests/integration/test_relative_filepath/test.py b/tests/integration/test_relative_filepath/test.py index 45c969b86f5..a9701092b65 100644 --- a/tests/integration/test_relative_filepath/test.py +++ b/tests/integration/test_relative_filepath/test.py @@ -3,7 +3,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/config.xml']) +node = cluster.add_instance("node", main_configs=["configs/config.xml"]) path_to_userfiles_from_defaut_config = "user_files" @@ -20,19 +20,41 @@ def test_filepath(start_cluster): # 2 rows data some_data = "Test\t111.222\nData\t333.444" - node.exec_in_container(['bash', '-c', 'mkdir -p {}'.format( - path_to_userfiles_from_defaut_config - )], privileged=True, user='root') + node.exec_in_container( + ["bash", "-c", "mkdir -p {}".format(path_to_userfiles_from_defaut_config)], + privileged=True, + user="root", + ) - node.exec_in_container(['bash', '-c', 'echo "{}" > {}'.format( - some_data, - path_to_userfiles_from_defaut_config + "/relative_user_file_test" - )], privileged=True, user='root') + node.exec_in_container( + [ + "bash", + "-c", + 'echo "{}" > {}'.format( + some_data, + path_to_userfiles_from_defaut_config + "/relative_user_file_test", + ), + ], + privileged=True, + user="root", + ) - test_requests = [("relative_user_file_test", "2"), - ("../" + path_to_userfiles_from_defaut_config + "/relative_user_file_test", "2")] + test_requests = [ + ("relative_user_file_test", "2"), + ( + "../" + path_to_userfiles_from_defaut_config + "/relative_user_file_test", + "2", + ), + ] for pattern, value in test_requests: - assert node.query(''' + assert ( + node.query( + """ select count() from file('{}', 
'TSV', 'text String, number Float64') - '''.format(pattern)) == '{}\n'.format(value) + """.format( + pattern + ) + ) + == "{}\n".format(value) + ) diff --git a/tests/integration/test_reload_auxiliary_zookeepers/test.py b/tests/integration/test_reload_auxiliary_zookeepers/test.py index a52f21b5e02..bb1455333fc 100644 --- a/tests/integration/test_reload_auxiliary_zookeepers/test.py +++ b/tests/integration/test_reload_auxiliary_zookeepers/test.py @@ -60,7 +60,9 @@ def test_reload_auxiliary_zookeepers(start_cluster): """ - node.replace_config("/etc/clickhouse-server/conf.d/zookeeper_config.xml", new_config) + node.replace_config( + "/etc/clickhouse-server/conf.d/zookeeper_config.xml", new_config + ) node.query("SYSTEM RELOAD CONFIG") @@ -81,7 +83,9 @@ def test_reload_auxiliary_zookeepers(start_cluster): 2000
""" - node.replace_config("/etc/clickhouse-server/conf.d/zookeeper_config.xml", new_config) + node.replace_config( + "/etc/clickhouse-server/conf.d/zookeeper_config.xml", new_config + ) node.query("SYSTEM RELOAD CONFIG") time.sleep(5) diff --git a/tests/integration/test_reload_certificate/test.py b/tests/integration/test_reload_certificate/test.py index d37fd1bccbc..0f2579f4544 100644 --- a/tests/integration/test_reload_certificate/test.py +++ b/tests/integration/test_reload_certificate/test.py @@ -4,10 +4,19 @@ from helpers.cluster import ClickHouseCluster SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=["configs/first.crt", "configs/first.key", - "configs/second.crt", "configs/second.key", - "configs/ECcert.crt", "configs/ECcert.key", - "configs/cert.xml"]) +node = cluster.add_instance( + "node", + main_configs=[ + "configs/first.crt", + "configs/first.key", + "configs/second.crt", + "configs/second.key", + "configs/ECcert.crt", + "configs/ECcert.key", + "configs/cert.xml", + ], +) + @pytest.fixture(scope="module", autouse=True) def started_cluster(): @@ -17,12 +26,17 @@ def started_cluster(): finally: cluster.shutdown() + def change_config_to_key(name): - ''' - * Generate config with certificate/key name from args. - * Reload config. - ''' - node.exec_in_container(["bash", "-c" , """cat > /etc/clickhouse-server/config.d/cert.xml << EOF + """ + * Generate config with certificate/key name from args. + * Reload config. + """ + node.exec_in_container( + [ + "bash", + "-c", + """cat > /etc/clickhouse-server/config.d/cert.xml << EOF 8443 @@ -37,64 +51,145 @@ def change_config_to_key(name): -EOF""".format(cur_name=name)]) +EOF""".format( + cur_name=name + ), + ] + ) node.query("SYSTEM RELOAD CONFIG") + def test_first_than_second_cert(): - ''' Consistently set first key and check that only it will be accepted, then repeat same for second key. ''' + """Consistently set first key and check that only it will be accepted, then repeat same for second key.""" # Set first key - change_config_to_key('first') + change_config_to_key("first") # Command with correct certificate - assert node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='first'), - 'https://localhost:8443/']) == 'Ok.\n' + assert ( + node.exec_in_container( + [ + "curl", + "--silent", + "--cacert", + "/etc/clickhouse-server/config.d/{cur_name}.crt".format( + cur_name="first" + ), + "https://localhost:8443/", + ] + ) + == "Ok.\n" + ) # Command with wrong certificate - # This command don't use option '-k', so it will lead to error while execution. + # This command don't use option '-k', so it will lead to error while execution. 
# That's why except will always work try: - node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='second'), - 'https://localhost:8443/']) + node.exec_in_container( + [ + "curl", + "--silent", + "--cacert", + "/etc/clickhouse-server/config.d/{cur_name}.crt".format( + cur_name="second" + ), + "https://localhost:8443/", + ] + ) assert False except: assert True - + # Change to other key - change_config_to_key('second') + change_config_to_key("second") # Command with correct certificate - assert node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='second'), - 'https://localhost:8443/']) == 'Ok.\n' + assert ( + node.exec_in_container( + [ + "curl", + "--silent", + "--cacert", + "/etc/clickhouse-server/config.d/{cur_name}.crt".format( + cur_name="second" + ), + "https://localhost:8443/", + ] + ) + == "Ok.\n" + ) # Command with wrong certificate # Same as previous try: - node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='first'), - 'https://localhost:8443/']) + node.exec_in_container( + [ + "curl", + "--silent", + "--cacert", + "/etc/clickhouse-server/config.d/{cur_name}.crt".format( + cur_name="first" + ), + "https://localhost:8443/", + ] + ) assert False except: assert True + def test_ECcert_reload(): # Set first key - change_config_to_key('first') + change_config_to_key("first") # Command with correct certificate - assert node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='first'), - 'https://localhost:8443/']) == 'Ok.\n' - + assert ( + node.exec_in_container( + [ + "curl", + "--silent", + "--cacert", + "/etc/clickhouse-server/config.d/{cur_name}.crt".format( + cur_name="first" + ), + "https://localhost:8443/", + ] + ) + == "Ok.\n" + ) + # Change to other key - change_config_to_key('ECcert') + change_config_to_key("ECcert") # Command with correct certificate - assert node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='ECcert'), - 'https://localhost:8443/']) == 'Ok.\n' + assert ( + node.exec_in_container( + [ + "curl", + "--silent", + "--cacert", + "/etc/clickhouse-server/config.d/{cur_name}.crt".format( + cur_name="ECcert" + ), + "https://localhost:8443/", + ] + ) + == "Ok.\n" + ) # Command with wrong certificate # Same as previous try: - node.exec_in_container(['curl', '--silent', '--cacert', '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name='first'), - 'https://localhost:8443/']) + node.exec_in_container( + [ + "curl", + "--silent", + "--cacert", + "/etc/clickhouse-server/config.d/{cur_name}.crt".format( + cur_name="first" + ), + "https://localhost:8443/", + ] + ) assert False except: assert True diff --git a/tests/integration/test_reload_clusters_config/test.py b/tests/integration/test_reload_clusters_config/test.py index 048b704034b..6979fd5565b 100644 --- a/tests/integration/test_reload_clusters_config/test.py +++ b/tests/integration/test_reload_clusters_config/test.py @@ -11,23 +11,30 @@ from helpers.network import PartitionManager from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', with_zookeeper=True, main_configs=['configs/remote_servers.xml']) -node_1 = cluster.add_instance('node_1', with_zookeeper=True) -node_2 = cluster.add_instance('node_2', with_zookeeper=True) +node 
= cluster.add_instance( + "node", with_zookeeper=True, main_configs=["configs/remote_servers.xml"] +) +node_1 = cluster.add_instance("node_1", with_zookeeper=True) +node_2 = cluster.add_instance("node_2", with_zookeeper=True) + @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - node.query('''CREATE TABLE distributed (id UInt32) ENGINE = - Distributed('test_cluster', 'default', 'replicated')''') - - node.query('''CREATE TABLE distributed2 (id UInt32) ENGINE = - Distributed('test_cluster2', 'default', 'replicated')''') + node.query( + """CREATE TABLE distributed (id UInt32) ENGINE = + Distributed('test_cluster', 'default', 'replicated')""" + ) - cluster.pause_container('node_1') - cluster.pause_container('node_2') + node.query( + """CREATE TABLE distributed2 (id UInt32) ENGINE = + Distributed('test_cluster2', 'default', 'replicated')""" + ) + + cluster.pause_container("node_1") + cluster.pause_container("node_2") yield cluster @@ -35,7 +42,7 @@ def started_cluster(): cluster.shutdown() -base_config = ''' +base_config = """ @@ -66,9 +73,9 @@ base_config = ''' -''' +""" -test_config1 = ''' +test_config1 = """ @@ -95,9 +102,9 @@ test_config1 = ''' -''' +""" -test_config2 = ''' +test_config2 = """ @@ -115,9 +122,9 @@ test_config2 = ''' -''' +""" -test_config3 = ''' +test_config3 = """ @@ -157,16 +164,24 @@ test_config3 = ''' -''' +""" def send_repeated_query(table, count=5): for i in range(count): - node.query_and_get_error("SELECT count() FROM {} SETTINGS receive_timeout=1".format(table)) + node.query_and_get_error( + "SELECT count() FROM {} SETTINGS receive_timeout=1".format(table) + ) def get_errors_count(cluster, host_name="node_1"): - return int(node.query("SELECT errors_count FROM system.clusters WHERE cluster='{}' and host_name='{}'".format(cluster, host_name))) + return int( + node.query( + "SELECT errors_count FROM system.clusters WHERE cluster='{}' and host_name='{}'".format( + cluster, host_name + ) + ) + ) def set_config(config): @@ -178,7 +193,7 @@ def test_simple_reload(started_cluster): send_repeated_query("distributed") assert get_errors_count("test_cluster") > 0 - + node.query("SYSTEM RELOAD CONFIG") assert get_errors_count("test_cluster") > 0 @@ -209,9 +224,9 @@ def test_delete_cluster(started_cluster): set_config(test_config2) assert get_errors_count("test_cluster") > 0 - + result = node.query("SELECT * FROM system.clusters WHERE cluster='test_cluster2'") - assert result == '' + assert result == "" set_config(base_config) @@ -229,7 +244,6 @@ def test_add_cluster(started_cluster): assert get_errors_count("test_cluster2") > 0 result = node.query("SELECT * FROM system.clusters WHERE cluster='test_cluster3'") - assert result != '' + assert result != "" set_config(base_config) - diff --git a/tests/integration/test_reload_max_table_size_to_drop/test.py b/tests/integration/test_reload_max_table_size_to_drop/test.py index 7e7219088b8..da7dba12fa0 100644 --- a/tests/integration/test_reload_max_table_size_to_drop/test.py +++ b/tests/integration/test_reload_max_table_size_to_drop/test.py @@ -5,18 +5,23 @@ import pytest from helpers.cluster import ClickHouseCluster, get_instances_dir cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=["configs/max_table_size_to_drop.xml"]) +node = cluster.add_instance("node", main_configs=["configs/max_table_size_to_drop.xml"]) SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -CONFIG_PATH = os.path.join(SCRIPT_DIR, 
'./{}/node/configs/config.d/max_table_size_to_drop.xml'.format(get_instances_dir())) +CONFIG_PATH = os.path.join( + SCRIPT_DIR, + "./{}/node/configs/config.d/max_table_size_to_drop.xml".format(get_instances_dir()), +) @pytest.fixture(scope="module") def start_cluster(): try: cluster.start() - node.query("CREATE TABLE test(date Date, id UInt32) ENGINE = MergeTree() PARTITION BY date ORDER BY id") + node.query( + "CREATE TABLE test(date Date, id UInt32) ENGINE = MergeTree() PARTITION BY date ORDER BY id" + ) yield cluster finally: cluster.shutdown() @@ -32,11 +37,14 @@ def test_reload_max_table_size_to_drop(start_cluster): assert out == "" assert err != "" - config = open(CONFIG_PATH, 'r') + config = open(CONFIG_PATH, "r") config_lines = config.readlines() config.close() - config_lines = [line.replace("1", "1000000") for line in config_lines] - config = open(CONFIG_PATH, 'w') + config_lines = [ + line.replace("1", "1000000") + for line in config_lines + ] + config = open(CONFIG_PATH, "w") config.writelines(config_lines) config.close() diff --git a/tests/integration/test_reload_zookeeper/test.py b/tests/integration/test_reload_zookeeper/test.py index 73ef42a86f6..8924376d6fd 100644 --- a/tests/integration/test_reload_zookeeper/test.py +++ b/tests/integration/test_reload_zookeeper/test.py @@ -7,8 +7,8 @@ from helpers.client import QueryRuntimeException from helpers.test_tools import assert_eq_with_retry -cluster = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper.xml') -node = cluster.add_instance('node', with_zookeeper=True) +cluster = ClickHouseCluster(__file__, zookeeper_config_path="configs/zookeeper.xml") +node = cluster.add_instance("node", with_zookeeper=True) @pytest.fixture(scope="module") @@ -16,33 +16,36 @@ def start_cluster(): try: cluster.start() node.query( - ''' + """ CREATE TABLE test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/shard1/test/test_table', '1') PARTITION BY toYYYYMM(date) ORDER BY id - ''') + """ + ) yield cluster finally: cluster.shutdown() -def test_reload_zookeeper(start_cluster): +def test_reload_zookeeper(start_cluster): def wait_zookeeper_node_to_start(zk_nodes, timeout=60): start = time.time() while time.time() - start < timeout: try: for instance in zk_nodes: conn = start_cluster.get_kazoo_client(instance) - conn.get_children('/') + conn.get_children("/") print("All instances of ZooKeeper started") return except Exception as ex: print(("Can't connect to ZooKeeper " + str(ex))) time.sleep(0.5) - node.query("INSERT INTO test_table(date, id) select today(), number FROM numbers(1000)") + node.query( + "INSERT INTO test_table(date, id) select today(), number FROM numbers(1000)" + ) ## remove zoo2, zoo3 from configs new_config = """ @@ -59,23 +62,41 @@ def test_reload_zookeeper(start_cluster): node.replace_config("/etc/clickhouse-server/conf.d/zookeeper.xml", new_config) node.query("SYSTEM RELOAD CONFIG") ## config reloads, but can still work - assert_eq_with_retry(node, "SELECT COUNT() FROM test_table", '1000', retry_count=120, sleep_time=0.5) + assert_eq_with_retry( + node, "SELECT COUNT() FROM test_table", "1000", retry_count=120, sleep_time=0.5 + ) ## stop all zookeepers, table will be readonly cluster.stop_zookeeper_nodes(["zoo1", "zoo2", "zoo3"]) node.query("SELECT COUNT() FROM test_table") with pytest.raises(QueryRuntimeException): - node.query("SELECT COUNT() FROM test_table", settings={"select_sequential_consistency" : 1}) + node.query( + "SELECT COUNT() FROM test_table", + 
settings={"select_sequential_consistency": 1}, + ) ## start zoo2, zoo3, table will be readonly too, because it only connect to zoo1 cluster.start_zookeeper_nodes(["zoo2", "zoo3"]) wait_zookeeper_node_to_start(["zoo2", "zoo3"]) node.query("SELECT COUNT() FROM test_table") with pytest.raises(QueryRuntimeException): - node.query("SELECT COUNT() FROM test_table", settings={"select_sequential_consistency" : 1}) + node.query( + "SELECT COUNT() FROM test_table", + settings={"select_sequential_consistency": 1}, + ) def get_active_zk_connections(): - return str(node.exec_in_container(['bash', '-c', 'lsof -a -i4 -i6 -itcp -w | grep 2181 | grep ESTABLISHED | wc -l'], privileged=True, user='root')).strip() + return str( + node.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 2181 | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() ## set config to zoo2, server will be normal new_config = """ @@ -93,9 +114,15 @@ def test_reload_zookeeper(start_cluster): node.query("SYSTEM RELOAD CONFIG") active_zk_connections = get_active_zk_connections() - assert active_zk_connections == '1', "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections) + assert ( + active_zk_connections == "1" + ), "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections) - assert_eq_with_retry(node, "SELECT COUNT() FROM test_table", '1000', retry_count=120, sleep_time=0.5) + assert_eq_with_retry( + node, "SELECT COUNT() FROM test_table", "1000", retry_count=120, sleep_time=0.5 + ) active_zk_connections = get_active_zk_connections() - assert active_zk_connections == '1', "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections) + assert ( + active_zk_connections == "1" + ), "Total connections to ZooKeeper not equal to 1, {}".format(active_zk_connections) diff --git a/tests/integration/test_reloading_settings_from_users_xml/test.py b/tests/integration/test_reloading_settings_from_users_xml/test.py index b45568ee904..3b95796ab9c 100644 --- a/tests/integration/test_reloading_settings_from_users_xml/test.py +++ b/tests/integration/test_reloading_settings_from_users_xml/test.py @@ -6,7 +6,8 @@ from helpers.test_tools import assert_eq_with_retry, assert_logs_contain_with_re SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', user_configs=["configs/normal_settings.xml"]) +node = cluster.add_instance("node", user_configs=["configs/normal_settings.xml"]) + @pytest.fixture(scope="module", autouse=True) def started_cluster(): @@ -20,7 +21,10 @@ def started_cluster(): @pytest.fixture(autouse=True) def reset_to_normal_settings_after_test(): try: - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/normal_settings.xml"), '/etc/clickhouse-server/users.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/normal_settings.xml"), + "/etc/clickhouse-server/users.d/z.xml", + ) node.query("SYSTEM RELOAD CONFIG") yield finally: @@ -30,8 +34,11 @@ def reset_to_normal_settings_after_test(): def test_force_reload(): assert node.query("SELECT getSetting('max_memory_usage')") == "10000000000\n" assert node.query("SELECT getSetting('load_balancing')") == "first_or_random\n" - - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/changed_settings.xml"), '/etc/clickhouse-server/users.d/z.xml') + + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/changed_settings.xml"), + 
"/etc/clickhouse-server/users.d/z.xml", + ) node.query("SYSTEM RELOAD CONFIG") assert node.query("SELECT getSetting('max_memory_usage')") == "20000000000\n" @@ -42,16 +49,24 @@ def test_reload_on_timeout(): assert node.query("SELECT getSetting('max_memory_usage')") == "10000000000\n" assert node.query("SELECT getSetting('load_balancing')") == "first_or_random\n" - time.sleep(1) # The modification time of the 'z.xml' file should be different, - # because config files are reload by timer only when the modification time is changed. - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/changed_settings.xml"), '/etc/clickhouse-server/users.d/z.xml') + time.sleep(1) # The modification time of the 'z.xml' file should be different, + # because config files are reload by timer only when the modification time is changed. + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/changed_settings.xml"), + "/etc/clickhouse-server/users.d/z.xml", + ) assert_eq_with_retry(node, "SELECT getSetting('max_memory_usage')", "20000000000") - assert_eq_with_retry(node, "SELECT getSetting('load_balancing')", "nearest_hostname") + assert_eq_with_retry( + node, "SELECT getSetting('load_balancing')", "nearest_hostname" + ) def test_unknown_setting_force_reload(): - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/unknown_setting.xml"), '/etc/clickhouse-server/users.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/unknown_setting.xml"), + "/etc/clickhouse-server/users.d/z.xml", + ) error_message = "Setting xyz is neither a builtin setting nor started with the prefix 'custom_' registered for user-defined settings" assert error_message in node.query_and_get_error("SYSTEM RELOAD CONFIG") @@ -61,9 +76,12 @@ def test_unknown_setting_force_reload(): def test_unknown_setting_reload_on_timeout(): - time.sleep(1) # The modification time of the 'z.xml' file should be different, - # because config files are reload by timer only when the modification time is changed. - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/unknown_setting.xml"), '/etc/clickhouse-server/users.d/z.xml') + time.sleep(1) # The modification time of the 'z.xml' file should be different, + # because config files are reload by timer only when the modification time is changed. 
+ node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/unknown_setting.xml"), + "/etc/clickhouse-server/users.d/z.xml", + ) error_message = "Setting xyz is neither a builtin setting nor started with the prefix 'custom_' registered for user-defined settings" assert_logs_contain_with_retry(node, error_message) @@ -73,7 +91,10 @@ def test_unknown_setting_reload_on_timeout(): def test_unexpected_setting_int(): - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/unexpected_setting_int.xml"), '/etc/clickhouse-server/users.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/unexpected_setting_int.xml"), + "/etc/clickhouse-server/users.d/z.xml", + ) error_message = "Cannot parse" assert error_message in node.query_and_get_error("SYSTEM RELOAD CONFIG") @@ -82,7 +103,10 @@ def test_unexpected_setting_int(): def test_unexpected_setting_enum(): - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/unexpected_setting_int.xml"), '/etc/clickhouse-server/users.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/unexpected_setting_int.xml"), + "/etc/clickhouse-server/users.d/z.xml", + ) error_message = "Cannot parse" assert error_message in node.query_and_get_error("SYSTEM RELOAD CONFIG") diff --git a/tests/integration/test_reloading_storage_configuration/test.py b/tests/integration/test_reloading_storage_configuration/test.py index e9fba6012f7..4b21919ab3d 100644 --- a/tests/integration/test_reloading_storage_configuration/test.py +++ b/tests/integration/test_reloading_storage_configuration/test.py @@ -13,25 +13,41 @@ import pytest cluster = helpers.cluster.ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', - main_configs=['configs/logs_config.xml'], - with_zookeeper=True, - stay_alive=True, - tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/jbod3:size=40M', '/jbod4:size=40M', - '/external:size=200M'], - macros={"shard": 0, "replica": 1}) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/logs_config.xml"], + with_zookeeper=True, + stay_alive=True, + tmpfs=[ + "/jbod1:size=40M", + "/jbod2:size=40M", + "/jbod3:size=40M", + "/jbod4:size=40M", + "/external:size=200M", + ], + macros={"shard": 0, "replica": 1}, +) -node2 = cluster.add_instance('node2', - main_configs=['configs/logs_config.xml'], - with_zookeeper=True, - stay_alive=True, - tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/jbod3:size=40M', '/jbod4:size=40M', - '/external:size=200M'], - macros={"shard": 0, "replica": 2}) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/logs_config.xml"], + with_zookeeper=True, + stay_alive=True, + tmpfs=[ + "/jbod1:size=40M", + "/jbod2:size=40M", + "/jbod3:size=40M", + "/jbod4:size=40M", + "/external:size=200M", + ], + macros={"shard": 0, "replica": 2}, +) def get_log(node): - return node.exec_in_container(["bash", "-c", "cat /var/log/clickhouse-server/clickhouse-server.log"]) + return node.exec_in_container( + ["bash", "-c", "cat /var/log/clickhouse-server/clickhouse-server.log"] + ) @pytest.fixture(scope="module") @@ -45,11 +61,17 @@ def started_cluster(): def start_over(): - shutil.copy(os.path.join(os.path.dirname(__file__), "configs/config.d/storage_configuration.xml"), - os.path.join(node1.config_d_dir, "storage_configuration.xml")) + shutil.copy( + os.path.join( + os.path.dirname(__file__), "configs/config.d/storage_configuration.xml" + ), + os.path.join(node1.config_d_dir, "storage_configuration.xml"), + ) for node in (node1, node2): - separate_configuration_path = 
os.path.join(node.config_d_dir, "separate_configuration.xml") + separate_configuration_path = os.path.join( + node.config_d_dir, "separate_configuration.xml" + ) try: os.remove(separate_configuration_path) except: @@ -57,16 +79,23 @@ def start_over(): def add_disk(node, name, path, separate_file=False): - separate_configuration_path = os.path.join(node.config_d_dir, "separate_configuration.xml") + separate_configuration_path = os.path.join( + node.config_d_dir, "separate_configuration.xml" + ) try: if separate_file: tree = ET.parse(separate_configuration_path) else: - tree = ET.parse(os.path.join(node.config_d_dir, "storage_configuration.xml")) + tree = ET.parse( + os.path.join(node.config_d_dir, "storage_configuration.xml") + ) except: tree = ET.ElementTree( - ET.fromstring('')) + ET.fromstring( + "" + ) + ) root = tree.getroot() new_disk = ET.Element(name) new_path = ET.Element("path") @@ -78,19 +107,25 @@ def add_disk(node, name, path, separate_file=False): else: tree.write(os.path.join(node.config_d_dir, "storage_configuration.xml")) + def update_disk(node, name, path, keep_free_space_bytes, separate_file=False): - separate_configuration_path = os.path.join(node.config_d_dir, - "separate_configuration.xml") + separate_configuration_path = os.path.join( + node.config_d_dir, "separate_configuration.xml" + ) try: if separate_file: tree = ET.parse(separate_configuration_path) else: tree = ET.parse( - os.path.join(node.config_d_dir, "storage_configuration.xml")) + os.path.join(node.config_d_dir, "storage_configuration.xml") + ) except: tree = ET.ElementTree( - ET.fromstring('')) + ET.fromstring( + "" + ) + ) root = tree.getroot() disk = root.find("storage_configuration").find("disks").find(name) @@ -136,15 +171,21 @@ def test_add_disk(started_cluster): node1.restart_clickhouse(kill=True) time.sleep(2) - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) - assert "jbod3" not in set(node1.query("SELECT name FROM system.disks").splitlines()) + assert "jbod3" not in set( + node1.query("SELECT name FROM system.disks").splitlines() + ) add_disk(node1, "jbod3", "/jbod3/") node1.query("SYSTEM RELOAD CONFIG") @@ -156,6 +197,7 @@ def test_add_disk(started_cluster): except: """""" + def test_update_disk(started_cluster): try: name = "test_update_disk" @@ -165,28 +207,35 @@ def test_update_disk(started_cluster): node1.restart_clickhouse(kill=True) time.sleep(2) - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) - assert node1.query("SELECT path, keep_free_space FROM system.disks where name = 'jbod2'") == TSV([ - ["/jbod2/", "10485760"]]) + assert node1.query( + "SELECT path, keep_free_space FROM system.disks where name = 'jbod2'" + ) == TSV([["/jbod2/", "10485760"]]) update_disk(node1, "jbod2", "/jbod2/", "20971520") node1.query("SYSTEM RELOAD CONFIG") - assert node1.query("SELECT path, keep_free_space FROM system.disks where name = 'jbod2'") == TSV([ - ["/jbod2/", "20971520"]]) + assert node1.query( + "SELECT path, keep_free_space FROM system.disks where name = 'jbod2'" + ) == TSV([["/jbod2/", "20971520"]]) finally: try: node1.query("DROP TABLE IF EXISTS {}".format(name)) except: """""" + def 
test_add_disk_to_separate_config(started_cluster): try: name = "test_add_disk" @@ -196,15 +245,21 @@ def test_add_disk_to_separate_config(started_cluster): node1.restart_clickhouse(kill=True) time.sleep(2) - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) - assert "jbod3" not in set(node1.query("SELECT name FROM system.disks").splitlines()) + assert "jbod3" not in set( + node1.query("SELECT name FROM system.disks").splitlines() + ) add_disk(node1, "jbod3", "/jbod3/", separate_file=True) node1.query("SYSTEM RELOAD CONFIG") @@ -230,23 +285,35 @@ def test_add_policy(started_cluster): node1.restart_clickhouse(kill=True) time.sleep(2) - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) add_policy(node1, "cool_policy", {"volume1": ["jbod3", "jbod4"]}) node1.query("SYSTEM RELOAD CONFIG") disks = set(node1.query("SELECT name FROM system.disks").splitlines()) - assert "cool_policy" in set(node1.query("SELECT policy_name FROM system.storage_policies").splitlines()) - assert {"volume1"} == set(node1.query( - "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'cool_policy'").splitlines()) + assert "cool_policy" in set( + node1.query("SELECT policy_name FROM system.storage_policies").splitlines() + ) + assert {"volume1"} == set( + node1.query( + "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'cool_policy'" + ).splitlines() + ) assert {"['jbod3','jbod4']"} == set( - node1.query("SELECT disks FROM system.storage_policies WHERE policy_name = 'cool_policy'").splitlines()) + node1.query( + "SELECT disks FROM system.storage_policies WHERE policy_name = 'cool_policy'" + ).splitlines() + ) finally: try: @@ -265,39 +332,69 @@ def test_new_policy_works(started_cluster): node1.restart_clickhouse(kill=True) time.sleep(2) - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) add_policy(node1, "cool_policy", {"volume1": ["jbod3"]}) node1.query("SYSTEM RELOAD CONFIG") # Incompatible storage policy. 
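+        # Note (assumption, not part of the original patch): at this point "cool_policy" only
+        # lists the new volume1/jbod3 disk, so it presumably does not cover the disks the table
+        # already uses under 'jbods_with_external'; the ALTER below is therefore expected to raise.
+        # The test later rebuilds "cool_policy" with the main/external volumes included, after
+        # which the same ALTER succeeds.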
with pytest.raises(helpers.client.QueryRuntimeException): - node1.query(""" + node1.query( + """ ALTER TABLE {name} MODIFY SETTING storage_policy='cool_policy' - """.format(name=name)) + """.format( + name=name + ) + ) start_over() add_disk(node1, "jbod3", "/jbod3/") add_disk(node1, "jbod4", "/jbod4/") - add_policy(node1, "cool_policy", collections.OrderedDict( - [("volume1", ["jbod3"]), ("main", ["jbod1", "jbod2"]), ("external", ["external"])])) + add_policy( + node1, + "cool_policy", + collections.OrderedDict( + [ + ("volume1", ["jbod3"]), + ("main", ["jbod1", "jbod2"]), + ("external", ["external"]), + ] + ), + ) node1.query("SYSTEM RELOAD CONFIG") - node1.query(""" + node1.query( + """ ALTER TABLE {name} MODIFY SETTING storage_policy='cool_policy' - """.format(name=name)) + """.format( + name=name + ) + ) - node1.query(""" + node1.query( + """ INSERT INTO TABLE {name} VALUES (1) - """.format(name=name)) - assert {"jbod3"} == set(node1.query( - "SELECT disk_name FROM system.parts WHERE active = 1 AND table = '{name}'".format(name=name)).splitlines()) + """.format( + name=name + ) + ) + assert {"jbod3"} == set( + node1.query( + "SELECT disk_name FROM system.parts WHERE active = 1 AND table = '{name}'".format( + name=name + ) + ).splitlines() + ) finally: try: @@ -318,24 +415,38 @@ def test_add_volume_to_policy(started_cluster): node1.restart_clickhouse(kill=True) time.sleep(2) - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) start_over() add_disk(node1, "jbod3", "/jbod3/") add_disk(node1, "jbod4", "/jbod4/") - add_policy(node1, "cool_policy", collections.OrderedDict([("volume1", ["jbod3"]), ("volume2", ["jbod4"])])) + add_policy( + node1, + "cool_policy", + collections.OrderedDict([("volume1", ["jbod3"]), ("volume2", ["jbod4"])]), + ) node1.query("SYSTEM RELOAD CONFIG") - volumes = set(node1.query( - "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'cool_policy'").splitlines()) + volumes = set( + node1.query( + "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'cool_policy'" + ).splitlines() + ) disks_sets = set( - node1.query("SELECT disks FROM system.storage_policies WHERE policy_name = 'cool_policy'").splitlines()) + node1.query( + "SELECT disks FROM system.storage_policies WHERE policy_name = 'cool_policy'" + ).splitlines() + ) assert {"volume1", "volume2"} == volumes assert {"['jbod3']", "['jbod4']"} == disks_sets @@ -358,13 +469,17 @@ def test_add_disk_to_policy(started_cluster): node1.restart_clickhouse(kill=True) time.sleep(2) - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) start_over() add_disk(node1, "jbod3", "/jbod3/") @@ -372,10 +487,16 @@ def test_add_disk_to_policy(started_cluster): add_policy(node1, "cool_policy", {"volume1": ["jbod3", "jbod4"]}) node1.query("SYSTEM RELOAD CONFIG") - volumes = set(node1.query( - "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'cool_policy'").splitlines()) + volumes = set( + node1.query( + "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'cool_policy'" + ).splitlines() + ) disks_sets = set( - node1.query("SELECT disks FROM system.storage_policies WHERE policy_name = 'cool_policy'").splitlines()) 
+ node1.query( + "SELECT disks FROM system.storage_policies WHERE policy_name = 'cool_policy'" + ).splitlines() + ) assert {"volume1"} == volumes assert {"['jbod3','jbod4']"} == disks_sets @@ -396,20 +517,28 @@ def test_remove_disk(started_cluster): node1.restart_clickhouse(kill=True) time.sleep(2) - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) - assert "remove_disk_jbod3" in set(node1.query("SELECT name FROM system.disks").splitlines()) + assert "remove_disk_jbod3" in set( + node1.query("SELECT name FROM system.disks").splitlines() + ) start_over() node1.query("SYSTEM RELOAD CONFIG") - assert "remove_disk_jbod3" in set(node1.query("SELECT name FROM system.disks").splitlines()) + assert "remove_disk_jbod3" in set( + node1.query("SELECT name FROM system.disks").splitlines() + ) assert re.search("Warning.*remove_disk_jbod3", get_log(node1)) finally: try: @@ -430,16 +559,21 @@ def test_remove_policy(started_cluster): node1.restart_clickhouse(kill=True) time.sleep(2) - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) assert "remove_policy_cool_policy" in set( - node1.query("SELECT policy_name FROM system.storage_policies").splitlines()) + node1.query("SELECT policy_name FROM system.storage_policies").splitlines() + ) start_over() add_disk(node1, "jbod3", "/jbod3/") @@ -447,7 +581,8 @@ def test_remove_policy(started_cluster): node1.query("SYSTEM RELOAD CONFIG") assert "remove_policy_cool_policy" in set( - node1.query("SELECT policy_name FROM system.storage_policies").splitlines()) + node1.query("SELECT policy_name FROM system.storage_policies").splitlines() + ) assert re.search("Error.*remove_policy_cool_policy", get_log(node1)) finally: @@ -465,23 +600,36 @@ def test_remove_volume_from_policy(started_cluster): start_over() add_disk(node1, "jbod3", "/jbod3/") add_disk(node1, "jbod4", "/jbod4/") - add_policy(node1, "test_remove_volume_from_policy_cool_policy", - collections.OrderedDict([("volume1", ["jbod3"]), ("volume2", ["jbod4"])])) + add_policy( + node1, + "test_remove_volume_from_policy_cool_policy", + collections.OrderedDict([("volume1", ["jbod3"]), ("volume2", ["jbod4"])]), + ) node1.restart_clickhouse(kill=True) time.sleep(2) - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) - volumes = set(node1.query( - "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'test_remove_volume_from_policy_cool_policy'").splitlines()) - disks_sets = set(node1.query( - "SELECT disks FROM system.storage_policies WHERE policy_name = 'test_remove_volume_from_policy_cool_policy'").splitlines()) + volumes = set( + node1.query( + "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'test_remove_volume_from_policy_cool_policy'" + ).splitlines() + ) + disks_sets = set( + node1.query( + "SELECT disks FROM system.storage_policies WHERE policy_name = 'test_remove_volume_from_policy_cool_policy'" + ).splitlines() + ) assert {"volume1", "volume2"} == volumes assert {"['jbod3']", "['jbod4']"} == disks_sets @@ -491,13 +639,21 @@ def 
test_remove_volume_from_policy(started_cluster): add_policy(node1, "cool_policy", {"volume1": ["jbod3"]}) node1.query("SYSTEM RELOAD CONFIG") - volumes = set(node1.query( - "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'test_remove_volume_from_policy_cool_policy'").splitlines()) - disks_sets = set(node1.query( - "SELECT disks FROM system.storage_policies WHERE policy_name = 'test_remove_volume_from_policy_cool_policy'").splitlines()) + volumes = set( + node1.query( + "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'test_remove_volume_from_policy_cool_policy'" + ).splitlines() + ) + disks_sets = set( + node1.query( + "SELECT disks FROM system.storage_policies WHERE policy_name = 'test_remove_volume_from_policy_cool_policy'" + ).splitlines() + ) assert {"volume1", "volume2"} == volumes assert {"['jbod3']", "['jbod4']"} == disks_sets - assert re.search("Error.*test_remove_volume_from_policy_cool_policy", get_log(node1)) + assert re.search( + "Error.*test_remove_volume_from_policy_cool_policy", get_log(node1) + ) finally: try: @@ -514,22 +670,36 @@ def test_remove_disk_from_policy(started_cluster): start_over() add_disk(node1, "jbod3", "/jbod3/") add_disk(node1, "jbod4", "/jbod4/") - add_policy(node1, "test_remove_disk_from_policy_cool_policy", {"volume1": ["jbod3", "jbod4"]}) + add_policy( + node1, + "test_remove_disk_from_policy_cool_policy", + {"volume1": ["jbod3", "jbod4"]}, + ) node1.restart_clickhouse(kill=True) time.sleep(2) - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( d UInt64 ) ENGINE = {engine} ORDER BY d SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) - volumes = set(node1.query( - "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'test_remove_disk_from_policy_cool_policy'").splitlines()) - disks_sets = set(node1.query( - "SELECT disks FROM system.storage_policies WHERE policy_name = 'test_remove_disk_from_policy_cool_policy'").splitlines()) + volumes = set( + node1.query( + "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'test_remove_disk_from_policy_cool_policy'" + ).splitlines() + ) + disks_sets = set( + node1.query( + "SELECT disks FROM system.storage_policies WHERE policy_name = 'test_remove_disk_from_policy_cool_policy'" + ).splitlines() + ) assert {"volume1"} == volumes assert {"['jbod3','jbod4']"} == disks_sets @@ -539,13 +709,21 @@ def test_remove_disk_from_policy(started_cluster): add_policy(node1, "cool_policy", {"volume1": ["jbod3"]}) node1.query("SYSTEM RELOAD CONFIG") - volumes = set(node1.query( - "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'test_remove_disk_from_policy_cool_policy'").splitlines()) - disks_sets = set(node1.query( - "SELECT disks FROM system.storage_policies WHERE policy_name = 'test_remove_disk_from_policy_cool_policy'").splitlines()) + volumes = set( + node1.query( + "SELECT volume_name FROM system.storage_policies WHERE policy_name = 'test_remove_disk_from_policy_cool_policy'" + ).splitlines() + ) + disks_sets = set( + node1.query( + "SELECT disks FROM system.storage_policies WHERE policy_name = 'test_remove_disk_from_policy_cool_policy'" + ).splitlines() + ) assert {"volume1"} == volumes assert {"['jbod3','jbod4']"} == disks_sets - assert re.search("Error.*test_remove_disk_from_policy_cool_policy", get_log(node1)) + assert re.search( + "Error.*test_remove_disk_from_policy_cool_policy", get_log(node1) + ) finally: try: diff --git 
a/tests/integration/test_remote_prewhere/test.py b/tests/integration/test_remote_prewhere/test.py index 907a9d43d2a..60372b3028e 100644 --- a/tests/integration/test_remote_prewhere/test.py +++ b/tests/integration/test_remote_prewhere/test.py @@ -3,8 +3,8 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1') -node2 = cluster.add_instance('node2') +node1 = cluster.add_instance("node1") +node2 = cluster.add_instance("node2") @pytest.fixture(scope="module") @@ -13,7 +13,8 @@ def start_cluster(): cluster.start() for node in [node1, node2]: - node.query(""" + node.query( + """ CREATE TABLE test_table( APIKey UInt32, CustomAttributeId UInt64, @@ -22,7 +23,8 @@ def start_cluster(): Data String) ENGINE = SummingMergeTree() ORDER BY (APIKey, CustomAttributeId, ProfileIDHash, DeviceIDHash, intHash32(DeviceIDHash)) - """) + """ + ) yield cluster finally: @@ -30,5 +32,9 @@ def start_cluster(): def test_remote(start_cluster): - assert node1.query( - "SELECT 1 FROM remote('node{1,2}', default.test_table) WHERE (APIKey = 137715) AND (CustomAttributeId IN (45, 66)) AND (ProfileIDHash != 0) LIMIT 1") == "" + assert ( + node1.query( + "SELECT 1 FROM remote('node{1,2}', default.test_table) WHERE (APIKey = 137715) AND (CustomAttributeId IN (45, 66)) AND (ProfileIDHash != 0) LIMIT 1" + ) + == "" + ) diff --git a/tests/integration/test_rename_column/test.py b/tests/integration/test_rename_column/test.py index 7269ee73d8e..33343da8f6d 100644 --- a/tests/integration/test_rename_column/test.py +++ b/tests/integration/test_rename_column/test.py @@ -1,5 +1,3 @@ - - import random import time from multiprocessing.dummy import Pool @@ -11,16 +9,21 @@ from helpers.cluster import ClickHouseCluster node_options = dict( with_zookeeper=True, - main_configs=["configs/remote_servers.xml", "configs/config.d/instant_moves.xml", - "configs/config.d/part_log.xml", "configs/config.d/zookeeper_session_timeout.xml", - "configs/config.d/storage_configuration.xml"], - tmpfs=['/external:size=200M', '/internal:size=1M']) + main_configs=[ + "configs/remote_servers.xml", + "configs/config.d/instant_moves.xml", + "configs/config.d/part_log.xml", + "configs/config.d/zookeeper_session_timeout.xml", + "configs/config.d/storage_configuration.xml", + ], + tmpfs=["/external:size=200M", "/internal:size=1M"], +) cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', macros={"shard": 0, "replica": 1}, **node_options) -node2 = cluster.add_instance('node2', macros={"shard": 0, "replica": 2}, **node_options) -node3 = cluster.add_instance('node3', macros={"shard": 1, "replica": 1}, **node_options) -node4 = cluster.add_instance('node4', macros={"shard": 1, "replica": 2}, **node_options) +node1 = cluster.add_instance("node1", macros={"shard": 0, "replica": 1}, **node_options) +node2 = cluster.add_instance("node2", macros={"shard": 0, "replica": 2}, **node_options) +node3 = cluster.add_instance("node3", macros={"shard": 1, "replica": 1}, **node_options) +node4 = cluster.add_instance("node4", macros={"shard": 1, "replica": 2}, **node_options) nodes = [node1, node2, node3, node4] @@ -40,8 +43,14 @@ def drop_table(nodes, table_name): node.query("DROP TABLE IF EXISTS {} NO DELAY".format(table_name)) -def create_table(nodes, table_name, with_storage_policy=False, with_time_column=False, - with_ttl_move=False, with_ttl_delete=False): +def create_table( + nodes, + table_name, + with_storage_policy=False, + with_time_column=False, + with_ttl_move=False, + 
with_ttl_delete=False, +): extra_columns = "" settings = [] @@ -71,13 +80,19 @@ def create_table(nodes, table_name, with_storage_policy=False, with_time_column= if settings: sql += """ SETTINGS {} - """.format(", ".join(settings)) + """.format( + ", ".join(settings) + ) if with_time_column: extra_columns = """, time DateTime """ - node.query(sql.format(table_name=table_name, replica=node.name, extra_columns=extra_columns)) + node.query( + sql.format( + table_name=table_name, replica=node.name, extra_columns=extra_columns + ) + ) def create_distributed_table(node, table_name): @@ -89,25 +104,45 @@ def create_distributed_table(node, table_name): ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{shard}/%(table_name)s_replicated', '{replica}') ORDER BY num PARTITION BY num %% 100; - """ % dict(table_name=table_name) + """ % dict( + table_name=table_name + ) node.query(sql) sql = """ CREATE TABLE %(table_name)s ON CLUSTER test_cluster AS %(table_name)s_replicated ENGINE = Distributed(test_cluster, default, %(table_name)s_replicated, rand()) - """ % dict(table_name=table_name) + """ % dict( + table_name=table_name + ) node.query(sql) def drop_distributed_table(node, table_name): - node.query("DROP TABLE IF EXISTS {} ON CLUSTER test_cluster SYNC".format(table_name)) - node.query("DROP TABLE IF EXISTS {}_replicated ON CLUSTER test_cluster SYNC".format(table_name)) + node.query( + "DROP TABLE IF EXISTS {} ON CLUSTER test_cluster SYNC".format(table_name) + ) + node.query( + "DROP TABLE IF EXISTS {}_replicated ON CLUSTER test_cluster SYNC".format( + table_name + ) + ) time.sleep(1) -def insert(node, table_name, chunk=1000, col_names=None, iterations=1, ignore_exception=False, - slow=False, with_many_parts=False, offset=0, with_time_column=False): +def insert( + node, + table_name, + chunk=1000, + col_names=None, + iterations=1, + ignore_exception=False, + slow=False, + with_many_parts=False, + offset=0, + with_time_column=False, +): if col_names is None: - col_names = ['num', 'num2'] + col_names = ["num", "num2"] for i in range(iterations): try: query = ["SET max_partitions_per_insert_block = 10000000"] @@ -115,25 +150,48 @@ def insert(node, table_name, chunk=1000, col_names=None, iterations=1, ignore_ex query.append("SET max_insert_block_size = 256") if with_time_column: query.append( - "INSERT INTO {table_name} ({col0}, {col1}, time) SELECT number AS {col0}, number + 1 AS {col1}, now() + 10 AS time FROM numbers_mt({chunk})" - .format(table_name=table_name, chunk=chunk, col0=col_names[0], col1=col_names[1])) + "INSERT INTO {table_name} ({col0}, {col1}, time) SELECT number AS {col0}, number + 1 AS {col1}, now() + 10 AS time FROM numbers_mt({chunk})".format( + table_name=table_name, + chunk=chunk, + col0=col_names[0], + col1=col_names[1], + ) + ) elif slow: query.append( - "INSERT INTO {table_name} ({col0}, {col1}) SELECT number + sleepEachRow(0.001) AS {col0}, number + 1 AS {col1} FROM numbers_mt({chunk})" - .format(table_name=table_name, chunk=chunk, col0=col_names[0], col1=col_names[1])) + "INSERT INTO {table_name} ({col0}, {col1}) SELECT number + sleepEachRow(0.001) AS {col0}, number + 1 AS {col1} FROM numbers_mt({chunk})".format( + table_name=table_name, + chunk=chunk, + col0=col_names[0], + col1=col_names[1], + ) + ) else: query.append( - "INSERT INTO {table_name} ({col0},{col1}) SELECT number + {offset} AS {col0}, number + 1 + {offset} AS {col1} FROM numbers_mt({chunk})" - .format(table_name=table_name, chunk=chunk, col0=col_names[0], col1=col_names[1], - offset=str(offset))) + "INSERT 
INTO {table_name} ({col0},{col1}) SELECT number + {offset} AS {col0}, number + 1 + {offset} AS {col1} FROM numbers_mt({chunk})".format( + table_name=table_name, + chunk=chunk, + col0=col_names[0], + col1=col_names[1], + offset=str(offset), + ) + ) node.query(";\n".join(query)) except QueryRuntimeException as ex: if not ignore_exception: raise -def select(node, table_name, col_name="num", expected_result=None, iterations=1, ignore_exception=False, slow=False, - poll=None): +def select( + node, + table_name, + col_name="num", + expected_result=None, + iterations=1, + ignore_exception=False, + slow=False, + poll=None, +): for i in range(iterations): start_time = time.time() while True: @@ -141,11 +199,21 @@ def select(node, table_name, col_name="num", expected_result=None, iterations=1, if slow: r = node.query( "SELECT count() FROM (SELECT num2, sleepEachRow(0.5) FROM {} WHERE {} % 1000 > 0)".format( - table_name, col_name)) + table_name, col_name + ) + ) else: - r = node.query("SELECT count() FROM {} WHERE {} % 1000 > 0".format(table_name, col_name)) + r = node.query( + "SELECT count() FROM {} WHERE {} % 1000 > 0".format( + table_name, col_name + ) + ) if expected_result: - if r != expected_result and poll and time.time() - start_time < poll: + if ( + r != expected_result + and poll + and time.time() - start_time < poll + ): continue assert r == expected_result except QueryRuntimeException as ex: @@ -154,23 +222,31 @@ def select(node, table_name, col_name="num", expected_result=None, iterations=1, break -def rename_column(node, table_name, name, new_name, iterations=1, ignore_exception=False): +def rename_column( + node, table_name, name, new_name, iterations=1, ignore_exception=False +): for i in range(iterations): try: - node.query("ALTER TABLE {table_name} RENAME COLUMN {name} to {new_name}".format( - table_name=table_name, name=name, new_name=new_name - )) + node.query( + "ALTER TABLE {table_name} RENAME COLUMN {name} to {new_name}".format( + table_name=table_name, name=name, new_name=new_name + ) + ) except QueryRuntimeException as ex: if not ignore_exception: raise -def rename_column_on_cluster(node, table_name, name, new_name, iterations=1, ignore_exception=False): +def rename_column_on_cluster( + node, table_name, name, new_name, iterations=1, ignore_exception=False +): for i in range(iterations): try: - node.query("ALTER TABLE {table_name} ON CLUSTER test_cluster RENAME COLUMN {name} to {new_name}".format( - table_name=table_name, name=name, new_name=new_name - )) + node.query( + "ALTER TABLE {table_name} ON CLUSTER test_cluster RENAME COLUMN {name} to {new_name}".format( + table_name=table_name, name=name, new_name=new_name + ) + ) except QueryRuntimeException as ex: if not ignore_exception: raise @@ -179,10 +255,13 @@ def rename_column_on_cluster(node, table_name, name, new_name, iterations=1, ign def alter_move(node, table_name, iterations=1, ignore_exception=False): for i in range(iterations): move_part = random.randint(0, 99) - move_volume = 'external' + move_volume = "external" try: - node.query("ALTER TABLE {table_name} MOVE PARTITION '{move_part}' TO VOLUME '{move_volume}'" - .format(table_name=table_name, move_part=move_part, move_volume=move_volume)) + node.query( + "ALTER TABLE {table_name} MOVE PARTITION '{move_part}' TO VOLUME '{move_volume}'".format( + table_name=table_name, move_part=move_part, move_volume=move_volume + ) + ) except QueryRuntimeException as ex: if not ignore_exception: raise @@ -198,9 +277,21 @@ def test_rename_parallel_same_node(started_cluster): p = 
Pool(15) tasks = [] for i in range(1): - tasks.append(p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 5, True))) - tasks.append(p.apply_async(rename_column, (node1, table_name, "foo2", "foo3", 5, True))) - tasks.append(p.apply_async(rename_column, (node1, table_name, "foo3", "num2", 5, True))) + tasks.append( + p.apply_async( + rename_column, (node1, table_name, "num2", "foo2", 5, True) + ) + ) + tasks.append( + p.apply_async( + rename_column, (node1, table_name, "foo2", "foo3", 5, True) + ) + ) + tasks.append( + p.apply_async( + rename_column, (node1, table_name, "foo3", "num2", 5, True) + ) + ) for task in tasks: task.get(timeout=240) @@ -224,9 +315,21 @@ def test_rename_parallel(started_cluster): p = Pool(15) tasks = [] for i in range(1): - tasks.append(p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 5, True))) - tasks.append(p.apply_async(rename_column, (node2, table_name, "foo2", "foo3", 5, True))) - tasks.append(p.apply_async(rename_column, (node3, table_name, "foo3", "num2", 5, True))) + tasks.append( + p.apply_async( + rename_column, (node1, table_name, "num2", "foo2", 5, True) + ) + ) + tasks.append( + p.apply_async( + rename_column, (node2, table_name, "foo2", "foo3", 5, True) + ) + ) + tasks.append( + p.apply_async( + rename_column, (node3, table_name, "foo3", "num2", 5, True) + ) + ) for task in tasks: task.get(timeout=240) @@ -254,12 +357,30 @@ def test_rename_with_parallel_select(started_cluster): p = Pool(15) tasks = [] for i in range(1): - tasks.append(p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 5, True))) - tasks.append(p.apply_async(rename_column, (node2, table_name, "foo2", "foo3", 5, True))) - tasks.append(p.apply_async(rename_column, (node3, table_name, "foo3", "num2", 5, True))) - tasks.append(p.apply_async(select, (node1, table_name, "foo3", "999\n", 5, True))) - tasks.append(p.apply_async(select, (node2, table_name, "num2", "999\n", 5, True))) - tasks.append(p.apply_async(select, (node3, table_name, "foo2", "999\n", 5, True))) + tasks.append( + p.apply_async( + rename_column, (node1, table_name, "num2", "foo2", 5, True) + ) + ) + tasks.append( + p.apply_async( + rename_column, (node2, table_name, "foo2", "foo3", 5, True) + ) + ) + tasks.append( + p.apply_async( + rename_column, (node3, table_name, "foo3", "num2", 5, True) + ) + ) + tasks.append( + p.apply_async(select, (node1, table_name, "foo3", "999\n", 5, True)) + ) + tasks.append( + p.apply_async(select, (node2, table_name, "num2", "999\n", 5, True)) + ) + tasks.append( + p.apply_async(select, (node3, table_name, "foo2", "999\n", 5, True)) + ) for task in tasks: task.get(timeout=240) @@ -283,12 +404,36 @@ def test_rename_with_parallel_insert(started_cluster): p = Pool(15) tasks = [] for i in range(1): - tasks.append(p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 5, True))) - tasks.append(p.apply_async(rename_column, (node2, table_name, "foo2", "foo3", 5, True))) - tasks.append(p.apply_async(rename_column, (node3, table_name, "foo3", "num2", 5, True))) - tasks.append(p.apply_async(insert, (node1, table_name, 100, ["num", "foo3"], 5, True))) - tasks.append(p.apply_async(insert, (node2, table_name, 100, ["num", "num2"], 5, True))) - tasks.append(p.apply_async(insert, (node3, table_name, 100, ["num", "foo2"], 5, True))) + tasks.append( + p.apply_async( + rename_column, (node1, table_name, "num2", "foo2", 5, True) + ) + ) + tasks.append( + p.apply_async( + rename_column, (node2, table_name, "foo2", "foo3", 5, True) + ) + ) + tasks.append( + 
p.apply_async( + rename_column, (node3, table_name, "foo3", "num2", 5, True) + ) + ) + tasks.append( + p.apply_async( + insert, (node1, table_name, 100, ["num", "foo3"], 5, True) + ) + ) + tasks.append( + p.apply_async( + insert, (node2, table_name, 100, ["num", "num2"], 5, True) + ) + ) + tasks.append( + p.apply_async( + insert, (node3, table_name, 100, ["num", "foo2"], 5, True) + ) + ) for task in tasks: task.get(timeout=240) @@ -309,7 +454,17 @@ def test_rename_with_parallel_merges(started_cluster): print("Creating tables", datetime.datetime.now()) create_table(nodes, table_name) for i in range(5): - insert(node1, table_name, 100, ["num", "num2"], 1, False, False, True, offset=i * 100) + insert( + node1, + table_name, + 100, + ["num", "num2"], + 1, + False, + False, + True, + offset=i * 100, + ) print("Data inserted", datetime.datetime.now()) @@ -323,9 +478,15 @@ def test_rename_with_parallel_merges(started_cluster): print("Creating pool") p = Pool(15) tasks = [] - tasks.append(p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 2, True))) - tasks.append(p.apply_async(rename_column, (node2, table_name, "foo2", "foo3", 2, True))) - tasks.append(p.apply_async(rename_column, (node3, table_name, "foo3", "num2", 2, True))) + tasks.append( + p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 2, True)) + ) + tasks.append( + p.apply_async(rename_column, (node2, table_name, "foo2", "foo3", 2, True)) + ) + tasks.append( + p.apply_async(rename_column, (node3, table_name, "foo3", "num2", 2, True)) + ) tasks.append(p.apply_async(merge_parts, (node1, table_name, 2))) tasks.append(p.apply_async(merge_parts, (node2, table_name, 2))) tasks.append(p.apply_async(merge_parts, (node3, table_name, 2))) @@ -358,8 +519,16 @@ def test_rename_with_parallel_slow_insert(started_cluster): p = Pool(15) tasks = [] - tasks.append(p.apply_async(insert, (node1, table_name, 10000, ["num", "num2"], 1, False, True))) - tasks.append(p.apply_async(insert, (node1, table_name, 10000, ["num", "num2"], 1, True, True))) # deduplicated + tasks.append( + p.apply_async( + insert, (node1, table_name, 10000, ["num", "num2"], 1, False, True) + ) + ) + tasks.append( + p.apply_async( + insert, (node1, table_name, 10000, ["num", "num2"], 1, True, True) + ) + ) # deduplicated time.sleep(0.5) tasks.append(p.apply_async(rename_column, (node1, table_name, "num2", "foo2"))) @@ -380,30 +549,64 @@ def test_rename_with_parallel_slow_insert(started_cluster): def test_rename_with_parallel_ttl_move(started_cluster): - table_name = 'test_rename_with_parallel_ttl_move' + table_name = "test_rename_with_parallel_ttl_move" try: - create_table(nodes, table_name, with_storage_policy=True, with_time_column=True, with_ttl_move=True) + create_table( + nodes, + table_name, + with_storage_policy=True, + with_time_column=True, + with_ttl_move=True, + ) rename_column(node1, table_name, "time", "time2", 1, False) rename_column(node1, table_name, "time2", "time", 1, False) p = Pool(15) tasks = [] - tasks.append(p.apply_async(insert, (node1, table_name, 10000, ["num", "num2"], 1, False, False, True, 0, True))) + tasks.append( + p.apply_async( + insert, + ( + node1, + table_name, + 10000, + ["num", "num2"], + 1, + False, + False, + True, + 0, + True, + ), + ) + ) time.sleep(5) rename_column(node1, table_name, "time", "time2", 1, False) time.sleep(4) - tasks.append(p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 5, True))) - tasks.append(p.apply_async(rename_column, (node2, table_name, "foo2", "foo3", 5, True))) - 
tasks.append(p.apply_async(rename_column, (node3, table_name, "num3", "num2", 5, True))) + tasks.append( + p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 5, True)) + ) + tasks.append( + p.apply_async(rename_column, (node2, table_name, "foo2", "foo3", 5, True)) + ) + tasks.append( + p.apply_async(rename_column, (node3, table_name, "num3", "num2", 5, True)) + ) for task in tasks: task.get(timeout=240) # check some parts got moved - assert "external" in set(node1.query( - "SELECT disk_name FROM system.parts WHERE table == '{}' AND active=1 ORDER BY modification_time".format( - table_name)).strip().splitlines()) + assert "external" in set( + node1.query( + "SELECT disk_name FROM system.parts WHERE table == '{}' AND active=1 ORDER BY modification_time".format( + table_name + ) + ) + .strip() + .splitlines() + ) # rename column back to original rename_column(node1, table_name, "foo2", "num2", 1, True) @@ -416,7 +619,7 @@ def test_rename_with_parallel_ttl_move(started_cluster): def test_rename_with_parallel_ttl_delete(started_cluster): - table_name = 'test_rename_with_parallel_ttl_delete' + table_name = "test_rename_with_parallel_ttl_delete" try: create_table(nodes, table_name, with_time_column=True, with_ttl_delete=True) rename_column(node1, table_name, "time", "time2", 1, False) @@ -429,11 +632,33 @@ def test_rename_with_parallel_ttl_delete(started_cluster): p = Pool(15) tasks = [] - tasks.append(p.apply_async(insert, (node1, table_name, 10000, ["num", "num2"], 1, False, False, True, 0, True))) + tasks.append( + p.apply_async( + insert, + ( + node1, + table_name, + 10000, + ["num", "num2"], + 1, + False, + False, + True, + 0, + True, + ), + ) + ) time.sleep(15) - tasks.append(p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 5, True))) - tasks.append(p.apply_async(rename_column, (node2, table_name, "foo2", "foo3", 5, True))) - tasks.append(p.apply_async(rename_column, (node3, table_name, "num3", "num2", 5, True))) + tasks.append( + p.apply_async(rename_column, (node1, table_name, "num2", "foo2", 5, True)) + ) + tasks.append( + p.apply_async(rename_column, (node2, table_name, "foo2", "foo3", 5, True)) + ) + tasks.append( + p.apply_async(rename_column, (node3, table_name, "num3", "num2", 5, True)) + ) tasks.append(p.apply_async(merge_parts, (node1, table_name, 3))) tasks.append(p.apply_async(merge_parts, (node2, table_name, 3))) tasks.append(p.apply_async(merge_parts, (node3, table_name, 3))) @@ -445,29 +670,32 @@ def test_rename_with_parallel_ttl_delete(started_cluster): rename_column(node1, table_name, "foo2", "num2", 1, True) rename_column(node1, table_name, "foo3", "num2", 1, True) - assert int(node1.query("SELECT count() FROM {}".format(table_name)).strip()) < 10000 + assert ( + int(node1.query("SELECT count() FROM {}".format(table_name)).strip()) + < 10000 + ) finally: drop_table(nodes, table_name) def test_rename_distributed(started_cluster): - table_name = 'test_rename_distributed' + table_name = "test_rename_distributed" try: create_distributed_table(node1, table_name) insert(node1, table_name, 1000) - rename_column_on_cluster(node1, table_name, 'num2', 'foo2') - rename_column_on_cluster(node1, '%s_replicated' % table_name, 'num2', 'foo2') + rename_column_on_cluster(node1, table_name, "num2", "foo2") + rename_column_on_cluster(node1, "%s_replicated" % table_name, "num2", "foo2") - insert(node1, table_name, 1000, col_names=['num', 'foo2']) + insert(node1, table_name, 1000, col_names=["num", "foo2"]) - select(node1, table_name, "foo2", '1998\n', poll=30) + 
select(node1, table_name, "foo2", "1998\n", poll=30) finally: drop_distributed_table(node1, table_name) def test_rename_distributed_parallel_insert_and_select(started_cluster): - table_name = 'test_rename_distributed_parallel_insert_and_select' + table_name = "test_rename_distributed_parallel_insert_and_select" try: create_distributed_table(node1, table_name) insert(node1, table_name, 1000) @@ -475,30 +703,73 @@ def test_rename_distributed_parallel_insert_and_select(started_cluster): p = Pool(15) tasks = [] for i in range(1): - tasks.append(p.apply_async(rename_column_on_cluster, (node1, table_name, 'num2', 'foo2', 3, True))) tasks.append( - p.apply_async(rename_column_on_cluster, (node1, '%s_replicated' % table_name, 'num2', 'foo2', 3, True))) - tasks.append(p.apply_async(rename_column_on_cluster, (node1, table_name, 'foo2', 'foo3', 3, True))) + p.apply_async( + rename_column_on_cluster, + (node1, table_name, "num2", "foo2", 3, True), + ) + ) tasks.append( - p.apply_async(rename_column_on_cluster, (node1, '%s_replicated' % table_name, 'foo2', 'foo3', 3, True))) - tasks.append(p.apply_async(rename_column_on_cluster, (node1, table_name, 'foo3', 'num2', 3, True))) + p.apply_async( + rename_column_on_cluster, + (node1, "%s_replicated" % table_name, "num2", "foo2", 3, True), + ) + ) tasks.append( - p.apply_async(rename_column_on_cluster, (node1, '%s_replicated' % table_name, 'foo3', 'num2', 3, True))) - tasks.append(p.apply_async(insert, (node1, table_name, 10, ["num", "foo3"], 5, True))) - tasks.append(p.apply_async(insert, (node2, table_name, 10, ["num", "num2"], 5, True))) - tasks.append(p.apply_async(insert, (node3, table_name, 10, ["num", "foo2"], 5, True))) - tasks.append(p.apply_async(select, (node1, table_name, "foo2", None, 5, True))) - tasks.append(p.apply_async(select, (node2, table_name, "foo3", None, 5, True))) - tasks.append(p.apply_async(select, (node3, table_name, "num2", None, 5, True))) + p.apply_async( + rename_column_on_cluster, + (node1, table_name, "foo2", "foo3", 3, True), + ) + ) + tasks.append( + p.apply_async( + rename_column_on_cluster, + (node1, "%s_replicated" % table_name, "foo2", "foo3", 3, True), + ) + ) + tasks.append( + p.apply_async( + rename_column_on_cluster, + (node1, table_name, "foo3", "num2", 3, True), + ) + ) + tasks.append( + p.apply_async( + rename_column_on_cluster, + (node1, "%s_replicated" % table_name, "foo3", "num2", 3, True), + ) + ) + tasks.append( + p.apply_async(insert, (node1, table_name, 10, ["num", "foo3"], 5, True)) + ) + tasks.append( + p.apply_async(insert, (node2, table_name, 10, ["num", "num2"], 5, True)) + ) + tasks.append( + p.apply_async(insert, (node3, table_name, 10, ["num", "foo2"], 5, True)) + ) + tasks.append( + p.apply_async(select, (node1, table_name, "foo2", None, 5, True)) + ) + tasks.append( + p.apply_async(select, (node2, table_name, "foo3", None, 5, True)) + ) + tasks.append( + p.apply_async(select, (node3, table_name, "num2", None, 5, True)) + ) for task in tasks: task.get(timeout=240) - rename_column_on_cluster(node1, table_name, 'foo2', 'num2', 1, True) - rename_column_on_cluster(node1, '%s_replicated' % table_name, 'foo2', 'num2', 1, True) - rename_column_on_cluster(node1, table_name, 'foo3', 'num2', 1, True) - rename_column_on_cluster(node1, '%s_replicated' % table_name, 'foo3', 'num2', 1, True) + rename_column_on_cluster(node1, table_name, "foo2", "num2", 1, True) + rename_column_on_cluster( + node1, "%s_replicated" % table_name, "foo2", "num2", 1, True + ) + rename_column_on_cluster(node1, table_name, "foo3", 
"num2", 1, True) + rename_column_on_cluster( + node1, "%s_replicated" % table_name, "foo3", "num2", 1, True + ) - insert(node1, table_name, 1000, col_names=['num', 'num2']) + insert(node1, table_name, 1000, col_names=["num", "num2"]) select(node1, table_name, "num2") select(node2, table_name, "num2") select(node3, table_name, "num2") diff --git a/tests/integration/test_replace_partition/test.py b/tests/integration/test_replace_partition/test.py index d30a038825f..7ce79d9aca8 100644 --- a/tests/integration/test_replace_partition/test.py +++ b/tests/integration/test_replace_partition/test.py @@ -16,7 +16,7 @@ cluster = ClickHouseCluster(__file__) def _fill_nodes(nodes, shard): for node in nodes: node.query( - ''' + """ CREATE DATABASE test; CREATE TABLE real_table(date Date, id UInt32, dummy UInt32) @@ -27,11 +27,18 @@ def _fill_nodes(nodes, shard): CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}', date, id, 8192); - '''.format(shard=shard, replica=node.name)) + """.format( + shard=shard, replica=node.name + ) + ) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -51,18 +58,22 @@ def test_normal_work(normal_work): node1.query("insert into test_table values ('2017-06-16', 111, 0)") node1.query("insert into real_table values ('2017-06-16', 222, 0)") - assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", '111') - assert_eq_with_retry(node1, "SELECT id FROM real_table order by id", '222') - assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", '111') + assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", "111") + assert_eq_with_retry(node1, "SELECT id FROM real_table order by id", "222") + assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", "111") node1.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM real_table") - assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", '222') - assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", '222') + assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", "222") + assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", "222") -node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node3 = cluster.add_instance( + "node3", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node4 = cluster.add_instance( + "node4", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -82,9 +93,9 @@ def test_drop_failover(drop_failover): node3.query("insert into test_table values ('2017-06-16', 111, 0)") node3.query("insert into real_table values ('2017-06-16', 222, 0)") - assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", '111') - assert_eq_with_retry(node3, "SELECT id FROM real_table order by id", '222') - assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '111') + 
assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", "111") + assert_eq_with_retry(node3, "SELECT id FROM real_table order by id", "222") + assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", "111") with PartitionManager() as pm: # Hinder replication between replicas @@ -95,9 +106,9 @@ def test_drop_failover(drop_failover): node3.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM real_table") # Node3 replace is ok - assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", '222') + assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", "222") # Network interrupted -- replace is not ok, but it's ok - assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '111') + assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", "111") # Drop partition on source node node3.query("ALTER TABLE test_table DROP PARTITION 201706") @@ -107,13 +118,19 @@ def test_drop_failover(drop_failover): msg = node4.query_with_retry( "select last_exception from system.replication_queue where type = 'REPLACE_RANGE'", - check_callback=lambda x: 'Not found part' not in x, sleep_time=1) - assert 'Not found part' not in msg - assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '') + check_callback=lambda x: "Not found part" not in x, + sleep_time=1, + ) + assert "Not found part" not in msg + assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", "") -node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node5 = cluster.add_instance( + "node5", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node6 = cluster.add_instance( + "node6", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -134,10 +151,10 @@ def test_replace_after_replace_failover(replace_after_replace_failover): node5.query("insert into real_table values ('2017-06-16', 222, 0)") node5.query("insert into other_table values ('2017-06-16', 333, 0)") - assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", '111') - assert_eq_with_retry(node5, "SELECT id FROM real_table order by id", '222') - assert_eq_with_retry(node5, "SELECT id FROM other_table order by id", '333') - assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", '111') + assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", "111") + assert_eq_with_retry(node5, "SELECT id FROM real_table order by id", "222") + assert_eq_with_retry(node5, "SELECT id FROM other_table order by id", "333") + assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", "111") with PartitionManager() as pm: # Hinder replication between replicas @@ -148,20 +165,22 @@ def test_replace_after_replace_failover(replace_after_replace_failover): node5.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM real_table") # Node5 replace is ok - assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", '222') + assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", "222") # Network interrupted -- replace is not ok, but it's ok - assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", '111') + assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", "111") # Replace partition on source node node5.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM other_table") - 
assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", '333') + assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", "333") # Wait few seconds for connection to zookeeper to be restored time.sleep(5) msg = node6.query_with_retry( "select last_exception from system.replication_queue where type = 'REPLACE_RANGE'", - check_callback=lambda x: 'Not found part' not in x, sleep_time=1) - assert 'Not found part' not in msg - assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", '333') + check_callback=lambda x: "Not found part" not in x, + sleep_time=1, + ) + assert "Not found part" not in msg + assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", "333") diff --git a/tests/integration/test_replica_can_become_leader/test.py b/tests/integration/test_replica_can_become_leader/test.py index fae4fa28226..58e7b6f6e19 100644 --- a/tests/integration/test_replica_can_become_leader/test.py +++ b/tests/integration/test_replica_can_become_leader/test.py @@ -3,9 +3,13 @@ from helpers.client import QueryRuntimeException from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/notleader.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/notleaderignorecase.xml'], with_zookeeper=True) -node3 = cluster.add_instance('node3', with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/notleader.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/notleaderignorecase.xml"], with_zookeeper=True +) +node3 = cluster.add_instance("node3", with_zookeeper=True) @pytest.fixture(scope="module") @@ -15,20 +19,24 @@ def start_cluster(): for i, node in enumerate((node1, node2)): node.query( - ''' + """ CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_table', '{}') PARTITION BY date ORDER BY id - '''.format(i) + """.format( + i + ) ) with pytest.raises(QueryRuntimeException): node3.query( - ''' + """ CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_table', '{}') PARTITION BY date ORDER BY id SETTINGS replicated_can_become_leader=0sad - '''.format(3) + """.format( + 3 + ) ) yield cluster @@ -38,5 +46,15 @@ def start_cluster(): def test_can_become_leader(start_cluster): - assert node1.query("select can_become_leader from system.replicas where table = 'test_table'") == '0\n' - assert node2.query("select can_become_leader from system.replicas where table = 'test_table'") == '0\n' + assert ( + node1.query( + "select can_become_leader from system.replicas where table = 'test_table'" + ) + == "0\n" + ) + assert ( + node2.query( + "select can_become_leader from system.replicas where table = 'test_table'" + ) + == "0\n" + ) diff --git a/tests/integration/test_replica_is_active/test.py b/tests/integration/test_replica_is_active/test.py index f786ff71958..d5e0931dff2 100644 --- a/tests/integration/test_replica_is_active/test.py +++ b/tests/integration/test_replica_is_active/test.py @@ -4,9 +4,10 @@ from helpers.cluster import ClickHouseCluster from ast import literal_eval cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) -node3 = cluster.add_instance('node3', with_zookeeper=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) +node2 = 
cluster.add_instance("node2", with_zookeeper=True) +node3 = cluster.add_instance("node3", with_zookeeper=True) + @pytest.fixture(scope="module") def start_cluster(): @@ -14,13 +15,15 @@ def start_cluster(): cluster.start() for i, node in enumerate((node1, node2, node3)): - node_name = 'node' + str(i + 1) + node_name = "node" + str(i + 1) node.query( - ''' + """ CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_table', '{}') PARTITION BY date ORDER BY id - '''.format(node_name) + """.format( + node_name + ) ) yield cluster @@ -30,13 +33,19 @@ def start_cluster(): def test_replica_is_active(start_cluster): - query_result = node1.query("select replica_is_active from system.replicas where table = 'test_table'") - assert literal_eval(query_result) == {'node1': 1, 'node2': 1, 'node3': 1} + query_result = node1.query( + "select replica_is_active from system.replicas where table = 'test_table'" + ) + assert literal_eval(query_result) == {"node1": 1, "node2": 1, "node3": 1} node3.stop() - query_result = node1.query("select replica_is_active from system.replicas where table = 'test_table'") - assert literal_eval(query_result) == {'node1': 1, 'node2': 1, 'node3': 0} + query_result = node1.query( + "select replica_is_active from system.replicas where table = 'test_table'" + ) + assert literal_eval(query_result) == {"node1": 1, "node2": 1, "node3": 0} node2.stop() - query_result = node1.query("select replica_is_active from system.replicas where table = 'test_table'") - assert literal_eval(query_result) == {'node1': 1, 'node2': 0, 'node3': 0} + query_result = node1.query( + "select replica_is_active from system.replicas where table = 'test_table'" + ) + assert literal_eval(query_result) == {"node1": 1, "node2": 0, "node3": 0} diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py index 171ae24b98d..13e9c225a61 100644 --- a/tests/integration/test_replicated_database/test.py +++ b/tests/integration/test_replicated_database/test.py @@ -12,21 +12,61 @@ test_recover_staled_replica_run = 1 cluster = ClickHouseCluster(__file__) -main_node = cluster.add_instance('main_node', main_configs=['configs/config.xml'], user_configs=['configs/settings.xml'], with_zookeeper=True, stay_alive=True, macros={"shard": 1, "replica": 1}) -dummy_node = cluster.add_instance('dummy_node', main_configs=['configs/config.xml'], user_configs=['configs/settings.xml'], with_zookeeper=True, stay_alive=True, macros={"shard": 1, "replica": 2}) -competing_node = cluster.add_instance('competing_node', main_configs=['configs/config.xml'], user_configs=['configs/settings.xml'], with_zookeeper=True, macros={"shard": 1, "replica": 3}) -snapshotting_node = cluster.add_instance('snapshotting_node', main_configs=['configs/config.xml'], user_configs=['configs/settings.xml'], with_zookeeper=True, macros={"shard": 2, "replica": 1}) -snapshot_recovering_node = cluster.add_instance('snapshot_recovering_node', main_configs=['configs/config.xml'], user_configs=['configs/settings.xml'], with_zookeeper=True) +main_node = cluster.add_instance( + "main_node", + main_configs=["configs/config.xml"], + user_configs=["configs/settings.xml"], + with_zookeeper=True, + stay_alive=True, + macros={"shard": 1, "replica": 1}, +) +dummy_node = cluster.add_instance( + "dummy_node", + main_configs=["configs/config.xml"], + user_configs=["configs/settings.xml"], + with_zookeeper=True, + stay_alive=True, + macros={"shard": 1, "replica": 2}, +) 
+competing_node = cluster.add_instance( + "competing_node", + main_configs=["configs/config.xml"], + user_configs=["configs/settings.xml"], + with_zookeeper=True, + macros={"shard": 1, "replica": 3}, +) +snapshotting_node = cluster.add_instance( + "snapshotting_node", + main_configs=["configs/config.xml"], + user_configs=["configs/settings.xml"], + with_zookeeper=True, + macros={"shard": 2, "replica": 1}, +) +snapshot_recovering_node = cluster.add_instance( + "snapshot_recovering_node", + main_configs=["configs/config.xml"], + user_configs=["configs/settings.xml"], + with_zookeeper=True, +) -all_nodes = [main_node, dummy_node, competing_node, snapshotting_node, snapshot_recovering_node] +all_nodes = [ + main_node, + dummy_node, + competing_node, + snapshotting_node, + snapshot_recovering_node, +] uuid_regex = re.compile("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}") + + def assert_create_query(nodes, table_name, expected): replace_uuid = lambda x: re.sub(uuid_regex, "uuid", x) query = "show create table {}".format(table_name) for node in nodes: assert_eq_with_retry(node, query, expected, get_result=replace_uuid) + @pytest.fixture(scope="module") def started_cluster(): try: @@ -36,103 +76,182 @@ def started_cluster(): finally: cluster.shutdown() + def test_create_replicated_table(started_cluster): - main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica' || '1');") - dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") - assert "Explicit zookeeper_path and replica_name are specified" in \ - main_node.query_and_get_error("CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) " - "ENGINE=ReplicatedMergeTree('/test/tmp', 'r') ORDER BY k PARTITION BY toYYYYMM(d);") + main_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica' || '1');" + ) + dummy_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');" + ) + assert ( + "Explicit zookeeper_path and replica_name are specified" + in main_node.query_and_get_error( + "CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) " + "ENGINE=ReplicatedMergeTree('/test/tmp', 'r') ORDER BY k PARTITION BY toYYYYMM(d);" + ) + ) - assert "Explicit zookeeper_path and replica_name are specified" in \ - main_node.query_and_get_error("CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) " - "ENGINE=ReplicatedMergeTree('/test/tmp', 'r', d, k, 8192);") + assert ( + "Explicit zookeeper_path and replica_name are specified" + in main_node.query_and_get_error( + "CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) " + "ENGINE=ReplicatedMergeTree('/test/tmp', 'r', d, k, 8192);" + ) + ) - assert "Old syntax is not allowed" in \ - main_node.query_and_get_error("CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) " - "ENGINE=ReplicatedMergeTree('/test/tmp/{shard}', '{replica}', d, k, 8192);") + assert "Old syntax is not allowed" in main_node.query_and_get_error( + "CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) " + "ENGINE=ReplicatedMergeTree('/test/tmp/{shard}', '{replica}', d, k, 8192);" + ) - main_node.query("CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree ORDER BY k PARTITION BY toYYYYMM(d);") + main_node.query( + "CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) 
ENGINE=ReplicatedMergeTree ORDER BY k PARTITION BY toYYYYMM(d);" + ) - expected = "CREATE TABLE testdb.replicated_table\\n(\\n `d` Date,\\n `k` UInt64,\\n `i32` Int32\\n)\\n" \ - "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')\\n" \ - "PARTITION BY toYYYYMM(d)\\nORDER BY k\\nSETTINGS index_granularity = 8192" + expected = ( + "CREATE TABLE testdb.replicated_table\\n(\\n `d` Date,\\n `k` UInt64,\\n `i32` Int32\\n)\\n" + "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')\\n" + "PARTITION BY toYYYYMM(d)\\nORDER BY k\\nSETTINGS index_granularity = 8192" + ) assert_create_query([main_node, dummy_node], "testdb.replicated_table", expected) # assert without replacing uuid - assert main_node.query("show create testdb.replicated_table") == dummy_node.query("show create testdb.replicated_table") + assert main_node.query("show create testdb.replicated_table") == dummy_node.query( + "show create testdb.replicated_table" + ) main_node.query("DROP DATABASE testdb SYNC") dummy_node.query("DROP DATABASE testdb SYNC") -@pytest.mark.parametrize("engine", ['MergeTree', 'ReplicatedMergeTree']) + +@pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_simple_alter_table(started_cluster, engine): - main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") - dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + main_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');" + ) + dummy_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');" + ) # test_simple_alter_table - name = "testdb.alter_test_{}".format(engine) - main_node.query("CREATE TABLE {} " - "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) " - "ENGINE = {} PARTITION BY StartDate ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID);".format(name, engine)) + name = "testdb.alter_test_{}".format(engine) + main_node.query( + "CREATE TABLE {} " + "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) " + "ENGINE = {} PARTITION BY StartDate ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID);".format( + name, engine + ) + ) main_node.query("ALTER TABLE {} ADD COLUMN Added0 UInt32;".format(name)) main_node.query("ALTER TABLE {} ADD COLUMN Added2 UInt32;".format(name)) - main_node.query("ALTER TABLE {} ADD COLUMN Added1 UInt32 AFTER Added0;".format(name)) - main_node.query("ALTER TABLE {} ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2;".format(name)) - main_node.query("ALTER TABLE {} ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B;".format(name)) - main_node.query("ALTER TABLE {} ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1;".format(name)) + main_node.query( + "ALTER TABLE {} ADD COLUMN Added1 UInt32 AFTER Added0;".format(name) + ) + main_node.query( + "ALTER TABLE {} ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2;".format( + name + ) + ) + main_node.query( + "ALTER TABLE {} ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B;".format( + name + ) + ) + main_node.query( + "ALTER TABLE {} ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1;".format( + name + ) + ) - full_engine = 
engine if not "Replicated" in engine else engine + "(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')" - expected = "CREATE TABLE {}\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" \ - " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n" \ - " `ToDrop` UInt32,\\n `Added0` UInt32,\\n `Added1` UInt32,\\n `Added2` UInt32,\\n" \ - " `AddedNested1.A` Array(UInt32),\\n `AddedNested1.B` Array(UInt64),\\n `AddedNested1.C` Array(String),\\n" \ - " `AddedNested2.A` Array(UInt32),\\n `AddedNested2.B` Array(UInt64)\\n)\\n" \ - "ENGINE = {}\\nPARTITION BY StartDate\\nORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)\\n" \ - "SETTINGS index_granularity = 8192".format(name, full_engine) + full_engine = ( + engine + if not "Replicated" in engine + else engine + "(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')" + ) + expected = ( + "CREATE TABLE {}\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" + " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n" + " `ToDrop` UInt32,\\n `Added0` UInt32,\\n `Added1` UInt32,\\n `Added2` UInt32,\\n" + " `AddedNested1.A` Array(UInt32),\\n `AddedNested1.B` Array(UInt64),\\n `AddedNested1.C` Array(String),\\n" + " `AddedNested2.A` Array(UInt32),\\n `AddedNested2.B` Array(UInt64)\\n)\\n" + "ENGINE = {}\\nPARTITION BY StartDate\\nORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)\\n" + "SETTINGS index_granularity = 8192".format(name, full_engine) + ) assert_create_query([main_node, dummy_node], name, expected) # test_create_replica_after_delay - competing_node.query("CREATE DATABASE IF NOT EXISTS testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');") + competing_node.query( + "CREATE DATABASE IF NOT EXISTS testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');" + ) - name = "testdb.alter_test_{}".format(engine) + name = "testdb.alter_test_{}".format(engine) main_node.query("ALTER TABLE {} ADD COLUMN Added3 UInt32;".format(name)) main_node.query("ALTER TABLE {} DROP COLUMN AddedNested1;".format(name)) main_node.query("ALTER TABLE {} RENAME COLUMN Added1 TO AddedNested1;".format(name)) - full_engine = engine if not "Replicated" in engine else engine + "(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')" - expected = "CREATE TABLE {}\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" \ - " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n" \ - " `ToDrop` UInt32,\\n `Added0` UInt32,\\n `AddedNested1` UInt32,\\n `Added2` UInt32,\\n" \ - " `AddedNested2.A` Array(UInt32),\\n `AddedNested2.B` Array(UInt64),\\n `Added3` UInt32\\n)\\n" \ - "ENGINE = {}\\nPARTITION BY StartDate\\nORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)\\n" \ - "SETTINGS index_granularity = 8192".format(name, full_engine) + full_engine = ( + engine + if not "Replicated" in engine + else engine + "(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')" + ) + expected = ( + "CREATE TABLE {}\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" + " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n" + " `ToDrop` UInt32,\\n `Added0` UInt32,\\n `AddedNested1` UInt32,\\n `Added2` UInt32,\\n" + " `AddedNested2.A` Array(UInt32),\\n `AddedNested2.B` Array(UInt64),\\n `Added3` UInt32\\n)\\n" + "ENGINE = {}\\nPARTITION BY StartDate\\nORDER BY (CounterID, StartDate, 
intHash32(UserID), VisitID)\\n" + "SETTINGS index_granularity = 8192".format(name, full_engine) + ) assert_create_query([main_node, dummy_node, competing_node], name, expected) main_node.query("DROP DATABASE testdb SYNC") dummy_node.query("DROP DATABASE testdb SYNC") competing_node.query("DROP DATABASE testdb SYNC") + def get_table_uuid(database, name): - return main_node.query(f"SELECT uuid FROM system.tables WHERE database = '{database}' and name = '{name}'").strip() + return main_node.query( + f"SELECT uuid FROM system.tables WHERE database = '{database}' and name = '{name}'" + ).strip() + @pytest.fixture(scope="module", name="attachable_part") def fixture_attachable_part(started_cluster): main_node.query(f"CREATE DATABASE testdb_attach_atomic ENGINE = Atomic") - main_node.query(f"CREATE TABLE testdb_attach_atomic.test (CounterID UInt32) ENGINE = MergeTree ORDER BY (CounterID)") + main_node.query( + f"CREATE TABLE testdb_attach_atomic.test (CounterID UInt32) ENGINE = MergeTree ORDER BY (CounterID)" + ) main_node.query(f"INSERT INTO testdb_attach_atomic.test VALUES (123)") - main_node.query(f"ALTER TABLE testdb_attach_atomic.test FREEZE WITH NAME 'test_attach'") + main_node.query( + f"ALTER TABLE testdb_attach_atomic.test FREEZE WITH NAME 'test_attach'" + ) table_uuid = get_table_uuid("testdb_attach_atomic", "test") - return os.path.join(main_node.path, f"database/shadow/test_attach/store/{table_uuid[:3]}/{table_uuid}/all_1_1_0") + return os.path.join( + main_node.path, + f"database/shadow/test_attach/store/{table_uuid[:3]}/{table_uuid}/all_1_1_0", + ) + @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_attach(started_cluster, attachable_part, engine): - main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") - dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + main_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');" + ) + dummy_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');" + ) - name = "alter_attach_test_{}".format(engine) - main_node.query(f"CREATE TABLE testdb.{name} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") + name = "alter_attach_test_{}".format(engine) + main_node.query( + f"CREATE TABLE testdb.{name} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)" + ) table_uuid = get_table_uuid("testdb", name) # Provide and attach a part to the main node shutil.copytree( - attachable_part, os.path.join(main_node.path, f"database/store/{table_uuid[:3]}/{table_uuid}/detached/all_1_1_0") + attachable_part, + os.path.join( + main_node.path, + f"database/store/{table_uuid[:3]}/{table_uuid}/detached/all_1_1_0", + ), ) main_node.query(f"ALTER TABLE testdb.{name} ATTACH PART 'all_1_1_0'") # On the main node, data is attached @@ -145,14 +264,21 @@ def test_alter_attach(started_cluster, attachable_part, engine): main_node.query("DROP DATABASE testdb SYNC") dummy_node.query("DROP DATABASE testdb SYNC") + @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_drop_part(started_cluster, engine): - main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") - dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + main_node.query( + "CREATE 
DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');" + ) + dummy_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');" + ) table = f"alter_drop_{engine}" part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0" - main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") + main_node.query( + f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)" + ) main_node.query(f"INSERT INTO testdb.{table} VALUES (123)") if engine == "MergeTree": dummy_node.query(f"INSERT INTO testdb.{table} VALUES (456)") @@ -166,14 +292,21 @@ def test_alter_drop_part(started_cluster, engine): main_node.query("DROP DATABASE testdb SYNC") dummy_node.query("DROP DATABASE testdb SYNC") + @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_detach_part(started_cluster, engine): - main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") - dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + main_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');" + ) + dummy_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');" + ) table = f"alter_detach_{engine}" part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0" - main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") + main_node.query( + f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)" + ) main_node.query(f"INSERT INTO testdb.{table} VALUES (123)") if engine == "MergeTree": dummy_node.query(f"INSERT INTO testdb.{table} VALUES (456)") @@ -188,14 +321,21 @@ def test_alter_detach_part(started_cluster, engine): main_node.query("DROP DATABASE testdb SYNC") dummy_node.query("DROP DATABASE testdb SYNC") + @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_drop_detached_part(started_cluster, engine): - main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") - dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + main_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');" + ) + dummy_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');" + ) table = f"alter_drop_detached_{engine}" part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0" - main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") + main_node.query( + f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)" + ) main_node.query(f"INSERT INTO testdb.{table} VALUES (123)") main_node.query(f"ALTER TABLE testdb.{table} DETACH PART '{part_name}'") if engine == "MergeTree": @@ -211,14 +351,24 @@ def test_alter_drop_detached_part(started_cluster, engine): def test_alter_fetch(started_cluster): - main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") - dummy_node.query("CREATE DATABASE testdb ENGINE = 
Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + main_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');" + ) + dummy_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');" + ) - main_node.query("CREATE TABLE testdb.fetch_source (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)") - main_node.query("CREATE TABLE testdb.fetch_target (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)") + main_node.query( + "CREATE TABLE testdb.fetch_source (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)" + ) + main_node.query( + "CREATE TABLE testdb.fetch_target (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)" + ) main_node.query("INSERT INTO testdb.fetch_source VALUES (123)") table_uuid = get_table_uuid("testdb", "fetch_source") - main_node.query(f"ALTER TABLE testdb.fetch_target FETCH PART 'all_0_0_0' FROM '/clickhouse/tables/{table_uuid}/{{shard}}' ") + main_node.query( + f"ALTER TABLE testdb.fetch_target FETCH PART 'all_0_0_0' FROM '/clickhouse/tables/{table_uuid}/{{shard}}' " + ) detached_parts_query = "SELECT name FROM system.detached_parts WHERE database='testdb' AND table='fetch_target'" assert main_node.query(detached_parts_query) == "all_0_0_0\n" assert dummy_node.query(detached_parts_query) == "" @@ -226,91 +376,153 @@ def test_alter_fetch(started_cluster): main_node.query("DROP DATABASE testdb SYNC") dummy_node.query("DROP DATABASE testdb SYNC") + def test_alters_from_different_replicas(started_cluster): - main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") - dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + main_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');" + ) + dummy_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');" + ) # test_alters_from_different_replicas - competing_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');") + competing_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');" + ) - main_node.query("CREATE TABLE testdb.concurrent_test " - "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) " - "ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192);") + main_node.query( + "CREATE TABLE testdb.concurrent_test " + "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) " + "ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192);" + ) - main_node.query("CREATE TABLE testdb.dist AS testdb.concurrent_test ENGINE = Distributed(testdb, testdb, concurrent_test, CounterID)") + main_node.query( + "CREATE TABLE testdb.dist AS testdb.concurrent_test ENGINE = Distributed(testdb, testdb, concurrent_test, CounterID)" + ) dummy_node.stop_clickhouse(kill=True) settings = {"distributed_ddl_task_timeout": 5} - assert "There are 1 unfinished hosts (0 of them are currently active)" in \ - competing_node.query_and_get_error("ALTER TABLE testdb.concurrent_test ADD COLUMN Added0 
UInt32;", settings=settings) - settings = {"distributed_ddl_task_timeout": 5, "distributed_ddl_output_mode": "null_status_on_timeout"} - assert "shard1|replica2\t\\N\t\\N" in \ - main_node.query("ALTER TABLE testdb.concurrent_test ADD COLUMN Added2 UInt32;", settings=settings) - settings = {"distributed_ddl_task_timeout": 5, "distributed_ddl_output_mode": "never_throw"} - assert "shard1|replica2\t\\N\t\\N" in \ - competing_node.query("ALTER TABLE testdb.concurrent_test ADD COLUMN Added1 UInt32 AFTER Added0;", settings=settings) + assert ( + "There are 1 unfinished hosts (0 of them are currently active)" + in competing_node.query_and_get_error( + "ALTER TABLE testdb.concurrent_test ADD COLUMN Added0 UInt32;", + settings=settings, + ) + ) + settings = { + "distributed_ddl_task_timeout": 5, + "distributed_ddl_output_mode": "null_status_on_timeout", + } + assert "shard1|replica2\t\\N\t\\N" in main_node.query( + "ALTER TABLE testdb.concurrent_test ADD COLUMN Added2 UInt32;", + settings=settings, + ) + settings = { + "distributed_ddl_task_timeout": 5, + "distributed_ddl_output_mode": "never_throw", + } + assert "shard1|replica2\t\\N\t\\N" in competing_node.query( + "ALTER TABLE testdb.concurrent_test ADD COLUMN Added1 UInt32 AFTER Added0;", + settings=settings, + ) dummy_node.start_clickhouse() - main_node.query("ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2;") - competing_node.query("ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B;") - main_node.query("ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1;") + main_node.query( + "ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2;" + ) + competing_node.query( + "ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B;" + ) + main_node.query( + "ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1;" + ) - expected = "CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" \ - " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32,\\n" \ - " `Added0` UInt32,\\n `Added1` UInt32,\\n `Added2` UInt32,\\n `AddedNested1.A` Array(UInt32),\\n" \ - " `AddedNested1.B` Array(UInt64),\\n `AddedNested1.C` Array(String),\\n `AddedNested2.A` Array(UInt32),\\n" \ - " `AddedNested2.B` Array(UInt64)\\n)\\n" \ - "ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192)" + expected = ( + "CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" + " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32,\\n" + " `Added0` UInt32,\\n `Added1` UInt32,\\n `Added2` UInt32,\\n `AddedNested1.A` Array(UInt32),\\n" + " `AddedNested1.B` Array(UInt64),\\n `AddedNested1.C` Array(String),\\n `AddedNested2.A` Array(UInt32),\\n" + " `AddedNested2.B` Array(UInt64)\\n)\\n" + "ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192)" + ) assert_create_query([main_node, competing_node], "testdb.concurrent_test", expected) # test_create_replica_after_delay main_node.query("DROP TABLE testdb.concurrent_test SYNC") - main_node.query("CREATE TABLE testdb.concurrent_test " - "(CounterID UInt32, 
StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) " - "ENGINE = ReplicatedMergeTree ORDER BY CounterID;") + main_node.query( + "CREATE TABLE testdb.concurrent_test " + "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) " + "ENGINE = ReplicatedMergeTree ORDER BY CounterID;" + ) - expected = "CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" \ - " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n" \ - "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192" + expected = ( + "CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" + " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n" + "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192" + ) assert_create_query([main_node, competing_node], "testdb.concurrent_test", expected) - main_node.query("INSERT INTO testdb.dist (CounterID, StartDate, UserID) SELECT number, addDays(toDate('2020-02-02'), number), intHash32(number) FROM numbers(10)") + main_node.query( + "INSERT INTO testdb.dist (CounterID, StartDate, UserID) SELECT number, addDays(toDate('2020-02-02'), number), intHash32(number) FROM numbers(10)" + ) # test_replica_restart main_node.restart_clickhouse() - expected = "CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" \ - " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n" \ - "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192" - + expected = ( + "CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" + " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n" + "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192" + ) # test_snapshot_and_snapshot_recover - snapshotting_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard2', 'replica1');") - snapshot_recovering_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard2', 'replica2');") + snapshotting_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard2', 'replica1');" + ) + snapshot_recovering_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard2', 'replica2');" + ) assert_create_query(all_nodes, "testdb.concurrent_test", expected) main_node.query("SYSTEM FLUSH DISTRIBUTED testdb.dist") - main_node.query("ALTER TABLE testdb.concurrent_test UPDATE StartDate = addYears(StartDate, 1) WHERE 1") + main_node.query( + "ALTER TABLE testdb.concurrent_test UPDATE StartDate = addYears(StartDate, 1) WHERE 1" + ) res = main_node.query("ALTER TABLE testdb.concurrent_test DELETE WHERE UserID % 2") - assert "shard1|replica1" in res and "shard1|replica2" in res and "shard1|replica3" in res + assert ( + 
"shard1|replica1" in res + and "shard1|replica2" in res + and "shard1|replica3" in res + ) assert "shard2|replica1" in res and "shard2|replica2" in res - expected = "1\t1\tmain_node\n" \ - "1\t2\tdummy_node\n" \ - "1\t3\tcompeting_node\n" \ - "2\t1\tsnapshotting_node\n" \ - "2\t2\tsnapshot_recovering_node\n" - assert main_node.query("SELECT shard_num, replica_num, host_name FROM system.clusters WHERE cluster='testdb'") == expected + expected = ( + "1\t1\tmain_node\n" + "1\t2\tdummy_node\n" + "1\t3\tcompeting_node\n" + "2\t1\tsnapshotting_node\n" + "2\t2\tsnapshot_recovering_node\n" + ) + assert ( + main_node.query( + "SELECT shard_num, replica_num, host_name FROM system.clusters WHERE cluster='testdb'" + ) + == expected + ) # test_drop_and_create_replica main_node.query("DROP DATABASE testdb SYNC") - main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + main_node.query( + "CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');" + ) - expected = "CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" \ - " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n" \ - "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192" + expected = ( + "CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n" + " `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n" + "ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/uuid/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192" + ) assert_create_query([main_node, competing_node], "testdb.concurrent_test", expected) assert_create_query(all_nodes, "testdb.concurrent_test", expected) @@ -318,112 +530,242 @@ def test_alters_from_different_replicas(started_cluster): for node in all_nodes: node.query("SYSTEM SYNC REPLICA testdb.concurrent_test") - expected = "0\t2021-02-02\t4249604106\n" \ - "1\t2021-02-03\t1343103100\n" \ - "4\t2021-02-06\t3902320246\n" \ - "7\t2021-02-09\t3844986530\n" \ - "9\t2021-02-11\t1241149650\n" + expected = ( + "0\t2021-02-02\t4249604106\n" + "1\t2021-02-03\t1343103100\n" + "4\t2021-02-06\t3902320246\n" + "7\t2021-02-09\t3844986530\n" + "9\t2021-02-11\t1241149650\n" + ) - assert_eq_with_retry(dummy_node, "SELECT CounterID, StartDate, UserID FROM testdb.dist ORDER BY CounterID", expected) + assert_eq_with_retry( + dummy_node, + "SELECT CounterID, StartDate, UserID FROM testdb.dist ORDER BY CounterID", + expected, + ) main_node.query("DROP DATABASE testdb SYNC") dummy_node.query("DROP DATABASE testdb SYNC") competing_node.query("DROP DATABASE testdb SYNC") snapshotting_node.query("DROP DATABASE testdb SYNC") snapshot_recovering_node.query("DROP DATABASE testdb SYNC") + def test_recover_staled_replica(started_cluster): - main_node.query("CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica1');") - started_cluster.get_kazoo_client('zoo1').set('/clickhouse/databases/recover/logs_to_keep', b'10') - dummy_node.query("CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica2');") + main_node.query( + "CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica1');" + ) + 
started_cluster.get_kazoo_client("zoo1").set( + "/clickhouse/databases/recover/logs_to_keep", b"10" + ) + dummy_node.query( + "CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica2');" + ) settings = {"distributed_ddl_task_timeout": 0} main_node.query("CREATE TABLE recover.t1 (n int) ENGINE=Memory", settings=settings) - dummy_node.query("CREATE TABLE recover.t2 (s String) ENGINE=Memory", settings=settings) - main_node.query("CREATE TABLE recover.mt1 (n int) ENGINE=MergeTree order by n", settings=settings) - dummy_node.query("CREATE TABLE recover.mt2 (n int) ENGINE=MergeTree order by n", settings=settings) - main_node.query("CREATE TABLE recover.rmt1 (n int) ENGINE=ReplicatedMergeTree order by n", settings=settings) - dummy_node.query("CREATE TABLE recover.rmt2 (n int) ENGINE=ReplicatedMergeTree order by n", settings=settings) - main_node.query("CREATE TABLE recover.rmt3 (n int) ENGINE=ReplicatedMergeTree order by n", settings=settings) - dummy_node.query("CREATE TABLE recover.rmt5 (n int) ENGINE=ReplicatedMergeTree order by n", settings=settings) - main_node.query("CREATE MATERIALIZED VIEW recover.mv1 (n int) ENGINE=ReplicatedMergeTree order by n AS SELECT n FROM recover.rmt1", settings=settings) - dummy_node.query("CREATE MATERIALIZED VIEW recover.mv2 (n int) ENGINE=ReplicatedMergeTree order by n AS SELECT n FROM recover.rmt2", settings=settings) - main_node.query("CREATE DICTIONARY recover.d1 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n " - "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt1' PASSWORD '' DB 'recover')) " - "LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())") - dummy_node.query("CREATE DICTIONARY recover.d2 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n " - "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt2' PASSWORD '' DB 'recover')) " - "LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())") + dummy_node.query( + "CREATE TABLE recover.t2 (s String) ENGINE=Memory", settings=settings + ) + main_node.query( + "CREATE TABLE recover.mt1 (n int) ENGINE=MergeTree order by n", + settings=settings, + ) + dummy_node.query( + "CREATE TABLE recover.mt2 (n int) ENGINE=MergeTree order by n", + settings=settings, + ) + main_node.query( + "CREATE TABLE recover.rmt1 (n int) ENGINE=ReplicatedMergeTree order by n", + settings=settings, + ) + dummy_node.query( + "CREATE TABLE recover.rmt2 (n int) ENGINE=ReplicatedMergeTree order by n", + settings=settings, + ) + main_node.query( + "CREATE TABLE recover.rmt3 (n int) ENGINE=ReplicatedMergeTree order by n", + settings=settings, + ) + dummy_node.query( + "CREATE TABLE recover.rmt5 (n int) ENGINE=ReplicatedMergeTree order by n", + settings=settings, + ) + main_node.query( + "CREATE MATERIALIZED VIEW recover.mv1 (n int) ENGINE=ReplicatedMergeTree order by n AS SELECT n FROM recover.rmt1", + settings=settings, + ) + dummy_node.query( + "CREATE MATERIALIZED VIEW recover.mv2 (n int) ENGINE=ReplicatedMergeTree order by n AS SELECT n FROM recover.rmt2", + settings=settings, + ) + main_node.query( + "CREATE DICTIONARY recover.d1 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n " + "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt1' PASSWORD '' DB 'recover')) " + "LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())" + ) + dummy_node.query( + "CREATE DICTIONARY recover.d2 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n " + "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt2' PASSWORD '' DB 'recover')) " + "LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())" + ) - for table 
in ['t1', 't2', 'mt1', 'mt2', 'rmt1', 'rmt2', 'rmt3', 'rmt5']: + for table in ["t1", "t2", "mt1", "mt2", "rmt1", "rmt2", "rmt3", "rmt5"]: main_node.query("INSERT INTO recover.{} VALUES (42)".format(table)) - for table in ['t1', 't2', 'mt1', 'mt2']: + for table in ["t1", "t2", "mt1", "mt2"]: dummy_node.query("INSERT INTO recover.{} VALUES (42)".format(table)) - for table in ['rmt1', 'rmt2', 'rmt3', 'rmt5']: + for table in ["rmt1", "rmt2", "rmt3", "rmt5"]: main_node.query("SYSTEM SYNC REPLICA recover.{}".format(table)) with PartitionManager() as pm: pm.drop_instance_zk_connections(dummy_node) dummy_node.query_and_get_error("RENAME TABLE recover.t1 TO recover.m1") - main_node.query_with_retry("RENAME TABLE recover.t1 TO recover.m1", settings=settings) - main_node.query_with_retry("ALTER TABLE recover.mt1 ADD COLUMN m int", settings=settings) - main_node.query_with_retry("ALTER TABLE recover.rmt1 ADD COLUMN m int", settings=settings) - main_node.query_with_retry("RENAME TABLE recover.rmt3 TO recover.rmt4", settings=settings) + main_node.query_with_retry( + "RENAME TABLE recover.t1 TO recover.m1", settings=settings + ) + main_node.query_with_retry( + "ALTER TABLE recover.mt1 ADD COLUMN m int", settings=settings + ) + main_node.query_with_retry( + "ALTER TABLE recover.rmt1 ADD COLUMN m int", settings=settings + ) + main_node.query_with_retry( + "RENAME TABLE recover.rmt3 TO recover.rmt4", settings=settings + ) main_node.query_with_retry("DROP TABLE recover.rmt5", settings=settings) main_node.query_with_retry("DROP DICTIONARY recover.d2", settings=settings) - main_node.query_with_retry("CREATE DICTIONARY recover.d2 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n " - "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt1' PASSWORD '' DB 'recover')) " - "LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT());", settings=settings) + main_node.query_with_retry( + "CREATE DICTIONARY recover.d2 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n " + "SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt1' PASSWORD '' DB 'recover')) " + "LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT());", + settings=settings, + ) - inner_table = ".inner_id." + dummy_node.query_with_retry("SELECT uuid FROM system.tables WHERE database='recover' AND name='mv1'").strip() - main_node.query_with_retry("ALTER TABLE recover.`{}` MODIFY COLUMN n int DEFAULT 42".format(inner_table), settings=settings) - main_node.query_with_retry("ALTER TABLE recover.mv1 MODIFY QUERY SELECT m FROM recover.rmt1".format(inner_table), settings=settings) - main_node.query_with_retry("RENAME TABLE recover.mv2 TO recover.mv3".format(inner_table), settings=settings) + inner_table = ( + ".inner_id." 
+ + dummy_node.query_with_retry( + "SELECT uuid FROM system.tables WHERE database='recover' AND name='mv1'" + ).strip() + ) + main_node.query_with_retry( + "ALTER TABLE recover.`{}` MODIFY COLUMN n int DEFAULT 42".format( + inner_table + ), + settings=settings, + ) + main_node.query_with_retry( + "ALTER TABLE recover.mv1 MODIFY QUERY SELECT m FROM recover.rmt1".format( + inner_table + ), + settings=settings, + ) + main_node.query_with_retry( + "RENAME TABLE recover.mv2 TO recover.mv3".format(inner_table), + settings=settings, + ) - main_node.query_with_retry("CREATE TABLE recover.tmp AS recover.m1", settings=settings) + main_node.query_with_retry( + "CREATE TABLE recover.tmp AS recover.m1", settings=settings + ) main_node.query_with_retry("DROP TABLE recover.tmp", settings=settings) - main_node.query_with_retry("CREATE TABLE recover.tmp AS recover.m1", settings=settings) + main_node.query_with_retry( + "CREATE TABLE recover.tmp AS recover.m1", settings=settings + ) main_node.query_with_retry("DROP TABLE recover.tmp", settings=settings) - main_node.query_with_retry("CREATE TABLE recover.tmp AS recover.m1", settings=settings) + main_node.query_with_retry( + "CREATE TABLE recover.tmp AS recover.m1", settings=settings + ) - assert main_node.query("SELECT name FROM system.tables WHERE database='recover' AND name NOT LIKE '.inner_id.%' ORDER BY name") == \ - "d1\nd2\nm1\nmt1\nmt2\nmv1\nmv3\nrmt1\nrmt2\nrmt4\nt2\ntmp\n" - query = "SELECT name, uuid, create_table_query FROM system.tables WHERE database='recover' AND name NOT LIKE '.inner_id.%' " \ - "ORDER BY name SETTINGS show_table_uuid_in_table_create_query_if_not_nil=1" + assert ( + main_node.query( + "SELECT name FROM system.tables WHERE database='recover' AND name NOT LIKE '.inner_id.%' ORDER BY name" + ) + == "d1\nd2\nm1\nmt1\nmt2\nmv1\nmv3\nrmt1\nrmt2\nrmt4\nt2\ntmp\n" + ) + query = ( + "SELECT name, uuid, create_table_query FROM system.tables WHERE database='recover' AND name NOT LIKE '.inner_id.%' " + "ORDER BY name SETTINGS show_table_uuid_in_table_create_query_if_not_nil=1" + ) expected = main_node.query(query) assert_eq_with_retry(dummy_node, query, expected) - assert main_node.query("SELECT count() FROM system.tables WHERE database='recover' AND name LIKE '.inner_id.%'") == "2\n" - assert dummy_node.query("SELECT count() FROM system.tables WHERE database='recover' AND name LIKE '.inner_id.%'") == "2\n" + assert ( + main_node.query( + "SELECT count() FROM system.tables WHERE database='recover' AND name LIKE '.inner_id.%'" + ) + == "2\n" + ) + assert ( + dummy_node.query( + "SELECT count() FROM system.tables WHERE database='recover' AND name LIKE '.inner_id.%'" + ) + == "2\n" + ) - for table in ['m1', 't2', 'mt1', 'mt2', 'rmt1', 'rmt2', 'rmt4', 'd1', 'd2', 'mv1', 'mv3']: + for table in [ + "m1", + "t2", + "mt1", + "mt2", + "rmt1", + "rmt2", + "rmt4", + "d1", + "d2", + "mv1", + "mv3", + ]: assert main_node.query("SELECT (*,).1 FROM recover.{}".format(table)) == "42\n" - for table in ['t2', 'rmt1', 'rmt2', 'rmt4', 'd1', 'd2', 'mt2', 'mv1', 'mv3']: + for table in ["t2", "rmt1", "rmt2", "rmt4", "d1", "d2", "mt2", "mv1", "mv3"]: assert dummy_node.query("SELECT (*,).1 FROM recover.{}".format(table)) == "42\n" - for table in ['m1', 'mt1']: + for table in ["m1", "mt1"]: assert dummy_node.query("SELECT count() FROM recover.{}".format(table)) == "0\n" global test_recover_staled_replica_run - assert dummy_node.query("SELECT count() FROM system.tables WHERE database='recover_broken_tables'") == f"{2*test_recover_staled_replica_run}\n" + assert ( + 
dummy_node.query( + "SELECT count() FROM system.tables WHERE database='recover_broken_tables'" + ) + == f"{2*test_recover_staled_replica_run}\n" + ) test_recover_staled_replica_run += 1 - table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'mt1_29_%' LIMIT 1").strip() - assert dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table)) == "42\n" - table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'rmt5_29_%' LIMIT 1").strip() - assert dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table)) == "42\n" + table = dummy_node.query( + "SHOW TABLES FROM recover_broken_tables LIKE 'mt1_29_%' LIMIT 1" + ).strip() + assert ( + dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table)) + == "42\n" + ) + table = dummy_node.query( + "SHOW TABLES FROM recover_broken_tables LIKE 'rmt5_29_%' LIMIT 1" + ).strip() + assert ( + dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table)) + == "42\n" + ) expected = "Cleaned 6 outdated objects: dropped 1 dictionaries and 3 tables, moved 2 tables" assert_logs_contain(dummy_node, expected) dummy_node.query("DROP TABLE recover.tmp") - assert_eq_with_retry(main_node, "SELECT count() FROM system.tables WHERE database='recover' AND name='tmp'", "0\n") + assert_eq_with_retry( + main_node, + "SELECT count() FROM system.tables WHERE database='recover' AND name='tmp'", + "0\n", + ) main_node.query("DROP DATABASE recover SYNC") dummy_node.query("DROP DATABASE recover SYNC") + def test_startup_without_zk(started_cluster): with PartitionManager() as pm: pm.drop_instance_zk_connections(main_node) - err = main_node.query_and_get_error("CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');") + err = main_node.query_and_get_error( + "CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');" + ) assert "ZooKeeper" in err - main_node.query("CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');") - #main_node.query("CREATE TABLE startup.rmt (n int) ENGINE=ReplicatedMergeTree order by n") + main_node.query( + "CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');" + ) + # main_node.query("CREATE TABLE startup.rmt (n int) ENGINE=ReplicatedMergeTree order by n") main_node.query("CREATE TABLE startup.rmt (n int) ENGINE=MergeTree order by n") main_node.query("INSERT INTO startup.rmt VALUES (42)") with PartitionManager() as pm: @@ -442,6 +784,7 @@ def test_startup_without_zk(started_cluster): assert main_node.query("SELECT (*,).1 FROM startup.m") == "42\n" main_node.query("DROP DATABASE startup SYNC") + def test_server_uuid(started_cluster): uuid1 = main_node.query("select serverUUID()") uuid2 = dummy_node.query("select serverUUID()") diff --git a/tests/integration/test_replicated_fetches_bandwidth/test.py b/tests/integration/test_replicated_fetches_bandwidth/test.py index f39baea064c..059102f8683 100644 --- a/tests/integration/test_replicated_fetches_bandwidth/test.py +++ b/tests/integration/test_replicated_fetches_bandwidth/test.py @@ -9,9 +9,12 @@ import time import statistics cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) -node3 = cluster.add_instance('node3', user_configs=['configs/limit_replication_config.xml'], with_zookeeper=True) +node1 = cluster.add_instance("node1", 
with_zookeeper=True) +node2 = cluster.add_instance("node2", with_zookeeper=True) +node3 = cluster.add_instance( + "node3", user_configs=["configs/limit_replication_config.xml"], with_zookeeper=True +) + @pytest.fixture(scope="module") def start_cluster(): @@ -22,19 +25,29 @@ def start_cluster(): finally: cluster.shutdown() + def get_random_string(length): - return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length)) + return "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(length) + ) + def test_limited_fetch_single_table(start_cluster): print("Limited fetches single table") try: for i, node in enumerate([node1, node2]): - node.query(f"CREATE TABLE limited_fetch_table(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_fetch_table', '{i}') ORDER BY tuple() PARTITION BY key SETTINGS max_replicated_fetches_network_bandwidth=10485760") + node.query( + f"CREATE TABLE limited_fetch_table(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_fetch_table', '{i}') ORDER BY tuple() PARTITION BY key SETTINGS max_replicated_fetches_network_bandwidth=10485760" + ) node2.query("SYSTEM STOP FETCHES limited_fetch_table") for i in range(5): - node1.query("INSERT INTO limited_fetch_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(300)".format(i)) + node1.query( + "INSERT INTO limited_fetch_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(300)".format( + i + ) + ) n1_net = NetThroughput(node1) n2_net = NetThroughput(node2) @@ -42,31 +55,41 @@ def test_limited_fetch_single_table(start_cluster): node2.query("SYSTEM START FETCHES limited_fetch_table") n2_fetch_speed = [] for i in range(10): - n1_in, n1_out = n1_net.measure_speed('megabytes') - n2_in, n2_out = n2_net.measure_speed('megabytes') - print("[N1] input:", n1_in, 'MB/s', "output:", n1_out, "MB/s") - print("[N2] input:", n2_in, 'MB/s', "output:", n2_out, "MB/s") + n1_in, n1_out = n1_net.measure_speed("megabytes") + n2_in, n2_out = n2_net.measure_speed("megabytes") + print("[N1] input:", n1_in, "MB/s", "output:", n1_out, "MB/s") + print("[N2] input:", n2_in, "MB/s", "output:", n2_out, "MB/s") n2_fetch_speed.append(n2_in) time.sleep(0.5) median_speed = statistics.median(n2_fetch_speed) # approximate border. Without limit we will have more than 100 MB/s for very slow builds. - assert median_speed <= 15, "We exceeded max fetch speed for more than 10MB/s. Must be around 10 (+- 5), got " + str(median_speed) + assert median_speed <= 15, ( + "We exceeded max fetch speed for more than 10MB/s. 
Must be around 10 (+- 5), got " + + str(median_speed) + ) finally: for node in [node1, node2]: node.query("DROP TABLE IF EXISTS limited_fetch_table SYNC") + def test_limited_send_single_table(start_cluster): print("Limited sends single table") try: for i, node in enumerate([node1, node2]): - node.query(f"CREATE TABLE limited_send_table(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_fetch_table', '{i}') ORDER BY tuple() PARTITION BY key SETTINGS max_replicated_sends_network_bandwidth=5242880") + node.query( + f"CREATE TABLE limited_send_table(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_fetch_table', '{i}') ORDER BY tuple() PARTITION BY key SETTINGS max_replicated_sends_network_bandwidth=5242880" + ) node2.query("SYSTEM STOP FETCHES limited_send_table") for i in range(5): - node1.query("INSERT INTO limited_send_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(150)".format(i)) + node1.query( + "INSERT INTO limited_send_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(150)".format( + i + ) + ) n1_net = NetThroughput(node1) n2_net = NetThroughput(node2) @@ -74,16 +97,19 @@ def test_limited_send_single_table(start_cluster): node2.query("SYSTEM START FETCHES limited_send_table") n1_sends_speed = [] for i in range(10): - n1_in, n1_out = n1_net.measure_speed('megabytes') - n2_in, n2_out = n2_net.measure_speed('megabytes') - print("[N1] input:", n1_in, 'MB/s', "output:", n1_out, "MB/s") - print("[N2] input:", n2_in, 'MB/s', "output:", n2_out, "MB/s") + n1_in, n1_out = n1_net.measure_speed("megabytes") + n2_in, n2_out = n2_net.measure_speed("megabytes") + print("[N1] input:", n1_in, "MB/s", "output:", n1_out, "MB/s") + print("[N2] input:", n2_in, "MB/s", "output:", n2_out, "MB/s") n1_sends_speed.append(n1_out) time.sleep(0.5) median_speed = statistics.median(n1_sends_speed) # approximate border. Without limit we will have more than 100 MB/s for very slow builds. - assert median_speed <= 10, "We exceeded max send speed for more than 5MB/s. Must be around 5 (+- 5), got " + str(median_speed) + assert median_speed <= 10, ( + "We exceeded max send speed for more than 5MB/s. 
Must be around 5 (+- 5), got " + + str(median_speed) + ) finally: for node in [node1, node2]: @@ -95,12 +121,18 @@ def test_limited_fetches_for_server(start_cluster): try: for i, node in enumerate([node1, node3]): for j in range(5): - node.query(f"CREATE TABLE limited_fetches{j}(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_fetches{j}', '{i}') ORDER BY tuple() PARTITION BY key") + node.query( + f"CREATE TABLE limited_fetches{j}(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_fetches{j}', '{i}') ORDER BY tuple() PARTITION BY key" + ) for j in range(5): node3.query(f"SYSTEM STOP FETCHES limited_fetches{j}") for i in range(5): - node1.query("INSERT INTO limited_fetches{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format(j, i)) + node1.query( + "INSERT INTO limited_fetches{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format( + j, i + ) + ) n1_net = NetThroughput(node1) n3_net = NetThroughput(node3) @@ -110,16 +142,19 @@ def test_limited_fetches_for_server(start_cluster): n3_fetches_speed = [] for i in range(5): - n1_in, n1_out = n1_net.measure_speed('megabytes') - n3_in, n3_out = n3_net.measure_speed('megabytes') - print("[N1] input:", n1_in, 'MB/s', "output:", n1_out, "MB/s") - print("[N3] input:", n3_in, 'MB/s', "output:", n3_out, "MB/s") + n1_in, n1_out = n1_net.measure_speed("megabytes") + n3_in, n3_out = n3_net.measure_speed("megabytes") + print("[N1] input:", n1_in, "MB/s", "output:", n1_out, "MB/s") + print("[N3] input:", n3_in, "MB/s", "output:", n3_out, "MB/s") n3_fetches_speed.append(n3_in) time.sleep(0.5) median_speed = statistics.median(n3_fetches_speed) # approximate border. Without limit we will have more than 100 MB/s for very slow builds. - assert median_speed <= 15, "We exceeded max fetch speed for more than 15MB/s. Must be around 5 (+- 10), got " + str(median_speed) + assert median_speed <= 15, ( + "We exceeded max fetch speed for more than 15MB/s. 
Must be around 5 (+- 10), got " + + str(median_speed) + ) finally: for node in [node1, node3]: @@ -132,12 +167,18 @@ def test_limited_sends_for_server(start_cluster): try: for i, node in enumerate([node1, node3]): for j in range(5): - node.query(f"CREATE TABLE limited_sends{j}(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_sends{j}', '{i}') ORDER BY tuple() PARTITION BY key") + node.query( + f"CREATE TABLE limited_sends{j}(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/limited_sends{j}', '{i}') ORDER BY tuple() PARTITION BY key" + ) for j in range(5): node1.query(f"SYSTEM STOP FETCHES limited_sends{j}") for i in range(5): - node3.query("INSERT INTO limited_sends{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format(j, i)) + node3.query( + "INSERT INTO limited_sends{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format( + j, i + ) + ) n1_net = NetThroughput(node1) n3_net = NetThroughput(node3) @@ -147,16 +188,19 @@ def test_limited_sends_for_server(start_cluster): n3_sends_speed = [] for i in range(5): - n1_in, n1_out = n1_net.measure_speed('megabytes') - n3_in, n3_out = n3_net.measure_speed('megabytes') - print("[N1] input:", n1_in, 'MB/s', "output:", n1_out, "MB/s") - print("[N3] input:", n3_in, 'MB/s', "output:", n3_out, "MB/s") + n1_in, n1_out = n1_net.measure_speed("megabytes") + n3_in, n3_out = n3_net.measure_speed("megabytes") + print("[N1] input:", n1_in, "MB/s", "output:", n1_out, "MB/s") + print("[N3] input:", n3_in, "MB/s", "output:", n3_out, "MB/s") n3_sends_speed.append(n3_out) time.sleep(0.5) median_speed = statistics.median(n3_sends_speed) # approximate border. Without limit we will have more than 100 MB/s for very slow builds. - assert median_speed <= 20, "We exceeded max send speed for more than 20MB/s. Must be around 5 (+- 10), got " + str(median_speed) + assert median_speed <= 20, ( + "We exceeded max send speed for more than 20MB/s. 
Must be around 5 (+- 10), got " + + str(median_speed) + ) finally: for node in [node1, node3]: @@ -168,12 +212,18 @@ def test_should_execute_fetch(start_cluster): print("Should execute fetch") try: for i, node in enumerate([node1, node2]): - node.query(f"CREATE TABLE should_execute_table(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/should_execute_table', '{i}') ORDER BY tuple() PARTITION BY key SETTINGS max_replicated_fetches_network_bandwidth=3505253") + node.query( + f"CREATE TABLE should_execute_table(key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/should_execute_table', '{i}') ORDER BY tuple() PARTITION BY key SETTINGS max_replicated_fetches_network_bandwidth=3505253" + ) node2.query("SYSTEM STOP FETCHES should_execute_table") for i in range(3): - node1.query("INSERT INTO should_execute_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(200)".format(i)) + node1.query( + "INSERT INTO should_execute_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(200)".format( + i + ) + ) n1_net = NetThroughput(node1) n2_net = NetThroughput(node2) @@ -181,19 +231,27 @@ def test_should_execute_fetch(start_cluster): node2.query("SYSTEM START FETCHES should_execute_table") for i in range(10): - node1.query("INSERT INTO should_execute_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(3)".format(i)) + node1.query( + "INSERT INTO should_execute_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(3)".format( + i + ) + ) n2_fetch_speed = [] replication_queue_data = [] for i in range(10): - n1_in, n1_out = n1_net.measure_speed('megabytes') - n2_in, n2_out = n2_net.measure_speed('megabytes') + n1_in, n1_out = n1_net.measure_speed("megabytes") + n2_in, n2_out = n2_net.measure_speed("megabytes") fetches_count = node2.query("SELECT count() FROM system.replicated_fetches") if fetches_count == "0\n": break print("Fetches count", fetches_count) - replication_queue_data.append(node2.query("SELECT count() FROM system.replication_queue WHERE postpone_reason like '%fetches have already throttled%'")) + replication_queue_data.append( + node2.query( + "SELECT count() FROM system.replication_queue WHERE postpone_reason like '%fetches have already throttled%'" + ) + ) n2_fetch_speed.append(n2_in) time.sleep(0.5) diff --git a/tests/integration/test_replicated_fetches_timeouts/test.py b/tests/integration/test_replicated_fetches_timeouts/test.py index 88763265270..7d5da55549c 100644 --- a/tests/integration/test_replicated_fetches_timeouts/test.py +++ b/tests/integration/test_replicated_fetches_timeouts/test.py @@ -10,12 +10,12 @@ from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( - 'node1', with_zookeeper=True, - main_configs=['configs/server.xml']) + "node1", with_zookeeper=True, main_configs=["configs/server.xml"] +) node2 = cluster.add_instance( - 'node2', with_zookeeper=True, - main_configs=['configs/server.xml']) + "node2", with_zookeeper=True, main_configs=["configs/server.xml"] +) @pytest.fixture(scope="module") @@ -30,22 +30,34 @@ def started_cluster(): def get_random_string(length): - return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length)) + return "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(length) + ) def test_no_stall(started_cluster): for instance in started_cluster.instances.values(): - instance.query(""" + instance.query( + """ CREATE TABLE t (key UInt64, data 
String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '{instance}') ORDER BY tuple() - PARTITION BY key""") + PARTITION BY key""" + ) # Pause node3 until the test setup is prepared node2.query("SYSTEM STOP FETCHES t") - node1.query("INSERT INTO t SELECT 1, '{}' FROM numbers(500)".format(get_random_string(104857))) - node1.query("INSERT INTO t SELECT 2, '{}' FROM numbers(500)".format(get_random_string(104857))) + node1.query( + "INSERT INTO t SELECT 1, '{}' FROM numbers(500)".format( + get_random_string(104857) + ) + ) + node1.query( + "INSERT INTO t SELECT 2, '{}' FROM numbers(500)".format( + get_random_string(104857) + ) + ) with PartitionManager() as pm: pm.add_network_delay(node1, 2000) @@ -53,12 +65,15 @@ def test_no_stall(started_cluster): # Wait for timeout exceptions to confirm that timeout is triggered. while True: - conn_timeout_exceptions = int(node2.query( - """ + conn_timeout_exceptions = int( + node2.query( + """ SELECT count() FROM system.replication_queue WHERE last_exception LIKE '%connect timed out%' - """)) + """ + ) + ) if conn_timeout_exceptions >= 2: break @@ -68,19 +83,24 @@ def test_no_stall(started_cluster): print("Connection timeouts tested!") # Increase connection timeout and wait for receive timeouts. - node2.query(""" + node2.query( + """ ALTER TABLE t MODIFY SETTING replicated_fetches_http_connection_timeout = 30, - replicated_fetches_http_receive_timeout = 1""") + replicated_fetches_http_receive_timeout = 1""" + ) while True: - timeout_exceptions = int(node2.query( - """ + timeout_exceptions = int( + node2.query( + """ SELECT count() FROM system.replication_queue WHERE last_exception LIKE '%Timeout%' AND last_exception NOT LIKE '%connect timed out%' - """).strip()) + """ + ).strip() + ) if timeout_exceptions >= 2: break diff --git a/tests/integration/test_replicated_merge_tree_compatibility/test.py b/tests/integration/test_replicated_merge_tree_compatibility/test.py index b56aa5706c9..00367daad33 100644 --- a/tests/integration/test_replicated_merge_tree_compatibility/test.py +++ b/tests/integration/test_replicated_merge_tree_compatibility/test.py @@ -2,8 +2,23 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='20.12.4.5', stay_alive=True, with_installed_binary=True) -node2 = cluster.add_instance('node2', with_zookeeper=True, image='yandex/clickhouse-server', tag='20.12.4.5', stay_alive=True, with_installed_binary=True) +node1 = cluster.add_instance( + "node1", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="20.12.4.5", + stay_alive=True, + with_installed_binary=True, +) +node2 = cluster.add_instance( + "node2", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="20.12.4.5", + stay_alive=True, + with_installed_binary=True, +) + @pytest.fixture(scope="module") def started_cluster(): @@ -18,13 +33,14 @@ def started_cluster(): finally: cluster.shutdown() + def test_replicated_merge_tree_defaults_compatibility(started_cluster): # This test checks, that result of parsing list of columns with defaults # from 'CREATE/ATTACH' is compatible with parsing from zookeeper metadata on different versions. # We create table and write 'columns' node in zookeeper with old version, than restart with new version # drop and try recreate one replica. During startup of table structure is checked between 'CREATE' query and zookeeper. 
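# A minimal sketch of the compatibility property described in the comment above,
# assuming the kazoo client returned by cluster.get_kazoo_client("zoo1") and the
# standard 'columns' znode that ReplicatedMergeTree keeps under the table path;
# the helper name and the loose containment check are illustrative only.
def assert_local_columns_present_in_zk(
    node, zk, zk_path="/clickhouse/tables/test/table"
):
    # Column list as the server parsed it from the local CREATE/ATTACH statement.
    local_columns = node.query(
        "SELECT name FROM system.columns WHERE database='test' AND table='table'"
    ).split()
    # Column metadata as the older server version serialized it into ZooKeeper.
    stored = zk.get(zk_path + "/columns")[0].decode()
    for column in local_columns:
        assert column in stored, f"column {column} missing from ZooKeeper metadata"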
- create_query = ''' + create_query = """ CREATE TABLE test.table ( a UInt32, @@ -33,7 +49,7 @@ def test_replicated_merge_tree_defaults_compatibility(started_cluster): ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/table', '{replica}') ORDER BY a - ''' + """ for node in (node1, node2): node.query("CREATE DATABASE test ENGINE = Ordinary") @@ -41,10 +57,12 @@ def test_replicated_merge_tree_defaults_compatibility(started_cluster): node1.query("DETACH TABLE test.table") node2.query("SYSTEM DROP REPLICA 'node1' FROM TABLE test.table") - node1.exec_in_container(["bash", "-c", "rm /var/lib/clickhouse/metadata/test/table.sql"]) + node1.exec_in_container( + ["bash", "-c", "rm /var/lib/clickhouse/metadata/test/table.sql"] + ) node1.exec_in_container(["bash", "-c", "rm -r /var/lib/clickhouse/data/test/table"]) - zk = cluster.get_kazoo_client('zoo1') + zk = cluster.get_kazoo_client("zoo1") exists_replica_1 = zk.exists("/clickhouse/tables/test/table/replicas/node1") assert exists_replica_1 == None diff --git a/tests/integration/test_replicated_merge_tree_config/test.py b/tests/integration/test_replicated_merge_tree_config/test.py index 2a7725960bf..b5c033032ba 100644 --- a/tests/integration/test_replicated_merge_tree_config/test.py +++ b/tests/integration/test_replicated_merge_tree_config/test.py @@ -3,7 +3,9 @@ from helpers.cluster import ClickHouseCluster import logging cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", main_configs=["configs/config.xml"], with_zookeeper=True) +node = cluster.add_instance( + "node", main_configs=["configs/config.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") diff --git a/tests/integration/test_replicated_merge_tree_encrypted_disk/test.py b/tests/integration/test_replicated_merge_tree_encrypted_disk/test.py index bc5a419aaf2..aea41fc0684 100644 --- a/tests/integration/test_replicated_merge_tree_encrypted_disk/test.py +++ b/tests/integration/test_replicated_merge_tree_encrypted_disk/test.py @@ -7,17 +7,22 @@ import os SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance("node1", - main_configs=["configs/remote_servers.xml", "configs/storage.xml"], - tmpfs=["/disk:size=100M"], - macros={'replica': 'node1'}, - with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml", "configs/storage.xml"], + tmpfs=["/disk:size=100M"], + macros={"replica": "node1"}, + with_zookeeper=True, +) + +node2 = cluster.add_instance( + "node2", + main_configs=["configs/remote_servers.xml", "configs/storage.xml"], + tmpfs=["/disk:size=100M"], + macros={"replica": "node2"}, + with_zookeeper=True, +) -node2 = cluster.add_instance("node2", - main_configs=["configs/remote_servers.xml", "configs/storage.xml"], - tmpfs=["/disk:size=100M"], - macros={'replica': 'node2'}, - with_zookeeper=True) @pytest.fixture(scope="module", autouse=True) def start_cluster(): @@ -29,9 +34,13 @@ def start_cluster(): def copy_keys(instance, keys_file_name): - instance.copy_file_to_container(os.path.join(SCRIPT_DIR, f"configs/{keys_file_name}.xml"), "/etc/clickhouse-server/config.d/z_keys.xml") + instance.copy_file_to_container( + os.path.join(SCRIPT_DIR, f"configs/{keys_file_name}.xml"), + "/etc/clickhouse-server/config.d/z_keys.xml", + ) instance.query("SYSTEM RELOAD CONFIG") + def create_table(): node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster' NO DELAY") node1.query( @@ -45,16 +54,19 @@ def create_table(): """ ) + def insert_data(): 
node1.query("INSERT INTO tbl VALUES (1, 'str1')") - node2.query("INSERT INTO tbl VALUES (1, 'str1')") # Test deduplication + node2.query("INSERT INTO tbl VALUES (1, 'str1')") # Test deduplication node2.query("INSERT INTO tbl VALUES (2, 'str2')") + def optimize_table(): node1.query("OPTIMIZE TABLE tbl ON CLUSTER 'cluster' FINAL") + def check_table(): - expected=[[1, 'str1'], [2, 'str2']] + expected = [[1, "str1"], [2, "str2"]] assert node1.query("SELECT * FROM tbl ORDER BY id") == TSV(expected) assert node2.query("SELECT * FROM tbl ORDER BY id") == TSV(expected) assert node1.query("CHECK TABLE tbl") == "1\n" @@ -63,9 +75,10 @@ def check_table(): # Actual tests: + def test_same_keys(): - copy_keys(node1, 'key_a') - copy_keys(node2, 'key_a') + copy_keys(node1, "key_a") + copy_keys(node2, "key_a") create_table() insert_data() @@ -76,8 +89,8 @@ def test_same_keys(): def test_different_keys(): - copy_keys(node1, 'key_a') - copy_keys(node2, 'key_b') + copy_keys(node1, "key_a") + copy_keys(node2, "key_b") create_table() insert_data() diff --git a/tests/integration/test_replicated_merge_tree_encryption_codec/test.py b/tests/integration/test_replicated_merge_tree_encryption_codec/test.py index 3aec2259703..6f08daae4cf 100644 --- a/tests/integration/test_replicated_merge_tree_encryption_codec/test.py +++ b/tests/integration/test_replicated_merge_tree_encryption_codec/test.py @@ -7,15 +7,20 @@ import os SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance("node1", - main_configs=["configs/remote_servers.xml", "configs/encryption_codec.xml"], - macros={'replica': 'node1'}, - with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml", "configs/encryption_codec.xml"], + macros={"replica": "node1"}, + with_zookeeper=True, +) + +node2 = cluster.add_instance( + "node2", + main_configs=["configs/remote_servers.xml", "configs/encryption_codec.xml"], + macros={"replica": "node2"}, + with_zookeeper=True, +) -node2 = cluster.add_instance("node2", - main_configs=["configs/remote_servers.xml", "configs/encryption_codec.xml"], - macros={'replica': 'node2'}, - with_zookeeper=True) @pytest.fixture(scope="module", autouse=True) def start_cluster(): @@ -27,9 +32,13 @@ def start_cluster(): def copy_keys(instance, keys_file_name): - instance.copy_file_to_container(os.path.join(SCRIPT_DIR, f"configs/{keys_file_name}.xml"), "/etc/clickhouse-server/config.d/z_keys.xml") + instance.copy_file_to_container( + os.path.join(SCRIPT_DIR, f"configs/{keys_file_name}.xml"), + "/etc/clickhouse-server/config.d/z_keys.xml", + ) instance.query("SYSTEM RELOAD CONFIG") + def create_table(): node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster' NO DELAY") node1.query( @@ -42,16 +51,19 @@ def create_table(): """ ) + def insert_data(): node1.query("INSERT INTO tbl VALUES (1, 'str1')") - node2.query("INSERT INTO tbl VALUES (1, 'str1')") # Test deduplication + node2.query("INSERT INTO tbl VALUES (1, 'str1')") # Test deduplication node2.query("INSERT INTO tbl VALUES (2, 'str2')") + def optimize_table(): node1.query("OPTIMIZE TABLE tbl ON CLUSTER 'cluster' FINAL") + def check_table(): - expected=[[1, 'str1'], [2, 'str2']] + expected = [[1, "str1"], [2, "str2"]] assert node1.query("SELECT * FROM tbl ORDER BY id") == TSV(expected) assert node2.query("SELECT * FROM tbl ORDER BY id") == TSV(expected) assert node1.query("CHECK TABLE tbl") == "1\n" @@ -60,9 +72,10 @@ def check_table(): # Actual tests: + def test_same_keys(): - 
copy_keys(node1, 'key_a') - copy_keys(node2, 'key_a') + copy_keys(node1, "key_a") + copy_keys(node2, "key_a") create_table() insert_data() @@ -73,8 +86,8 @@ def test_same_keys(): def test_different_keys(): - copy_keys(node1, 'key_a') - copy_keys(node2, 'key_b') + copy_keys(node1, "key_a") + copy_keys(node2, "key_b") create_table() insert_data() @@ -82,13 +95,13 @@ def test_different_keys(): assert "BAD_DECRYPT" in node2.query_and_get_error("SELECT * FROM tbl") # Hang? - #optimize_table() - #check_table() + # optimize_table() + # check_table() def test_different_current_key_ids(): - copy_keys(node1, 'key_a_and_b_current_a') - copy_keys(node2, 'key_a_and_b_current_b') + copy_keys(node1, "key_a_and_b_current_a") + copy_keys(node2, "key_a_and_b_current_b") create_table() insert_data() @@ -99,8 +112,8 @@ def test_different_current_key_ids(): def test_different_nonces(): - copy_keys(node1, 'key_a_and_nonce_x') - copy_keys(node2, 'key_a_and_nonce_y') + copy_keys(node1, "key_a_and_nonce_x") + copy_keys(node2, "key_a_and_nonce_y") create_table() insert_data() diff --git a/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py b/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py index f557a69569a..1e34a924e39 100644 --- a/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py +++ b/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/test.py @@ -8,7 +8,7 @@ from helpers.cluster import ClickHouseCluster from pyhdfs import HdfsClient SHARDS = 2 -FILES_OVERHEAD_PER_TABLE = 1 # format_version.txt +FILES_OVERHEAD_PER_TABLE = 1 # format_version.txt FILES_OVERHEAD_PER_PART_COMPACT = 7 @@ -20,31 +20,39 @@ def wait_for_hdfs_objects(cluster, fp, expected, num_tries=30): break num_tries -= 1 time.sleep(1) - assert(len(fs.listdir(fp)) == expected) + assert len(fs.listdir(fp)) == expected @pytest.fixture(scope="module") def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node1", main_configs=["configs/config.d/storage_conf.xml"], - macros={'replica': 'node1'}, - with_zookeeper=True, - with_hdfs=True) - cluster.add_instance("node2", main_configs=["configs/config.d/storage_conf.xml"], - macros={'replica': 'node2'}, - with_zookeeper=True, - with_hdfs=True) + cluster.add_instance( + "node1", + main_configs=["configs/config.d/storage_conf.xml"], + macros={"replica": "node1"}, + with_zookeeper=True, + with_hdfs=True, + ) + cluster.add_instance( + "node2", + main_configs=["configs/config.d/storage_conf.xml"], + macros={"replica": "node2"}, + with_zookeeper=True, + with_hdfs=True, + ) logging.info("Starting cluster...") cluster.start() if cluster.instances["node1"].is_debug_build(): # https://github.com/ClickHouse/ClickHouse/issues/27814 - pytest.skip("libhdfs3 calls rand function which does not pass harmful check in debug build") + pytest.skip( + "libhdfs3 calls rand function which does not pass harmful check in debug build" + ) logging.info("Cluster started") fs = HdfsClient(hosts=cluster.hdfs_ip) - fs.mkdirs('/clickhouse1') - fs.mkdirs('/clickhouse2') + fs.mkdirs("/clickhouse1") + fs.mkdirs("/clickhouse2") logging.info("Created HDFS directory") yield cluster @@ -64,111 +72,190 @@ def test_hdfs_zero_copy_replication_insert(cluster): SETTINGS storage_policy='hdfs_only' """ ) - wait_for_hdfs_objects(cluster, "/clickhouse1", SHARDS * FILES_OVERHEAD_PER_TABLE) + wait_for_hdfs_objects( + cluster, "/clickhouse1", SHARDS * FILES_OVERHEAD_PER_TABLE + ) node1.query("INSERT INTO hdfs_test VALUES (now() - INTERVAL 3 DAY, 10)") node2.query("SYSTEM 
SYNC REPLICA hdfs_test") assert node1.query("SELECT count() FROM hdfs_test FORMAT Values") == "(1)" assert node2.query("SELECT count() FROM hdfs_test FORMAT Values") == "(1)" - assert node1.query("SELECT id FROM hdfs_test ORDER BY dt FORMAT Values") == "(10)" - assert node2.query("SELECT id FROM hdfs_test ORDER BY dt FORMAT Values") == "(10)" - assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hdfs_test' FORMAT Values") == "('all','hdfs1')" - assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hdfs_test' FORMAT Values") == "('all','hdfs1')" - wait_for_hdfs_objects(cluster, "/clickhouse1", SHARDS * FILES_OVERHEAD_PER_TABLE + FILES_OVERHEAD_PER_PART_COMPACT) + assert ( + node1.query("SELECT id FROM hdfs_test ORDER BY dt FORMAT Values") == "(10)" + ) + assert ( + node2.query("SELECT id FROM hdfs_test ORDER BY dt FORMAT Values") == "(10)" + ) + assert ( + node1.query( + "SELECT partition_id,disk_name FROM system.parts WHERE table='hdfs_test' FORMAT Values" + ) + == "('all','hdfs1')" + ) + assert ( + node2.query( + "SELECT partition_id,disk_name FROM system.parts WHERE table='hdfs_test' FORMAT Values" + ) + == "('all','hdfs1')" + ) + wait_for_hdfs_objects( + cluster, + "/clickhouse1", + SHARDS * FILES_OVERHEAD_PER_TABLE + FILES_OVERHEAD_PER_PART_COMPACT, + ) finally: node1.query("DROP TABLE IF EXISTS hdfs_test NO DELAY") node2.query("DROP TABLE IF EXISTS hdfs_test NO DELAY") - @pytest.mark.parametrize( ("storage_policy", "init_objects"), - [("hybrid", 0), - ("tiered", 0), - ("tiered_copy", FILES_OVERHEAD_PER_TABLE)] + [("hybrid", 0), ("tiered", 0), ("tiered_copy", FILES_OVERHEAD_PER_TABLE)], ) def test_hdfs_zero_copy_replication_single_move(cluster, storage_policy, init_objects): node1 = cluster.instances["node1"] try: node1.query( - Template(""" + Template( + """ CREATE TABLE single_node_move_test (dt DateTime, id Int64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/single_node_move_test', '{replica}') ORDER BY (dt, id) SETTINGS storage_policy='$policy' - """).substitute(policy=storage_policy) + """ + ).substitute(policy=storage_policy) ) wait_for_hdfs_objects(cluster, "/clickhouse1", init_objects) - node1.query("INSERT INTO single_node_move_test VALUES (now() - INTERVAL 3 DAY, 10), (now() - INTERVAL 1 DAY, 11)") - assert node1.query("SELECT id FROM single_node_move_test ORDER BY dt FORMAT Values") == "(10),(11)" + node1.query( + "INSERT INTO single_node_move_test VALUES (now() - INTERVAL 3 DAY, 10), (now() - INTERVAL 1 DAY, 11)" + ) + assert ( + node1.query( + "SELECT id FROM single_node_move_test ORDER BY dt FORMAT Values" + ) + == "(10),(11)" + ) - node1.query("ALTER TABLE single_node_move_test MOVE PARTITION ID 'all' TO VOLUME 'external'") - assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='single_node_move_test' FORMAT Values") == "('all','hdfs1')" - assert node1.query("SELECT id FROM single_node_move_test ORDER BY dt FORMAT Values") == "(10),(11)" - wait_for_hdfs_objects(cluster, "/clickhouse1", init_objects + FILES_OVERHEAD_PER_PART_COMPACT) + node1.query( + "ALTER TABLE single_node_move_test MOVE PARTITION ID 'all' TO VOLUME 'external'" + ) + assert ( + node1.query( + "SELECT partition_id,disk_name FROM system.parts WHERE table='single_node_move_test' FORMAT Values" + ) + == "('all','hdfs1')" + ) + assert ( + node1.query( + "SELECT id FROM single_node_move_test ORDER BY dt FORMAT Values" + ) + == "(10),(11)" + ) + wait_for_hdfs_objects( + cluster, "/clickhouse1", init_objects + 
FILES_OVERHEAD_PER_PART_COMPACT + ) - node1.query("ALTER TABLE single_node_move_test MOVE PARTITION ID 'all' TO VOLUME 'main'") - assert node1.query("SELECT id FROM single_node_move_test ORDER BY dt FORMAT Values") == "(10),(11)" + node1.query( + "ALTER TABLE single_node_move_test MOVE PARTITION ID 'all' TO VOLUME 'main'" + ) + assert ( + node1.query( + "SELECT id FROM single_node_move_test ORDER BY dt FORMAT Values" + ) + == "(10),(11)" + ) finally: node1.query("DROP TABLE IF EXISTS single_node_move_test NO DELAY") @pytest.mark.parametrize( ("storage_policy", "init_objects"), - [("hybrid", 0), - ("tiered", 0), - ("tiered_copy", SHARDS * FILES_OVERHEAD_PER_TABLE)] + [("hybrid", 0), ("tiered", 0), ("tiered_copy", SHARDS * FILES_OVERHEAD_PER_TABLE)], ) def test_hdfs_zero_copy_replication_move(cluster, storage_policy, init_objects): node1 = cluster.instances["node1"] node2 = cluster.instances["node2"] try: node1.query( - Template(""" + Template( + """ CREATE TABLE move_test ON CLUSTER test_cluster (dt DateTime, id Int64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/move_test', '{replica}') ORDER BY (dt, id) SETTINGS storage_policy='$policy' - """).substitute(policy=storage_policy) + """ + ).substitute(policy=storage_policy) ) wait_for_hdfs_objects(cluster, "/clickhouse1", init_objects) - node1.query("INSERT INTO move_test VALUES (now() - INTERVAL 3 DAY, 10), (now() - INTERVAL 1 DAY, 11)") + node1.query( + "INSERT INTO move_test VALUES (now() - INTERVAL 3 DAY, 10), (now() - INTERVAL 1 DAY, 11)" + ) node2.query("SYSTEM SYNC REPLICA move_test") - assert node1.query("SELECT id FROM move_test ORDER BY dt FORMAT Values") == "(10),(11)" - assert node2.query("SELECT id FROM move_test ORDER BY dt FORMAT Values") == "(10),(11)" + assert ( + node1.query("SELECT id FROM move_test ORDER BY dt FORMAT Values") + == "(10),(11)" + ) + assert ( + node2.query("SELECT id FROM move_test ORDER BY dt FORMAT Values") + == "(10),(11)" + ) - node1.query("ALTER TABLE move_test MOVE PARTITION ID 'all' TO VOLUME 'external'") - wait_for_hdfs_objects(cluster, "/clickhouse1", init_objects + FILES_OVERHEAD_PER_PART_COMPACT) + node1.query( + "ALTER TABLE move_test MOVE PARTITION ID 'all' TO VOLUME 'external'" + ) + wait_for_hdfs_objects( + cluster, "/clickhouse1", init_objects + FILES_OVERHEAD_PER_PART_COMPACT + ) - node2.query("ALTER TABLE move_test MOVE PARTITION ID 'all' TO VOLUME 'external'") - assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='move_test' FORMAT Values") == "('all','hdfs1')" - assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='move_test' FORMAT Values") == "('all','hdfs1')" - assert node1.query("SELECT id FROM move_test ORDER BY dt FORMAT Values") == "(10),(11)" - assert node2.query("SELECT id FROM move_test ORDER BY dt FORMAT Values") == "(10),(11)" - wait_for_hdfs_objects(cluster, "/clickhouse1", init_objects + FILES_OVERHEAD_PER_PART_COMPACT) + node2.query( + "ALTER TABLE move_test MOVE PARTITION ID 'all' TO VOLUME 'external'" + ) + assert ( + node1.query( + "SELECT partition_id,disk_name FROM system.parts WHERE table='move_test' FORMAT Values" + ) + == "('all','hdfs1')" + ) + assert ( + node2.query( + "SELECT partition_id,disk_name FROM system.parts WHERE table='move_test' FORMAT Values" + ) + == "('all','hdfs1')" + ) + assert ( + node1.query("SELECT id FROM move_test ORDER BY dt FORMAT Values") + == "(10),(11)" + ) + assert ( + node2.query("SELECT id FROM move_test ORDER BY dt FORMAT Values") + == "(10),(11)" + ) + 
wait_for_hdfs_objects( + cluster, "/clickhouse1", init_objects + FILES_OVERHEAD_PER_PART_COMPACT + ) finally: node1.query("DROP TABLE IF EXISTS move_test NO DELAY") node2.query("DROP TABLE IF EXISTS move_test NO DELAY") -@pytest.mark.parametrize( - ("storage_policy"), ["hybrid", "tiered", "tiered_copy"] -) +@pytest.mark.parametrize(("storage_policy"), ["hybrid", "tiered", "tiered_copy"]) def test_hdfs_zero_copy_with_ttl_move(cluster, storage_policy): node1 = cluster.instances["node1"] node2 = cluster.instances["node2"] try: node1.query( - Template(""" + Template( + """ CREATE TABLE ttl_move_test ON CLUSTER test_cluster (dt DateTime, id Int64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/ttl_move_test', '{replica}') ORDER BY (dt, id) TTL dt + INTERVAL 2 DAY TO VOLUME 'external' SETTINGS storage_policy='$policy' - """).substitute(policy=storage_policy) + """ + ).substitute(policy=storage_policy) ) node1.query("INSERT INTO ttl_move_test VALUES (now() - INTERVAL 3 DAY, 10)") @@ -179,8 +266,14 @@ def test_hdfs_zero_copy_with_ttl_move(cluster, storage_policy): assert node1.query("SELECT count() FROM ttl_move_test FORMAT Values") == "(2)" assert node2.query("SELECT count() FROM ttl_move_test FORMAT Values") == "(2)" - assert node1.query("SELECT id FROM ttl_move_test ORDER BY id FORMAT Values") == "(10),(11)" - assert node2.query("SELECT id FROM ttl_move_test ORDER BY id FORMAT Values") == "(10),(11)" + assert ( + node1.query("SELECT id FROM ttl_move_test ORDER BY id FORMAT Values") + == "(10),(11)" + ) + assert ( + node2.query("SELECT id FROM ttl_move_test ORDER BY id FORMAT Values") + == "(10),(11)" + ) finally: node1.query("DROP TABLE IF EXISTS ttl_move_test NO DELAY") node2.query("DROP TABLE IF EXISTS ttl_move_test NO DELAY") @@ -208,8 +301,14 @@ def test_hdfs_zero_copy_with_ttl_delete(cluster): assert node1.query("SELECT count() FROM ttl_delete_test FORMAT Values") == "(1)" assert node2.query("SELECT count() FROM ttl_delete_test FORMAT Values") == "(1)" - assert node1.query("SELECT id FROM ttl_delete_test ORDER BY id FORMAT Values") == "(11)" - assert node2.query("SELECT id FROM ttl_delete_test ORDER BY id FORMAT Values") == "(11)" + assert ( + node1.query("SELECT id FROM ttl_delete_test ORDER BY id FORMAT Values") + == "(11)" + ) + assert ( + node2.query("SELECT id FROM ttl_delete_test ORDER BY id FORMAT Values") + == "(11)" + ) finally: node1.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY") node2.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY") diff --git a/tests/integration/test_replicated_merge_tree_s3/test.py b/tests/integration/test_replicated_merge_tree_s3/test.py index d04bdae36e2..cc85a4eab02 100644 --- a/tests/integration/test_replicated_merge_tree_s3/test.py +++ b/tests/integration/test_replicated_merge_tree_s3/test.py @@ -11,12 +11,25 @@ def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node1", main_configs=["configs/config.d/storage_conf.xml"], macros={'replica': '1'}, - with_minio=True, with_zookeeper=True) - cluster.add_instance("node2", main_configs=["configs/config.d/storage_conf.xml"], macros={'replica': '2'}, - with_zookeeper=True) - cluster.add_instance("node3", main_configs=["configs/config.d/storage_conf.xml"], macros={'replica': '3'}, - with_zookeeper=True) + cluster.add_instance( + "node1", + main_configs=["configs/config.d/storage_conf.xml"], + macros={"replica": "1"}, + with_minio=True, + with_zookeeper=True, + ) + cluster.add_instance( + "node2", + main_configs=["configs/config.d/storage_conf.xml"], + 
macros={"replica": "2"}, + with_zookeeper=True, + ) + cluster.add_instance( + "node3", + main_configs=["configs/config.d/storage_conf.xml"], + macros={"replica": "3"}, + with_zookeeper=True, + ) logging.info("Starting cluster...") cluster.start() @@ -35,7 +48,7 @@ FILES_OVERHEAD_PER_PART_COMPACT = 10 + 1 def random_string(length): letters = string.ascii_letters - return ''.join(random.choice(letters) for i in range(length)) + return "".join(random.choice(letters) for i in range(length)) def generate_values(date_str, count, sign=1): @@ -71,32 +84,43 @@ def drop_table(cluster): minio = cluster.minio_client # Remove extra objects to prevent tests cascade failing - for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')): + for obj in list(minio.list_objects(cluster.minio_bucket, "data/")): minio.remove_object(cluster.minio_bucket, obj.object_name) + @pytest.mark.parametrize( "min_rows_for_wide_part,files_per_part", - [ - (0, FILES_OVERHEAD_PER_PART_WIDE), - (8192, FILES_OVERHEAD_PER_PART_COMPACT) - ] + [(0, FILES_OVERHEAD_PER_PART_WIDE), (8192, FILES_OVERHEAD_PER_PART_COMPACT)], ) def test_insert_select_replicated(cluster, min_rows_for_wide_part, files_per_part): - create_table(cluster, additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part)) + create_table( + cluster, + additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part), + ) all_values = "" for node_idx in range(1, 4): node = cluster.instances["node" + str(node_idx)] values = generate_values("2020-01-0" + str(node_idx), 4096) - node.query("INSERT INTO s3_test VALUES {}".format(values), settings={"insert_quorum": 3}) + node.query( + "INSERT INTO s3_test VALUES {}".format(values), + settings={"insert_quorum": 3}, + ) if node_idx != 1: all_values += "," all_values += values for node_idx in range(1, 4): node = cluster.instances["node" + str(node_idx)] - assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values", - settings={"select_sequential_consistency": 1}) == all_values + assert ( + node.query( + "SELECT * FROM s3_test order by dt, id FORMAT Values", + settings={"select_sequential_consistency": 1}, + ) + == all_values + ) minio = cluster.minio_client - assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 3 * (FILES_OVERHEAD + files_per_part * 3) + assert len(list(minio.list_objects(cluster.minio_bucket, "data/"))) == 3 * ( + FILES_OVERHEAD + files_per_part * 3 + ) diff --git a/tests/integration/test_replicated_merge_tree_s3_zero_copy/test.py b/tests/integration/test_replicated_merge_tree_s3_zero_copy/test.py index edf39969b47..73b611ad169 100644 --- a/tests/integration/test_replicated_merge_tree_s3_zero_copy/test.py +++ b/tests/integration/test_replicated_merge_tree_s3_zero_copy/test.py @@ -14,12 +14,25 @@ def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node1", main_configs=["configs/config.d/storage_conf.xml"], macros={'replica': '1'}, - with_minio=True, with_zookeeper=True) - cluster.add_instance("node2", main_configs=["configs/config.d/storage_conf.xml"], macros={'replica': '2'}, - with_zookeeper=True) - cluster.add_instance("node3", main_configs=["configs/config.d/storage_conf.xml"], macros={'replica': '3'}, - with_zookeeper=True) + cluster.add_instance( + "node1", + main_configs=["configs/config.d/storage_conf.xml"], + macros={"replica": "1"}, + with_minio=True, + with_zookeeper=True, + ) + cluster.add_instance( + "node2", + main_configs=["configs/config.d/storage_conf.xml"], + macros={"replica": "2"}, + 
with_zookeeper=True, + ) + cluster.add_instance( + "node3", + main_configs=["configs/config.d/storage_conf.xml"], + macros={"replica": "3"}, + with_zookeeper=True, + ) logging.info("Starting cluster...") cluster.start() @@ -38,7 +51,7 @@ FILES_OVERHEAD_PER_PART_COMPACT = 10 + 1 def random_string(length): letters = string.ascii_letters - return ''.join(random.choice(letters) for i in range(length)) + return "".join(random.choice(letters) for i in range(length)) def generate_values(date_str, count, sign=1): @@ -65,6 +78,7 @@ def create_table(cluster, additional_settings=None): list(cluster.instances.values())[0].query(create_table_statement) + @pytest.fixture(autouse=True) def drop_table(cluster): yield @@ -73,32 +87,43 @@ def drop_table(cluster): minio = cluster.minio_client # Remove extra objects to prevent tests cascade failing - for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')): + for obj in list(minio.list_objects(cluster.minio_bucket, "data/")): minio.remove_object(cluster.minio_bucket, obj.object_name) + @pytest.mark.parametrize( "min_rows_for_wide_part,files_per_part", - [ - (0, FILES_OVERHEAD_PER_PART_WIDE), - (8192, FILES_OVERHEAD_PER_PART_COMPACT) - ] + [(0, FILES_OVERHEAD_PER_PART_WIDE), (8192, FILES_OVERHEAD_PER_PART_COMPACT)], ) def test_insert_select_replicated(cluster, min_rows_for_wide_part, files_per_part): - create_table(cluster, additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part)) + create_table( + cluster, + additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part), + ) all_values = "" for node_idx in range(1, 4): node = cluster.instances["node" + str(node_idx)] values = generate_values("2020-01-0" + str(node_idx), 4096) - node.query("INSERT INTO s3_test VALUES {}".format(values), settings={"insert_quorum": 3}) + node.query( + "INSERT INTO s3_test VALUES {}".format(values), + settings={"insert_quorum": 3}, + ) if node_idx != 1: all_values += "," all_values += values for node_idx in range(1, 4): node = cluster.instances["node" + str(node_idx)] - assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values", - settings={"select_sequential_consistency": 1}) == all_values + assert ( + node.query( + "SELECT * FROM s3_test order by dt, id FORMAT Values", + settings={"select_sequential_consistency": 1}, + ) + == all_values + ) minio = cluster.minio_client - assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == (3 * FILES_OVERHEAD) + (files_per_part * 3) + assert len(list(minio.list_objects(cluster.minio_bucket, "data/"))) == ( + 3 * FILES_OVERHEAD + ) + (files_per_part * 3) diff --git a/tests/integration/test_replicated_merge_tree_with_auxiliary_zookeepers/test.py b/tests/integration/test_replicated_merge_tree_with_auxiliary_zookeepers/test.py index 4644790ff94..c46e6840153 100644 --- a/tests/integration/test_replicated_merge_tree_with_auxiliary_zookeepers/test.py +++ b/tests/integration/test_replicated_merge_tree_with_auxiliary_zookeepers/test.py @@ -7,8 +7,16 @@ from helpers.client import QueryRuntimeException from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance("node1", main_configs=["configs/zookeeper_config.xml", "configs/remote_servers.xml"], with_zookeeper=True) -node2 = cluster.add_instance("node2", main_configs=["configs/zookeeper_config.xml", "configs/remote_servers.xml"], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/zookeeper_config.xml", "configs/remote_servers.xml"], + 
with_zookeeper=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/zookeeper_config.xml", "configs/remote_servers.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -29,16 +37,20 @@ def drop_table(nodes, table_name): for node in nodes: node.query("DROP TABLE IF EXISTS {} NO DELAY".format(table_name)) + # Create table with default zookeeper. def test_create_replicated_merge_tree_with_default_zookeeper(started_cluster): drop_table([node1, node2], "test_default_zookeeper") for node in [node1, node2]: node.query( - ''' + """ CREATE TABLE test_default_zookeeper(a Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_default_zookeeper', '{replica}') ORDER BY a; - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) # Insert data into node1, and query it from node2. node1.query("INSERT INTO test_default_zookeeper VALUES (1)") @@ -48,16 +60,20 @@ def test_create_replicated_merge_tree_with_default_zookeeper(started_cluster): assert TSV(node1.query("SELECT a FROM test_default_zookeeper")) == TSV(expected) assert TSV(node2.query("SELECT a FROM test_default_zookeeper")) == TSV(expected) + # Create table with auxiliary zookeeper. def test_create_replicated_merge_tree_with_auxiliary_zookeeper(started_cluster): drop_table([node1, node2], "test_auxiliary_zookeeper") for node in [node1, node2]: node.query( - ''' + """ CREATE TABLE test_auxiliary_zookeeper(a Int32) ENGINE = ReplicatedMergeTree('zookeeper2:/clickhouse/tables/test/test_auxiliary_zookeeper', '{replica}') ORDER BY a; - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) # Insert data into node1, and query it from node2. node1.query("INSERT INTO test_auxiliary_zookeeper VALUES (1)") @@ -67,27 +83,37 @@ def test_create_replicated_merge_tree_with_auxiliary_zookeeper(started_cluster): assert TSV(node1.query("SELECT a FROM test_auxiliary_zookeeper")) == TSV(expected) assert TSV(node2.query("SELECT a FROM test_auxiliary_zookeeper")) == TSV(expected) + # Create table with auxiliary zookeeper. -def test_create_replicated_merge_tree_with_not_exists_auxiliary_zookeeper(started_cluster): +def test_create_replicated_merge_tree_with_not_exists_auxiliary_zookeeper( + started_cluster, +): drop_table([node1], "test_auxiliary_zookeeper") with pytest.raises(QueryRuntimeException): node1.query( - ''' + """ CREATE TABLE test_auxiliary_zookeeper(a Int32) ENGINE = ReplicatedMergeTree('zookeeper_not_exits:/clickhouse/tables/test/test_auxiliary_zookeeper', '{replica}') ORDER BY a; - '''.format(replica=node1.name)) + """.format( + replica=node1.name + ) + ) + # Drop table with auxiliary zookeeper. def test_drop_replicated_merge_tree_with_auxiliary_zookeeper(started_cluster): drop_table([node1, node2], "test_auxiliary_zookeeper") for node in [node1, node2]: node.query( - ''' + """ CREATE TABLE test_auxiliary_zookeeper(a Int32) ENGINE = ReplicatedMergeTree('zookeeper2:/clickhouse/tables/test/test_auxiliary_zookeeper', '{replica}') ORDER BY a; - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) # Insert data into node1, and query it from node2. 
node1.query("INSERT INTO test_auxiliary_zookeeper VALUES (1)") @@ -97,17 +123,26 @@ def test_drop_replicated_merge_tree_with_auxiliary_zookeeper(started_cluster): assert TSV(node1.query("SELECT a FROM test_auxiliary_zookeeper")) == TSV(expected) assert TSV(node2.query("SELECT a FROM test_auxiliary_zookeeper")) == TSV(expected) - zk = cluster.get_kazoo_client('zoo1') - assert zk.exists('/clickhouse/tables/test/test_auxiliary_zookeeper') + zk = cluster.get_kazoo_client("zoo1") + assert zk.exists("/clickhouse/tables/test/test_auxiliary_zookeeper") drop_table([node1, node2], "test_auxiliary_zookeeper") - assert zk.exists('/clickhouse/tables/test/test_auxiliary_zookeeper') is None + assert zk.exists("/clickhouse/tables/test/test_auxiliary_zookeeper") is None + def test_path_ambiguity(started_cluster): drop_table([node1, node2], "test_path_ambiguity1") drop_table([node1, node2], "test_path_ambiguity2") - node1.query("create table test_path_ambiguity1 (n int) engine=ReplicatedMergeTree('/test:bad:/path', '1') order by n") - assert "Invalid auxiliary ZooKeeper name" in node1.query_and_get_error("create table test_path_ambiguity2 (n int) engine=ReplicatedMergeTree('test:bad:/path', '1') order by n") - assert "ZooKeeper path must starts with '/'" in node1.query_and_get_error("create table test_path_ambiguity2 (n int) engine=ReplicatedMergeTree('test/bad:/path', '1') order by n") - node1.query("create table test_path_ambiguity2 (n int) engine=ReplicatedMergeTree('zookeeper2:/bad:/path', '1') order by n") + node1.query( + "create table test_path_ambiguity1 (n int) engine=ReplicatedMergeTree('/test:bad:/path', '1') order by n" + ) + assert "Invalid auxiliary ZooKeeper name" in node1.query_and_get_error( + "create table test_path_ambiguity2 (n int) engine=ReplicatedMergeTree('test:bad:/path', '1') order by n" + ) + assert "ZooKeeper path must starts with '/'" in node1.query_and_get_error( + "create table test_path_ambiguity2 (n int) engine=ReplicatedMergeTree('test/bad:/path', '1') order by n" + ) + node1.query( + "create table test_path_ambiguity2 (n int) engine=ReplicatedMergeTree('zookeeper2:/bad:/path', '1') order by n" + ) drop_table([node1, node2], "test_path_ambiguity1") drop_table([node1, node2], "test_path_ambiguity2") diff --git a/tests/integration/test_replicated_mutations/test.py b/tests/integration/test_replicated_mutations/test.py index 5efc022cf36..7479f082b06 100644 --- a/tests/integration/test_replicated_mutations/test.py +++ b/tests/integration/test_replicated_mutations/test.py @@ -9,35 +9,55 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', macros={'cluster': 'test1'}, with_zookeeper=True) +node1 = cluster.add_instance("node1", macros={"cluster": "test1"}, with_zookeeper=True) # Check, that limits on max part size for merges doesn`t affect mutations -node2 = cluster.add_instance('node2', macros={'cluster': 'test1'}, main_configs=["configs/merge_tree.xml"], - with_zookeeper=True) +node2 = cluster.add_instance( + "node2", + macros={"cluster": "test1"}, + main_configs=["configs/merge_tree.xml"], + with_zookeeper=True, +) -node3 = cluster.add_instance('node3', macros={'cluster': 'test2'}, main_configs=["configs/merge_tree_max_parts.xml"], - with_zookeeper=True) -node4 = cluster.add_instance('node4', macros={'cluster': 'test2'}, main_configs=["configs/merge_tree_max_parts.xml"], - with_zookeeper=True) +node3 = cluster.add_instance( + "node3", + macros={"cluster": "test2"}, + 
main_configs=["configs/merge_tree_max_parts.xml"], + with_zookeeper=True, +) +node4 = cluster.add_instance( + "node4", + macros={"cluster": "test2"}, + main_configs=["configs/merge_tree_max_parts.xml"], + with_zookeeper=True, +) -node5 = cluster.add_instance('node5', macros={'cluster': 'test3'}, main_configs=["configs/merge_tree_max_parts.xml"]) +node5 = cluster.add_instance( + "node5", + macros={"cluster": "test3"}, + main_configs=["configs/merge_tree_max_parts.xml"], +) all_nodes = [node1, node2, node3, node4, node5] + def prepare_cluster(): for node in all_nodes: node.query("DROP TABLE IF EXISTS test_mutations SYNC") for node in [node1, node2, node3, node4]: - node.query(""" + node.query( + """ CREATE TABLE test_mutations(d Date, x UInt32, i UInt32) ENGINE ReplicatedMergeTree('/clickhouse/{cluster}/tables/test/test_mutations', '{instance}') ORDER BY x PARTITION BY toYYYYMM(d) SETTINGS number_of_free_entries_in_pool_to_execute_mutation=0 - """) + """ + ) node5.query( - "CREATE TABLE test_mutations(d Date, x UInt32, i UInt32) ENGINE MergeTree() ORDER BY x PARTITION BY toYYYYMM(d)") + "CREATE TABLE test_mutations(d Date, x UInt32, i UInt32) ENGINE MergeTree() ORDER BY x PARTITION BY toYYYYMM(d)" + ) @pytest.fixture(scope="module") @@ -76,7 +96,7 @@ class Runner: # Each thread inserts a small random number of rows with random year, month 01 and day determined # by the thread number. The idea is to avoid spurious duplicates and to insert into a # nontrivial number of partitions. - month = '01' + month = "01" day = str(thread_num + 1).zfill(2) i = 1 while not self.stop_ev.is_set(): @@ -89,15 +109,17 @@ class Runner: self.currently_inserting_xs[x] += 1 year = 2000 + random.randint(0, partitions_num) - date_str = '{year}-{month}-{day}'.format(year=year, month=month, day=day) - payload = '' + date_str = "{year}-{month}-{day}".format(year=year, month=month, day=day) + payload = "" for x in xs: - payload += '{date_str} {x} {i}\n'.format(date_str=date_str, x=x, i=i) + payload += "{date_str} {x} {i}\n".format(date_str=date_str, x=x, i=i) i += 1 try: logging.debug(f"thread {thread_num}: insert for {date_str}: {xs}") - random.choice(self.nodes).query("INSERT INTO test_mutations FORMAT TSV", payload) + random.choice(self.nodes).query( + "INSERT INTO test_mutations FORMAT TSV", payload + ) with self.mtx: for x in xs: @@ -124,7 +146,10 @@ class Runner: if self.current_xs: x = random.choice(list(self.current_xs.elements())) - if self.currently_inserting_xs[x] == 0 and x not in self.currently_deleting_xs: + if ( + self.currently_inserting_xs[x] == 0 + and x not in self.currently_deleting_xs + ): chosen = True self.currently_deleting_xs.add(x) to_delete_count = self.current_xs[x] @@ -135,7 +160,9 @@ class Runner: try: logging.debug(f"thread {thread_num}: delete {to_delete_count} * {x}") - random.choice(self.nodes).query("ALTER TABLE test_mutations DELETE WHERE x = {}".format(x)) + random.choice(self.nodes).query( + "ALTER TABLE test_mutations DELETE WHERE x = {}".format(x) + ) with self.mtx: self.total_mutations += 1 @@ -157,7 +184,11 @@ def wait_for_mutations(nodes, number_of_mutations): time.sleep(0.8) def get_done_mutations(node): - return int(node.query("SELECT sum(is_done) FROM system.mutations WHERE table = 'test_mutations'").rstrip()) + return int( + node.query( + "SELECT sum(is_done) FROM system.mutations WHERE table = 'test_mutations'" + ).rstrip() + ) if all([get_done_mutations(n) == number_of_mutations for n in nodes]): return True @@ -195,32 +226,41 @@ def test_mutations(started_cluster): 
all_done = wait_for_mutations(nodes, runner.total_mutations) logging.debug(f"Total mutations: {runner.total_mutations}") for node in nodes: - logging.debug(node.query( - "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations' FORMAT TSVWithNames")) + logging.debug( + node.query( + "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations' FORMAT TSVWithNames" + ) + ) assert all_done expected_sum = runner.total_inserted_xs - runner.total_deleted_xs actual_sums = [] for i, node in enumerate(nodes): - actual_sums.append(int(node.query("SELECT sum(x) FROM test_mutations").rstrip())) + actual_sums.append( + int(node.query("SELECT sum(x) FROM test_mutations").rstrip()) + ) assert actual_sums[i] == expected_sum @pytest.mark.parametrize( - ('nodes',), + ("nodes",), [ - ([node5, ],), # MergeTree + ( + [ + node5, + ], + ), # MergeTree ([node3, node4],), # ReplicatedMergeTree - ] + ], ) def test_mutations_dont_prevent_merges(started_cluster, nodes): prepare_cluster() for year in range(2000, 2016): - rows = '' - date_str = '{}-01-{}'.format(year, random.randint(1, 10)) + rows = "" + date_str = "{}-01-{}".format(year, random.randint(1, 10)) for i in range(10): - rows += '{} {} {}\n'.format(date_str, random.randint(1, 10), i) + rows += "{} {} {}\n".format(date_str, random.randint(1, 10), i) nodes[0].query("INSERT INTO test_mutations FORMAT TSV", rows) # will run mutations of 16 parts in parallel, mutations will sleep for about 20 seconds @@ -242,10 +282,16 @@ def test_mutations_dont_prevent_merges(started_cluster, nodes): t.join() for node in nodes: - logging.debug(node.query( - "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations' FORMAT TSVWithNames")) - logging.debug(node.query( - "SELECT partition, count(name), sum(active), sum(active*rows) FROM system.parts WHERE table ='test_mutations' GROUP BY partition FORMAT TSVWithNames")) + logging.debug( + node.query( + "SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations' FORMAT TSVWithNames" + ) + ) + logging.debug( + node.query( + "SELECT partition, count(name), sum(active), sum(active*rows) FROM system.parts WHERE table ='test_mutations' GROUP BY partition FORMAT TSVWithNames" + ) + ) assert all_done, "All done" assert all([str(e).find("Too many parts") < 0 for e in runner.exceptions]) diff --git a/tests/integration/test_replicated_parse_zk_metadata/test.py b/tests/integration/test_replicated_parse_zk_metadata/test.py index d8b6685ddcd..4646d502b70 100644 --- a/tests/integration/test_replicated_parse_zk_metadata/test.py +++ b/tests/integration/test_replicated_parse_zk_metadata/test.py @@ -3,10 +3,10 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', with_zookeeper=True) +node = cluster.add_instance("node", with_zookeeper=True) -@pytest.fixture(scope='module', autouse=True) +@pytest.fixture(scope="module", autouse=True) def started_cluster(): try: cluster.start() @@ -17,7 +17,7 @@ def started_cluster(): def test_replicated_engine_parse_metadata_on_attach(): node.query( - ''' + """ CREATE TABLE data ( key Int, INDEX key_idx0 key+0 TYPE minmax GRANULARITY 1, @@ -25,15 +25,18 @@ def test_replicated_engine_parse_metadata_on_attach(): ) ENGINE = ReplicatedMergeTree('/ch/tables/default/data', 'node') ORDER BY key; - ''') - node.query('DETACH TABLE data') + """ + ) + node.query("DETACH 
TABLE data") - zk = cluster.get_kazoo_client('zoo1') + zk = cluster.get_kazoo_client("zoo1") # Add **extra space between indices**, to check that it will be re-parsed # and successfully accepted by the server. # # This metadata was obtain from the server without #11325 - zk.set('/ch/tables/default/data/replicas/node/metadata', b""" + zk.set( + "/ch/tables/default/data/replicas/node/metadata", + b""" metadata format version: 1 date column: sampling expression: @@ -46,5 +49,6 @@ partition key: indices: key_idx0 key + 0 TYPE minmax GRANULARITY 1, key_idx1 key + 1 TYPE minmax GRANULARITY 1 granularity bytes: 10485760 -""".lstrip()) - node.query('ATTACH TABLE data') +""".lstrip(), + ) + node.query("ATTACH TABLE data") diff --git a/tests/integration/test_replicated_users/test.py b/tests/integration/test_replicated_users/test.py index 75bc93921be..add45d262e6 100644 --- a/tests/integration/test_replicated_users/test.py +++ b/tests/integration/test_replicated_users/test.py @@ -5,11 +5,16 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/config.xml'], with_zookeeper=True, stay_alive=True) -node2 = cluster.add_instance('node2', main_configs=['configs/config.xml'], with_zookeeper=True, stay_alive=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/config.xml"], with_zookeeper=True, stay_alive=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/config.xml"], with_zookeeper=True, stay_alive=True +) all_nodes = [node1, node2] + @pytest.fixture(scope="module") def started_cluster(): try: @@ -31,9 +36,10 @@ entities = [ Entity(keyword="ROLE", name="therole"), Entity(keyword="ROW POLICY", name="thepolicy", options=" ON default.t1"), Entity(keyword="QUOTA", name="thequota"), - Entity(keyword="SETTINGS PROFILE", name="theprofile") + Entity(keyword="SETTINGS PROFILE", name="theprofile"), ] + def get_entity_id(entity): return entity.keyword @@ -41,8 +47,12 @@ def get_entity_id(entity): @pytest.mark.parametrize("entity", entities, ids=get_entity_id) def test_create_replicated(started_cluster, entity): node1.query(f"CREATE {entity.keyword} {entity.name} {entity.options}") - assert f"cannot insert because {entity.keyword.lower()} `{entity.name}{entity.options}` already exists in replicated" in \ - node2.query_and_get_error(f"CREATE {entity.keyword} {entity.name} {entity.options}") + assert ( + f"cannot insert because {entity.keyword.lower()} `{entity.name}{entity.options}` already exists in replicated" + in node2.query_and_get_error( + f"CREATE {entity.keyword} {entity.name} {entity.options}" + ) + ) node1.query(f"DROP {entity.keyword} {entity.name} {entity.options}") @@ -54,20 +64,27 @@ def test_create_and_delete_replicated(started_cluster, entity): @pytest.mark.parametrize("entity", entities, ids=get_entity_id) def test_create_replicated_on_cluster(started_cluster, entity): - assert f"cannot insert because {entity.keyword.lower()} `{entity.name}{entity.options}` already exists in replicated" in \ - node1.query_and_get_error(f"CREATE {entity.keyword} {entity.name} ON CLUSTER default {entity.options}") + assert ( + f"cannot insert because {entity.keyword.lower()} `{entity.name}{entity.options}` already exists in replicated" + in node1.query_and_get_error( + f"CREATE {entity.keyword} {entity.name} ON CLUSTER default {entity.options}" + ) + ) node1.query(f"DROP {entity.keyword} {entity.name} {entity.options}") @pytest.mark.parametrize("entity", entities, ids=get_entity_id) def 
test_create_replicated_if_not_exists_on_cluster(started_cluster, entity): - node1.query(f"CREATE {entity.keyword} IF NOT EXISTS {entity.name} ON CLUSTER default {entity.options}") + node1.query( + f"CREATE {entity.keyword} IF NOT EXISTS {entity.name} ON CLUSTER default {entity.options}" + ) node1.query(f"DROP {entity.keyword} {entity.name} {entity.options}") @pytest.mark.parametrize("entity", entities, ids=get_entity_id) def test_rename_replicated(started_cluster, entity): node1.query(f"CREATE {entity.keyword} {entity.name} {entity.options}") - node2.query(f"ALTER {entity.keyword} {entity.name} {entity.options} RENAME TO {entity.name}2") + node2.query( + f"ALTER {entity.keyword} {entity.name} {entity.options} RENAME TO {entity.name}2" + ) node1.query(f"DROP {entity.keyword} {entity.name}2 {entity.options}") - diff --git a/tests/integration/test_replicating_constants/test.py b/tests/integration/test_replicating_constants/test.py index 13a605f2650..82cc5e757f1 100644 --- a/tests/integration/test_replicating_constants/test.py +++ b/tests/integration/test_replicating_constants/test.py @@ -4,9 +4,14 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) -node2 = cluster.add_instance('node2', with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.14', - with_installed_binary=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) +node2 = cluster.add_instance( + "node2", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="19.1.14", + with_installed_binary=True, +) @pytest.fixture(scope="module") @@ -20,4 +25,9 @@ def start_cluster(): def test_different_versions(start_cluster): - assert node1.query("SELECT uniqExact(x) FROM (SELECT version() as x from remote('node{1,2}', system.one))") == "2\n" + assert ( + node1.query( + "SELECT uniqExact(x) FROM (SELECT version() as x from remote('node{1,2}', system.one))" + ) + == "2\n" + ) diff --git a/tests/integration/test_replication_credentials/test.py b/tests/integration/test_replication_credentials/test.py index 359a8fc4b0d..e5313cb6bd4 100644 --- a/tests/integration/test_replication_credentials/test.py +++ b/tests/integration/test_replication_credentials/test.py @@ -7,18 +7,27 @@ from helpers.cluster import ClickHouseCluster def _fill_nodes(nodes, shard): for node in nodes: node.query( - ''' + """ CREATE DATABASE test; CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}', date, id, 8192); - '''.format(shard=shard, replica=node.name)) + """.format( + shard=shard, replica=node.name + ) + ) cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/credentials1.xml'], - with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml', 'configs/credentials1.xml'], - with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml", "configs/credentials1.xml"], + with_zookeeper=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/remote_servers.xml", "configs/credentials1.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -38,20 +47,26 @@ def test_same_credentials(same_credentials_cluster): node1.query("insert into test_table values ('2017-06-16', 111, 0)") time.sleep(1) - assert node1.query("SELECT id FROM test_table order by id") == '111\n' - assert 
node2.query("SELECT id FROM test_table order by id") == '111\n' + assert node1.query("SELECT id FROM test_table order by id") == "111\n" + assert node2.query("SELECT id FROM test_table order by id") == "111\n" node2.query("insert into test_table values ('2017-06-17', 222, 1)") time.sleep(1) - assert node1.query("SELECT id FROM test_table order by id") == '111\n222\n' - assert node2.query("SELECT id FROM test_table order by id") == '111\n222\n' + assert node1.query("SELECT id FROM test_table order by id") == "111\n222\n" + assert node2.query("SELECT id FROM test_table order by id") == "111\n222\n" -node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml', 'configs/no_credentials.xml'], - with_zookeeper=True) -node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/no_credentials.xml'], - with_zookeeper=True) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/remote_servers.xml", "configs/no_credentials.xml"], + with_zookeeper=True, +) +node4 = cluster.add_instance( + "node4", + main_configs=["configs/remote_servers.xml", "configs/no_credentials.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -71,20 +86,26 @@ def test_no_credentials(no_credentials_cluster): node3.query("insert into test_table values ('2017-06-18', 111, 0)") time.sleep(1) - assert node3.query("SELECT id FROM test_table order by id") == '111\n' - assert node4.query("SELECT id FROM test_table order by id") == '111\n' + assert node3.query("SELECT id FROM test_table order by id") == "111\n" + assert node4.query("SELECT id FROM test_table order by id") == "111\n" node4.query("insert into test_table values ('2017-06-19', 222, 1)") time.sleep(1) - assert node3.query("SELECT id FROM test_table order by id") == '111\n222\n' - assert node4.query("SELECT id FROM test_table order by id") == '111\n222\n' + assert node3.query("SELECT id FROM test_table order by id") == "111\n222\n" + assert node4.query("SELECT id FROM test_table order by id") == "111\n222\n" -node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml', 'configs/credentials1.xml'], - with_zookeeper=True) -node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml', 'configs/credentials2.xml'], - with_zookeeper=True) +node5 = cluster.add_instance( + "node5", + main_configs=["configs/remote_servers.xml", "configs/credentials1.xml"], + with_zookeeper=True, +) +node6 = cluster.add_instance( + "node6", + main_configs=["configs/remote_servers.xml", "configs/credentials2.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -104,14 +125,14 @@ def test_different_credentials(different_credentials_cluster): node5.query("insert into test_table values ('2017-06-20', 111, 0)") time.sleep(1) - assert node5.query("SELECT id FROM test_table order by id") == '111\n' - assert node6.query("SELECT id FROM test_table order by id") == '' + assert node5.query("SELECT id FROM test_table order by id") == "111\n" + assert node6.query("SELECT id FROM test_table order by id") == "" node6.query("insert into test_table values ('2017-06-21', 222, 1)") time.sleep(1) - assert node5.query("SELECT id FROM test_table order by id") == '111\n' - assert node6.query("SELECT id FROM test_table order by id") == '222\n' + assert node5.query("SELECT id FROM test_table order by id") == "111\n" + assert node6.query("SELECT id FROM test_table order by id") == "222\n" add_old = """ @@ -137,13 +158,19 @@ def test_different_credentials(different_credentials_cluster): 
node5.query("INSERT INTO test_table values('2017-06-21', 333, 1)") node6.query("SYSTEM SYNC REPLICA test_table", timeout=10) - assert node6.query("SELECT id FROM test_table order by id") == '111\n222\n333\n' + assert node6.query("SELECT id FROM test_table order by id") == "111\n222\n333\n" -node7 = cluster.add_instance('node7', main_configs=['configs/remote_servers.xml', 'configs/credentials1.xml'], - with_zookeeper=True) -node8 = cluster.add_instance('node8', main_configs=['configs/remote_servers.xml', 'configs/no_credentials.xml'], - with_zookeeper=True) +node7 = cluster.add_instance( + "node7", + main_configs=["configs/remote_servers.xml", "configs/credentials1.xml"], + with_zookeeper=True, +) +node8 = cluster.add_instance( + "node8", + main_configs=["configs/remote_servers.xml", "configs/no_credentials.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -163,14 +190,14 @@ def test_credentials_and_no_credentials(credentials_and_no_credentials_cluster): node7.query("insert into test_table values ('2017-06-21', 111, 0)") time.sleep(1) - assert node7.query("SELECT id FROM test_table order by id") == '111\n' - assert node8.query("SELECT id FROM test_table order by id") == '' + assert node7.query("SELECT id FROM test_table order by id") == "111\n" + assert node8.query("SELECT id FROM test_table order by id") == "" node8.query("insert into test_table values ('2017-06-22', 222, 1)") time.sleep(1) - assert node7.query("SELECT id FROM test_table order by id") == '111\n' - assert node8.query("SELECT id FROM test_table order by id") == '222\n' + assert node7.query("SELECT id FROM test_table order by id") == "111\n" + assert node8.query("SELECT id FROM test_table order by id") == "222\n" allow_empty = """ @@ -184,10 +211,11 @@ def test_credentials_and_no_credentials(credentials_and_no_credentials_cluster): """ # change state: Flip node7 to mixed auth/non-auth (allow node8) - node7.replace_config("/etc/clickhouse-server/config.d/credentials1.xml", - allow_empty) + node7.replace_config( + "/etc/clickhouse-server/config.d/credentials1.xml", allow_empty + ) node7.query("SYSTEM RELOAD CONFIG") node7.query("insert into test_table values ('2017-06-22', 333, 1)") node8.query("SYSTEM SYNC REPLICA test_table", timeout=10) - assert node8.query("SELECT id FROM test_table order by id") == '111\n222\n333\n' + assert node8.query("SELECT id FROM test_table order by id") == "111\n222\n333\n" diff --git a/tests/integration/test_replication_without_zookeeper/test.py b/tests/integration/test_replication_without_zookeeper/test.py index 26347b47d36..1b2bb6ef517 100644 --- a/tests/integration/test_replication_without_zookeeper/test.py +++ b/tests/integration/test_replication_without_zookeeper/test.py @@ -4,7 +4,12 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True, stay_alive=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, + stay_alive=True, +) @pytest.fixture(scope="module") @@ -13,11 +18,11 @@ def start_cluster(): cluster.start() node1.query( - ''' + """ CREATE DATABASE test; CREATE TABLE test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/replicated', 'node1') ORDER BY id PARTITION BY toYYYYMM(date); - ''' + """ ) yield cluster @@ -34,18 +39,28 @@ def drop_zk(zk): def test_startup_without_zookeeper(start_cluster): - node1.query("INSERT INTO 
test_table VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)") + node1.query( + "INSERT INTO test_table VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)" + ) assert node1.query("SELECT COUNT(*) from test_table") == "3\n" - assert node1.query("SELECT is_readonly from system.replicas where table='test_table'") == "0\n" + assert ( + node1.query("SELECT is_readonly from system.replicas where table='test_table'") + == "0\n" + ) cluster.run_kazoo_commands_with_retries(drop_zk) time.sleep(5) assert node1.query("SELECT COUNT(*) from test_table") == "3\n" with pytest.raises(Exception): - node1.query("INSERT INTO test_table VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)") + node1.query( + "INSERT INTO test_table VALUES ('2018-10-01', 1), ('2018-10-02', 2), ('2018-10-03', 3)" + ) node1.restart_clickhouse() assert node1.query("SELECT COUNT(*) from test_table") == "3\n" - assert node1.query("SELECT is_readonly from system.replicas where table='test_table'") == "1\n" + assert ( + node1.query("SELECT is_readonly from system.replicas where table='test_table'") + == "1\n" + ) diff --git a/tests/integration/test_restart_server/test.py b/tests/integration/test_restart_server/test.py index 47797f7c4a5..180f8240d01 100755 --- a/tests/integration/test_restart_server/test.py +++ b/tests/integration/test_restart_server/test.py @@ -2,7 +2,8 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True) +node = cluster.add_instance("node", stay_alive=True) + @pytest.fixture(scope="module") def start_cluster(): @@ -19,4 +20,3 @@ def test_drop_memory_database(start_cluster): node.query("DROP DATABASE test") node.restart_clickhouse(kill=True) assert node.query("SHOW DATABASES LIKE 'test'").strip() == "" - diff --git a/tests/integration/test_restore_replica/test.py b/tests/integration/test_restore_replica/test.py index 4013b5b474c..0b11cdf7512 100644 --- a/tests/integration/test_restore_replica/test.py +++ b/tests/integration/test_restore_replica/test.py @@ -5,23 +5,29 @@ from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseKiller from helpers.test_tools import assert_eq_with_retry + def fill_nodes(nodes): for node in nodes: node.query( - ''' + """ CREATE TABLE test(n UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/', '{replica}') ORDER BY n PARTITION BY n % 10; - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) + cluster = ClickHouseCluster(__file__) -configs =["configs/remote_servers.xml"] +configs = ["configs/remote_servers.xml"] -node_1 = cluster.add_instance('replica1', with_zookeeper=True, main_configs=configs) -node_2 = cluster.add_instance('replica2', with_zookeeper=True, main_configs=configs) -node_3 = cluster.add_instance('replica3', with_zookeeper=True, main_configs=configs) +node_1 = cluster.add_instance("replica1", with_zookeeper=True, main_configs=configs) +node_2 = cluster.add_instance("replica2", with_zookeeper=True, main_configs=configs) +node_3 = cluster.add_instance("replica3", with_zookeeper=True, main_configs=configs) nodes = [node_1, node_2, node_3] + def fill_table(): node_1.query("TRUNCATE TABLE test") @@ -38,6 +44,7 @@ def fill_table(): node_1.query("INSERT INTO test SELECT number + 800 FROM numbers(200)") check_data(499500, 1000) + @pytest.fixture(scope="module") def start_cluster(): try: @@ -51,26 +58,30 @@ def start_cluster(): finally: cluster.shutdown() + def check_data(_sum: int, count: 
int) -> None: res = "{}\t{}\n".format(_sum, count) assert_eq_with_retry(node_1, "SELECT sum(n), count() FROM test", res) assert_eq_with_retry(node_2, "SELECT sum(n), count() FROM test", res) assert_eq_with_retry(node_3, "SELECT sum(n), count() FROM test", res) + def check_after_restoration(): check_data(1999000, 2000) for node in nodes: node.query_and_get_error("SYSTEM RESTORE REPLICA test") + def test_restore_replica_invalid_tables(start_cluster): print("Checking the invocation on non-existent and non-replicated tables") node_1.query_and_get_error("SYSTEM RESTORE REPLICA i_dont_exist_42") node_1.query_and_get_error("SYSTEM RESTORE REPLICA no_db.i_dont_exist_42") node_1.query_and_get_error("SYSTEM RESTORE REPLICA system.numbers") + def test_restore_replica_sequential(start_cluster): - zk = cluster.get_kazoo_client('zoo1') + zk = cluster.get_kazoo_client("zoo1") fill_table() print("Deleting root ZK path metadata") @@ -78,7 +89,9 @@ def test_restore_replica_sequential(start_cluster): assert zk.exists("/clickhouse/tables/test") is None node_1.query("SYSTEM RESTART REPLICA test") - node_1.query_and_get_error("INSERT INTO test SELECT number AS num FROM numbers(1000,2000) WHERE num % 2 = 0") + node_1.query_and_get_error( + "INSERT INTO test SELECT number AS num FROM numbers(1000,2000) WHERE num % 2 = 0" + ) print("Restoring replica1") @@ -101,8 +114,9 @@ def test_restore_replica_sequential(start_cluster): check_after_restoration() + def test_restore_replica_parallel(start_cluster): - zk = cluster.get_kazoo_client('zoo1') + zk = cluster.get_kazoo_client("zoo1") fill_table() print("Deleting root ZK path metadata") @@ -110,7 +124,9 @@ def test_restore_replica_parallel(start_cluster): assert zk.exists("/clickhouse/tables/test") is None node_1.query("SYSTEM RESTART REPLICA test") - node_1.query_and_get_error("INSERT INTO test SELECT number AS num FROM numbers(1000,2000) WHERE num % 2 = 0") + node_1.query_and_get_error( + "INSERT INTO test SELECT number AS num FROM numbers(1000,2000) WHERE num % 2 = 0" + ) print("Restoring replicas in parallel") @@ -126,8 +142,9 @@ def test_restore_replica_parallel(start_cluster): check_after_restoration() + def test_restore_replica_alive_replicas(start_cluster): - zk = cluster.get_kazoo_client('zoo1') + zk = cluster.get_kazoo_client("zoo1") fill_table() print("Deleting replica2 path, trying to restore replica1") diff --git a/tests/integration/test_rocksdb_options/test.py b/tests/integration/test_rocksdb_options/test.py index e8542749d8d..a00d3528eed 100644 --- a/tests/integration/test_rocksdb_options/test.py +++ b/tests/integration/test_rocksdb_options/test.py @@ -9,10 +9,12 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/rocksdb.xml'], stay_alive=True) +node = cluster.add_instance( + "node", main_configs=["configs/rocksdb.xml"], stay_alive=True +) -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def start_cluster(): try: cluster.start() @@ -20,66 +22,138 @@ def start_cluster(): finally: cluster.shutdown() + def test_valid_options(start_cluster): - node.query(""" + node.query( + """ CREATE TABLE test (key UInt64, value String) Engine=EmbeddedRocksDB PRIMARY KEY(key); DROP TABLE test; - """) + """ + ) + def test_invalid_options(start_cluster): - node.exec_in_container(['bash', '-c', "sed -i 's/max_background_jobs/no_such_option/g' /etc/clickhouse-server/config.d/rocksdb.xml"]) + node.exec_in_container( + [ + "bash", + "-c", + "sed -i 
's/max_background_jobs/no_such_option/g' /etc/clickhouse-server/config.d/rocksdb.xml", + ] + ) node.restart_clickhouse() with pytest.raises(QueryRuntimeException): - node.query(""" + node.query( + """ CREATE TABLE test (key UInt64, value String) Engine=EmbeddedRocksDB PRIMARY KEY(key); - """) - node.exec_in_container(['bash', '-c', "sed -i 's/no_such_option/max_background_jobs/g' /etc/clickhouse-server/config.d/rocksdb.xml"]) + """ + ) + node.exec_in_container( + [ + "bash", + "-c", + "sed -i 's/no_such_option/max_background_jobs/g' /etc/clickhouse-server/config.d/rocksdb.xml", + ] + ) node.restart_clickhouse() + def test_table_valid_options(start_cluster): - node.query(""" + node.query( + """ CREATE TABLE test (key UInt64, value String) Engine=EmbeddedRocksDB PRIMARY KEY(key); DROP TABLE test; - """) + """ + ) + def test_table_invalid_options(start_cluster): - node.exec_in_container(['bash', '-c', "sed -i 's/max_open_files/no_such_table_option/g' /etc/clickhouse-server/config.d/rocksdb.xml"]) + node.exec_in_container( + [ + "bash", + "-c", + "sed -i 's/max_open_files/no_such_table_option/g' /etc/clickhouse-server/config.d/rocksdb.xml", + ] + ) node.restart_clickhouse() with pytest.raises(QueryRuntimeException): - node.query(""" + node.query( + """ CREATE TABLE test (key UInt64, value String) Engine=EmbeddedRocksDB PRIMARY KEY(key); - """) - node.exec_in_container(['bash', '-c', "sed -i 's/no_such_table_option/max_open_files/g' /etc/clickhouse-server/config.d/rocksdb.xml"]) + """ + ) + node.exec_in_container( + [ + "bash", + "-c", + "sed -i 's/no_such_table_option/max_open_files/g' /etc/clickhouse-server/config.d/rocksdb.xml", + ] + ) node.restart_clickhouse() + def test_valid_column_family_options(start_cluster): - node.query(""" + node.query( + """ CREATE TABLE test (key UInt64, value String) Engine=EmbeddedRocksDB PRIMARY KEY(key); DROP TABLE test; - """) + """ + ) + def test_invalid_column_family_options(start_cluster): - node.exec_in_container(['bash', '-c', "sed -i 's/num_levels/no_such_column_family_option/g' /etc/clickhouse-server/config.d/rocksdb.xml"]) + node.exec_in_container( + [ + "bash", + "-c", + "sed -i 's/num_levels/no_such_column_family_option/g' /etc/clickhouse-server/config.d/rocksdb.xml", + ] + ) node.restart_clickhouse() with pytest.raises(QueryRuntimeException): - node.query(""" + node.query( + """ CREATE TABLE test (key UInt64, value String) Engine=EmbeddedRocksDB PRIMARY KEY(key); - """) - node.exec_in_container(['bash', '-c', "sed -i 's/no_such_column_family_option/num_levels/g' /etc/clickhouse-server/config.d/rocksdb.xml"]) + """ + ) + node.exec_in_container( + [ + "bash", + "-c", + "sed -i 's/no_such_column_family_option/num_levels/g' /etc/clickhouse-server/config.d/rocksdb.xml", + ] + ) node.restart_clickhouse() + def test_table_valid_column_family_options(start_cluster): - node.query(""" + node.query( + """ CREATE TABLE test (key UInt64, value String) Engine=EmbeddedRocksDB PRIMARY KEY(key); DROP TABLE test; - """) + """ + ) + def test_table_invalid_column_family_options(start_cluster): - node.exec_in_container(['bash', '-c', "sed -i 's/max_bytes_for_level_base/no_such_table_column_family_option/g' /etc/clickhouse-server/config.d/rocksdb.xml"]) + node.exec_in_container( + [ + "bash", + "-c", + "sed -i 's/max_bytes_for_level_base/no_such_table_column_family_option/g' /etc/clickhouse-server/config.d/rocksdb.xml", + ] + ) node.restart_clickhouse() with pytest.raises(QueryRuntimeException): - node.query(""" + node.query( + """ CREATE TABLE test (key UInt64, value 
String) Engine=EmbeddedRocksDB PRIMARY KEY(key); - """) - node.exec_in_container(['bash', '-c', "sed -i 's/no_such_table_column_family_option/max_bytes_for_level_base/g' /etc/clickhouse-server/config.d/rocksdb.xml"]) + """ + ) + node.exec_in_container( + [ + "bash", + "-c", + "sed -i 's/no_such_table_column_family_option/max_bytes_for_level_base/g' /etc/clickhouse-server/config.d/rocksdb.xml", + ] + ) node.restart_clickhouse() diff --git a/tests/integration/test_role/test.py b/tests/integration/test_role/test.py index 7600bc73b16..44ce0e13f2f 100644 --- a/tests/integration/test_role/test.py +++ b/tests/integration/test_role/test.py @@ -3,14 +3,16 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance("instance") session_id_counter = 0 + + def new_session_id(): global session_id_counter session_id_counter += 1 - return 'session #' + str(session_id_counter) + return "session #" + str(session_id_counter) @pytest.fixture(scope="module", autouse=True) @@ -18,7 +20,9 @@ def started_cluster(): try: cluster.start() - instance.query("CREATE TABLE test_table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()") + instance.query( + "CREATE TABLE test_table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()" + ) instance.query("INSERT INTO test_table VALUES (1,5), (2,10)") yield cluster @@ -38,69 +42,91 @@ def cleanup_after_test(): def test_create_role(): instance.query("CREATE USER A") - instance.query('CREATE ROLE R1') + instance.query("CREATE ROLE R1") - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM test_table", user="A" + ) - instance.query('GRANT SELECT ON test_table TO R1') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + instance.query("GRANT SELECT ON test_table TO R1") + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM test_table", user="A" + ) - instance.query('GRANT R1 TO A') - assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" + instance.query("GRANT R1 TO A") + assert instance.query("SELECT * FROM test_table", user="A") == "1\t5\n2\t10\n" - instance.query('REVOKE R1 FROM A') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + instance.query("REVOKE R1 FROM A") + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM test_table", user="A" + ) def test_grant_role_to_role(): instance.query("CREATE USER A") - instance.query('CREATE ROLE R1') - instance.query('CREATE ROLE R2') + instance.query("CREATE ROLE R1") + instance.query("CREATE ROLE R2") - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM test_table", user="A" + ) - instance.query('GRANT R1 TO A') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + instance.query("GRANT R1 TO A") + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM test_table", user="A" + ) - instance.query('GRANT R2 TO R1') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + instance.query("GRANT R2 TO R1") + assert "Not 
enough privileges" in instance.query_and_get_error( + "SELECT * FROM test_table", user="A" + ) - instance.query('GRANT SELECT ON test_table TO R2') - assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" + instance.query("GRANT SELECT ON test_table TO R2") + assert instance.query("SELECT * FROM test_table", user="A") == "1\t5\n2\t10\n" def test_combine_privileges(): instance.query("CREATE USER A ") - instance.query('CREATE ROLE R1') - instance.query('CREATE ROLE R2') + instance.query("CREATE ROLE R1") + instance.query("CREATE ROLE R2") - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM test_table", user="A" + ) - instance.query('GRANT R1 TO A') - instance.query('GRANT SELECT(x) ON test_table TO R1') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='A') - assert instance.query("SELECT x FROM test_table", user='A') == "1\n2\n" + instance.query("GRANT R1 TO A") + instance.query("GRANT SELECT(x) ON test_table TO R1") + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM test_table", user="A" + ) + assert instance.query("SELECT x FROM test_table", user="A") == "1\n2\n" - instance.query('GRANT SELECT(y) ON test_table TO R2') - instance.query('GRANT R2 TO A') - assert instance.query("SELECT * FROM test_table", user='A') == "1\t5\n2\t10\n" + instance.query("GRANT SELECT(y) ON test_table TO R2") + instance.query("GRANT R2 TO A") + assert instance.query("SELECT * FROM test_table", user="A") == "1\t5\n2\t10\n" def test_admin_option(): instance.query("CREATE USER A") instance.query("CREATE USER B") - instance.query('CREATE ROLE R1') + instance.query("CREATE ROLE R1") - instance.query('GRANT SELECT ON test_table TO R1') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='B') + instance.query("GRANT SELECT ON test_table TO R1") + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM test_table", user="B" + ) - instance.query('GRANT R1 TO A') - assert "Not enough privileges" in instance.query_and_get_error("GRANT R1 TO B", user='A') - assert "Not enough privileges" in instance.query_and_get_error("SELECT * FROM test_table", user='B') + instance.query("GRANT R1 TO A") + assert "Not enough privileges" in instance.query_and_get_error( + "GRANT R1 TO B", user="A" + ) + assert "Not enough privileges" in instance.query_and_get_error( + "SELECT * FROM test_table", user="B" + ) - instance.query('GRANT R1 TO A WITH ADMIN OPTION') - instance.query("GRANT R1 TO B", user='A') - assert instance.query("SELECT * FROM test_table", user='B') == "1\t5\n2\t10\n" + instance.query("GRANT R1 TO A WITH ADMIN OPTION") + instance.query("GRANT R1 TO B", user="A") + assert instance.query("SELECT * FROM test_table", user="B") == "1\t5\n2\t10\n" def test_revoke_requires_admin_option(): @@ -111,37 +137,37 @@ def test_revoke_requires_admin_option(): assert instance.query("SHOW GRANTS FOR B") == "GRANT R1 TO B\n" expected_error = "necessary to have the role R1 granted" - assert expected_error in instance.query_and_get_error("REVOKE R1 FROM B", user='A') + assert expected_error in instance.query_and_get_error("REVOKE R1 FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "GRANT R1 TO B\n" instance.query("GRANT R1 TO A") expected_error = "granted, but without ADMIN option" - assert expected_error in 
instance.query_and_get_error("REVOKE R1 FROM B", user='A') + assert expected_error in instance.query_and_get_error("REVOKE R1 FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "GRANT R1 TO B\n" instance.query("GRANT R1 TO A WITH ADMIN OPTION") - instance.query("REVOKE R1 FROM B", user='A') + instance.query("REVOKE R1 FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" instance.query("GRANT R1 TO B") assert instance.query("SHOW GRANTS FOR B") == "GRANT R1 TO B\n" - instance.query("REVOKE ALL FROM B", user='A') + instance.query("REVOKE ALL FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" instance.query("GRANT R1, R2 TO B") assert instance.query("SHOW GRANTS FOR B") == "GRANT R1, R2 TO B\n" expected_error = "necessary to have the role R2 granted" - assert expected_error in instance.query_and_get_error("REVOKE ALL FROM B", user='A') + assert expected_error in instance.query_and_get_error("REVOKE ALL FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "GRANT R1, R2 TO B\n" - instance.query("REVOKE ALL EXCEPT R2 FROM B", user='A') + instance.query("REVOKE ALL EXCEPT R2 FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "GRANT R2 TO B\n" instance.query("GRANT R2 TO A WITH ADMIN OPTION") - instance.query("REVOKE ALL FROM B", user='A') + instance.query("REVOKE ALL FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" instance.query("GRANT R1, R2 TO B") assert instance.query("SHOW GRANTS FOR B") == "GRANT R1, R2 TO B\n" - instance.query("REVOKE ALL FROM B", user='A') + instance.query("REVOKE ALL FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" @@ -151,19 +177,29 @@ def test_set_role(): instance.query("GRANT R1, R2 TO A") session_id = new_session_id() - assert instance.http_query('SHOW CURRENT ROLES', user='A', params={'session_id':session_id}) == TSV([["R1", 0, 1], ["R2", 0, 1]]) + assert instance.http_query( + "SHOW CURRENT ROLES", user="A", params={"session_id": session_id} + ) == TSV([["R1", 0, 1], ["R2", 0, 1]]) - instance.http_query('SET ROLE R1', user='A', params={'session_id':session_id}) - assert instance.http_query('SHOW CURRENT ROLES', user='A', params={'session_id':session_id}) == TSV([["R1", 0, 1]]) + instance.http_query("SET ROLE R1", user="A", params={"session_id": session_id}) + assert instance.http_query( + "SHOW CURRENT ROLES", user="A", params={"session_id": session_id} + ) == TSV([["R1", 0, 1]]) - instance.http_query('SET ROLE R2', user='A', params={'session_id':session_id}) - assert instance.http_query('SHOW CURRENT ROLES', user='A', params={'session_id':session_id}) == TSV([["R2", 0, 1]]) + instance.http_query("SET ROLE R2", user="A", params={"session_id": session_id}) + assert instance.http_query( + "SHOW CURRENT ROLES", user="A", params={"session_id": session_id} + ) == TSV([["R2", 0, 1]]) - instance.http_query('SET ROLE NONE', user='A', params={'session_id':session_id}) - assert instance.http_query('SHOW CURRENT ROLES', user='A', params={'session_id':session_id}) == TSV([]) + instance.http_query("SET ROLE NONE", user="A", params={"session_id": session_id}) + assert instance.http_query( + "SHOW CURRENT ROLES", user="A", params={"session_id": session_id} + ) == TSV([]) - instance.http_query('SET ROLE DEFAULT', user='A', params={'session_id':session_id}) - assert instance.http_query('SHOW CURRENT ROLES', user='A', params={'session_id':session_id}) == TSV([["R1", 0, 1], ["R2", 0, 1]]) + instance.http_query("SET ROLE DEFAULT", user="A", params={"session_id": session_id}) + 
assert instance.http_query( + "SHOW CURRENT ROLES", user="A", params={"session_id": session_id} + ) == TSV([["R1", 0, 1], ["R2", 0, 1]]) def test_changing_default_roles_affects_new_sessions_only(): @@ -172,105 +208,201 @@ def test_changing_default_roles_affects_new_sessions_only(): instance.query("GRANT R1, R2 TO A") session_id = new_session_id() - assert instance.http_query('SHOW CURRENT ROLES', user='A', params={'session_id':session_id}) == TSV([["R1", 0, 1], ["R2", 0, 1]]) - instance.query('SET DEFAULT ROLE R2 TO A') - assert instance.http_query('SHOW CURRENT ROLES', user='A', params={'session_id':session_id}) == TSV([["R1", 0, 0], ["R2", 0, 1]]) + assert instance.http_query( + "SHOW CURRENT ROLES", user="A", params={"session_id": session_id} + ) == TSV([["R1", 0, 1], ["R2", 0, 1]]) + instance.query("SET DEFAULT ROLE R2 TO A") + assert instance.http_query( + "SHOW CURRENT ROLES", user="A", params={"session_id": session_id} + ) == TSV([["R1", 0, 0], ["R2", 0, 1]]) other_session_id = new_session_id() - assert instance.http_query('SHOW CURRENT ROLES', user='A', params={'session_id':other_session_id}) == TSV([["R2", 0, 1]]) + assert instance.http_query( + "SHOW CURRENT ROLES", user="A", params={"session_id": other_session_id} + ) == TSV([["R2", 0, 1]]) def test_introspection(): instance.query("CREATE USER A") instance.query("CREATE USER B") - instance.query('CREATE ROLE R1') - instance.query('CREATE ROLE R2') - instance.query('GRANT R1 TO A') - instance.query('GRANT R2 TO B WITH ADMIN OPTION') - instance.query('GRANT SELECT ON test.table TO A, R2') - instance.query('GRANT CREATE ON *.* TO B WITH GRANT OPTION') - instance.query('REVOKE SELECT(x) ON test.table FROM R2') + instance.query("CREATE ROLE R1") + instance.query("CREATE ROLE R2") + instance.query("GRANT R1 TO A") + instance.query("GRANT R2 TO B WITH ADMIN OPTION") + instance.query("GRANT SELECT ON test.table TO A, R2") + instance.query("GRANT CREATE ON *.* TO B WITH GRANT OPTION") + instance.query("REVOKE SELECT(x) ON test.table FROM R2") assert instance.query("SHOW ROLES") == TSV(["R1", "R2"]) assert instance.query("SHOW CREATE ROLE R1") == TSV(["CREATE ROLE R1"]) assert instance.query("SHOW CREATE ROLE R2") == TSV(["CREATE ROLE R2"]) - assert instance.query("SHOW CREATE ROLES R1, R2") == TSV(["CREATE ROLE R1", "CREATE ROLE R2"]) - assert instance.query("SHOW CREATE ROLES") == TSV(["CREATE ROLE R1", "CREATE ROLE R2"]) + assert instance.query("SHOW CREATE ROLES R1, R2") == TSV( + ["CREATE ROLE R1", "CREATE ROLE R2"] + ) + assert instance.query("SHOW CREATE ROLES") == TSV( + ["CREATE ROLE R1", "CREATE ROLE R2"] + ) - assert instance.query("SHOW GRANTS FOR A") == TSV(["GRANT SELECT ON test.table TO A", "GRANT R1 TO A"]) + assert instance.query("SHOW GRANTS FOR A") == TSV( + ["GRANT SELECT ON test.table TO A", "GRANT R1 TO A"] + ) assert instance.query("SHOW GRANTS FOR B") == TSV( - ["GRANT CREATE ON *.* TO B WITH GRANT OPTION", "GRANT R2 TO B WITH ADMIN OPTION"]) + [ + "GRANT CREATE ON *.* TO B WITH GRANT OPTION", + "GRANT R2 TO B WITH ADMIN OPTION", + ] + ) assert instance.query("SHOW GRANTS FOR R1") == "" assert instance.query("SHOW GRANTS FOR R2") == TSV( - ["GRANT SELECT ON test.table TO R2", "REVOKE SELECT(x) ON test.table FROM R2"]) + ["GRANT SELECT ON test.table TO R2", "REVOKE SELECT(x) ON test.table FROM R2"] + ) - assert instance.query("SHOW GRANTS", user='A') == TSV(["GRANT SELECT ON test.table TO A", "GRANT R1 TO A"]) - assert instance.query("SHOW GRANTS", user='B') == TSV( - ["GRANT CREATE ON *.* TO B WITH GRANT OPTION", 
"GRANT R2 TO B WITH ADMIN OPTION"]) - assert instance.query("SHOW CURRENT ROLES", user='A') == TSV([["R1", 0, 1]]) - assert instance.query("SHOW CURRENT ROLES", user='B') == TSV([["R2", 1, 1]]) - assert instance.query("SHOW ENABLED ROLES", user='A') == TSV([["R1", 0, 1, 1]]) - assert instance.query("SHOW ENABLED ROLES", user='B') == TSV([["R2", 1, 1, 1]]) + assert instance.query("SHOW GRANTS", user="A") == TSV( + ["GRANT SELECT ON test.table TO A", "GRANT R1 TO A"] + ) + assert instance.query("SHOW GRANTS", user="B") == TSV( + [ + "GRANT CREATE ON *.* TO B WITH GRANT OPTION", + "GRANT R2 TO B WITH ADMIN OPTION", + ] + ) + assert instance.query("SHOW CURRENT ROLES", user="A") == TSV([["R1", 0, 1]]) + assert instance.query("SHOW CURRENT ROLES", user="B") == TSV([["R2", 1, 1]]) + assert instance.query("SHOW ENABLED ROLES", user="A") == TSV([["R1", 0, 1, 1]]) + assert instance.query("SHOW ENABLED ROLES", user="B") == TSV([["R2", 1, 1, 1]]) - expected_access1 = "CREATE ROLE R1\n" \ - "CREATE ROLE R2\n" + expected_access1 = "CREATE ROLE R1\n" "CREATE ROLE R2\n" expected_access2 = "GRANT R1 TO A\n" expected_access3 = "GRANT R2 TO B WITH ADMIN OPTION" assert expected_access1 in instance.query("SHOW ACCESS") assert expected_access2 in instance.query("SHOW ACCESS") assert expected_access3 in instance.query("SHOW ACCESS") - assert instance.query("SELECT name, storage from system.roles WHERE name IN ('R1', 'R2') ORDER BY name") == \ - TSV([["R1", "local directory"], - ["R2", "local directory"]]) + assert instance.query( + "SELECT name, storage from system.roles WHERE name IN ('R1', 'R2') ORDER BY name" + ) == TSV([["R1", "local directory"], ["R2", "local directory"]]) assert instance.query( - "SELECT * from system.grants WHERE user_name IN ('A', 'B') OR role_name IN ('R1', 'R2') ORDER BY user_name, role_name, access_type, database, table, column, is_partial_revoke, grant_option") == \ - TSV([["A", "\\N", "SELECT", "test", "table", "\\N", 0, 0], - ["B", "\\N", "CREATE", "\\N", "\\N", "\\N", 0, 1], - ["\\N", "R2", "SELECT", "test", "table", "x", 1, 0], - ["\\N", "R2", "SELECT", "test", "table", "\\N", 0, 0]]) + "SELECT * from system.grants WHERE user_name IN ('A', 'B') OR role_name IN ('R1', 'R2') ORDER BY user_name, role_name, access_type, database, table, column, is_partial_revoke, grant_option" + ) == TSV( + [ + ["A", "\\N", "SELECT", "test", "table", "\\N", 0, 0], + ["B", "\\N", "CREATE", "\\N", "\\N", "\\N", 0, 1], + ["\\N", "R2", "SELECT", "test", "table", "x", 1, 0], + ["\\N", "R2", "SELECT", "test", "table", "\\N", 0, 0], + ] + ) assert instance.query( - "SELECT * from system.role_grants WHERE user_name IN ('A', 'B') OR role_name IN ('R1', 'R2') ORDER BY user_name, role_name, granted_role_name") == \ - TSV([["A", "\\N", "R1", 1, 0], - ["B", "\\N", "R2", 1, 1]]) + "SELECT * from system.role_grants WHERE user_name IN ('A', 'B') OR role_name IN ('R1', 'R2') ORDER BY user_name, role_name, granted_role_name" + ) == TSV([["A", "\\N", "R1", 1, 0], ["B", "\\N", "R2", 1, 1]]) - assert instance.query("SELECT * from system.current_roles ORDER BY role_name", user='A') == TSV([["R1", 0, 1]]) - assert instance.query("SELECT * from system.current_roles ORDER BY role_name", user='B') == TSV([["R2", 1, 1]]) - assert instance.query("SELECT * from system.enabled_roles ORDER BY role_name", user='A') == TSV([["R1", 0, 1, 1]]) - assert instance.query("SELECT * from system.enabled_roles ORDER BY role_name", user='B') == TSV([["R2", 1, 1, 1]]) + assert instance.query( + "SELECT * from system.current_roles ORDER BY 
role_name", user="A" + ) == TSV([["R1", 0, 1]]) + assert instance.query( + "SELECT * from system.current_roles ORDER BY role_name", user="B" + ) == TSV([["R2", 1, 1]]) + assert instance.query( + "SELECT * from system.enabled_roles ORDER BY role_name", user="A" + ) == TSV([["R1", 0, 1, 1]]) + assert instance.query( + "SELECT * from system.enabled_roles ORDER BY role_name", user="B" + ) == TSV([["R2", 1, 1, 1]]) def test_function_current_roles(): instance.query("CREATE USER A") - instance.query('CREATE ROLE R1, R2, R3, R4') - instance.query('GRANT R4 TO R2') - instance.query('GRANT R1,R2,R3 TO A') + instance.query("CREATE ROLE R1, R2, R3, R4") + instance.query("GRANT R4 TO R2") + instance.query("GRANT R1,R2,R3 TO A") session_id = new_session_id() - assert instance.http_query('SELECT defaultRoles(), currentRoles(), enabledRoles()', user='A', params={'session_id':session_id}) == "['R1','R2','R3']\t['R1','R2','R3']\t['R1','R2','R3','R4']\n" + assert ( + instance.http_query( + "SELECT defaultRoles(), currentRoles(), enabledRoles()", + user="A", + params={"session_id": session_id}, + ) + == "['R1','R2','R3']\t['R1','R2','R3']\t['R1','R2','R3','R4']\n" + ) - instance.http_query('SET ROLE R1', user='A', params={'session_id':session_id}) - assert instance.http_query('SELECT defaultRoles(), currentRoles(), enabledRoles()', user='A', params={'session_id':session_id}) == "['R1','R2','R3']\t['R1']\t['R1']\n" + instance.http_query("SET ROLE R1", user="A", params={"session_id": session_id}) + assert ( + instance.http_query( + "SELECT defaultRoles(), currentRoles(), enabledRoles()", + user="A", + params={"session_id": session_id}, + ) + == "['R1','R2','R3']\t['R1']\t['R1']\n" + ) - instance.http_query('SET ROLE R2', user='A', params={'session_id':session_id}) - assert instance.http_query('SELECT defaultRoles(), currentRoles(), enabledRoles()', user='A', params={'session_id':session_id}) == "['R1','R2','R3']\t['R2']\t['R2','R4']\n" + instance.http_query("SET ROLE R2", user="A", params={"session_id": session_id}) + assert ( + instance.http_query( + "SELECT defaultRoles(), currentRoles(), enabledRoles()", + user="A", + params={"session_id": session_id}, + ) + == "['R1','R2','R3']\t['R2']\t['R2','R4']\n" + ) - instance.http_query('SET ROLE NONE', user='A', params={'session_id':session_id}) - assert instance.http_query('SELECT defaultRoles(), currentRoles(), enabledRoles()', user='A', params={'session_id':session_id}) == "['R1','R2','R3']\t[]\t[]\n" + instance.http_query("SET ROLE NONE", user="A", params={"session_id": session_id}) + assert ( + instance.http_query( + "SELECT defaultRoles(), currentRoles(), enabledRoles()", + user="A", + params={"session_id": session_id}, + ) + == "['R1','R2','R3']\t[]\t[]\n" + ) - instance.http_query('SET ROLE DEFAULT', user='A', params={'session_id':session_id}) - assert instance.http_query('SELECT defaultRoles(), currentRoles(), enabledRoles()', user='A', params={'session_id':session_id}) == "['R1','R2','R3']\t['R1','R2','R3']\t['R1','R2','R3','R4']\n" + instance.http_query("SET ROLE DEFAULT", user="A", params={"session_id": session_id}) + assert ( + instance.http_query( + "SELECT defaultRoles(), currentRoles(), enabledRoles()", + user="A", + params={"session_id": session_id}, + ) + == "['R1','R2','R3']\t['R1','R2','R3']\t['R1','R2','R3','R4']\n" + ) - instance.query('SET DEFAULT ROLE R2 TO A') - assert instance.http_query('SELECT defaultRoles(), currentRoles(), enabledRoles()', user='A', params={'session_id':session_id}) == "['R2']\t['R1','R2','R3']\t['R1','R2','R3','R4']\n" 
+ instance.query("SET DEFAULT ROLE R2 TO A") + assert ( + instance.http_query( + "SELECT defaultRoles(), currentRoles(), enabledRoles()", + user="A", + params={"session_id": session_id}, + ) + == "['R2']\t['R1','R2','R3']\t['R1','R2','R3','R4']\n" + ) - instance.query('REVOKE R3 FROM A') - assert instance.http_query('SELECT defaultRoles(), currentRoles(), enabledRoles()', user='A', params={'session_id':session_id}) == "['R2']\t['R1','R2']\t['R1','R2','R4']\n" + instance.query("REVOKE R3 FROM A") + assert ( + instance.http_query( + "SELECT defaultRoles(), currentRoles(), enabledRoles()", + user="A", + params={"session_id": session_id}, + ) + == "['R2']\t['R1','R2']\t['R1','R2','R4']\n" + ) - instance.query('REVOKE R2 FROM A') - assert instance.http_query('SELECT defaultRoles(), currentRoles(), enabledRoles()', user='A', params={'session_id':session_id}) == "[]\t['R1']\t['R1']\n" + instance.query("REVOKE R2 FROM A") + assert ( + instance.http_query( + "SELECT defaultRoles(), currentRoles(), enabledRoles()", + user="A", + params={"session_id": session_id}, + ) + == "[]\t['R1']\t['R1']\n" + ) - instance.query('SET DEFAULT ROLE ALL TO A') - assert instance.http_query('SELECT defaultRoles(), currentRoles(), enabledRoles()', user='A', params={'session_id':session_id}) == "['R1']\t['R1']\t['R1']\n" + instance.query("SET DEFAULT ROLE ALL TO A") + assert ( + instance.http_query( + "SELECT defaultRoles(), currentRoles(), enabledRoles()", + user="A", + params={"session_id": session_id}, + ) + == "['R1']\t['R1']\t['R1']\n" + ) diff --git a/tests/integration/test_row_policy/test.py b/tests/integration/test_row_policy/test.py index 0a7f6958b4a..c0ebfc7a070 100644 --- a/tests/integration/test_row_policy/test.py +++ b/tests/integration/test_row_policy/test.py @@ -7,22 +7,36 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry, TSV cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=["configs/config.d/remote_servers.xml"], - user_configs=["configs/users.d/row_policy.xml", "configs/users.d/another_user.xml", - "configs/users.d/any_join_distinct_right_table_keys.xml"], - with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=["configs/config.d/remote_servers.xml"], - user_configs=["configs/users.d/row_policy.xml", "configs/users.d/another_user.xml", - "configs/users.d/any_join_distinct_right_table_keys.xml"], - with_zookeeper=True) +node = cluster.add_instance( + "node", + main_configs=["configs/config.d/remote_servers.xml"], + user_configs=[ + "configs/users.d/row_policy.xml", + "configs/users.d/another_user.xml", + "configs/users.d/any_join_distinct_right_table_keys.xml", + ], + with_zookeeper=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/config.d/remote_servers.xml"], + user_configs=[ + "configs/users.d/row_policy.xml", + "configs/users.d/another_user.xml", + "configs/users.d/any_join_distinct_right_table_keys.xml", + ], + with_zookeeper=True, +) nodes = [node, node2] def copy_policy_xml(local_file_name, reload_immediately=True): script_dir = os.path.dirname(os.path.realpath(__file__)) for current_node in nodes: - current_node.copy_file_to_container(os.path.join(script_dir, local_file_name), - '/etc/clickhouse-server/users.d/row_policy.xml') + current_node.copy_file_to_container( + os.path.join(script_dir, local_file_name), + "/etc/clickhouse-server/users.d/row_policy.xml", + ) if reload_immediately: current_node.query("SYSTEM RELOAD CONFIG") @@ -33,7 +47,8 @@ def 
started_cluster(): cluster.start() for current_node in nodes: - current_node.query(''' + current_node.query( + """ CREATE DATABASE mydb; CREATE TABLE mydb.filtered_table1 (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a; @@ -52,7 +67,8 @@ def started_cluster(): INSERT INTO mydb.`.filtered_table4` values (0, 0), (0, 1), (1, 0), (1, 1); CREATE TABLE mydb.local (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a; - ''') + """ + ) node.query("INSERT INTO mydb.local values (2, 0), (2, 1), (1, 0), (1, 1)") node2.query("INSERT INTO mydb.local values (3, 0), (3, 1), (1, 0), (1, 1)") @@ -68,303 +84,602 @@ def reset_policies(): try: yield finally: - copy_policy_xml('normal_filters.xml') + copy_policy_xml("normal_filters.xml") for current_node in nodes: current_node.query("DROP POLICY IF EXISTS pA, pB ON mydb.filtered_table1") def test_smoke(): assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[1, 0], [1, 1]]) - assert node.query("SELECT * FROM mydb.filtered_table2") == TSV([[0, 0, 0, 0], [0, 0, 6, 0]]) + assert node.query("SELECT * FROM mydb.filtered_table2") == TSV( + [[0, 0, 0, 0], [0, 0, 6, 0]] + ) assert node.query("SELECT * FROM mydb.filtered_table3") == TSV([[0, 1], [1, 0]]) assert node.query("SELECT a FROM mydb.filtered_table1") == TSV([[1], [1]]) assert node.query("SELECT b FROM mydb.filtered_table1") == TSV([[0], [1]]) - assert node.query("SELECT a FROM mydb.filtered_table1 WHERE a = 1") == TSV([[1], [1]]) - assert node.query("SELECT a FROM mydb.filtered_table1 WHERE a IN (1)") == TSV([[1], [1]]) + assert node.query("SELECT a FROM mydb.filtered_table1 WHERE a = 1") == TSV( + [[1], [1]] + ) + assert node.query("SELECT a FROM mydb.filtered_table1 WHERE a IN (1)") == TSV( + [[1], [1]] + ) assert node.query("SELECT a = 1 FROM mydb.filtered_table1") == TSV([[1], [1]]) assert node.query("SELECT a FROM mydb.filtered_table3") == TSV([[0], [1]]) assert node.query("SELECT b FROM mydb.filtered_table3") == TSV([[1], [0]]) assert node.query("SELECT c FROM mydb.filtered_table3") == TSV([[1], [1]]) assert node.query("SELECT a + b FROM mydb.filtered_table3") == TSV([[1], [1]]) - assert node.query("SELECT a FROM mydb.filtered_table3 WHERE c = 1") == TSV([[0], [1]]) + assert node.query("SELECT a FROM mydb.filtered_table3 WHERE c = 1") == TSV( + [[0], [1]] + ) assert node.query("SELECT c = 1 FROM mydb.filtered_table3") == TSV([[1], [1]]) assert node.query("SELECT a + b = 1 FROM mydb.filtered_table3") == TSV([[1], [1]]) def test_join(): assert node.query( - "SELECT * FROM mydb.filtered_table1 as t1 ANY LEFT JOIN mydb.filtered_table1 as t2 ON t1.a = t2.b") == TSV( - [[1, 0, 1, 1], [1, 1, 1, 1]]) + "SELECT * FROM mydb.filtered_table1 as t1 ANY LEFT JOIN mydb.filtered_table1 as t2 ON t1.a = t2.b" + ) == TSV([[1, 0, 1, 1], [1, 1, 1, 1]]) assert node.query( - "SELECT * FROM mydb.filtered_table1 as t2 ANY RIGHT JOIN mydb.filtered_table1 as t1 ON t2.b = t1.a") == TSV( - [[1, 1, 1, 0]]) + "SELECT * FROM mydb.filtered_table1 as t2 ANY RIGHT JOIN mydb.filtered_table1 as t1 ON t2.b = t1.a" + ) == TSV([[1, 1, 1, 0]]) def test_cannot_trick_row_policy_with_keyword_with(): - assert node.query("WITH 0 AS a SELECT a FROM mydb.filtered_table1") == TSV([[0], [0]]) - assert node.query("WITH 0 AS a SELECT b FROM mydb.filtered_table1") == TSV([[0], [1]]) + assert node.query("WITH 0 AS a SELECT a FROM mydb.filtered_table1") == TSV( + [[0], [0]] + ) + assert node.query("WITH 0 AS a SELECT b FROM mydb.filtered_table1") == TSV( + [[0], [1]] + ) - assert node.query("WITH 0 AS a SELECT * FROM mydb.filtered_table1") == TSV([[1, 0], 
[1, 1]]) - assert node.query("WITH 0 AS a SELECT * FROM mydb.filtered_table1 WHERE a >= 0 AND b >= 0 SETTINGS optimize_move_to_prewhere = 0") == TSV([[1, 0], [1, 1]]) - assert node.query("WITH 0 AS a SELECT * FROM mydb.filtered_table1 PREWHERE a >= 0 AND b >= 0") == TSV([[1, 0], [1, 1]]) - assert node.query("WITH 0 AS a SELECT * FROM mydb.filtered_table1 PREWHERE a >= 0 WHERE b >= 0") == TSV([[1, 0], [1, 1]]) - assert node.query("WITH 0 AS a SELECT * FROM mydb.filtered_table1 PREWHERE b >= 0 WHERE a >= 0") == TSV([[1, 0], [1, 1]]) + assert node.query("WITH 0 AS a SELECT * FROM mydb.filtered_table1") == TSV( + [[1, 0], [1, 1]] + ) + assert node.query( + "WITH 0 AS a SELECT * FROM mydb.filtered_table1 WHERE a >= 0 AND b >= 0 SETTINGS optimize_move_to_prewhere = 0" + ) == TSV([[1, 0], [1, 1]]) + assert node.query( + "WITH 0 AS a SELECT * FROM mydb.filtered_table1 PREWHERE a >= 0 AND b >= 0" + ) == TSV([[1, 0], [1, 1]]) + assert node.query( + "WITH 0 AS a SELECT * FROM mydb.filtered_table1 PREWHERE a >= 0 WHERE b >= 0" + ) == TSV([[1, 0], [1, 1]]) + assert node.query( + "WITH 0 AS a SELECT * FROM mydb.filtered_table1 PREWHERE b >= 0 WHERE a >= 0" + ) == TSV([[1, 0], [1, 1]]) - assert node.query("WITH 0 AS a SELECT a, b FROM mydb.filtered_table1") == TSV([[0, 0], [0, 1]]) - assert node.query("WITH 0 AS a SELECT a, b FROM mydb.filtered_table1 WHERE a >= 0 AND b >= 0 SETTINGS optimize_move_to_prewhere = 0") == TSV([[0, 0], [0, 1]]) - assert node.query("WITH 0 AS a SELECT a, b FROM mydb.filtered_table1 PREWHERE a >= 0 AND b >= 0") == TSV([[0, 0], [0, 1]]) - assert node.query("WITH 0 AS a SELECT a, b FROM mydb.filtered_table1 PREWHERE a >= 0 WHERE b >= 0") == TSV([[0, 0], [0, 1]]) - assert node.query("WITH 0 AS a SELECT a, b FROM mydb.filtered_table1 PREWHERE b >= 0 WHERE a >= 0") == TSV([[0, 0], [0, 1]]) + assert node.query("WITH 0 AS a SELECT a, b FROM mydb.filtered_table1") == TSV( + [[0, 0], [0, 1]] + ) + assert node.query( + "WITH 0 AS a SELECT a, b FROM mydb.filtered_table1 WHERE a >= 0 AND b >= 0 SETTINGS optimize_move_to_prewhere = 0" + ) == TSV([[0, 0], [0, 1]]) + assert node.query( + "WITH 0 AS a SELECT a, b FROM mydb.filtered_table1 PREWHERE a >= 0 AND b >= 0" + ) == TSV([[0, 0], [0, 1]]) + assert node.query( + "WITH 0 AS a SELECT a, b FROM mydb.filtered_table1 PREWHERE a >= 0 WHERE b >= 0" + ) == TSV([[0, 0], [0, 1]]) + assert node.query( + "WITH 0 AS a SELECT a, b FROM mydb.filtered_table1 PREWHERE b >= 0 WHERE a >= 0" + ) == TSV([[0, 0], [0, 1]]) - assert node.query("WITH 0 AS c SELECT * FROM mydb.filtered_table3") == TSV([[0, 1], [1, 0]]) - assert node.query("WITH 0 AS c SELECT * FROM mydb.filtered_table3 WHERE c >= 0 AND a >= 0 SETTINGS optimize_move_to_prewhere = 0") == TSV([[0, 1], [1, 0]]) - assert node.query("WITH 0 AS c SELECT * FROM mydb.filtered_table3 PREWHERE c >= 0 AND a >= 0") == TSV([[0, 1], [1, 0]]) - assert node.query("WITH 0 AS c SELECT * FROM mydb.filtered_table3 PREWHERE c >= 0 WHERE a >= 0") == TSV([[0, 1], [1, 0]]) - assert node.query("WITH 0 AS c SELECT * FROM mydb.filtered_table3 PREWHERE a >= 0 WHERE c >= 0") == TSV([[0, 1], [1, 0]]) + assert node.query("WITH 0 AS c SELECT * FROM mydb.filtered_table3") == TSV( + [[0, 1], [1, 0]] + ) + assert node.query( + "WITH 0 AS c SELECT * FROM mydb.filtered_table3 WHERE c >= 0 AND a >= 0 SETTINGS optimize_move_to_prewhere = 0" + ) == TSV([[0, 1], [1, 0]]) + assert node.query( + "WITH 0 AS c SELECT * FROM mydb.filtered_table3 PREWHERE c >= 0 AND a >= 0" + ) == TSV([[0, 1], [1, 0]]) + assert node.query( + "WITH 0 AS c SELECT 
* FROM mydb.filtered_table3 PREWHERE c >= 0 WHERE a >= 0" + ) == TSV([[0, 1], [1, 0]]) + assert node.query( + "WITH 0 AS c SELECT * FROM mydb.filtered_table3 PREWHERE a >= 0 WHERE c >= 0" + ) == TSV([[0, 1], [1, 0]]) - assert node.query("WITH 0 AS c SELECT a, b, c FROM mydb.filtered_table3") == TSV([[0, 1, 0], [1, 0, 0]]) - assert node.query("WITH 0 AS c SELECT a, b, c FROM mydb.filtered_table3 WHERE c >= 0 AND a >= 0 SETTINGS optimize_move_to_prewhere = 0") == TSV([[0, 1, 0], [1, 0, 0]]) - assert node.query("WITH 0 AS c SELECT a, b, c FROM mydb.filtered_table3 PREWHERE c >= 0 AND a >= 0") == TSV([[0, 1, 0], [1, 0, 0]]) - assert node.query("WITH 0 AS c SELECT a, b, c FROM mydb.filtered_table3 PREWHERE c >= 0 WHERE a >= 0") == TSV([[0, 1, 0], [1, 0, 0]]) - assert node.query("WITH 0 AS c SELECT a, b, c FROM mydb.filtered_table3 PREWHERE a >= 0 WHERE c >= 0") == TSV([[0, 1, 0], [1, 0, 0]]) + assert node.query("WITH 0 AS c SELECT a, b, c FROM mydb.filtered_table3") == TSV( + [[0, 1, 0], [1, 0, 0]] + ) + assert node.query( + "WITH 0 AS c SELECT a, b, c FROM mydb.filtered_table3 WHERE c >= 0 AND a >= 0 SETTINGS optimize_move_to_prewhere = 0" + ) == TSV([[0, 1, 0], [1, 0, 0]]) + assert node.query( + "WITH 0 AS c SELECT a, b, c FROM mydb.filtered_table3 PREWHERE c >= 0 AND a >= 0" + ) == TSV([[0, 1, 0], [1, 0, 0]]) + assert node.query( + "WITH 0 AS c SELECT a, b, c FROM mydb.filtered_table3 PREWHERE c >= 0 WHERE a >= 0" + ) == TSV([[0, 1, 0], [1, 0, 0]]) + assert node.query( + "WITH 0 AS c SELECT a, b, c FROM mydb.filtered_table3 PREWHERE a >= 0 WHERE c >= 0" + ) == TSV([[0, 1, 0], [1, 0, 0]]) def test_policy_from_users_xml_affects_only_user_assigned(): assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[1, 0], [1, 1]]) - assert node.query("SELECT * FROM mydb.filtered_table1", user="another") == TSV([[0, 0], [0, 1], [1, 0], [1, 1]]) + assert node.query("SELECT * FROM mydb.filtered_table1", user="another") == TSV( + [[0, 0], [0, 1], [1, 0], [1, 1]] + ) - assert node.query("SELECT * FROM mydb.filtered_table2") == TSV([[0, 0, 0, 0], [0, 0, 6, 0]]) + assert node.query("SELECT * FROM mydb.filtered_table2") == TSV( + [[0, 0, 0, 0], [0, 0, 6, 0]] + ) assert node.query("SELECT * FROM mydb.filtered_table2", user="another") == TSV( - [[0, 0, 0, 0], [0, 0, 6, 0], [1, 2, 3, 4], [4, 3, 2, 1]]) + [[0, 0, 0, 0], [0, 0, 6, 0], [1, 2, 3, 4], [4, 3, 2, 1]] + ) - assert node.query("SELECT * FROM mydb.local") == TSV([[1, 0], [1, 1], [2, 0], [2, 1]]) - assert node.query("SELECT * FROM mydb.local", user="another") == TSV([[1, 0], [1, 1]]) + assert node.query("SELECT * FROM mydb.local") == TSV( + [[1, 0], [1, 1], [2, 0], [2, 1]] + ) + assert node.query("SELECT * FROM mydb.local", user="another") == TSV( + [[1, 0], [1, 1]] + ) def test_with_prewhere(): - copy_policy_xml('normal_filter2_table2.xml') - assert node.query("SELECT * FROM mydb.filtered_table2 WHERE a > 1 SETTINGS optimize_move_to_prewhere = 0") == TSV([[4, 3, 2, 1]]) - assert node.query("SELECT a FROM mydb.filtered_table2 WHERE a > 1 SETTINGS optimize_move_to_prewhere = 0") == TSV([[4]]) - assert node.query("SELECT a, b FROM mydb.filtered_table2 WHERE a > 1 SETTINGS optimize_move_to_prewhere = 0") == TSV([[4, 3]]) - assert node.query("SELECT b, c FROM mydb.filtered_table2 WHERE a > 1 SETTINGS optimize_move_to_prewhere = 0") == TSV([[3, 2]]) - assert node.query("SELECT d FROM mydb.filtered_table2 WHERE a > 1 SETTINGS optimize_move_to_prewhere = 0") == TSV([[1]]) + copy_policy_xml("normal_filter2_table2.xml") + assert node.query( + "SELECT * FROM 
mydb.filtered_table2 WHERE a > 1 SETTINGS optimize_move_to_prewhere = 0" + ) == TSV([[4, 3, 2, 1]]) + assert node.query( + "SELECT a FROM mydb.filtered_table2 WHERE a > 1 SETTINGS optimize_move_to_prewhere = 0" + ) == TSV([[4]]) + assert node.query( + "SELECT a, b FROM mydb.filtered_table2 WHERE a > 1 SETTINGS optimize_move_to_prewhere = 0" + ) == TSV([[4, 3]]) + assert node.query( + "SELECT b, c FROM mydb.filtered_table2 WHERE a > 1 SETTINGS optimize_move_to_prewhere = 0" + ) == TSV([[3, 2]]) + assert node.query( + "SELECT d FROM mydb.filtered_table2 WHERE a > 1 SETTINGS optimize_move_to_prewhere = 0" + ) == TSV([[1]]) - assert node.query("SELECT * FROM mydb.filtered_table2 PREWHERE a > 1") == TSV([[4, 3, 2, 1]]) - assert node.query("SELECT a FROM mydb.filtered_table2 PREWHERE a > 1") == TSV([[4]]) - assert node.query("SELECT a, b FROM mydb.filtered_table2 PREWHERE a > 1") == TSV([[4, 3]]) - assert node.query("SELECT b, c FROM mydb.filtered_table2 PREWHERE a > 1") == TSV([[3, 2]]) - assert node.query("SELECT d FROM mydb.filtered_table2 PREWHERE a > 1") == TSV([[1]]) + assert node.query("SELECT * FROM mydb.filtered_table2 PREWHERE a > 1") == TSV( + [[4, 3, 2, 1]] + ) + assert node.query("SELECT a FROM mydb.filtered_table2 PREWHERE a > 1") == TSV([[4]]) + assert node.query("SELECT a, b FROM mydb.filtered_table2 PREWHERE a > 1") == TSV( + [[4, 3]] + ) + assert node.query("SELECT b, c FROM mydb.filtered_table2 PREWHERE a > 1") == TSV( + [[3, 2]] + ) + assert node.query("SELECT d FROM mydb.filtered_table2 PREWHERE a > 1") == TSV([[1]]) - assert node.query("SELECT * FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10") == TSV([[1, 2, 3, 4]]) - assert node.query("SELECT a FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10") == TSV([[1]]) - assert node.query("SELECT b FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10") == TSV([[2]]) - assert node.query("SELECT a, b FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10") == TSV([[1, 2]]) - assert node.query("SELECT a, c FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10") == TSV([[1, 3]]) - assert node.query("SELECT b, d FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10") == TSV([[2, 4]]) - assert node.query("SELECT c, d FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10") == TSV([[3, 4]]) + assert node.query( + "SELECT * FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10" + ) == TSV([[1, 2, 3, 4]]) + assert node.query( + "SELECT a FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10" + ) == TSV([[1]]) + assert node.query( + "SELECT b FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10" + ) == TSV([[2]]) + assert node.query( + "SELECT a, b FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10" + ) == TSV([[1, 2]]) + assert node.query( + "SELECT a, c FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10" + ) == TSV([[1, 3]]) + assert node.query( + "SELECT b, d FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10" + ) == TSV([[2, 4]]) + assert node.query( + "SELECT c, d FROM mydb.filtered_table2 PREWHERE a < 4 WHERE b < 10" + ) == TSV([[3, 4]]) def test_throwif_error_in_where_with_same_condition_as_filter(): - copy_policy_xml('normal_filter2_table2.xml') - assert 'expected' in node.query_and_get_error("SELECT * FROM mydb.filtered_table2 WHERE throwIf(a > 0, 'expected') = 0 SETTINGS optimize_move_to_prewhere = 0") + copy_policy_xml("normal_filter2_table2.xml") + assert "expected" in node.query_and_get_error( + "SELECT * FROM mydb.filtered_table2 WHERE throwIf(a > 0, 'expected') = 0 SETTINGS optimize_move_to_prewhere = 0" + ) 
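
The row-policy hunks above, like most of the remaining hunks in this patch, are purely mechanical: black wraps any `node.query(...)` call whose SQL string pushes the line past the length limit, moving the string literal and the expected `TSV` payload onto their own indented lines without changing behaviour. A minimal before/after sketch of that pattern, using a hypothetical table name rather than one from this test suite:

```python
# Before reformatting: call, SQL string and expected rows all on one long line.
assert node.query("SELECT a, b FROM mydb.example_table WHERE a >= 0 SETTINGS optimize_move_to_prewhere = 0") == TSV([[1, 0], [1, 1]])

# After reformatting: black splits the call so the SQL string sits on its own
# line inside the parentheses; the comparison itself is unchanged.
# (mydb.example_table is a placeholder, not a table from this test suite.)
assert node.query(
    "SELECT a, b FROM mydb.example_table WHERE a >= 0 SETTINGS optimize_move_to_prewhere = 0"
) == TSV([[1, 0], [1, 1]])
```
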
def test_throwif_error_in_prewhere_with_same_condition_as_filter(): - copy_policy_xml('normal_filter2_table2.xml') - assert 'expected' in node.query_and_get_error("SELECT * FROM mydb.filtered_table2 PREWHERE throwIf(a > 0, 'expected') = 0") + copy_policy_xml("normal_filter2_table2.xml") + assert "expected" in node.query_and_get_error( + "SELECT * FROM mydb.filtered_table2 PREWHERE throwIf(a > 0, 'expected') = 0" + ) def test_throwif_in_where_doesnt_expose_restricted_data(): - copy_policy_xml('no_filters.xml') - assert 'expected' in node.query_and_get_error("SELECT * FROM mydb.filtered_table2 WHERE throwIf(a = 0, 'expected') = 0 SETTINGS optimize_move_to_prewhere = 0") + copy_policy_xml("no_filters.xml") + assert "expected" in node.query_and_get_error( + "SELECT * FROM mydb.filtered_table2 WHERE throwIf(a = 0, 'expected') = 0 SETTINGS optimize_move_to_prewhere = 0" + ) - copy_policy_xml('normal_filter2_table2.xml') - assert node.query("SELECT * FROM mydb.filtered_table2 WHERE throwIf(a = 0, 'pwned') = 0 SETTINGS optimize_move_to_prewhere = 0") == TSV([ - [1, 2, 3, 4], [4, 3, 2, 1]]) + copy_policy_xml("normal_filter2_table2.xml") + assert node.query( + "SELECT * FROM mydb.filtered_table2 WHERE throwIf(a = 0, 'pwned') = 0 SETTINGS optimize_move_to_prewhere = 0" + ) == TSV([[1, 2, 3, 4], [4, 3, 2, 1]]) def test_throwif_in_prewhere_doesnt_expose_restricted_data(): - copy_policy_xml('no_filters.xml') - assert 'expected' in node.query_and_get_error("SELECT * FROM mydb.filtered_table2 PREWHERE throwIf(a = 0, 'expected') = 0") + copy_policy_xml("no_filters.xml") + assert "expected" in node.query_and_get_error( + "SELECT * FROM mydb.filtered_table2 PREWHERE throwIf(a = 0, 'expected') = 0" + ) - copy_policy_xml('normal_filter2_table2.xml') - assert node.query("SELECT * FROM mydb.filtered_table2 PREWHERE throwIf(a = 0, 'pwned') = 0") == TSV([ - [1, 2, 3, 4], [4, 3, 2, 1]]) + copy_policy_xml("normal_filter2_table2.xml") + assert node.query( + "SELECT * FROM mydb.filtered_table2 PREWHERE throwIf(a = 0, 'pwned') = 0" + ) == TSV([[1, 2, 3, 4], [4, 3, 2, 1]]) def test_change_of_users_xml_changes_row_policies(): - copy_policy_xml('normal_filters.xml') + copy_policy_xml("normal_filters.xml") assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[1, 0], [1, 1]]) - assert node.query("SELECT * FROM mydb.filtered_table2") == TSV([[0, 0, 0, 0], [0, 0, 6, 0]]) + assert node.query("SELECT * FROM mydb.filtered_table2") == TSV( + [[0, 0, 0, 0], [0, 0, 6, 0]] + ) assert node.query("SELECT * FROM mydb.filtered_table3") == TSV([[0, 1], [1, 0]]) - copy_policy_xml('all_rows.xml') - assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[0, 0], [0, 1], [1, 0], [1, 1]]) + copy_policy_xml("all_rows.xml") + assert node.query("SELECT * FROM mydb.filtered_table1") == TSV( + [[0, 0], [0, 1], [1, 0], [1, 1]] + ) assert node.query("SELECT * FROM mydb.filtered_table2") == TSV( - [[0, 0, 0, 0], [0, 0, 6, 0], [1, 2, 3, 4], [4, 3, 2, 1]]) - assert node.query("SELECT * FROM mydb.filtered_table3") == TSV([[0, 0], [0, 1], [1, 0], [1, 1]]) + [[0, 0, 0, 0], [0, 0, 6, 0], [1, 2, 3, 4], [4, 3, 2, 1]] + ) + assert node.query("SELECT * FROM mydb.filtered_table3") == TSV( + [[0, 0], [0, 1], [1, 0], [1, 1]] + ) - copy_policy_xml('no_rows.xml') + copy_policy_xml("no_rows.xml") assert node.query("SELECT * FROM mydb.filtered_table1") == "" assert node.query("SELECT * FROM mydb.filtered_table2") == "" assert node.query("SELECT * FROM mydb.filtered_table3") == "" - copy_policy_xml('normal_filters.xml') + 
copy_policy_xml("normal_filters.xml") assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[1, 0], [1, 1]]) - assert node.query("SELECT * FROM mydb.filtered_table2") == TSV([[0, 0, 0, 0], [0, 0, 6, 0]]) + assert node.query("SELECT * FROM mydb.filtered_table2") == TSV( + [[0, 0, 0, 0], [0, 0, 6, 0]] + ) assert node.query("SELECT * FROM mydb.filtered_table3") == TSV([[0, 1], [1, 0]]) - copy_policy_xml('normal_filter2_table2.xml') - assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[0, 0], [0, 1], [1, 0], [1, 1]]) - assert node.query("SELECT * FROM mydb.filtered_table2") == TSV([[1, 2, 3, 4], [4, 3, 2, 1]]) - assert node.query("SELECT * FROM mydb.filtered_table3") == TSV([[0, 0], [0, 1], [1, 0], [1, 1]]) - - copy_policy_xml('no_filters.xml') - assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[0, 0], [0, 1], [1, 0], [1, 1]]) + copy_policy_xml("normal_filter2_table2.xml") + assert node.query("SELECT * FROM mydb.filtered_table1") == TSV( + [[0, 0], [0, 1], [1, 0], [1, 1]] + ) assert node.query("SELECT * FROM mydb.filtered_table2") == TSV( - [[0, 0, 0, 0], [0, 0, 6, 0], [1, 2, 3, 4], [4, 3, 2, 1]]) - assert node.query("SELECT * FROM mydb.filtered_table3") == TSV([[0, 0], [0, 1], [1, 0], [1, 1]]) + [[1, 2, 3, 4], [4, 3, 2, 1]] + ) + assert node.query("SELECT * FROM mydb.filtered_table3") == TSV( + [[0, 0], [0, 1], [1, 0], [1, 1]] + ) - copy_policy_xml('normal_filters.xml') + copy_policy_xml("no_filters.xml") + assert node.query("SELECT * FROM mydb.filtered_table1") == TSV( + [[0, 0], [0, 1], [1, 0], [1, 1]] + ) + assert node.query("SELECT * FROM mydb.filtered_table2") == TSV( + [[0, 0, 0, 0], [0, 0, 6, 0], [1, 2, 3, 4], [4, 3, 2, 1]] + ) + assert node.query("SELECT * FROM mydb.filtered_table3") == TSV( + [[0, 0], [0, 1], [1, 0], [1, 1]] + ) + + copy_policy_xml("normal_filters.xml") assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[1, 0], [1, 1]]) - assert node.query("SELECT * FROM mydb.filtered_table2") == TSV([[0, 0, 0, 0], [0, 0, 6, 0]]) + assert node.query("SELECT * FROM mydb.filtered_table2") == TSV( + [[0, 0, 0, 0], [0, 0, 6, 0]] + ) assert node.query("SELECT * FROM mydb.filtered_table3") == TSV([[0, 1], [1, 0]]) def test_reload_users_xml_by_timer(): - copy_policy_xml('normal_filters.xml') + copy_policy_xml("normal_filters.xml") assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[1, 0], [1, 1]]) - assert node.query("SELECT * FROM mydb.filtered_table2") == TSV([[0, 0, 0, 0], [0, 0, 6, 0]]) + assert node.query("SELECT * FROM mydb.filtered_table2") == TSV( + [[0, 0, 0, 0], [0, 0, 6, 0]] + ) assert node.query("SELECT * FROM mydb.filtered_table3") == TSV([[0, 1], [1, 0]]) - time.sleep(1) # The modification time of the 'row_policy.xml' file should be different. - copy_policy_xml('all_rows.xml', False) - assert_eq_with_retry(node, "SELECT * FROM mydb.filtered_table1", [[0, 0], [0, 1], [1, 0], [1, 1]]) - assert_eq_with_retry(node, "SELECT * FROM mydb.filtered_table2", - [[0, 0, 0, 0], [0, 0, 6, 0], [1, 2, 3, 4], [4, 3, 2, 1]]) - assert_eq_with_retry(node, "SELECT * FROM mydb.filtered_table3", [[0, 0], [0, 1], [1, 0], [1, 1]]) + time.sleep( + 1 + ) # The modification time of the 'row_policy.xml' file should be different. 
+ copy_policy_xml("all_rows.xml", False) + assert_eq_with_retry( + node, "SELECT * FROM mydb.filtered_table1", [[0, 0], [0, 1], [1, 0], [1, 1]] + ) + assert_eq_with_retry( + node, + "SELECT * FROM mydb.filtered_table2", + [[0, 0, 0, 0], [0, 0, 6, 0], [1, 2, 3, 4], [4, 3, 2, 1]], + ) + assert_eq_with_retry( + node, "SELECT * FROM mydb.filtered_table3", [[0, 0], [0, 1], [1, 0], [1, 1]] + ) - time.sleep(1) # The modification time of the 'row_policy.xml' file should be different. - copy_policy_xml('normal_filters.xml', False) + time.sleep( + 1 + ) # The modification time of the 'row_policy.xml' file should be different. + copy_policy_xml("normal_filters.xml", False) assert_eq_with_retry(node, "SELECT * FROM mydb.filtered_table1", [[1, 0], [1, 1]]) - assert_eq_with_retry(node, "SELECT * FROM mydb.filtered_table2", [[0, 0, 0, 0], [0, 0, 6, 0]]) + assert_eq_with_retry( + node, "SELECT * FROM mydb.filtered_table2", [[0, 0, 0, 0], [0, 0, 6, 0]] + ) assert_eq_with_retry(node, "SELECT * FROM mydb.filtered_table3", [[0, 1], [1, 0]]) def test_introspection(): policies = [ - ["another ON mydb.filtered_table1", "another", "mydb", "filtered_table1", - "6068883a-0e9d-f802-7e22-0144f8e66d3c", "users.xml", "1", 0, 0, "['another']", "[]"], - ["another ON mydb.filtered_table2", "another", "mydb", "filtered_table2", - "c019e957-c60b-d54e-cc52-7c90dac5fb01", "users.xml", "1", 0, 0, "['another']", "[]"], - ["another ON mydb.filtered_table3", "another", "mydb", "filtered_table3", - "4cb080d0-44e8-dbef-6026-346655143628", "users.xml", "1", 0, 0, "['another']", "[]"], - ["another ON mydb.local", "another", "mydb", "local", "5b23c389-7e18-06bf-a6bc-dd1afbbc0a97", "users.xml", - "a = 1", 0, 0, "['another']", "[]"], - ["default ON mydb.filtered_table1", "default", "mydb", "filtered_table1", - "9e8a8f62-4965-2b5e-8599-57c7b99b3549", "users.xml", "a = 1", 0, 0, "['default']", "[]"], - ["default ON mydb.filtered_table2", "default", "mydb", "filtered_table2", - "cffae79d-b9bf-a2ef-b798-019c18470b25", "users.xml", "a + b < 1 or c - d > 5", 0, 0, "['default']", "[]"], - ["default ON mydb.filtered_table3", "default", "mydb", "filtered_table3", - "12fc5cef-e3da-3940-ec79-d8be3911f42b", "users.xml", "c = 1", 0, 0, "['default']", "[]"], - ["default ON mydb.local", "default", "mydb", "local", "cdacaeb5-1d97-f99d-2bb0-4574f290629c", "users.xml", "1", - 0, 0, "['default']", "[]"] + [ + "another ON mydb.filtered_table1", + "another", + "mydb", + "filtered_table1", + "6068883a-0e9d-f802-7e22-0144f8e66d3c", + "users.xml", + "1", + 0, + 0, + "['another']", + "[]", + ], + [ + "another ON mydb.filtered_table2", + "another", + "mydb", + "filtered_table2", + "c019e957-c60b-d54e-cc52-7c90dac5fb01", + "users.xml", + "1", + 0, + 0, + "['another']", + "[]", + ], + [ + "another ON mydb.filtered_table3", + "another", + "mydb", + "filtered_table3", + "4cb080d0-44e8-dbef-6026-346655143628", + "users.xml", + "1", + 0, + 0, + "['another']", + "[]", + ], + [ + "another ON mydb.local", + "another", + "mydb", + "local", + "5b23c389-7e18-06bf-a6bc-dd1afbbc0a97", + "users.xml", + "a = 1", + 0, + 0, + "['another']", + "[]", + ], + [ + "default ON mydb.filtered_table1", + "default", + "mydb", + "filtered_table1", + "9e8a8f62-4965-2b5e-8599-57c7b99b3549", + "users.xml", + "a = 1", + 0, + 0, + "['default']", + "[]", + ], + [ + "default ON mydb.filtered_table2", + "default", + "mydb", + "filtered_table2", + "cffae79d-b9bf-a2ef-b798-019c18470b25", + "users.xml", + "a + b < 1 or c - d > 5", + 0, + 0, + "['default']", + "[]", + ], + [ + "default ON 
mydb.filtered_table3", + "default", + "mydb", + "filtered_table3", + "12fc5cef-e3da-3940-ec79-d8be3911f42b", + "users.xml", + "c = 1", + 0, + 0, + "['default']", + "[]", + ], + [ + "default ON mydb.local", + "default", + "mydb", + "local", + "cdacaeb5-1d97-f99d-2bb0-4574f290629c", + "users.xml", + "1", + 0, + 0, + "['default']", + "[]", + ], ] - assert node.query("SELECT * from system.row_policies ORDER BY short_name, database, table") == TSV(policies) + assert node.query( + "SELECT * from system.row_policies ORDER BY short_name, database, table" + ) == TSV(policies) def test_dcl_introspection(): assert node.query("SHOW POLICIES") == TSV( - ["another ON mydb.filtered_table1", "another ON mydb.filtered_table2", "another ON mydb.filtered_table3", - "another ON mydb.local", "default ON mydb.filtered_table1", "default ON mydb.filtered_table2", - "default ON mydb.filtered_table3", "default ON mydb.local"]) + [ + "another ON mydb.filtered_table1", + "another ON mydb.filtered_table2", + "another ON mydb.filtered_table3", + "another ON mydb.local", + "default ON mydb.filtered_table1", + "default ON mydb.filtered_table2", + "default ON mydb.filtered_table3", + "default ON mydb.local", + ] + ) - assert node.query("SHOW POLICIES ON mydb.filtered_table1") == TSV(["another", "default"]) + assert node.query("SHOW POLICIES ON mydb.filtered_table1") == TSV( + ["another", "default"] + ) assert node.query("SHOW POLICIES ON mydb.local") == TSV(["another", "default"]) assert node.query("SHOW POLICIES ON mydb.*") == TSV( - ["another ON mydb.filtered_table1", "another ON mydb.filtered_table2", "another ON mydb.filtered_table3", - "another ON mydb.local", "default ON mydb.filtered_table1", "default ON mydb.filtered_table2", - "default ON mydb.filtered_table3", "default ON mydb.local"]) + [ + "another ON mydb.filtered_table1", + "another ON mydb.filtered_table2", + "another ON mydb.filtered_table3", + "another ON mydb.local", + "default ON mydb.filtered_table1", + "default ON mydb.filtered_table2", + "default ON mydb.filtered_table3", + "default ON mydb.local", + ] + ) assert node.query("SHOW POLICIES default") == TSV( - ["default ON mydb.filtered_table1", "default ON mydb.filtered_table2", "default ON mydb.filtered_table3", - "default ON mydb.local"]) + [ + "default ON mydb.filtered_table1", + "default ON mydb.filtered_table2", + "default ON mydb.filtered_table3", + "default ON mydb.local", + ] + ) - assert node.query( - "SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default\n" - assert node.query( - "SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default\n" - assert node.query( - "SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default\n" - assert node.query( - "SHOW CREATE POLICY default ON mydb.local") == "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default\n" + assert ( + node.query("SHOW CREATE POLICY default ON mydb.filtered_table1") + == "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default\n" + ) + assert ( + node.query("SHOW CREATE POLICY default ON mydb.filtered_table2") + == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default\n" + ) + assert ( + node.query("SHOW CREATE POLICY default ON mydb.filtered_table3") + == 
"CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default\n" + ) + assert ( + node.query("SHOW CREATE POLICY default ON mydb.local") + == "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default\n" + ) assert node.query("SHOW CREATE POLICY default") == TSV( - ["CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default", - "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default", - "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default", - "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default"]) + [ + "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default", + "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default", + "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default", + "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default", + ] + ) assert node.query("SHOW CREATE POLICIES ON mydb.filtered_table1") == TSV( - ["CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another", - "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default"]) + [ + "CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another", + "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default", + ] + ) assert node.query("SHOW CREATE POLICIES ON mydb.*") == TSV( - ["CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another", - "CREATE ROW POLICY another ON mydb.filtered_table2 FOR SELECT USING 1 TO another", - "CREATE ROW POLICY another ON mydb.filtered_table3 FOR SELECT USING 1 TO another", - "CREATE ROW POLICY another ON mydb.local FOR SELECT USING a = 1 TO another", - "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default", - "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default", - "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default", - "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default"]) + [ + "CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another", + "CREATE ROW POLICY another ON mydb.filtered_table2 FOR SELECT USING 1 TO another", + "CREATE ROW POLICY another ON mydb.filtered_table3 FOR SELECT USING 1 TO another", + "CREATE ROW POLICY another ON mydb.local FOR SELECT USING a = 1 TO another", + "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default", + "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default", + "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default", + "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default", + ] + ) assert node.query("SHOW CREATE POLICIES") == TSV( - ["CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another", - "CREATE ROW POLICY another ON mydb.filtered_table2 FOR SELECT USING 1 TO another", - "CREATE ROW POLICY another ON mydb.filtered_table3 FOR SELECT USING 1 TO another", - "CREATE ROW POLICY another ON mydb.local FOR SELECT USING a = 1 TO another", - "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default", - "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default", - 
"CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default", - "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default"]) + [ + "CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another", + "CREATE ROW POLICY another ON mydb.filtered_table2 FOR SELECT USING 1 TO another", + "CREATE ROW POLICY another ON mydb.filtered_table3 FOR SELECT USING 1 TO another", + "CREATE ROW POLICY another ON mydb.local FOR SELECT USING a = 1 TO another", + "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default", + "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default", + "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default", + "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default", + ] + ) - expected_access = "CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another\n" \ - "CREATE ROW POLICY another ON mydb.filtered_table2 FOR SELECT USING 1 TO another\n" \ - "CREATE ROW POLICY another ON mydb.filtered_table3 FOR SELECT USING 1 TO another\n" \ - "CREATE ROW POLICY another ON mydb.local FOR SELECT USING a = 1 TO another\n" \ - "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default\n" \ - "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default\n" \ - "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default\n" \ - "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default\n" + expected_access = ( + "CREATE ROW POLICY another ON mydb.filtered_table1 FOR SELECT USING 1 TO another\n" + "CREATE ROW POLICY another ON mydb.filtered_table2 FOR SELECT USING 1 TO another\n" + "CREATE ROW POLICY another ON mydb.filtered_table3 FOR SELECT USING 1 TO another\n" + "CREATE ROW POLICY another ON mydb.local FOR SELECT USING a = 1 TO another\n" + "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING a = 1 TO default\n" + "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING ((a + b) < 1) OR ((c - d) > 5) TO default\n" + "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 1 TO default\n" + "CREATE ROW POLICY default ON mydb.local FOR SELECT USING 1 TO default\n" + ) assert expected_access in node.query("SHOW ACCESS") - copy_policy_xml('all_rows.xml') + copy_policy_xml("all_rows.xml") assert node.query("SHOW POLICIES") == TSV( - ["another ON mydb.filtered_table1", "another ON mydb.filtered_table2", "another ON mydb.filtered_table3", - "default ON mydb.filtered_table1", "default ON mydb.filtered_table2", "default ON mydb.filtered_table3"]) - assert node.query( - "SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING 1 TO default\n" - assert node.query( - "SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING 1 TO default\n" - assert node.query( - "SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING 1 TO default\n" + [ + "another ON mydb.filtered_table1", + "another ON mydb.filtered_table2", + "another ON mydb.filtered_table3", + "default ON mydb.filtered_table1", + "default ON mydb.filtered_table2", + "default ON mydb.filtered_table3", + ] + ) + assert ( + node.query("SHOW CREATE POLICY default ON mydb.filtered_table1") + == "CREATE ROW 
POLICY default ON mydb.filtered_table1 FOR SELECT USING 1 TO default\n" + ) + assert ( + node.query("SHOW CREATE POLICY default ON mydb.filtered_table2") + == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING 1 TO default\n" + ) + assert ( + node.query("SHOW CREATE POLICY default ON mydb.filtered_table3") + == "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING 1 TO default\n" + ) - copy_policy_xml('no_rows.xml') + copy_policy_xml("no_rows.xml") assert node.query("SHOW POLICIES") == TSV( - ["another ON mydb.filtered_table1", "another ON mydb.filtered_table2", "another ON mydb.filtered_table3", - "default ON mydb.filtered_table1", "default ON mydb.filtered_table2", "default ON mydb.filtered_table3"]) - assert node.query( - "SHOW CREATE POLICY default ON mydb.filtered_table1") == "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING NULL TO default\n" - assert node.query( - "SHOW CREATE POLICY default ON mydb.filtered_table2") == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING NULL TO default\n" - assert node.query( - "SHOW CREATE POLICY default ON mydb.filtered_table3") == "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING NULL TO default\n" + [ + "another ON mydb.filtered_table1", + "another ON mydb.filtered_table2", + "another ON mydb.filtered_table3", + "default ON mydb.filtered_table1", + "default ON mydb.filtered_table2", + "default ON mydb.filtered_table3", + ] + ) + assert ( + node.query("SHOW CREATE POLICY default ON mydb.filtered_table1") + == "CREATE ROW POLICY default ON mydb.filtered_table1 FOR SELECT USING NULL TO default\n" + ) + assert ( + node.query("SHOW CREATE POLICY default ON mydb.filtered_table2") + == "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING NULL TO default\n" + ) + assert ( + node.query("SHOW CREATE POLICY default ON mydb.filtered_table3") + == "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING NULL TO default\n" + ) - copy_policy_xml('no_filters.xml') + copy_policy_xml("no_filters.xml") assert node.query("SHOW POLICIES") == "" def test_dcl_management(): - copy_policy_xml('no_filters.xml') + copy_policy_xml("no_filters.xml") assert node.query("SHOW POLICIES") == "" node.query("CREATE POLICY pA ON mydb.filtered_table1 FOR SELECT USING a b TO default\n" + assert ( + node.query("SHOW CREATE POLICY pB ON mydb.filtered_table1") + == "CREATE ROW POLICY pB ON mydb.filtered_table1 FOR SELECT USING a > b TO default\n" + ) node.query("DROP POLICY pB ON mydb.filtered_table1") - assert node.query("SELECT * FROM mydb.filtered_table1") == TSV([[0, 0], [0, 1], [1, 0], [1, 1]]) + assert node.query("SELECT * FROM mydb.filtered_table1") == TSV( + [[0, 0], [0, 1], [1, 0], [1, 1]] + ) assert node.query("SHOW POLICIES") == "" def test_grant_create_row_policy(): - copy_policy_xml('no_filters.xml') + copy_policy_xml("no_filters.xml") assert node.query("SHOW POLICIES") == "" node.query("CREATE USER X") expected_error = "necessary to have grant CREATE ROW POLICY ON mydb.filtered_table1" - assert expected_error in node.query_and_get_error("CREATE POLICY pA ON mydb.filtered_table1 FOR SELECT USING a (d + 5) TO default", - "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 0 TO default", - "CREATE ROW POLICY default ON mydb.table FOR SELECT USING a = 0 TO default"]) + [ + "CREATE ROW POLICY default ON mydb.`.filtered_table4` FOR SELECT USING c = 2 TO default", + "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING c > (d + 5) TO 
default", + "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 0 TO default", + "CREATE ROW POLICY default ON mydb.table FOR SELECT USING a = 0 TO default", + ] + ) def test_miscellaneous_engines(): - node.query("CREATE ROW POLICY OR REPLACE pC ON mydb.other_table FOR SELECT USING a = 1 TO default") + node.query( + "CREATE ROW POLICY OR REPLACE pC ON mydb.other_table FOR SELECT USING a = 1 TO default" + ) assert node.query("SHOW ROW POLICIES ON mydb.other_table") == "pC\n" # ReplicatedMergeTree node.query("DROP TABLE IF EXISTS mydb.other_table") - node.query("CREATE TABLE mydb.other_table (a UInt8, b UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/00-00/filtered_table1', 'replica1') ORDER BY a") + node.query( + "CREATE TABLE mydb.other_table (a UInt8, b UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/00-00/filtered_table1', 'replica1') ORDER BY a" + ) node.query("INSERT INTO mydb.other_table values (0, 0), (0, 1), (1, 0), (1, 1)") assert node.query("SELECT * FROM mydb.other_table") == TSV([[1, 0], [1, 1]]) # CollapsingMergeTree node.query("DROP TABLE mydb.other_table") - node.query("CREATE TABLE mydb.other_table (a UInt8, b Int8) ENGINE CollapsingMergeTree(b) ORDER BY a") + node.query( + "CREATE TABLE mydb.other_table (a UInt8, b Int8) ENGINE CollapsingMergeTree(b) ORDER BY a" + ) node.query("INSERT INTO mydb.other_table values (0, 1), (0, 1), (1, 1), (1, 1)") assert node.query("SELECT * FROM mydb.other_table") == TSV([[1, 1], [1, 1]]) # ReplicatedCollapsingMergeTree node.query("DROP TABLE mydb.other_table") - node.query("CREATE TABLE mydb.other_table (a UInt8, b Int8) ENGINE ReplicatedCollapsingMergeTree('/clickhouse/tables/00-01/filtered_table1', 'replica1', b) ORDER BY a") + node.query( + "CREATE TABLE mydb.other_table (a UInt8, b Int8) ENGINE ReplicatedCollapsingMergeTree('/clickhouse/tables/00-01/filtered_table1', 'replica1', b) ORDER BY a" + ) node.query("INSERT INTO mydb.other_table values (0, 1), (0, 1), (1, 1), (1, 1)") assert node.query("SELECT * FROM mydb.other_table") == TSV([[1, 1], [1, 1]]) @@ -480,17 +839,27 @@ def test_miscellaneous_engines(): # DistributedMergeTree node.query("DROP TABLE IF EXISTS mydb.other_table") - node.query("CREATE TABLE mydb.other_table (a UInt8, b UInt8) ENGINE Distributed('test_local_cluster', mydb, local)") - assert node.query("SELECT * FROM mydb.other_table", user="another") == TSV([[1, 0], [1, 1], [1, 0], [1, 1]]) - assert node.query("SELECT sum(a), b FROM mydb.other_table GROUP BY b ORDER BY b", user="another") == TSV([[2, 0], [2, 1]]) + node.query( + "CREATE TABLE mydb.other_table (a UInt8, b UInt8) ENGINE Distributed('test_local_cluster', mydb, local)" + ) + assert node.query("SELECT * FROM mydb.other_table", user="another") == TSV( + [[1, 0], [1, 1], [1, 0], [1, 1]] + ) + assert node.query( + "SELECT sum(a), b FROM mydb.other_table GROUP BY b ORDER BY b", user="another" + ) == TSV([[2, 0], [2, 1]]) def test_policy_on_distributed_table_via_role(): node.query("DROP TABLE IF EXISTS local_tbl") node.query("DROP TABLE IF EXISTS dist_tbl") - node.query("CREATE TABLE local_tbl engine=MergeTree ORDER BY tuple() as select * FROM numbers(10)") - node.query("CREATE TABLE dist_tbl ENGINE=Distributed( 'test_cluster_two_shards_localhost', default, local_tbl) AS local_tbl") + node.query( + "CREATE TABLE local_tbl engine=MergeTree ORDER BY tuple() as select * FROM numbers(10)" + ) + node.query( + "CREATE TABLE dist_tbl ENGINE=Distributed( 'test_cluster_two_shards_localhost', default, local_tbl) AS local_tbl" + ) 
node.query("CREATE ROLE OR REPLACE 'role1'") node.query("CREATE USER OR REPLACE 'user1' DEFAULT ROLE 'role1'") @@ -498,8 +867,16 @@ def test_policy_on_distributed_table_via_role(): node.query("GRANT SELECT ON dist_tbl TO 'role1'") node.query("GRANT SELECT ON local_tbl TO 'role1'") - node.query("CREATE ROW POLICY OR REPLACE 'all_data' ON dist_tbl, local_tbl USING 1 TO ALL EXCEPT 'role1'") - node.query("CREATE ROW POLICY OR REPLACE 'role1_data' ON dist_tbl, local_tbl USING number % 2 = 0 TO 'role1'") + node.query( + "CREATE ROW POLICY OR REPLACE 'all_data' ON dist_tbl, local_tbl USING 1 TO ALL EXCEPT 'role1'" + ) + node.query( + "CREATE ROW POLICY OR REPLACE 'role1_data' ON dist_tbl, local_tbl USING number % 2 = 0 TO 'role1'" + ) - assert node.query("SELECT * FROM local_tbl SETTINGS prefer_localhost_replica=0", user="user1") == TSV([[0], [2], [4], [6], [8]]) - assert node.query("SELECT * FROM dist_tbl SETTINGS prefer_localhost_replica=0", user="user1") == TSV([[0], [2], [4], [6], [8], [0], [2], [4], [6], [8]]) + assert node.query( + "SELECT * FROM local_tbl SETTINGS prefer_localhost_replica=0", user="user1" + ) == TSV([[0], [2], [4], [6], [8]]) + assert node.query( + "SELECT * FROM dist_tbl SETTINGS prefer_localhost_replica=0", user="user1" + ) == TSV([[0], [2], [4], [6], [8], [0], [2], [4], [6], [8]]) diff --git a/tests/integration/test_s3_cluster/test.py b/tests/integration/test_s3_cluster/test.py index f60e6e6862f..561d3e3ed28 100644 --- a/tests/integration/test_s3_cluster/test.py +++ b/tests/integration/test_s3_cluster/test.py @@ -9,12 +9,22 @@ logging.getLogger().setLevel(logging.INFO) logging.getLogger().addHandler(logging.StreamHandler()) SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -S3_DATA = ['data/clickhouse/part1.csv', 'data/clickhouse/part123.csv', 'data/database/part2.csv', 'data/database/partition675.csv'] +S3_DATA = [ + "data/clickhouse/part1.csv", + "data/clickhouse/part123.csv", + "data/database/part2.csv", + "data/database/partition675.csv", +] + def create_buckets_s3(cluster): minio = cluster.minio_client for file in S3_DATA: - minio.fput_object(bucket_name=cluster.minio_bucket, object_name=file, file_path=os.path.join(SCRIPT_DIR, file)) + minio.fput_object( + bucket_name=cluster.minio_bucket, + object_name=file, + file_path=os.path.join(SCRIPT_DIR, file), + ) for obj in minio.list_objects(cluster.minio_bucket, recursive=True): print(obj.object_name) @@ -23,10 +33,12 @@ def create_buckets_s3(cluster): def started_cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance('s0_0_0', main_configs=["configs/cluster.xml"], with_minio=True) - cluster.add_instance('s0_0_1', main_configs=["configs/cluster.xml"]) - cluster.add_instance('s0_1_0', main_configs=["configs/cluster.xml"]) - + cluster.add_instance( + "s0_0_0", main_configs=["configs/cluster.xml"], with_minio=True + ) + cluster.add_instance("s0_0_1", main_configs=["configs/cluster.xml"]) + cluster.add_instance("s0_1_0", main_configs=["configs/cluster.xml"]) + logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -39,45 +51,54 @@ def started_cluster(): def test_select_all(started_cluster): - node = started_cluster.instances['s0_0_0'] - pure_s3 = node.query(""" + node = started_cluster.instances["s0_0_0"] + pure_s3 = node.query( + """ SELECT * from s3( 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') - ORDER BY (name, value, polygon)""") + ORDER BY 
(name, value, polygon)""" + ) # print(pure_s3) - s3_distibuted = node.query(""" + s3_distibuted = node.query( + """ SELECT * from s3Cluster( 'cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', - 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon)""") + 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon)""" + ) # print(s3_distibuted) assert TSV(pure_s3) == TSV(s3_distibuted) def test_count(started_cluster): - node = started_cluster.instances['s0_0_0'] - pure_s3 = node.query(""" + node = started_cluster.instances["s0_0_0"] + pure_s3 = node.query( + """ SELECT count(*) from s3( 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', - 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')""") + 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')""" + ) # print(pure_s3) - s3_distibuted = node.query(""" + s3_distibuted = node.query( + """ SELECT count(*) from s3Cluster( 'cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*', 'minio', 'minio123', 'CSV', - 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')""") + 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')""" + ) # print(s3_distibuted) assert TSV(pure_s3) == TSV(s3_distibuted) def test_union_all(started_cluster): - node = started_cluster.instances['s0_0_0'] - pure_s3 = node.query(""" + node = started_cluster.instances["s0_0_0"] + pure_s3 = node.query( + """ SELECT * FROM ( SELECT * from s3( @@ -91,9 +112,11 @@ def test_union_all(started_cluster): 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ) ORDER BY (name, value, polygon) - """) + """ + ) # print(pure_s3) - s3_distibuted = node.query(""" + s3_distibuted = node.query( + """ SELECT * FROM ( SELECT * from s3Cluster( @@ -107,15 +130,17 @@ def test_union_all(started_cluster): 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ) ORDER BY (name, value, polygon) - """) + """ + ) # print(s3_distibuted) assert TSV(pure_s3) == TSV(s3_distibuted) def test_wrong_cluster(started_cluster): - node = started_cluster.instances['s0_0_0'] - error = node.query_and_get_error(""" + node = started_cluster.instances["s0_0_0"] + error = node.query_and_get_error( + """ SELECT count(*) from s3Cluster( 'non_existent_cluster', 'http://minio1:9001/root/data/{clickhouse,database}/*', @@ -124,6 +149,7 @@ def test_wrong_cluster(started_cluster): SELECT count(*) from s3Cluster( 'non_existent_cluster', 'http://minio1:9001/root/data/{clickhouse,database}/*', - 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')""") - - assert "not found" in error \ No newline at end of file + 'minio', 'minio123', 'CSV', 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')""" + ) + + assert "not found" in error diff --git a/tests/integration/test_s3_low_cardinality_right_border/test.py b/tests/integration/test_s3_low_cardinality_right_border/test.py index 056c3e4430f..babe25fa899 100644 --- a/tests/integration/test_s3_low_cardinality_right_border/test.py +++ b/tests/integration/test_s3_low_cardinality_right_border/test.py @@ -65,6 +65,7 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance("node1", main_configs=["configs/s3.xml"], with_minio=True) + 
@pytest.fixture(scope="module") def started_cluster(): try: @@ -76,7 +77,8 @@ def started_cluster(): def test_s3_right_border(started_cluster): - node1.query(""" + node1.query( + """ CREATE TABLE s3_low_cardinality ( str_column LowCardinality(String) @@ -84,12 +86,17 @@ CREATE TABLE s3_low_cardinality ENGINE = MergeTree() ORDER BY tuple() SETTINGS storage_policy = 's3', min_bytes_for_wide_part = 0, index_granularity = 1024; - """) + """ + ) node1.query("INSERT INTO s3_low_cardinality SELECT 'aaaaaa' FROM numbers(600000)") - node1.query("INSERT INTO s3_low_cardinality SELECT toString(number) FROM numbers(100000)") + node1.query( + "INSERT INTO s3_low_cardinality SELECT toString(number) FROM numbers(100000)" + ) node1.query("INSERT INTO s3_low_cardinality SELECT 'bbbbbb' FROM numbers(500000)") - node1.query("INSERT INTO s3_low_cardinality SELECT toString(number + 100000000) FROM numbers(100000)") + node1.query( + "INSERT INTO s3_low_cardinality SELECT toString(number + 100000000) FROM numbers(100000)" + ) node1.query("OPTIMIZE TABLE s3_low_cardinality FINAL") @@ -98,4 +105,10 @@ SETTINGS storage_policy = 's3', min_bytes_for_wide_part = 0, index_granularity "merge_tree_min_rows_for_concurrent_read": "0", "max_threads": "2", } - assert node1.query("SELECT COUNT() FROM s3_low_cardinality WHERE not ignore(str_column)", settings=settings) == "1300000\n" + assert ( + node1.query( + "SELECT COUNT() FROM s3_low_cardinality WHERE not ignore(str_column)", + settings=settings, + ) + == "1300000\n" + ) diff --git a/tests/integration/test_s3_with_https/test.py b/tests/integration/test_s3_with_https/test.py index 4fa8260ed2e..46e281251a0 100644 --- a/tests/integration/test_s3_with_https/test.py +++ b/tests/integration/test_s3_with_https/test.py @@ -15,8 +15,15 @@ def check_proxy_logs(cluster, proxy_instance): def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", main_configs=["configs/config.d/storage_conf.xml", "configs/config.d/ssl.xml"], - with_minio=True, minio_certs_dir="minio_certs") + cluster.add_instance( + "node", + main_configs=[ + "configs/config.d/storage_conf.xml", + "configs/config.d/ssl.xml", + ], + with_minio=True, + minio_certs_dir="minio_certs", + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -26,9 +33,7 @@ def cluster(): cluster.shutdown() -@pytest.mark.parametrize( - "policy", ["s3_secure", "s3_secure_with_proxy"] -) +@pytest.mark.parametrize("policy", ["s3_secure", "s3_secure_with_proxy"]) def test_s3_with_https(cluster, policy): node = cluster.instances["node"] @@ -40,12 +45,16 @@ def test_s3_with_https(cluster, policy): ) ENGINE=MergeTree() ORDER BY id SETTINGS storage_policy='{}' - """ - .format(policy) + """.format( + policy + ) ) node.query("INSERT INTO s3_test VALUES (0,'data'),(1,'data')") - assert node.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')" + assert ( + node.query("SELECT * FROM s3_test order by id FORMAT Values") + == "(0,'data'),(1,'data')" + ) node.query("DROP TABLE IF EXISTS s3_test NO DELAY") diff --git a/tests/integration/test_s3_with_proxy/proxy-resolver/resolver.py b/tests/integration/test_s3_with_proxy/proxy-resolver/resolver.py index 87fe4ce30f6..eaea4c1dab2 100644 --- a/tests/integration/test_s3_with_proxy/proxy-resolver/resolver.py +++ b/tests/integration/test_s3_with_proxy/proxy-resolver/resolver.py @@ -3,12 +3,12 @@ import random import bottle -@bottle.route('/hostname') +@bottle.route("/hostname") def index(): if random.randrange(2) == 0: - 
return 'proxy1' + return "proxy1" else: - return 'proxy2' + return "proxy2" -bottle.run(host='0.0.0.0', port=8080) +bottle.run(host="0.0.0.0", port=8080) diff --git a/tests/integration/test_s3_with_proxy/test.py b/tests/integration/test_s3_with_proxy/test.py index 33ad981d18d..1102d190a87 100644 --- a/tests/integration/test_s3_with_proxy/test.py +++ b/tests/integration/test_s3_with_proxy/test.py @@ -7,10 +7,13 @@ from helpers.cluster import ClickHouseCluster # Runs simple proxy resolver in python env container. def run_resolver(cluster): - container_id = cluster.get_container_id('resolver') + container_id = cluster.get_container_id("resolver") current_dir = os.path.dirname(__file__) - cluster.copy_file_to_container(container_id, os.path.join(current_dir, "proxy-resolver", "resolver.py"), - "resolver.py") + cluster.copy_file_to_container( + container_id, + os.path.join(current_dir, "proxy-resolver", "resolver.py"), + "resolver.py", + ) cluster.exec_in_container(container_id, ["python", "resolver.py"], detach=True) @@ -18,9 +21,9 @@ def run_resolver(cluster): def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node", - main_configs=["configs/config.d/storage_conf.xml"], - with_minio=True) + cluster.add_instance( + "node", main_configs=["configs/config.d/storage_conf.xml"], with_minio=True + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -45,9 +48,7 @@ def check_proxy_logs(cluster, proxy_instance, http_methods={"POST", "PUT", "GET" assert False, "http method not found in logs" -@pytest.mark.parametrize( - "policy", ["s3", "s3_with_resolver"] -) +@pytest.mark.parametrize("policy", ["s3", "s3_with_resolver"]) def test_s3_with_proxy_list(cluster, policy): node = cluster.instances["node"] @@ -59,12 +60,16 @@ def test_s3_with_proxy_list(cluster, policy): ) ENGINE=MergeTree() ORDER BY id SETTINGS storage_policy='{}' - """ - .format(policy) + """.format( + policy + ) ) node.query("INSERT INTO s3_test VALUES (0,'data'),(1,'data')") - assert node.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')" + assert ( + node.query("SELECT * FROM s3_test order by id FORMAT Values") + == "(0,'data'),(1,'data')" + ) node.query("DROP TABLE IF EXISTS s3_test NO DELAY") diff --git a/tests/integration/test_s3_zero_copy_replication/test.py b/tests/integration/test_s3_zero_copy_replication/test.py index c56d98559f8..1ce1047ebec 100644 --- a/tests/integration/test_s3_zero_copy_replication/test.py +++ b/tests/integration/test_s3_zero_copy_replication/test.py @@ -13,12 +13,20 @@ logging.getLogger().addHandler(logging.StreamHandler()) def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node1", main_configs=["configs/config.d/s3.xml"], macros={'replica': '1'}, - with_minio=True, - with_zookeeper=True) - cluster.add_instance("node2", main_configs=["configs/config.d/s3.xml"], macros={'replica': '2'}, - with_minio=True, - with_zookeeper=True) + cluster.add_instance( + "node1", + main_configs=["configs/config.d/s3.xml"], + macros={"replica": "1"}, + with_minio=True, + with_zookeeper=True, + ) + cluster.add_instance( + "node2", + main_configs=["configs/config.d/s3.xml"], + macros={"replica": "2"}, + with_minio=True, + with_zookeeper=True, + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -28,28 +36,28 @@ def cluster(): cluster.shutdown() -def get_large_objects_count(cluster, size=100, folder='data'): +def get_large_objects_count(cluster, size=100, 
folder="data"): minio = cluster.minio_client counter = 0 - for obj in minio.list_objects(cluster.minio_bucket, '{}/'.format(folder)): + for obj in minio.list_objects(cluster.minio_bucket, "{}/".format(folder)): if obj.size is not None and obj.size >= size: counter = counter + 1 return counter -def check_objects_exisis(cluster, object_list, folder='data'): +def check_objects_exisis(cluster, object_list, folder="data"): minio = cluster.minio_client for obj in object_list: if obj: - minio.stat_object(cluster.minio_bucket, '{}/{}'.format(folder, obj)) + minio.stat_object(cluster.minio_bucket, "{}/{}".format(folder, obj)) -def check_objects_not_exisis(cluster, object_list, folder='data'): +def check_objects_not_exisis(cluster, object_list, folder="data"): minio = cluster.minio_client for obj in object_list: if obj: try: - minio.stat_object(cluster.minio_bucket, '{}/{}'.format(folder, obj)) + minio.stat_object(cluster.minio_bucket, "{}/{}".format(folder, obj)) except Exception as error: assert "NoSuchKey" in str(error) else: @@ -69,7 +77,11 @@ def wait_for_active_parts(node, num_expected_parts, table_name, timeout=30): deadline = time.monotonic() + timeout num_parts = 0 while time.monotonic() < deadline: - num_parts_str = node.query("select count() from system.parts where table = '{}' and active".format(table_name)) + num_parts_str = node.query( + "select count() from system.parts where table = '{}' and active".format( + table_name + ) + ) num_parts = int(num_parts_str.strip()) if num_parts == num_expected_parts: return @@ -81,9 +93,7 @@ def wait_for_active_parts(node, num_expected_parts, table_name, timeout=30): # Result of `get_large_objects_count` can be changed in other tests, so run this case at the beginning @pytest.mark.order(0) -@pytest.mark.parametrize( - "policy", ["s3"] -) +@pytest.mark.parametrize("policy", ["s3"]) def test_s3_zero_copy_replication(cluster, policy): node1 = cluster.instances["node1"] node2 = cluster.instances["node2"] @@ -94,14 +104,21 @@ def test_s3_zero_copy_replication(cluster, policy): ENGINE=ReplicatedMergeTree('/clickhouse/tables/s3_test', '{}') ORDER BY id SETTINGS storage_policy='{}' - """ - .format('{replica}', policy) + """.format( + "{replica}", policy + ) ) node1.query("INSERT INTO s3_test VALUES (0,'data'),(1,'data')") node2.query("SYSTEM SYNC REPLICA s3_test") - assert node1.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')" - assert node2.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')" + assert ( + node1.query("SELECT * FROM s3_test order by id FORMAT Values") + == "(0,'data'),(1,'data')" + ) + assert ( + node2.query("SELECT * FROM s3_test order by id FORMAT Values") + == "(0,'data'),(1,'data')" + ) # Based on version 21.x - should be only 1 file with size 100+ (checksums.txt), used by both nodes assert get_large_objects_count(cluster) == 1 @@ -109,8 +126,14 @@ def test_s3_zero_copy_replication(cluster, policy): node2.query("INSERT INTO s3_test VALUES (2,'data'),(3,'data')") node1.query("SYSTEM SYNC REPLICA s3_test") - assert node2.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data'),(2,'data'),(3,'data')" - assert node1.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data'),(2,'data'),(3,'data')" + assert ( + node2.query("SELECT * FROM s3_test order by id FORMAT Values") + == "(0,'data'),(1,'data'),(2,'data'),(3,'data')" + ) + assert ( + node1.query("SELECT * FROM s3_test order by id FORMAT Values") + == 
"(0,'data'),(1,'data'),(2,'data'),(3,'data')" + ) # Based on version 21.x - two parts wait_for_large_objects_count(cluster, 2) @@ -137,44 +160,90 @@ def test_s3_zero_copy_on_hybrid_storage(cluster): ENGINE=ReplicatedMergeTree('/clickhouse/tables/hybrid_test', '{}') ORDER BY id SETTINGS storage_policy='hybrid' - """ - .format('{replica}') + """.format( + "{replica}" + ) ) node1.query("INSERT INTO hybrid_test VALUES (0,'data'),(1,'data')") node2.query("SYSTEM SYNC REPLICA hybrid_test") - assert node1.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')" - assert node2.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')" + assert ( + node1.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") + == "(0,'data'),(1,'data')" + ) + assert ( + node2.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") + == "(0,'data'),(1,'data')" + ) - assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','default')" - assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','default')" + assert ( + node1.query( + "SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values" + ) + == "('all','default')" + ) + assert ( + node2.query( + "SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values" + ) + == "('all','default')" + ) node1.query("ALTER TABLE hybrid_test MOVE PARTITION ID 'all' TO DISK 's31'") - assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','s31')" - assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','default')" + assert ( + node1.query( + "SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values" + ) + == "('all','s31')" + ) + assert ( + node2.query( + "SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values" + ) + == "('all','default')" + ) # Total objects in S3 s3_objects = get_large_objects_count(cluster, size=0) node2.query("ALTER TABLE hybrid_test MOVE PARTITION ID 'all' TO DISK 's31'") - assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','s31')" - assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','s31')" + assert ( + node1.query( + "SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values" + ) + == "('all','s31')" + ) + assert ( + node2.query( + "SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values" + ) + == "('all','s31')" + ) # Check that after moving partition on node2 no new obects on s3 wait_for_large_objects_count(cluster, s3_objects, size=0) - assert node1.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')" - assert node2.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')" + assert ( + node1.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") + == "(0,'data'),(1,'data')" + ) + assert ( + node2.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") + == "(0,'data'),(1,'data')" + ) node1.query("DROP TABLE IF EXISTS hybrid_test NO DELAY") node2.query("DROP TABLE IF EXISTS hybrid_test NO DELAY") def insert_data_time(node, table, 
number_of_mb, time, start=0): - values = ','.join(f"({x},{time})" for x in range(start, int((1024 * 1024 * number_of_mb) / 8) + start + 1)) + values = ",".join( + f"({x},{time})" + for x in range(start, int((1024 * 1024 * number_of_mb) / 8) + start + 1) + ) node.query(f"INSERT INTO {table} VALUES {values}") @@ -182,9 +251,9 @@ def insert_large_data(node, table): tm = time.mktime((datetime.date.today() - datetime.timedelta(days=7)).timetuple()) insert_data_time(node, table, 1, tm, 0) tm = time.mktime((datetime.date.today() - datetime.timedelta(days=3)).timetuple()) - insert_data_time(node, table, 1, tm, 1024*1024) + insert_data_time(node, table, 1, tm, 1024 * 1024) tm = time.mktime(datetime.date.today().timetuple()) - insert_data_time(node, table, 10, tm, 1024*1024*2) + insert_data_time(node, table, 10, tm, 1024 * 1024 * 2) @pytest.mark.parametrize( @@ -194,7 +263,7 @@ def insert_large_data(node, table): ("tiered_copy", False, 10), ("tiered", True, 3), ("tiered_copy", True, 3), - ] + ], ) def test_s3_zero_copy_with_ttl_move(cluster, storage_policy, large_data, iterations): node1 = cluster.instances["node1"] @@ -211,12 +280,13 @@ def test_s3_zero_copy_with_ttl_move(cluster, storage_policy, large_data, iterati ORDER BY d TTL d1 + INTERVAL 2 DAY TO VOLUME 'external' SETTINGS storage_policy='{}' - """ - .format('{replica}', storage_policy) + """.format( + "{replica}", storage_policy + ) ) if large_data: - insert_large_data(node1, 'ttl_move_test') + insert_large_data(node1, "ttl_move_test") else: node1.query("INSERT INTO ttl_move_test VALUES (10, now() - INTERVAL 3 DAY)") node1.query("INSERT INTO ttl_move_test VALUES (11, now() - INTERVAL 1 DAY)") @@ -225,13 +295,29 @@ def test_s3_zero_copy_with_ttl_move(cluster, storage_policy, large_data, iterati node2.query("SYSTEM SYNC REPLICA ttl_move_test") if large_data: - assert node1.query("SELECT count() FROM ttl_move_test FORMAT Values") == "(1572867)" - assert node2.query("SELECT count() FROM ttl_move_test FORMAT Values") == "(1572867)" + assert ( + node1.query("SELECT count() FROM ttl_move_test FORMAT Values") + == "(1572867)" + ) + assert ( + node2.query("SELECT count() FROM ttl_move_test FORMAT Values") + == "(1572867)" + ) else: - assert node1.query("SELECT count() FROM ttl_move_test FORMAT Values") == "(2)" - assert node2.query("SELECT count() FROM ttl_move_test FORMAT Values") == "(2)" - assert node1.query("SELECT d FROM ttl_move_test ORDER BY d FORMAT Values") == "(10),(11)" - assert node2.query("SELECT d FROM ttl_move_test ORDER BY d FORMAT Values") == "(10),(11)" + assert ( + node1.query("SELECT count() FROM ttl_move_test FORMAT Values") == "(2)" + ) + assert ( + node2.query("SELECT count() FROM ttl_move_test FORMAT Values") == "(2)" + ) + assert ( + node1.query("SELECT d FROM ttl_move_test ORDER BY d FORMAT Values") + == "(10),(11)" + ) + assert ( + node2.query("SELECT d FROM ttl_move_test ORDER BY d FORMAT Values") + == "(10),(11)" + ) node1.query("DROP TABLE IF EXISTS ttl_move_test NO DELAY") node2.query("DROP TABLE IF EXISTS ttl_move_test NO DELAY") @@ -242,7 +328,7 @@ def test_s3_zero_copy_with_ttl_move(cluster, storage_policy, large_data, iterati [ (False, 10), (True, 3), - ] + ], ) def test_s3_zero_copy_with_ttl_delete(cluster, large_data, iterations): node1 = cluster.instances["node1"] @@ -259,27 +345,52 @@ def test_s3_zero_copy_with_ttl_delete(cluster, large_data, iterations): ORDER BY d TTL d1 + INTERVAL 2 DAY SETTINGS storage_policy='tiered' - """ - .format('{replica}') + """.format( + "{replica}" + ) ) if large_data: - 
insert_large_data(node1, 'ttl_delete_test') + insert_large_data(node1, "ttl_delete_test") else: - node1.query("INSERT INTO ttl_delete_test VALUES (10, now() - INTERVAL 3 DAY)") - node1.query("INSERT INTO ttl_delete_test VALUES (11, now() - INTERVAL 1 DAY)") + node1.query( + "INSERT INTO ttl_delete_test VALUES (10, now() - INTERVAL 3 DAY)" + ) + node1.query( + "INSERT INTO ttl_delete_test VALUES (11, now() - INTERVAL 1 DAY)" + ) node1.query("OPTIMIZE TABLE ttl_delete_test FINAL") + + node1.query("SYSTEM SYNC REPLICA ttl_delete_test") node2.query("SYSTEM SYNC REPLICA ttl_delete_test") if large_data: - assert node1.query("SELECT count() FROM ttl_delete_test FORMAT Values") == "(1310721)" - assert node2.query("SELECT count() FROM ttl_delete_test FORMAT Values") == "(1310721)" + assert ( + node1.query("SELECT count() FROM ttl_delete_test FORMAT Values") + == "(1310721)" + ) + assert ( + node2.query("SELECT count() FROM ttl_delete_test FORMAT Values") + == "(1310721)" + ) else: - assert node1.query("SELECT count() FROM ttl_delete_test FORMAT Values") == "(1)" - assert node2.query("SELECT count() FROM ttl_delete_test FORMAT Values") == "(1)" - assert node1.query("SELECT d FROM ttl_delete_test ORDER BY d FORMAT Values") == "(11)" - assert node2.query("SELECT d FROM ttl_delete_test ORDER BY d FORMAT Values") == "(11)" + assert ( + node1.query("SELECT count() FROM ttl_delete_test FORMAT Values") + == "(1)" + ) + assert ( + node2.query("SELECT count() FROM ttl_delete_test FORMAT Values") + == "(1)" + ) + assert ( + node1.query("SELECT d FROM ttl_delete_test ORDER BY d FORMAT Values") + == "(11)" + ) + assert ( + node2.query("SELECT d FROM ttl_delete_test ORDER BY d FORMAT Values") + == "(11)" + ) node1.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY") node2.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY") @@ -289,12 +400,16 @@ def wait_mutations(node, table, seconds): time.sleep(1) while seconds > 0: seconds -= 1 - mutations = node.query(f"SELECT count() FROM system.mutations WHERE table='{table}' AND is_done=0") - if mutations == '0\n': + mutations = node.query( + f"SELECT count() FROM system.mutations WHERE table='{table}' AND is_done=0" + ) + if mutations == "0\n": return time.sleep(1) - mutations = node.query(f"SELECT count() FROM system.mutations WHERE table='{table}' AND is_done=0") - assert mutations == '0\n' + mutations = node.query( + f"SELECT count() FROM system.mutations WHERE table='{table}' AND is_done=0" + ) + assert mutations == "0\n" def test_s3_zero_copy_unfreeze(cluster): @@ -310,8 +425,9 @@ def test_s3_zero_copy_unfreeze(cluster): ENGINE=ReplicatedMergeTree('/clickhouse/tables/unfreeze_test', '{}') ORDER BY d SETTINGS storage_policy='s3' - """ - .format('{replica}') + """.format( + "{replica}" + ) ) node1.query("INSERT INTO unfreeze_test VALUES (0)") @@ -366,8 +482,9 @@ def test_s3_zero_copy_drop_detached(cluster): ENGINE=ReplicatedMergeTree('/clickhouse/tables/drop_detached_test', '{}') ORDER BY d PARTITION BY d SETTINGS storage_policy='s3' - """ - .format('{replica}') + """.format( + "{replica}" + ) ) node1.query("INSERT INTO drop_detached_test VALUES (0)") @@ -394,7 +511,10 @@ def test_s3_zero_copy_drop_detached(cluster): check_objects_exisis(cluster, objects1) check_objects_exisis(cluster, objects2) - node2.query("ALTER TABLE drop_detached_test DROP DETACHED PARTITION '1'", settings={"allow_drop_detached": 1}) + node2.query( + "ALTER TABLE drop_detached_test DROP DETACHED PARTITION '1'", + settings={"allow_drop_detached": 1}, + ) node1.query("SYSTEM SYNC REPLICA 
drop_detached_test") wait_mutations(node1, "drop_detached_test", 10) wait_mutations(node2, "drop_detached_test", 10) @@ -402,7 +522,10 @@ def test_s3_zero_copy_drop_detached(cluster): check_objects_exisis(cluster, objects1) check_objects_exisis(cluster, objects2) - node1.query("ALTER TABLE drop_detached_test DROP DETACHED PARTITION '1'", settings={"allow_drop_detached": 1}) + node1.query( + "ALTER TABLE drop_detached_test DROP DETACHED PARTITION '1'", + settings={"allow_drop_detached": 1}, + ) node2.query("SYSTEM SYNC REPLICA drop_detached_test") wait_mutations(node1, "drop_detached_test", 10) wait_mutations(node2, "drop_detached_test", 10) @@ -410,14 +533,20 @@ def test_s3_zero_copy_drop_detached(cluster): check_objects_exisis(cluster, objects1) check_objects_not_exisis(cluster, objects_diff) - node1.query("ALTER TABLE drop_detached_test DROP DETACHED PARTITION '0'", settings={"allow_drop_detached": 1}) + node1.query( + "ALTER TABLE drop_detached_test DROP DETACHED PARTITION '0'", + settings={"allow_drop_detached": 1}, + ) node2.query("SYSTEM SYNC REPLICA drop_detached_test") wait_mutations(node1, "drop_detached_test", 10) wait_mutations(node2, "drop_detached_test", 10) check_objects_exisis(cluster, objects1) - node2.query("ALTER TABLE drop_detached_test DROP DETACHED PARTITION '0'", settings={"allow_drop_detached": 1}) + node2.query( + "ALTER TABLE drop_detached_test DROP DETACHED PARTITION '0'", + settings={"allow_drop_detached": 1}, + ) node1.query("SYSTEM SYNC REPLICA drop_detached_test") wait_mutations(node1, "drop_detached_test", 10) wait_mutations(node2, "drop_detached_test", 10) @@ -434,13 +563,13 @@ def test_s3_zero_copy_concurrent_merge(cluster): for node in (node1, node2): node.query( - """ + """ CREATE TABLE concurrent_merge (id UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/concurrent_merge', '{replica}') ORDER BY id SETTINGS index_granularity=2, storage_policy='s3', remote_fs_execute_merges_on_single_replica_time_threshold=1 """ - ) + ) node1.query("system stop merges") node2.query("system stop merges") @@ -449,7 +578,7 @@ def test_s3_zero_copy_concurrent_merge(cluster): node1.query("insert into concurrent_merge select number from numbers(40)") node1.query("insert into concurrent_merge select number + 1 from numbers(40)") - wait_for_active_parts(node2, 2, 'concurrent_merge') + wait_for_active_parts(node2, 2, "concurrent_merge") # Merge will materialize default column, it should sleep every granule and take 20 * 2 * 0.1 = 4 sec. 
node1.query("alter table concurrent_merge add column x UInt32 default sleep(0.1)") @@ -465,8 +594,8 @@ def test_s3_zero_copy_concurrent_merge(cluster): # For now, it does not happen (every blob has a random name, and we just have a duplicating data) node1.query("optimize table concurrent_merge final") - wait_for_active_parts(node1, 1, 'concurrent_merge') - wait_for_active_parts(node2, 1, 'concurrent_merge') + wait_for_active_parts(node1, 1, "concurrent_merge") + wait_for_active_parts(node2, 1, "concurrent_merge") for node in (node1, node2): - assert node.query('select sum(id) from concurrent_merge').strip() == '1600' + assert node.query("select sum(id) from concurrent_merge").strip() == "1600" diff --git a/tests/integration/test_s3_zero_copy_ttl/test.py b/tests/integration/test_s3_zero_copy_ttl/test.py index 5f63bfbfdff..14b4664fcc1 100644 --- a/tests/integration/test_s3_zero_copy_ttl/test.py +++ b/tests/integration/test_s3_zero_copy_ttl/test.py @@ -5,9 +5,16 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance("node1", main_configs=["configs/s3.xml"], with_minio=True, with_zookeeper=True) -node2 = cluster.add_instance("node2", main_configs=["configs/s3.xml"], with_minio=True, with_zookeeper=True) -node3 = cluster.add_instance("node3", main_configs=["configs/s3.xml"], with_minio=True, with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/s3.xml"], with_minio=True, with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/s3.xml"], with_minio=True, with_zookeeper=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/s3.xml"], with_minio=True, with_zookeeper=True +) + @pytest.fixture(scope="module") def started_cluster(): @@ -18,6 +25,7 @@ def started_cluster(): finally: cluster.shutdown() + def test_ttl_move_and_s3(started_cluster): for i, node in enumerate([node1, node2, node3]): node.query( @@ -28,7 +36,10 @@ def test_ttl_move_and_s3(started_cluster): PARTITION BY id TTL date TO DISK 's3_disk' SETTINGS storage_policy='s3_and_default' - """.format(i)) + """.format( + i + ) + ) node1.query("SYSTEM STOP MOVES s3_test_with_ttl") @@ -40,7 +51,9 @@ def test_ttl_move_and_s3(started_cluster): else: node = node2 - node.query(f"INSERT INTO s3_test_with_ttl SELECT now() + 5, {i}, randomPrintableASCII(1048570)") + node.query( + f"INSERT INTO s3_test_with_ttl SELECT now() + 5, {i}, randomPrintableASCII(1048570)" + ) node1.query("SYSTEM SYNC REPLICA s3_test_with_ttl") node2.query("SYSTEM SYNC REPLICA s3_test_with_ttl") @@ -57,10 +70,14 @@ def test_ttl_move_and_s3(started_cluster): time.sleep(5) - print(node1.query("SELECT * FROM system.parts WHERE table = 's3_test_with_ttl' FORMAT Vertical")) + print( + node1.query( + "SELECT * FROM system.parts WHERE table = 's3_test_with_ttl' FORMAT Vertical" + ) + ) minio = cluster.minio_client - objects = minio.list_objects(cluster.minio_bucket, 'data/', recursive=True) + objects = minio.list_objects(cluster.minio_bucket, "data/", recursive=True) counter = 0 for obj in objects: print("Objectname:", obj.object_name, "metadata:", obj.metadata) diff --git a/tests/integration/test_secure_socket/test.py b/tests/integration/test_secure_socket/test.py index c542b855258..2dffbed03d6 100644 --- a/tests/integration/test_secure_socket/test.py +++ b/tests/integration/test_secure_socket/test.py @@ -7,15 +7,15 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -NODES = {'node' + str(i): None for i 
in (1, 2)} +NODES = {"node" + str(i): None for i in (1, 2)} -config = ''' +config = """ {sleep_in_send_data_ms} -''' +""" @pytest.fixture(scope="module") @@ -29,14 +29,22 @@ def started_cluster(): "configs_secure/config.d/ssl_conf.xml", ] - NODES['node1'] = cluster.add_instance('node1', main_configs=main_configs) - NODES['node2'] = cluster.add_instance('node2', main_configs=main_configs, user_configs=["configs_secure/users.d/users.xml"]) + NODES["node1"] = cluster.add_instance("node1", main_configs=main_configs) + NODES["node2"] = cluster.add_instance( + "node2", + main_configs=main_configs, + user_configs=["configs_secure/users.d/users.xml"], + ) try: cluster.start() - NODES['node2'].query("CREATE TABLE base_table (x UInt64) ENGINE = MergeTree ORDER BY x;") - NODES['node2'].query("INSERT INTO base_table VALUES (5);") - NODES['node1'].query("CREATE TABLE distributed_table (x UInt64) ENGINE = Distributed(test_cluster, default, base_table);") + NODES["node2"].query( + "CREATE TABLE base_table (x UInt64) ENGINE = MergeTree ORDER BY x;" + ) + NODES["node2"].query("INSERT INTO base_table VALUES (5);") + NODES["node1"].query( + "CREATE TABLE distributed_table (x UInt64) ENGINE = Distributed(test_cluster, default, base_table);" + ) yield cluster @@ -45,11 +53,16 @@ def started_cluster(): def test(started_cluster): - NODES['node2'].replace_config('/etc/clickhouse-server/users.d/users.xml', config.format(sleep_in_send_data_ms=1000000)) - + NODES["node2"].replace_config( + "/etc/clickhouse-server/users.d/users.xml", + config.format(sleep_in_send_data_ms=1000000), + ) + attempts = 0 while attempts < 1000: - setting = NODES['node2'].http_query("SELECT value FROM system.settings WHERE name='sleep_in_send_data_ms'") + setting = NODES["node2"].http_query( + "SELECT value FROM system.settings WHERE name='sleep_in_send_data_ms'" + ) if int(setting) == 1000000: break time.sleep(0.1) @@ -57,28 +70,31 @@ def test(started_cluster): assert attempts < 1000 - start = time.time() - NODES['node1'].query_and_get_error('SELECT * FROM distributed_table settings receive_timeout=5, send_timeout=5, use_hedged_requests=0, async_socket_for_remote=0;') + NODES["node1"].query_and_get_error( + "SELECT * FROM distributed_table settings receive_timeout=5, send_timeout=5, use_hedged_requests=0, async_socket_for_remote=0;" + ) end = time.time() assert end - start < 10 start = time.time() - error = NODES['node1'].query_and_get_error('SELECT * FROM distributed_table settings receive_timeout=5, send_timeout=5, use_hedged_requests=0, async_socket_for_remote=1;') + error = NODES["node1"].query_and_get_error( + "SELECT * FROM distributed_table settings receive_timeout=5, send_timeout=5, use_hedged_requests=0, async_socket_for_remote=1;" + ) end = time.time() assert end - start < 10 # Check that exception about timeout wasn't thrown from DB::ReadBufferFromPocoSocket::nextImpl(). - assert error.find('DB::ReadBufferFromPocoSocket::nextImpl()') == -1 + assert error.find("DB::ReadBufferFromPocoSocket::nextImpl()") == -1 start = time.time() - error = NODES['node1'].query_and_get_error('SELECT * FROM distributed_table settings receive_timeout=5, send_timeout=5, use_hedged_requests=1, async_socket_for_remote=1;') + error = NODES["node1"].query_and_get_error( + "SELECT * FROM distributed_table settings receive_timeout=5, send_timeout=5, use_hedged_requests=1, async_socket_for_remote=1;" + ) end = time.time() assert end - start < 10 # Check that exception about timeout wasn't thrown from DB::ReadBufferFromPocoSocket::nextImpl(). 
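# (With use_hedged_requests=1 and async_socket_for_remote=1 the remote data is read through
# the asynchronous socket path, so a hit timeout is expected to surface as an ordinary
# query timeout rather than as an exception from the blocking
# DB::ReadBufferFromPocoSocket::nextImpl() read — which is what the substring check below
# verifies.)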
- assert error.find('DB::ReadBufferFromPocoSocket::nextImpl()') == -1 - - + assert error.find("DB::ReadBufferFromPocoSocket::nextImpl()") == -1 diff --git a/tests/integration/test_select_access_rights/test.py b/tests/integration/test_select_access_rights/test.py index 0272eac5fa1..76940cdadb4 100644 --- a/tests/integration/test_select_access_rights/test.py +++ b/tests/integration/test_select_access_rights/test.py @@ -3,7 +3,7 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance("instance") @pytest.fixture(scope="module", autouse=True) @@ -24,214 +24,334 @@ def cleanup_after_test(): def test_select_single_column(): - instance.query("CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d") + instance.query( + "CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d" + ) select_query = "SELECT a FROM table1" - assert "it's necessary to have grant SELECT(a) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(a) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(a) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" instance.query("REVOKE SELECT(a) ON default.table1 FROM A") - assert "it's necessary to have grant SELECT(a) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(a) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) def test_select_single_column_with_table_grant(): - instance.query("CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d") + instance.query( + "CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d" + ) select_query = "SELECT a FROM table1" - assert "it's necessary to have grant SELECT(a) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(a) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" instance.query("REVOKE SELECT(a) ON default.table1 FROM A") - assert "it's necessary to have grant SELECT(a) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(a) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) def test_select_all_columns(): - instance.query("CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d") + instance.query( + "CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d" + ) select_query = "SELECT * FROM table1" - assert "it's necessary to have grant SELECT(d, a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(d, a, b) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(d) ON default.table1 TO A") - assert "it's necessary to have grant SELECT(d, a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have 
grant SELECT(d, a, b) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(a) ON default.table1 TO A") - assert "it's necessary to have grant SELECT(d, a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(d, a, b) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(b) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" def test_select_all_columns_with_table_grant(): - instance.query("CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d") + instance.query( + "CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d" + ) select_query = "SELECT * FROM table1" - assert "it's necessary to have grant SELECT(d, a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(d, a, b) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" def test_alias(): - instance.query("CREATE TABLE table1(x Int32, y Int32) ENGINE = MergeTree ORDER BY tuple()") + instance.query( + "CREATE TABLE table1(x Int32, y Int32) ENGINE = MergeTree ORDER BY tuple()" + ) select_query = "SELECT x, y, x + y AS s FROM table1" - assert "it's necessary to have grant SELECT(x, y) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(x, y) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(x, y) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" def test_alias_columns(): - instance.query("CREATE TABLE table1(x Int32, y Int32, s Int32 ALIAS x + y) ENGINE = MergeTree ORDER BY tuple()") + instance.query( + "CREATE TABLE table1(x Int32, y Int32, s Int32 ALIAS x + y) ENGINE = MergeTree ORDER BY tuple()" + ) select_query = "SELECT * FROM table1" - assert "it's necessary to have grant SELECT(x, y) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(x, y) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(x,y) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" select_query = "SELECT s FROM table1" - assert "it's necessary to have grant SELECT(s) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(s) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(s) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" instance.query("REVOKE SELECT(x,y) ON default.table1 FROM A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" def test_materialized_columns(): - instance.query("CREATE TABLE table1(x Int32, y Int32, p Int32 MATERIALIZED x * y) ENGINE = MergeTree ORDER BY tuple()") + 
instance.query( + "CREATE TABLE table1(x Int32, y Int32, p Int32 MATERIALIZED x * y) ENGINE = MergeTree ORDER BY tuple()" + ) select_query = "SELECT * FROM table1" - assert "it's necessary to have grant SELECT(x, y) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(x, y) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(x,y) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" select_query = "SELECT p FROM table1" - assert "it's necessary to have grant SELECT(p) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') - + assert ( + "it's necessary to have grant SELECT(p) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) + instance.query("GRANT SELECT(p) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" instance.query("REVOKE SELECT(x,y) ON default.table1 FROM A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" def test_select_join(): - instance.query("CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d") - instance.query("CREATE TABLE table2(d DATE, x UInt32, y UInt8) ENGINE = MergeTree ORDER BY d") + instance.query( + "CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d" + ) + instance.query( + "CREATE TABLE table2(d DATE, x UInt32, y UInt8) ENGINE = MergeTree ORDER BY d" + ) select_query = "SELECT * FROM table1 JOIN table2 USING(d)" - assert "it's necessary to have grant SELECT(d, x, y) ON default.table2" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(d, x, y) ON default.table2" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(d, x, y) ON default.table2 TO A") - assert "it's necessary to have grant SELECT(d, a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(d, a, b) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(d, a, b) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" instance.query("REVOKE SELECT ON default.table2 FROM A") - assert "it's necessary to have grant SELECT(d, x, y) ON default.table2" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(d, x, y) ON default.table2" + in instance.query_and_get_error(select_query, user="A") + ) def test_select_union(): - instance.query("CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY tuple()") - instance.query("CREATE TABLE table2(a String, b UInt8) ENGINE = MergeTree ORDER BY tuple()") + instance.query( + "CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY tuple()" + ) + instance.query( + "CREATE TABLE table2(a String, b UInt8) ENGINE = MergeTree ORDER BY tuple()" + ) select_query = "SELECT * FROM table1 UNION ALL SELECT * FROM table2" - assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(a, b) ON default.table1" + in 
instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(a, b) ON default.table1 TO A") - assert "it's necessary to have grant SELECT(a, b) ON default.table2" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(a, b) ON default.table2" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(a, b) ON default.table2 TO A") - assert instance.query(select_query, user = 'A') == "" + assert instance.query(select_query, user="A") == "" instance.query("REVOKE SELECT ON default.table1 FROM A") - assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(a, b) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) def test_select_count(): - instance.query("CREATE TABLE table1(x String, y UInt8) ENGINE = MergeTree ORDER BY tuple()") + instance.query( + "CREATE TABLE table1(x String, y UInt8) ENGINE = MergeTree ORDER BY tuple()" + ) select_query = "SELECT count() FROM table1" - assert "it's necessary to have grant SELECT for at least one column on default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT for at least one column on default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(x) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "0\n" + assert instance.query(select_query, user="A") == "0\n" instance.query("REVOKE SELECT(x) ON default.table1 FROM A") - assert "it's necessary to have grant SELECT for at least one column on default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT for at least one column on default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(y) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "0\n" + assert instance.query(select_query, user="A") == "0\n" instance.query("REVOKE SELECT(y) ON default.table1 FROM A") - assert "it's necessary to have grant SELECT for at least one column on default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT for at least one column on default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "0\n" + assert instance.query(select_query, user="A") == "0\n" def test_select_where(): # User should have grants for the columns used in WHERE. 
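# For example (illustrative only, mirroring the steps below):
#   GRANT SELECT(a) ON default.table1 TO A
#   "SELECT a FROM table1 WHERE b = 0" as A
#       -> "it's necessary to have grant SELECT(a, b) ON default.table1"
#   GRANT SELECT(b) ON default.table1 TO A   # now the query succeeds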
- instance.query("CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY b") + instance.query( + "CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY b" + ) instance.query("INSERT INTO table1 VALUES ('xxx', 0), ('yyy', 1), ('zzz', 0)") instance.query("GRANT SELECT(a) ON default.table1 TO A") select_query = "SELECT a FROM table1 WHERE b = 0" - assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(a, b) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(b) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "xxx\nzzz\n" + assert instance.query(select_query, user="A") == "xxx\nzzz\n" instance.query("REVOKE SELECT ON default.table1 FROM A") - assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(a, b) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "xxx\nzzz\n" + assert instance.query(select_query, user="A") == "xxx\nzzz\n" def test_select_prewhere(): # User should have grants for the columns used in PREWHERE. - instance.query("CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY b") + instance.query( + "CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY b" + ) instance.query("INSERT INTO table1 VALUES ('xxx', 0), ('yyy', 1), ('zzz', 0)") instance.query("GRANT SELECT(a) ON default.table1 TO A") select_query = "SELECT a FROM table1 PREWHERE b = 0" - assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(a, b) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT(b) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "xxx\nzzz\n" + assert instance.query(select_query, user="A") == "xxx\nzzz\n" instance.query("REVOKE SELECT ON default.table1 FROM A") - assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT(a, b) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT SELECT ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "xxx\nzzz\n" + assert instance.query(select_query, user="A") == "xxx\nzzz\n" def test_select_with_row_policy(): # Normal users should not aware of the existence of row policy filters. 
- instance.query("CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY b") + instance.query( + "CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY b" + ) instance.query("INSERT INTO table1 VALUES ('xxx', 0), ('yyy', 1), ('zzz', 0)") instance.query("CREATE ROW POLICY pol1 ON table1 USING b = 0 TO A") select_query = "SELECT a FROM table1" select_query2 = "SELECT count() FROM table1" - assert "it's necessary to have grant SELECT(a) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') - assert "it's necessary to have grant SELECT for at least one column on default.table1" in instance.query_and_get_error(select_query2, user = 'A') + assert ( + "it's necessary to have grant SELECT(a) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) + assert ( + "it's necessary to have grant SELECT for at least one column on default.table1" + in instance.query_and_get_error(select_query2, user="A") + ) instance.query("GRANT SELECT(a) ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "xxx\nzzz\n" - assert instance.query(select_query2, user = 'A') == "2\n" + assert instance.query(select_query, user="A") == "xxx\nzzz\n" + assert instance.query(select_query2, user="A") == "2\n" instance.query("REVOKE SELECT(a) ON default.table1 FROM A") - assert "it's necessary to have grant SELECT(a) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') - assert "it's necessary to have grant SELECT for at least one column on default.table1" in instance.query_and_get_error(select_query2, user = 'A') + assert ( + "it's necessary to have grant SELECT(a) ON default.table1" + in instance.query_and_get_error(select_query, user="A") + ) + assert ( + "it's necessary to have grant SELECT for at least one column on default.table1" + in instance.query_and_get_error(select_query2, user="A") + ) diff --git a/tests/integration/test_send_crash_reports/fake_sentry_server.py b/tests/integration/test_send_crash_reports/fake_sentry_server.py index fa40f642e41..37d733cc005 100644 --- a/tests/integration/test_send_crash_reports/fake_sentry_server.py +++ b/tests/integration/test_send_crash_reports/fake_sentry_server.py @@ -1,19 +1,27 @@ import http.server -RESULT_PATH = '/result.txt' +RESULT_PATH = "/result.txt" class SentryHandler(http.server.BaseHTTPRequestHandler): def do_POST(self): post_data = self.__read_and_decode_post_data() - with open(RESULT_PATH, 'w') as f: + with open(RESULT_PATH, "w") as f: content_length = self.headers.get("content-length") if self.headers.get("content-type") != "application/x-sentry-envelope": f.write("INCORRECT_CONTENT_TYPE") elif int(content_length) < 200: - f.write("INCORRECT_CONTENT_LENGTH:" + content_length + '\n' + post_data.decode()) - elif b'"http://6f33034cfe684dd7a3ab9875e57b1c8d@localhost:9500/5226277"' not in post_data: - f.write('INCORRECT_POST_DATA') + f.write( + "INCORRECT_CONTENT_LENGTH:" + + content_length + + "\n" + + post_data.decode() + ) + elif ( + b'"http://6f33034cfe684dd7a3ab9875e57b1c8d@localhost:9500/5226277"' + not in post_data + ): + f.write("INCORRECT_POST_DATA") else: f.write("OK") self.send_response(200) @@ -36,9 +44,15 @@ class SentryHandler(http.server.BaseHTTPRequestHandler): if __name__ == "__main__": - with open(RESULT_PATH, 'w') as f: + with open(RESULT_PATH, "w") as f: f.write("INITIAL_STATE") - httpd = http.server.HTTPServer(("localhost", 9500,), SentryHandler) + httpd = http.server.HTTPServer( + ( + "localhost", + 9500, + ), + SentryHandler, + ) 
try: httpd.serve_forever() finally: diff --git a/tests/integration/test_send_crash_reports/test.py b/tests/integration/test_send_crash_reports/test.py index 55c63c3fe12..90a6c684de7 100644 --- a/tests/integration/test_send_crash_reports/test.py +++ b/tests/integration/test_send_crash_reports/test.py @@ -19,9 +19,12 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) def started_node(): cluster = helpers.cluster.ClickHouseCluster(__file__) try: - node = cluster.add_instance("node", main_configs=[ - os.path.join(SCRIPT_DIR, "configs", "config_send_crash_reports.xml") - ]) + node = cluster.add_instance( + "node", + main_configs=[ + os.path.join(SCRIPT_DIR, "configs", "config_send_crash_reports.xml") + ], + ) cluster.start() yield node finally: @@ -33,23 +36,36 @@ def started_node(): def test_send_segfault(started_node): - if started_node.is_built_with_thread_sanitizer() or started_node.is_built_with_memory_sanitizer(): + if ( + started_node.is_built_with_thread_sanitizer() + or started_node.is_built_with_memory_sanitizer() + ): pytest.skip("doesn't fit in timeouts for stacktrace generation") - started_node.copy_file_to_container(os.path.join(SCRIPT_DIR, "fake_sentry_server.py"), "/fake_sentry_server.py") - started_node.exec_in_container(["bash", "-c", "python3 /fake_sentry_server.py > /fake_sentry_server.log 2>&1"], detach=True, user="root") + started_node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "fake_sentry_server.py"), "/fake_sentry_server.py" + ) + started_node.exec_in_container( + ["bash", "-c", "python3 /fake_sentry_server.py > /fake_sentry_server.log 2>&1"], + detach=True, + user="root", + ) time.sleep(1) - started_node.exec_in_container(["bash", "-c", "pkill -SEGV clickhouse"], user="root") + started_node.exec_in_container( + ["bash", "-c", "pkill -SEGV clickhouse"], user="root" + ) result = None for attempt in range(1, 6): time.sleep(attempt) - result = started_node.exec_in_container(['cat', fake_sentry_server.RESULT_PATH], user='root') - if result == 'OK': + result = started_node.exec_in_container( + ["cat", fake_sentry_server.RESULT_PATH], user="root" + ) + if result == "OK": break - if result == 'INITIAL_STATE': + if result == "INITIAL_STATE": continue if result: - assert False, 'Unexpected state: ' + result + assert False, "Unexpected state: " + result - assert result == 'OK', 'Crash report not sent' + assert result == "OK", "Crash report not sent" diff --git a/tests/integration/test_send_request_to_leader_replica/test.py b/tests/integration/test_send_request_to_leader_replica/test.py index 721e446ff82..60df18bf7d3 100644 --- a/tests/integration/test_send_request_to_leader_replica/test.py +++ b/tests/integration/test_send_request_to_leader_replica/test.py @@ -5,14 +5,30 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], - user_configs=['configs/user_good_restricted.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], - user_configs=['configs/user_good_restricted.xml'], with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], - user_configs=['configs/user_good_allowed.xml'], with_zookeeper=True) -node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml'], - user_configs=['configs/user_good_allowed.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml"], 
+ user_configs=["configs/user_good_restricted.xml"], + with_zookeeper=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/user_good_restricted.xml"], + with_zookeeper=True, +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/user_good_allowed.xml"], + with_zookeeper=True, +) +node4 = cluster.add_instance( + "node4", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/user_good_allowed.xml"], + with_zookeeper=True, +) @pytest.fixture(scope="module") @@ -21,16 +37,26 @@ def started_cluster(): cluster.start() for node in [node1, node2]: - node.query(''' + node.query( + """ CREATE TABLE sometable(date Date, id UInt32, value Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/sometable', '{replica}', date, id, 8192); - '''.format(replica=node.name), user='awesome') + """.format( + replica=node.name + ), + user="awesome", + ) for node in [node3, node4]: - node.query(''' + node.query( + """ CREATE TABLE someothertable(date Date, id UInt32, value Int32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/someothertable', '{replica}', date, id, 8192); - '''.format(replica=node.name), user='good') + """.format( + replica=node.name + ), + user="good", + ) yield cluster @@ -38,39 +64,82 @@ def started_cluster(): cluster.shutdown() -@pytest.mark.parametrize("table,query,expected,n1,n2", [ - pytest.param("sometable", "ALTER TABLE sometable DROP PARTITION 201706", '1', node1, node2, id="case1"), - pytest.param("sometable", "TRUNCATE TABLE sometable", '0', node1, node2, id="case2"), - pytest.param("sometable", "OPTIMIZE TABLE sometable", '4', node1, node2, id="case3"), - pytest.param("someothertable", "ALTER TABLE someothertable DROP PARTITION 201706", '1', node3, node4, id="case4"), - pytest.param("someothertable", "TRUNCATE TABLE someothertable", '0', node3, node4, id="case5"), - pytest.param("someothertable", "OPTIMIZE TABLE someothertable", '4', node3, node4, id="case6"), -]) +@pytest.mark.parametrize( + "table,query,expected,n1,n2", + [ + pytest.param( + "sometable", + "ALTER TABLE sometable DROP PARTITION 201706", + "1", + node1, + node2, + id="case1", + ), + pytest.param( + "sometable", "TRUNCATE TABLE sometable", "0", node1, node2, id="case2" + ), + pytest.param( + "sometable", "OPTIMIZE TABLE sometable", "4", node1, node2, id="case3" + ), + pytest.param( + "someothertable", + "ALTER TABLE someothertable DROP PARTITION 201706", + "1", + node3, + node4, + id="case4", + ), + pytest.param( + "someothertable", + "TRUNCATE TABLE someothertable", + "0", + node3, + node4, + id="case5", + ), + pytest.param( + "someothertable", + "OPTIMIZE TABLE someothertable", + "4", + node3, + node4, + id="case6", + ), + ], +) def test_alter_table_drop_partition(started_cluster, table, query, expected, n1, n2): - to_insert = '''\ + to_insert = """\ 2017-06-16 111 0 2017-06-16 222 1 2017-06-16 333 2 2017-07-16 444 3 -''' - n1.query("INSERT INTO {} FORMAT TSV".format(table), stdin=to_insert, user='good') +""" + n1.query("INSERT INTO {} FORMAT TSV".format(table), stdin=to_insert, user="good") - assert_eq_with_retry(n1, "SELECT COUNT(*) from {}".format(table), '4', user='good') - assert_eq_with_retry(n2, "SELECT COUNT(*) from {}".format(table), '4', user='good') + assert_eq_with_retry(n1, "SELECT COUNT(*) from {}".format(table), "4", user="good") + assert_eq_with_retry(n2, "SELECT COUNT(*) from {}".format(table), "4", user="good") ### It maybe leader and everything 
will be ok - n1.query(query, user='good') + n1.query(query, user="good") - assert_eq_with_retry(n1, "SELECT COUNT(*) from {}".format(table), expected, user='good') - assert_eq_with_retry(n2, "SELECT COUNT(*) from {}".format(table), expected, user='good') + assert_eq_with_retry( + n1, "SELECT COUNT(*) from {}".format(table), expected, user="good" + ) + assert_eq_with_retry( + n2, "SELECT COUNT(*) from {}".format(table), expected, user="good" + ) - n1.query("INSERT INTO {} FORMAT TSV".format(table), stdin=to_insert, user='good') + n1.query("INSERT INTO {} FORMAT TSV".format(table), stdin=to_insert, user="good") - assert_eq_with_retry(n1, "SELECT COUNT(*) from {}".format(table), '4', user='good') - assert_eq_with_retry(n2, "SELECT COUNT(*) from {}".format(table), '4', user='good') + assert_eq_with_retry(n1, "SELECT COUNT(*) from {}".format(table), "4", user="good") + assert_eq_with_retry(n2, "SELECT COUNT(*) from {}".format(table), "4", user="good") ### If node1 is leader than node2 will be slave - n2.query(query, user='good') + n2.query(query, user="good") - assert_eq_with_retry(n1, "SELECT COUNT(*) from {}".format(table), expected, user='good') - assert_eq_with_retry(n2, "SELECT COUNT(*) from {}".format(table), expected, user='good') + assert_eq_with_retry( + n1, "SELECT COUNT(*) from {}".format(table), expected, user="good" + ) + assert_eq_with_retry( + n2, "SELECT COUNT(*) from {}".format(table), expected, user="good" + ) diff --git a/tests/integration/test_server_initialization/test.py b/tests/integration/test_server_initialization/test.py index 08032436982..1b57e14a51b 100644 --- a/tests/integration/test_server_initialization/test.py +++ b/tests/integration/test_server_initialization/test.py @@ -7,11 +7,15 @@ from helpers.cluster import ClickHouseCluster def started_cluster(): try: cluster = ClickHouseCluster(__file__) - instance = cluster.add_instance('dummy', clickhouse_path_dir='clickhouse_path', stay_alive=True) + instance = cluster.add_instance( + "dummy", clickhouse_path_dir="clickhouse_path", stay_alive=True + ) cluster.start() - cluster_fail = ClickHouseCluster(__file__, name='fail') - instance_fail = cluster_fail.add_instance('dummy_fail', clickhouse_path_dir='clickhouse_path_fail') + cluster_fail = ClickHouseCluster(__file__, name="fail") + instance_fail = cluster_fail.add_instance( + "dummy_fail", clickhouse_path_dir="clickhouse_path_fail" + ) with pytest.raises(Exception): cluster_fail.start() cluster_fail.shutdown() # cleanup @@ -23,26 +27,40 @@ def started_cluster(): def test_sophisticated_default(started_cluster): - instance = started_cluster.instances['dummy'] + instance = started_cluster.instances["dummy"] instance.query("INSERT INTO sophisticated_default (c) VALUES (0)") assert instance.query("SELECT a, b, c FROM sophisticated_default") == "3\t9\t0\n" def test_partially_dropped_tables(started_cluster): - instance = started_cluster.instances['dummy'] - assert instance.exec_in_container(['bash', '-c', 'find /var/lib/clickhouse/*/default -name *.sql* | sort'], - privileged=True, user='root') \ - == "/var/lib/clickhouse/metadata/default/should_be_restored.sql\n" \ - "/var/lib/clickhouse/metadata/default/sophisticated_default.sql\n" + instance = started_cluster.instances["dummy"] + assert ( + instance.exec_in_container( + ["bash", "-c", "find /var/lib/clickhouse/*/default -name *.sql* | sort"], + privileged=True, + user="root", + ) + == "/var/lib/clickhouse/metadata/default/should_be_restored.sql\n" + "/var/lib/clickhouse/metadata/default/sophisticated_default.sql\n" + ) 
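# Besides the metadata check above, the data of should_be_restored must still be readable,
# while should_be_dropped must no longer appear in system.tables (both checked just below).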
assert instance.query("SELECT n FROM should_be_restored") == "1\n2\n3\n" - assert instance.query("SELECT count() FROM system.tables WHERE name='should_be_dropped'") == "0\n" + assert ( + instance.query( + "SELECT count() FROM system.tables WHERE name='should_be_dropped'" + ) + == "0\n" + ) def test_live_view_dependency(started_cluster): - instance = started_cluster.instances['dummy'] + instance = started_cluster.instances["dummy"] instance.query("CREATE DATABASE a_load_first") instance.query("CREATE DATABASE b_load_second") - instance.query("CREATE TABLE b_load_second.mt (a Int32) Engine=MergeTree order by tuple()") - instance.query("CREATE LIVE VIEW a_load_first.lv AS SELECT sum(a) FROM b_load_second.mt", - settings={'allow_experimental_live_view': 1}) + instance.query( + "CREATE TABLE b_load_second.mt (a Int32) Engine=MergeTree order by tuple()" + ) + instance.query( + "CREATE LIVE VIEW a_load_first.lv AS SELECT sum(a) FROM b_load_second.mt", + settings={"allow_experimental_live_view": 1}, + ) instance.restart_clickhouse() diff --git a/tests/integration/test_server_reload/test.py b/tests/integration/test_server_reload/test.py index 3c22b476f64..5cda659b5c4 100644 --- a/tests/integration/test_server_reload/test.py +++ b/tests/integration/test_server_reload/test.py @@ -17,10 +17,15 @@ cluster = ClickHouseCluster(__file__) instance = cluster.add_instance( "instance", main_configs=[ - "configs/ports_from_zk.xml", "configs/ssl_conf.xml", "configs/dhparam.pem", "configs/server.crt", "configs/server.key" + "configs/ports_from_zk.xml", + "configs/ssl_conf.xml", + "configs/dhparam.pem", + "configs/server.crt", + "configs/server.key", ], user_configs=["configs/default_passwd.xml"], - with_zookeeper=True) + with_zookeeper=True, +) LOADS_QUERY = "SELECT value FROM system.events WHERE event = 'MainConfigLoads'" @@ -33,7 +38,9 @@ gen_dir = Path(__file__).parent / "_gen" gen_dir.mkdir(exist_ok=True) run_and_check( f"python3 -m grpc_tools.protoc -I{proto_dir!s} --python_out={gen_dir!s} --grpc_python_out={gen_dir!s} \ - {proto_dir!s}/clickhouse_grpc.proto", shell=True) + {proto_dir!s}/clickhouse_grpc.proto", + shell=True, +) sys.path.append(str(gen_dir)) import clickhouse_grpc_pb2 @@ -56,7 +63,11 @@ def fixture_zk(cluster): def get_client(cluster, port): - return Client(host=cluster.get_instance_ip("instance"), port=port, command=cluster.client_bin_path) + return Client( + host=cluster.get_instance_ip("instance"), + port=port, + command=cluster.client_bin_path, + ) def get_mysql_client(cluster, port): @@ -64,7 +75,12 @@ def get_mysql_client(cluster, port): while True: try: return pymysql.connections.Connection( - host=cluster.get_instance_ip("instance"), user="default", password="", database="default", port=port) + host=cluster.get_instance_ip("instance"), + user="default", + password="", + database="default", + port=port, + ) except pymysql.err.OperationalError: if time.monotonic() - start_time > 10: raise @@ -76,7 +92,12 @@ def get_pgsql_client(cluster, port): while True: try: return psycopg2.connect( - host=cluster.get_instance_ip("instance"), user="postgresql", password="123", database="default", port=port) + host=cluster.get_instance_ip("instance"), + user="postgresql", + password="123", + database="default", + port=port, + ) except psycopg2.OperationalError: if time.monotonic() - start_time > 10: raise @@ -212,7 +233,9 @@ def test_change_grpc_port(cluster, zk): assert grpc_query(grpc_channel, "SELECT 1") == "1\n" with sync_loaded_config(client.query): zk.set("/clickhouse/ports/grpc", b"9090") - 
with pytest.raises(grpc._channel._InactiveRpcError, match="StatusCode.UNAVAILABLE"): + with pytest.raises( + grpc._channel._InactiveRpcError, match="StatusCode.UNAVAILABLE" + ): grpc_query(grpc_channel, "SELECT 1") grpc_channel_on_new_port = get_grpc_channel(cluster, port=9090) assert grpc_query(grpc_channel_on_new_port, "SELECT 1") == "1\n" @@ -264,13 +287,22 @@ def test_remove_grpc_port(cluster, zk): assert grpc_query(grpc_channel, "SELECT 1") == "1\n" with sync_loaded_config(client.query): zk.delete("/clickhouse/ports/grpc") - with pytest.raises(grpc._channel._InactiveRpcError, match="StatusCode.UNAVAILABLE"): + with pytest.raises( + grpc._channel._InactiveRpcError, match="StatusCode.UNAVAILABLE" + ): grpc_query(grpc_channel, "SELECT 1") def test_change_listen_host(cluster, zk): - localhost_client = Client(host="127.0.0.1", port=9000, command="/usr/bin/clickhouse") - localhost_client.command = ["docker", "exec", "-i", instance.docker_id] + localhost_client.command + localhost_client = Client( + host="127.0.0.1", port=9000, command="/usr/bin/clickhouse" + ) + localhost_client.command = [ + "docker", + "exec", + "-i", + instance.docker_id, + ] + localhost_client.command try: client = get_client(cluster, port=9000) with sync_loaded_config(localhost_client.query): @@ -281,4 +313,3 @@ def test_change_listen_host(cluster, zk): finally: with sync_loaded_config(localhost_client.query): configure_ports_from_zk(zk) - diff --git a/tests/integration/test_settings_constraints/test.py b/tests/integration/test_settings_constraints/test.py index 18c80d9c1da..f6490c60407 100644 --- a/tests/integration/test_settings_constraints/test.py +++ b/tests/integration/test_settings_constraints/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', user_configs=["configs/users.xml"]) +instance = cluster.add_instance("instance", user_configs=["configs/users.xml"]) @pytest.fixture(scope="module") @@ -16,80 +16,122 @@ def started_cluster(): def test_system_settings(started_cluster): - assert instance.query( - "SELECT name, value, min, max, readonly from system.settings WHERE name = 'force_index_by_date'") == \ - "force_index_by_date\t0\t\\N\t\\N\t1\n" + assert ( + instance.query( + "SELECT name, value, min, max, readonly from system.settings WHERE name = 'force_index_by_date'" + ) + == "force_index_by_date\t0\t\\N\t\\N\t1\n" + ) - assert instance.query( - "SELECT name, value, min, max, readonly from system.settings WHERE name = 'max_memory_usage'") == \ - "max_memory_usage\t10000000000\t5000000000\t20000000000\t0\n" + assert ( + instance.query( + "SELECT name, value, min, max, readonly from system.settings WHERE name = 'max_memory_usage'" + ) + == "max_memory_usage\t10000000000\t5000000000\t20000000000\t0\n" + ) - assert instance.query("SELECT name, value, min, max, readonly from system.settings WHERE name = 'readonly'") == \ - "readonly\t0\t\\N\t\\N\t0\n" + assert ( + instance.query( + "SELECT name, value, min, max, readonly from system.settings WHERE name = 'readonly'" + ) + == "readonly\t0\t\\N\t\\N\t0\n" + ) def test_system_constraints(started_cluster): - assert_query_settings(instance, "SELECT 1", - settings={'readonly': 0}, - exception="Cannot modify 'readonly'", - user="readonly_user") + assert_query_settings( + instance, + "SELECT 1", + settings={"readonly": 0}, + exception="Cannot modify 'readonly'", + user="readonly_user", + ) - assert_query_settings(instance, "SELECT 1", - settings={'allow_ddl': 
1}, - exception="Cannot modify 'allow_ddl'", - user="no_dll_user") + assert_query_settings( + instance, + "SELECT 1", + settings={"allow_ddl": 1}, + exception="Cannot modify 'allow_ddl'", + user="no_dll_user", + ) def test_read_only_constraint(started_cluster): # Default value - assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='force_index_by_date'", - settings={}, - result="0") + assert_query_settings( + instance, + "SELECT value FROM system.settings WHERE name='force_index_by_date'", + settings={}, + result="0", + ) # Invalid value - assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='force_index_by_date'", - settings={'force_index_by_date': 1}, - result=None, - exception="Setting force_index_by_date should not be changed") + assert_query_settings( + instance, + "SELECT value FROM system.settings WHERE name='force_index_by_date'", + settings={"force_index_by_date": 1}, + result=None, + exception="Setting force_index_by_date should not be changed", + ) def test_min_constraint(started_cluster): # Default value - assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'", - {}, - result="10000000000") + assert_query_settings( + instance, + "SELECT value FROM system.settings WHERE name='max_memory_usage'", + {}, + result="10000000000", + ) # Valid value - assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'", - settings={'max_memory_usage': 5000000000}, - result="5000000000") + assert_query_settings( + instance, + "SELECT value FROM system.settings WHERE name='max_memory_usage'", + settings={"max_memory_usage": 5000000000}, + result="5000000000", + ) # Invalid value - assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'", - settings={'max_memory_usage': 4999999999}, - result=None, - exception="Setting max_memory_usage shouldn't be less than 5000000000") + assert_query_settings( + instance, + "SELECT value FROM system.settings WHERE name='max_memory_usage'", + settings={"max_memory_usage": 4999999999}, + result=None, + exception="Setting max_memory_usage shouldn't be less than 5000000000", + ) def test_max_constraint(started_cluster): # Default value - assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'", - {}, - result="10000000000") + assert_query_settings( + instance, + "SELECT value FROM system.settings WHERE name='max_memory_usage'", + {}, + result="10000000000", + ) # Valid value - assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'", - settings={'max_memory_usage': 20000000000}, - result="20000000000") + assert_query_settings( + instance, + "SELECT value FROM system.settings WHERE name='max_memory_usage'", + settings={"max_memory_usage": 20000000000}, + result="20000000000", + ) # Invalid value - assert_query_settings(instance, "SELECT value FROM system.settings WHERE name='max_memory_usage'", - settings={'max_memory_usage': 20000000001}, - result=None, - exception="Setting max_memory_usage shouldn't be greater than 20000000000") + assert_query_settings( + instance, + "SELECT value FROM system.settings WHERE name='max_memory_usage'", + settings={"max_memory_usage": 20000000001}, + result=None, + exception="Setting max_memory_usage shouldn't be greater than 20000000000", + ) -def assert_query_settings(instance, query, settings, result=None, exception=None, user=None): +def assert_query_settings( + instance, query, 
settings, result=None, exception=None, user=None +): """ Try and send the query with custom settings via all available methods: 1. TCP Protocol with settings packet @@ -103,13 +145,17 @@ def assert_query_settings(instance, query, settings, result=None, exception=None # tcp level settings if exception: - assert exception in instance.query_and_get_error(query, settings=settings, user=user) + assert exception in instance.query_and_get_error( + query, settings=settings, user=user + ) else: assert instance.query(query, settings=settings, user=user).strip() == result # http level settings if exception: - assert exception in instance.http_query_and_get_error(query, params=settings, user=user) + assert exception in instance.http_query_and_get_error( + query, params=settings, user=user + ) else: assert instance.http_query(query, params=settings, user=user).strip() == result diff --git a/tests/integration/test_settings_constraints_distributed/test.py b/tests/integration/test_settings_constraints_distributed/test.py index eed4e66131d..75d4732ffc4 100644 --- a/tests/integration/test_settings_constraints_distributed/test.py +++ b/tests/integration/test_settings_constraints_distributed/test.py @@ -5,12 +5,22 @@ from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=["configs/config.d/remote_servers.xml"], - user_configs=["configs/users.d/allow_introspection_functions.xml"]) -node2 = cluster.add_instance('node2', main_configs=["configs/config.d/remote_servers.xml"], - user_configs=["configs/users.d/allow_introspection_functions.xml"]) -distributed = cluster.add_instance('distributed', main_configs=["configs/config.d/remote_servers.xml"], - user_configs=["configs/users.d/allow_introspection_functions.xml"], stay_alive=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/config.d/remote_servers.xml"], + user_configs=["configs/users.d/allow_introspection_functions.xml"], +) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/config.d/remote_servers.xml"], + user_configs=["configs/users.d/allow_introspection_functions.xml"], +) +distributed = cluster.add_instance( + "distributed", + main_configs=["configs/config.d/remote_servers.xml"], + user_configs=["configs/users.d/allow_introspection_functions.xml"], + stay_alive=True, +) @pytest.fixture(scope="module", autouse=True) @@ -24,7 +34,8 @@ def started_cluster(): distributed.query("CREATE ROLE admin") distributed.query("GRANT ALL ON *.* TO admin") distributed.query( - "CREATE TABLE shard_settings (name String, value String) ENGINE = Distributed(test_cluster, system, settings);") + "CREATE TABLE shard_settings (name String, value String) ENGINE = Distributed(test_cluster, system, settings);" + ) yield cluster @@ -42,81 +53,126 @@ def restart_distributed(): def test_select_clamps_settings(): for node in [node1, node2]: - node.query("CREATE TABLE sometable_select (date Date, id UInt32, value Int32) ENGINE = MergeTree() ORDER BY id;") + node.query( + "CREATE TABLE sometable_select (date Date, id UInt32, value Int32) ENGINE = MergeTree() ORDER BY id;" + ) node.query("INSERT INTO sometable_select VALUES (toDate('2010-01-10'), 1, 1)") distributed.query( - "CREATE TABLE proxy_select (date Date, id UInt32, value Int32) ENGINE = Distributed(test_cluster, default, sometable_select, toUInt64(date));") + "CREATE TABLE proxy_select (date Date, id UInt32, value Int32) ENGINE = Distributed(test_cluster, default, sometable_select, toUInt64(date));" + ) - - 
distributed.query("CREATE USER normal DEFAULT ROLE admin SETTINGS max_memory_usage = 80000000") - distributed.query("CREATE USER wasteful DEFAULT ROLE admin SETTINGS max_memory_usage = 2000000000") + distributed.query( + "CREATE USER normal DEFAULT ROLE admin SETTINGS max_memory_usage = 80000000" + ) + distributed.query( + "CREATE USER wasteful DEFAULT ROLE admin SETTINGS max_memory_usage = 2000000000" + ) distributed.query("CREATE USER readonly DEFAULT ROLE admin SETTINGS readonly = 1") - node1.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999") + node1.query( + "ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999" + ) node2.query("ALTER USER shard SETTINGS readonly = 1") # Check that shards doesn't throw exceptions on constraints violation query = "SELECT COUNT() FROM proxy_select" - assert distributed.query(query) == '2\n' - assert distributed.query(query, user='normal') == '2\n' - assert distributed.query(query, user='wasteful') == '2\n' - assert distributed.query(query, user='readonly') == '2\n' + assert distributed.query(query) == "2\n" + assert distributed.query(query, user="normal") == "2\n" + assert distributed.query(query, user="wasteful") == "2\n" + assert distributed.query(query, user="readonly") == "2\n" - assert distributed.query(query, settings={"max_memory_usage": 40000000, "readonly": 2}) == '2\n' - assert distributed.query(query, settings={"max_memory_usage": 3000000000, "readonly": 2}) == '2\n' + assert ( + distributed.query(query, settings={"max_memory_usage": 40000000, "readonly": 2}) + == "2\n" + ) + assert ( + distributed.query( + query, settings={"max_memory_usage": 3000000000, "readonly": 2} + ) + == "2\n" + ) query = "SELECT COUNT() FROM remote('node{1,2}', 'default', 'sometable_select')" - assert distributed.query(query) == '2\n' - assert distributed.query(query, user='normal') == '2\n' - assert distributed.query(query, user='wasteful') == '2\n' + assert distributed.query(query) == "2\n" + assert distributed.query(query, user="normal") == "2\n" + assert distributed.query(query, user="wasteful") == "2\n" # Check that shards clamp passed settings. 
query = "SELECT hostName() as host, name, value FROM shard_settings WHERE name = 'max_memory_usage' OR name = 'readonly' ORDER BY host, name, value" - assert distributed.query(query) == 'node1\tmax_memory_usage\t99999999\n' \ - 'node1\treadonly\t0\n' \ - 'node2\tmax_memory_usage\t10000000000\n' \ - 'node2\treadonly\t1\n' - assert distributed.query(query, user='normal') == 'node1\tmax_memory_usage\t80000000\n' \ - 'node1\treadonly\t0\n' \ - 'node2\tmax_memory_usage\t10000000000\n' \ - 'node2\treadonly\t1\n' - assert distributed.query(query, user='wasteful') == 'node1\tmax_memory_usage\t99999999\n' \ - 'node1\treadonly\t0\n' \ - 'node2\tmax_memory_usage\t10000000000\n' \ - 'node2\treadonly\t1\n' - assert distributed.query(query, user='readonly') == 'node1\tmax_memory_usage\t99999999\n' \ - 'node1\treadonly\t1\n' \ - 'node2\tmax_memory_usage\t10000000000\n' \ - 'node2\treadonly\t1\n' + assert ( + distributed.query(query) == "node1\tmax_memory_usage\t99999999\n" + "node1\treadonly\t0\n" + "node2\tmax_memory_usage\t10000000000\n" + "node2\treadonly\t1\n" + ) + assert ( + distributed.query(query, user="normal") == "node1\tmax_memory_usage\t80000000\n" + "node1\treadonly\t0\n" + "node2\tmax_memory_usage\t10000000000\n" + "node2\treadonly\t1\n" + ) + assert ( + distributed.query(query, user="wasteful") + == "node1\tmax_memory_usage\t99999999\n" + "node1\treadonly\t0\n" + "node2\tmax_memory_usage\t10000000000\n" + "node2\treadonly\t1\n" + ) + assert ( + distributed.query(query, user="readonly") + == "node1\tmax_memory_usage\t99999999\n" + "node1\treadonly\t1\n" + "node2\tmax_memory_usage\t10000000000\n" + "node2\treadonly\t1\n" + ) - assert distributed.query(query, settings={"max_memory_usage": 1}) == 'node1\tmax_memory_usage\t11111111\n' \ - 'node1\treadonly\t0\n' \ - 'node2\tmax_memory_usage\t10000000000\n' \ - 'node2\treadonly\t1\n' - assert distributed.query(query, settings={"max_memory_usage": 40000000, - "readonly": 2}) == 'node1\tmax_memory_usage\t40000000\n' \ - 'node1\treadonly\t2\n' \ - 'node2\tmax_memory_usage\t10000000000\n' \ - 'node2\treadonly\t1\n' - assert distributed.query(query, settings={"max_memory_usage": 3000000000, - "readonly": 2}) == 'node1\tmax_memory_usage\t99999999\n' \ - 'node1\treadonly\t2\n' \ - 'node2\tmax_memory_usage\t10000000000\n' \ - 'node2\treadonly\t1\n' + assert ( + distributed.query(query, settings={"max_memory_usage": 1}) + == "node1\tmax_memory_usage\t11111111\n" + "node1\treadonly\t0\n" + "node2\tmax_memory_usage\t10000000000\n" + "node2\treadonly\t1\n" + ) + assert ( + distributed.query(query, settings={"max_memory_usage": 40000000, "readonly": 2}) + == "node1\tmax_memory_usage\t40000000\n" + "node1\treadonly\t2\n" + "node2\tmax_memory_usage\t10000000000\n" + "node2\treadonly\t1\n" + ) + assert ( + distributed.query( + query, settings={"max_memory_usage": 3000000000, "readonly": 2} + ) + == "node1\tmax_memory_usage\t99999999\n" + "node1\treadonly\t2\n" + "node2\tmax_memory_usage\t10000000000\n" + "node2\treadonly\t1\n" + ) def test_insert_clamps_settings(): for node in [node1, node2]: - node.query("CREATE TABLE sometable_insert (date Date, id UInt32, value Int32) ENGINE = MergeTree() ORDER BY id;") + node.query( + "CREATE TABLE sometable_insert (date Date, id UInt32, value Int32) ENGINE = MergeTree() ORDER BY id;" + ) node.query("INSERT INTO sometable_insert VALUES (toDate('2010-01-10'), 1, 1)") distributed.query( - "CREATE TABLE proxy_insert (date Date, id UInt32, value Int32) ENGINE = Distributed(test_cluster, default, sometable_insert, 
toUInt64(date));") + "CREATE TABLE proxy_insert (date Date, id UInt32, value Int32) ENGINE = Distributed(test_cluster, default, sometable_insert, toUInt64(date));" + ) - node1.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999") - node2.query("ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999") + node1.query( + "ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999" + ) + node2.query( + "ALTER USER shard SETTINGS max_memory_usage = 50000000 MIN 11111111 MAX 99999999" + ) distributed.query("INSERT INTO proxy_insert VALUES (toDate('2020-02-20'), 2, 2)") - distributed.query("INSERT INTO proxy_insert VALUES (toDate('2020-02-21'), 2, 2)", settings={"max_memory_usage": 5000000}) + distributed.query( + "INSERT INTO proxy_insert VALUES (toDate('2020-02-21'), 2, 2)", + settings={"max_memory_usage": 5000000}, + ) distributed.query("SYSTEM FLUSH DISTRIBUTED proxy_insert") assert_eq_with_retry(distributed, "SELECT COUNT() FROM proxy_insert", "4") diff --git a/tests/integration/test_settings_profile/test.py b/tests/integration/test_settings_profile/test.py index 7be0b395764..b4c0cec9f48 100644 --- a/tests/integration/test_settings_profile/test.py +++ b/tests/integration/test_settings_profile/test.py @@ -3,12 +3,17 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance("instance") def system_settings_profile(profile_name): - return TSV(instance.query( - "SELECT name, storage, num_elements, apply_to_all, apply_to_list, apply_to_except FROM system.settings_profiles WHERE name='" + profile_name + "'")) + return TSV( + instance.query( + "SELECT name, storage, num_elements, apply_to_all, apply_to_list, apply_to_except FROM system.settings_profiles WHERE name='" + + profile_name + + "'" + ) + ) def system_settings_profile_elements(profile_name=None, user_name=None, role_name=None): @@ -23,10 +28,12 @@ def system_settings_profile_elements(profile_name=None, user_name=None, role_nam session_id_counter = 0 + + def new_session_id(): global session_id_counter session_id_counter += 1 - return 'session #' + str(session_id_counter) + return "session #" + str(session_id_counter) @pytest.fixture(scope="module", autouse=True) @@ -49,51 +56,110 @@ def reset_after_test(): finally: instance.query("CREATE USER OR REPLACE robin") instance.query("DROP ROLE IF EXISTS worker") - instance.query("DROP SETTINGS PROFILE IF EXISTS xyz, alpha, P1, P2, P3, P4, P5, P6") + instance.query( + "DROP SETTINGS PROFILE IF EXISTS xyz, alpha, P1, P2, P3, P4, P5, P6" + ) def test_smoke(): # Set settings and constraints via CREATE SETTINGS PROFILE ... 
TO user instance.query( - "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin") - assert instance.query( - "SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin\n" - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "100000001\n" - assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error( - "SET max_memory_usage = 80000000", user="robin") - assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error( - "SET max_memory_usage = 120000000", user="robin") - assert system_settings_profile("xyz") == [["xyz", "local directory", 1, 0, "['robin']", "[]"]] + "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin" + ) + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE xyz") + == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin\n" + ) + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_memory_usage'", + user="robin", + ) + == "100000001\n" + ) + assert ( + "Setting max_memory_usage shouldn't be less than 90000000" + in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") + ) + assert ( + "Setting max_memory_usage shouldn't be greater than 110000000" + in instance.query_and_get_error( + "SET max_memory_usage = 120000000", user="robin" + ) + ) + assert system_settings_profile("xyz") == [ + ["xyz", "local directory", 1, 0, "['robin']", "[]"] + ] assert system_settings_profile_elements(profile_name="xyz") == [ - ["xyz", "\\N", "\\N", 0, "max_memory_usage", 100000001, 90000000, 110000000, "\\N", "\\N"]] + [ + "xyz", + "\\N", + "\\N", + 0, + "max_memory_usage", + 100000001, + 90000000, + 110000000, + "\\N", + "\\N", + ] + ] instance.query("ALTER SETTINGS PROFILE xyz TO NONE") - assert instance.query( - "SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000\n" - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "10000000000\n" + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE xyz") + == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000\n" + ) + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_memory_usage'", + user="robin", + ) + == "10000000000\n" + ) instance.query("SET max_memory_usage = 80000000", user="robin") instance.query("SET max_memory_usage = 120000000", user="robin") - assert system_settings_profile("xyz") == [["xyz", "local directory", 1, 0, "[]", "[]"]] + assert system_settings_profile("xyz") == [ + ["xyz", "local directory", 1, 0, "[]", "[]"] + ] assert system_settings_profile_elements(user_name="robin") == [] # Set settings and constraints via CREATE USER ... 
SETTINGS PROFILE instance.query("ALTER USER robin SETTINGS PROFILE xyz") - assert instance.query("SHOW CREATE USER robin") == "CREATE USER robin SETTINGS PROFILE xyz\n" - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "100000001\n" - assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error( - "SET max_memory_usage = 80000000", user="robin") - assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error( - "SET max_memory_usage = 120000000", user="robin") + assert ( + instance.query("SHOW CREATE USER robin") + == "CREATE USER robin SETTINGS PROFILE xyz\n" + ) + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_memory_usage'", + user="robin", + ) + == "100000001\n" + ) + assert ( + "Setting max_memory_usage shouldn't be less than 90000000" + in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") + ) + assert ( + "Setting max_memory_usage shouldn't be greater than 110000000" + in instance.query_and_get_error( + "SET max_memory_usage = 120000000", user="robin" + ) + ) assert system_settings_profile_elements(user_name="robin") == [ - ["\\N", "robin", "\\N", 0, "\\N", "\\N", "\\N", "\\N", "\\N", "xyz"]] + ["\\N", "robin", "\\N", 0, "\\N", "\\N", "\\N", "\\N", "\\N", "xyz"] + ] instance.query("ALTER USER robin SETTINGS NONE") assert instance.query("SHOW CREATE USER robin") == "CREATE USER robin\n" - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "10000000000\n" + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_memory_usage'", + user="robin", + ) + == "10000000000\n" + ) instance.query("SET max_memory_usage = 80000000", user="robin") instance.query("SET max_memory_usage = 120000000", user="robin") assert system_settings_profile_elements(user_name="robin") == [] @@ -102,94 +168,200 @@ def test_smoke(): def test_settings_from_granted_role(): # Set settings and constraints via granted role instance.query( - "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000") + "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000" + ) instance.query("CREATE ROLE worker SETTINGS PROFILE xyz") instance.query("GRANT worker TO robin") - assert instance.query( - "SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000\n" - assert instance.query("SHOW CREATE ROLE worker") == "CREATE ROLE worker SETTINGS PROFILE xyz\n" - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "100000001\n" - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_ast_depth'", user="robin") == "2000\n" - assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error( - "SET max_memory_usage = 120000000", user="robin") - assert system_settings_profile("xyz") == [["xyz", "local directory", 2, 0, "[]", "[]"]] + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE xyz") + == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000\n" + ) + assert ( + instance.query("SHOW CREATE ROLE worker") + == "CREATE ROLE worker SETTINGS PROFILE xyz\n" + ) + assert ( + instance.query( + "SELECT value FROM system.settings 
WHERE name = 'max_memory_usage'", + user="robin", + ) + == "100000001\n" + ) + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_ast_depth'", + user="robin", + ) + == "2000\n" + ) + assert ( + "Setting max_memory_usage shouldn't be greater than 110000000" + in instance.query_and_get_error( + "SET max_memory_usage = 120000000", user="robin" + ) + ) + assert system_settings_profile("xyz") == [ + ["xyz", "local directory", 2, 0, "[]", "[]"] + ] assert system_settings_profile_elements(profile_name="xyz") == [ - ["xyz", "\\N", "\\N", 0, "max_memory_usage", 100000001, "\\N", 110000000, "\\N", "\\N"], - ["xyz", "\\N", "\\N", 1, "max_ast_depth", 2000, "\\N", "\\N", "\\N", "\\N"]] + [ + "xyz", + "\\N", + "\\N", + 0, + "max_memory_usage", + 100000001, + "\\N", + 110000000, + "\\N", + "\\N", + ], + ["xyz", "\\N", "\\N", 1, "max_ast_depth", 2000, "\\N", "\\N", "\\N", "\\N"], + ] assert system_settings_profile_elements(role_name="worker") == [ - ["\\N", "\\N", "worker", 0, "\\N", "\\N", "\\N", "\\N", "\\N", "xyz"]] + ["\\N", "\\N", "worker", 0, "\\N", "\\N", "\\N", "\\N", "\\N", "xyz"] + ] instance.query("REVOKE worker FROM robin") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "10000000000\n" + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_memory_usage'", + user="robin", + ) + == "10000000000\n" + ) instance.query("SET max_memory_usage = 120000000", user="robin") instance.query("ALTER ROLE worker SETTINGS NONE") instance.query("GRANT worker TO robin") assert instance.query("SHOW CREATE ROLE worker") == "CREATE ROLE worker\n" - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "10000000000\n" + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_memory_usage'", + user="robin", + ) + == "10000000000\n" + ) instance.query("SET max_memory_usage = 120000000", user="robin") assert system_settings_profile_elements(role_name="worker") == [] # Set settings and constraints via CREATE SETTINGS PROFILE ... 
TO granted role instance.query("ALTER SETTINGS PROFILE xyz TO worker") - assert instance.query( - "SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000 TO worker\n" - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "100000001\n" - assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error( - "SET max_memory_usage = 120000000", user="robin") - assert system_settings_profile("xyz") == [["xyz", "local directory", 2, 0, "['worker']", "[]"]] + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE xyz") + == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000 TO worker\n" + ) + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_memory_usage'", + user="robin", + ) + == "100000001\n" + ) + assert ( + "Setting max_memory_usage shouldn't be greater than 110000000" + in instance.query_and_get_error( + "SET max_memory_usage = 120000000", user="robin" + ) + ) + assert system_settings_profile("xyz") == [ + ["xyz", "local directory", 2, 0, "['worker']", "[]"] + ] instance.query("ALTER SETTINGS PROFILE xyz TO NONE") - assert instance.query( - "SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000\n" - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "10000000000\n" + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE xyz") + == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000\n" + ) + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_memory_usage'", + user="robin", + ) + == "10000000000\n" + ) instance.query("SET max_memory_usage = 120000000", user="robin") - assert system_settings_profile("xyz") == [["xyz", "local directory", 2, 0, "[]", "[]"]] + assert system_settings_profile("xyz") == [ + ["xyz", "local directory", 2, 0, "[]", "[]"] + ] def test_inheritance(): - instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000002 READONLY") + instance.query( + "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000002 READONLY" + ) instance.query("CREATE SETTINGS PROFILE alpha SETTINGS PROFILE xyz TO robin") - assert instance.query( - "SHOW CREATE SETTINGS PROFILE xyz") == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000002 READONLY\n" - assert instance.query( - "SHOW CREATE SETTINGS PROFILE alpha") == "CREATE SETTINGS PROFILE alpha SETTINGS INHERIT xyz TO robin\n" - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "100000002\n" - assert "Setting max_memory_usage should not be changed" in instance.query_and_get_error( - "SET max_memory_usage = 80000000", user="robin") + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE xyz") + == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000002 READONLY\n" + ) + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE alpha") + == "CREATE SETTINGS PROFILE alpha SETTINGS INHERIT xyz TO robin\n" + ) + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_memory_usage'", + user="robin", + ) + == "100000002\n" + ) + assert ( + "Setting max_memory_usage should not be changed" + in instance.query_and_get_error("SET 
max_memory_usage = 80000000", user="robin") + ) - assert system_settings_profile("xyz") == [["xyz", "local directory", 1, 0, "[]", "[]"]] + assert system_settings_profile("xyz") == [ + ["xyz", "local directory", 1, 0, "[]", "[]"] + ] assert system_settings_profile_elements(profile_name="xyz") == [ - ["xyz", "\\N", "\\N", 0, "max_memory_usage", 100000002, "\\N", "\\N", 1, "\\N"]] - assert system_settings_profile("alpha") == [["alpha", "local directory", 1, 0, "['robin']", "[]"]] + ["xyz", "\\N", "\\N", 0, "max_memory_usage", 100000002, "\\N", "\\N", 1, "\\N"] + ] + assert system_settings_profile("alpha") == [ + ["alpha", "local directory", 1, 0, "['robin']", "[]"] + ] assert system_settings_profile_elements(profile_name="alpha") == [ - ["alpha", "\\N", "\\N", 0, "\\N", "\\N", "\\N", "\\N", "\\N", "xyz"]] + ["alpha", "\\N", "\\N", 0, "\\N", "\\N", "\\N", "\\N", "\\N", "xyz"] + ] assert system_settings_profile_elements(user_name="robin") == [] def test_alter_and_drop(): instance.query( - "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000003 MIN 90000000 MAX 110000000 TO robin") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "100000003\n" - assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error( - "SET max_memory_usage = 80000000", user="robin") - assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error( - "SET max_memory_usage = 120000000", user="robin") + "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000003 MIN 90000000 MAX 110000000 TO robin" + ) + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_memory_usage'", + user="robin", + ) + == "100000003\n" + ) + assert ( + "Setting max_memory_usage shouldn't be less than 90000000" + in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") + ) + assert ( + "Setting max_memory_usage shouldn't be greater than 110000000" + in instance.query_and_get_error( + "SET max_memory_usage = 120000000", user="robin" + ) + ) instance.query("ALTER SETTINGS PROFILE xyz SETTINGS readonly=1") - assert "Cannot modify 'max_memory_usage' setting in readonly mode" in instance.query_and_get_error( - "SET max_memory_usage = 80000000", user="robin") + assert ( + "Cannot modify 'max_memory_usage' setting in readonly mode" + in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin") + ) instance.query("DROP SETTINGS PROFILE xyz") - assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", - user="robin") == "10000000000\n" + assert ( + instance.query( + "SELECT value FROM system.settings WHERE name = 'max_memory_usage'", + user="robin", + ) + == "10000000000\n" + ) instance.query("SET max_memory_usage = 80000000", user="robin") instance.query("SET max_memory_usage = 120000000", user="robin") @@ -200,28 +372,49 @@ def test_show_profiles(): assert instance.query("SHOW PROFILES") == "default\nreadonly\nxyz\n" assert instance.query("SHOW CREATE PROFILE xyz") == "CREATE SETTINGS PROFILE xyz\n" - assert instance.query( - "SHOW CREATE SETTINGS PROFILE default") == "CREATE SETTINGS PROFILE default SETTINGS max_memory_usage = 10000000000, load_balancing = \\'random\\'\n" - assert instance.query( - "SHOW CREATE PROFILES") == "CREATE SETTINGS PROFILE default SETTINGS max_memory_usage = 10000000000, load_balancing = \\'random\\'\n" \ - "CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1\n" \ - 
"CREATE SETTINGS PROFILE xyz\n" + assert ( + instance.query("SHOW CREATE SETTINGS PROFILE default") + == "CREATE SETTINGS PROFILE default SETTINGS max_memory_usage = 10000000000, load_balancing = \\'random\\'\n" + ) + assert ( + instance.query("SHOW CREATE PROFILES") + == "CREATE SETTINGS PROFILE default SETTINGS max_memory_usage = 10000000000, load_balancing = \\'random\\'\n" + "CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1\n" + "CREATE SETTINGS PROFILE xyz\n" + ) - expected_access = "CREATE SETTINGS PROFILE default SETTINGS max_memory_usage = 10000000000, load_balancing = \\'random\\'\n" \ - "CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1\n" \ - "CREATE SETTINGS PROFILE xyz\n" + expected_access = ( + "CREATE SETTINGS PROFILE default SETTINGS max_memory_usage = 10000000000, load_balancing = \\'random\\'\n" + "CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1\n" + "CREATE SETTINGS PROFILE xyz\n" + ) assert expected_access in instance.query("SHOW ACCESS") def test_set_profile(): - instance.query("CREATE SETTINGS PROFILE P1 SETTINGS max_memory_usage=10000000001 MAX 20000000002") + instance.query( + "CREATE SETTINGS PROFILE P1 SETTINGS max_memory_usage=10000000001 MAX 20000000002" + ) session_id = new_session_id() - instance.http_query("SET profile='P1'", user='robin', params={'session_id':session_id}) - assert instance.http_query("SELECT getSetting('max_memory_usage')", user='robin', params={'session_id':session_id}) == "10000000001\n" + instance.http_query( + "SET profile='P1'", user="robin", params={"session_id": session_id} + ) + assert ( + instance.http_query( + "SELECT getSetting('max_memory_usage')", + user="robin", + params={"session_id": session_id}, + ) + == "10000000001\n" + ) expected_error = "max_memory_usage shouldn't be greater than 20000000002" - assert expected_error in instance.http_query_and_get_error("SET max_memory_usage=20000000003", user='robin', params={'session_id':session_id}) + assert expected_error in instance.http_query_and_get_error( + "SET max_memory_usage=20000000003", + user="robin", + params={"session_id": session_id}, + ) def test_changing_default_profiles_affects_new_sessions_only(): @@ -230,12 +423,33 @@ def test_changing_default_profiles_affects_new_sessions_only(): instance.query("ALTER USER robin SETTINGS PROFILE P1") session_id = new_session_id() - assert instance.http_query("SELECT getSetting('max_memory_usage')", user='robin', params={'session_id':session_id}) == "10000000001\n" + assert ( + instance.http_query( + "SELECT getSetting('max_memory_usage')", + user="robin", + params={"session_id": session_id}, + ) + == "10000000001\n" + ) instance.query("ALTER USER robin SETTINGS PROFILE P2") - assert instance.http_query("SELECT getSetting('max_memory_usage')", user='robin', params={'session_id':session_id}) == "10000000001\n" + assert ( + instance.http_query( + "SELECT getSetting('max_memory_usage')", + user="robin", + params={"session_id": session_id}, + ) + == "10000000001\n" + ) other_session_id = new_session_id() - assert instance.http_query("SELECT getSetting('max_memory_usage')", user='robin', params={'session_id':other_session_id}) == "10000000002\n" + assert ( + instance.http_query( + "SELECT getSetting('max_memory_usage')", + user="robin", + params={"session_id": other_session_id}, + ) + == "10000000002\n" + ) def test_function_current_profiles(): @@ -249,22 +463,60 @@ def test_function_current_profiles(): instance.query("CREATE SETTINGS PROFILE P6") session_id = new_session_id() - assert instance.http_query('SELECT 
defaultProfiles(), currentProfiles(), enabledProfiles()', user='robin', params={'session_id':session_id}) == "['P1','P2']\t['P1','P2']\t['default','P3','P4','P5','P1','P2']\n" + assert ( + instance.http_query( + "SELECT defaultProfiles(), currentProfiles(), enabledProfiles()", + user="robin", + params={"session_id": session_id}, + ) + == "['P1','P2']\t['P1','P2']\t['default','P3','P4','P5','P1','P2']\n" + ) - instance.http_query("SET profile='P6'", user='robin', params={'session_id':session_id}) - assert instance.http_query('SELECT defaultProfiles(), currentProfiles(), enabledProfiles()', user='robin', params={'session_id':session_id}) == "['P1','P2']\t['P6']\t['default','P3','P4','P5','P1','P2','P6']\n" + instance.http_query( + "SET profile='P6'", user="robin", params={"session_id": session_id} + ) + assert ( + instance.http_query( + "SELECT defaultProfiles(), currentProfiles(), enabledProfiles()", + user="robin", + params={"session_id": session_id}, + ) + == "['P1','P2']\t['P6']\t['default','P3','P4','P5','P1','P2','P6']\n" + ) - instance.http_query("SET profile='P5'", user='robin', params={'session_id':session_id}) - assert instance.http_query('SELECT defaultProfiles(), currentProfiles(), enabledProfiles()', user='robin', params={'session_id':session_id}) == "['P1','P2']\t['P5']\t['default','P3','P1','P2','P6','P4','P5']\n" + instance.http_query( + "SET profile='P5'", user="robin", params={"session_id": session_id} + ) + assert ( + instance.http_query( + "SELECT defaultProfiles(), currentProfiles(), enabledProfiles()", + user="robin", + params={"session_id": session_id}, + ) + == "['P1','P2']\t['P5']\t['default','P3','P1','P2','P6','P4','P5']\n" + ) instance.query("ALTER USER robin SETTINGS PROFILE P2") - assert instance.http_query('SELECT defaultProfiles(), currentProfiles(), enabledProfiles()', user='robin', params={'session_id':session_id}) == "['P2']\t['P5']\t['default','P3','P1','P2','P6','P4','P5']\n" + assert ( + instance.http_query( + "SELECT defaultProfiles(), currentProfiles(), enabledProfiles()", + user="robin", + params={"session_id": session_id}, + ) + == "['P2']\t['P5']\t['default','P3','P1','P2','P6','P4','P5']\n" + ) def test_allow_ddl(): - assert "it's necessary to have grant" in instance.query_and_get_error("CREATE TABLE tbl(a Int32) ENGINE=Log", user="robin") - assert "it's necessary to have grant" in instance.query_and_get_error("GRANT CREATE ON tbl TO robin", user="robin") - assert "DDL queries are prohibited" in instance.query_and_get_error("CREATE TABLE tbl(a Int32) ENGINE=Log", settings={"allow_ddl": 0}) + assert "it's necessary to have grant" in instance.query_and_get_error( + "CREATE TABLE tbl(a Int32) ENGINE=Log", user="robin" + ) + assert "it's necessary to have grant" in instance.query_and_get_error( + "GRANT CREATE ON tbl TO robin", user="robin" + ) + assert "DDL queries are prohibited" in instance.query_and_get_error( + "CREATE TABLE tbl(a Int32) ENGINE=Log", settings={"allow_ddl": 0} + ) instance.query("GRANT CREATE ON tbl TO robin") instance.query("CREATE TABLE tbl(a Int32) ENGINE=Log", user="robin") @@ -272,27 +524,60 @@ def test_allow_ddl(): def test_allow_introspection(): - assert instance.query("SELECT demangle('a')", settings={"allow_introspection_functions": 1}) == "signed char\n" + assert ( + instance.query( + "SELECT demangle('a')", settings={"allow_introspection_functions": 1} + ) + == "signed char\n" + ) - assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')") - assert "it's necessary to have 
grant" in instance.query_and_get_error("SELECT demangle('a')", user="robin") - assert "it's necessary to have grant" in instance.query_and_get_error("SELECT demangle('a')", user="robin", settings={"allow_introspection_functions": 1}) + assert "Introspection functions are disabled" in instance.query_and_get_error( + "SELECT demangle('a')" + ) + assert "it's necessary to have grant" in instance.query_and_get_error( + "SELECT demangle('a')", user="robin" + ) + assert "it's necessary to have grant" in instance.query_and_get_error( + "SELECT demangle('a')", + user="robin", + settings={"allow_introspection_functions": 1}, + ) instance.query("GRANT demangle ON *.* TO robin") - assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin") - assert instance.query("SELECT demangle('a')", user="robin", settings={"allow_introspection_functions": 1}) == "signed char\n" + assert "Introspection functions are disabled" in instance.query_and_get_error( + "SELECT demangle('a')", user="robin" + ) + assert ( + instance.query( + "SELECT demangle('a')", + user="robin", + settings={"allow_introspection_functions": 1}, + ) + == "signed char\n" + ) instance.query("ALTER USER robin SETTINGS allow_introspection_functions=1") assert instance.query("SELECT demangle('a')", user="robin") == "signed char\n" instance.query("ALTER USER robin SETTINGS NONE") - assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin") + assert "Introspection functions are disabled" in instance.query_and_get_error( + "SELECT demangle('a')", user="robin" + ) - instance.query("CREATE SETTINGS PROFILE xyz SETTINGS allow_introspection_functions=1 TO robin") + instance.query( + "CREATE SETTINGS PROFILE xyz SETTINGS allow_introspection_functions=1 TO robin" + ) assert instance.query("SELECT demangle('a')", user="robin") == "signed char\n" instance.query("DROP SETTINGS PROFILE xyz") - assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin") + assert "Introspection functions are disabled" in instance.query_and_get_error( + "SELECT demangle('a')", user="robin" + ) - instance.query("REVOKE demangle ON *.* FROM robin", settings={"allow_introspection_functions": 1}) - assert "it's necessary to have grant" in instance.query_and_get_error("SELECT demangle('a')", user="robin") + instance.query( + "REVOKE demangle ON *.* FROM robin", + settings={"allow_introspection_functions": 1}, + ) + assert "it's necessary to have grant" in instance.query_and_get_error( + "SELECT demangle('a')", user="robin" + ) diff --git a/tests/integration/test_sharding_key_from_default_column/test.py b/tests/integration/test_sharding_key_from_default_column/test.py index 1717a1ee14a..1ecf96305a4 100644 --- a/tests/integration/test_sharding_key_from_default_column/test.py +++ b/tests/integration/test_sharding_key_from_default_column/test.py @@ -5,8 +5,12 @@ from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/test_cluster.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/test_cluster.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/test_cluster.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/test_cluster.xml"], with_zookeeper=True +) @pytest.fixture(scope="module", autouse=True) @@ -29,88 +33,162 @@ def 
cleanup_after_test(): # A default column is used in the sharding key expression. def test_default_column(): - node1.query("CREATE TABLE dist ON CLUSTER 'test_cluster' (x Int32, y Int32 DEFAULT x + 100, z Int32 DEFAULT x + y) ENGINE = Distributed('test_cluster', currentDatabase(), local, y)") - node1.query("CREATE TABLE local ON CLUSTER 'test_cluster' (x Int32, y Int32 DEFAULT x + 200, z Int32 DEFAULT x - y) ENGINE = MergeTree() ORDER BY y") + node1.query( + "CREATE TABLE dist ON CLUSTER 'test_cluster' (x Int32, y Int32 DEFAULT x + 100, z Int32 DEFAULT x + y) ENGINE = Distributed('test_cluster', currentDatabase(), local, y)" + ) + node1.query( + "CREATE TABLE local ON CLUSTER 'test_cluster' (x Int32, y Int32 DEFAULT x + 200, z Int32 DEFAULT x - y) ENGINE = MergeTree() ORDER BY y" + ) for insert_sync in [0, 1]: - settings = {'insert_distributed_sync': insert_sync} - + settings = {"insert_distributed_sync": insert_sync} + # INSERT INTO TABLE dist (x) node1.query("TRUNCATE TABLE local ON CLUSTER 'test_cluster'") - node1.query("INSERT INTO TABLE dist (x) VALUES (1), (2), (3), (4)", settings=settings) + node1.query( + "INSERT INTO TABLE dist (x) VALUES (1), (2), (3), (4)", settings=settings + ) node1.query("SYSTEM FLUSH DISTRIBUTED dist") - assert node1.query("SELECT x, y, z FROM local") == TSV([[2, 102, 104], [4, 104, 108]]) - assert node2.query("SELECT x, y, z FROM local") == TSV([[1, 101, 102], [3, 103, 106]]) - assert node1.query("SELECT x, y, z FROM dist") == TSV([[2, 102, 104], [4, 104, 108], [1, 101, 102], [3, 103, 106]]) + assert node1.query("SELECT x, y, z FROM local") == TSV( + [[2, 102, 104], [4, 104, 108]] + ) + assert node2.query("SELECT x, y, z FROM local") == TSV( + [[1, 101, 102], [3, 103, 106]] + ) + assert node1.query("SELECT x, y, z FROM dist") == TSV( + [[2, 102, 104], [4, 104, 108], [1, 101, 102], [3, 103, 106]] + ) # INSERT INTO TABLE dist (x, y) node1.query("TRUNCATE TABLE local ON CLUSTER 'test_cluster'") - node1.query("INSERT INTO TABLE dist (x, y) VALUES (1, 11), (2, 22), (3, 33)", settings=settings) + node1.query( + "INSERT INTO TABLE dist (x, y) VALUES (1, 11), (2, 22), (3, 33)", + settings=settings, + ) node1.query("SYSTEM FLUSH DISTRIBUTED dist") assert node1.query("SELECT x, y, z FROM local") == TSV([[2, 22, 24]]) - assert node2.query("SELECT x, y, z FROM local") == TSV([[1, 11, 12], [3, 33, 36]]) - assert node1.query("SELECT x, y, z FROM dist") == TSV([[2, 22, 24], [1, 11, 12], [3, 33, 36]]) + assert node2.query("SELECT x, y, z FROM local") == TSV( + [[1, 11, 12], [3, 33, 36]] + ) + assert node1.query("SELECT x, y, z FROM dist") == TSV( + [[2, 22, 24], [1, 11, 12], [3, 33, 36]] + ) # A materialized column is used in the sharding key expression and `insert_allow_materialized_columns` set to 1. 
def test_materialized_column_allow_insert_materialized(): - node1.query("CREATE TABLE dist ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 100, z Int32 MATERIALIZED x + y) ENGINE = Distributed('test_cluster', currentDatabase(), local, y)") - node1.query("CREATE TABLE local ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 200, z Int32 MATERIALIZED x - y) ENGINE = MergeTree() ORDER BY y") + node1.query( + "CREATE TABLE dist ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 100, z Int32 MATERIALIZED x + y) ENGINE = Distributed('test_cluster', currentDatabase(), local, y)" + ) + node1.query( + "CREATE TABLE local ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 200, z Int32 MATERIALIZED x - y) ENGINE = MergeTree() ORDER BY y" + ) for insert_sync in [0, 1]: - settings = {'insert_distributed_sync': insert_sync, 'insert_allow_materialized_columns': 1} - + settings = { + "insert_distributed_sync": insert_sync, + "insert_allow_materialized_columns": 1, + } + # INSERT INTO TABLE dist (x) node1.query("TRUNCATE TABLE local ON CLUSTER 'test_cluster'") - node1.query("INSERT INTO TABLE dist (x) VALUES (1), (2), (3), (4)", settings=settings) + node1.query( + "INSERT INTO TABLE dist (x) VALUES (1), (2), (3), (4)", settings=settings + ) node1.query("SYSTEM FLUSH DISTRIBUTED dist") - assert node1.query("SELECT x, y, z FROM local") == TSV([[2, 102, 104], [4, 104, 108]]) - assert node2.query("SELECT x, y, z FROM local") == TSV([[1, 101, 102], [3, 103, 106]]) - assert node1.query("SELECT x, y, z FROM dist") == TSV([[2, 102, 104], [4, 104, 108], [1, 101, 102], [3, 103, 106]]) + assert node1.query("SELECT x, y, z FROM local") == TSV( + [[2, 102, 104], [4, 104, 108]] + ) + assert node2.query("SELECT x, y, z FROM local") == TSV( + [[1, 101, 102], [3, 103, 106]] + ) + assert node1.query("SELECT x, y, z FROM dist") == TSV( + [[2, 102, 104], [4, 104, 108], [1, 101, 102], [3, 103, 106]] + ) # INSERT INTO TABLE dist (x, y) node1.query("TRUNCATE TABLE local ON CLUSTER 'test_cluster'") - node1.query("INSERT INTO TABLE dist (x, y) VALUES (1, 11), (2, 22), (3, 33)", settings=settings) + node1.query( + "INSERT INTO TABLE dist (x, y) VALUES (1, 11), (2, 22), (3, 33)", + settings=settings, + ) node1.query("SYSTEM FLUSH DISTRIBUTED dist") assert node1.query("SELECT x, y, z FROM local") == TSV([[2, 22, 24]]) - assert node2.query("SELECT x, y, z FROM local") == TSV([[1, 11, 12], [3, 33, 36]]) - assert node1.query("SELECT x, y, z FROM dist") == TSV([[2, 22, 24], [1, 11, 12], [3, 33, 36]]) + assert node2.query("SELECT x, y, z FROM local") == TSV( + [[1, 11, 12], [3, 33, 36]] + ) + assert node1.query("SELECT x, y, z FROM dist") == TSV( + [[2, 22, 24], [1, 11, 12], [3, 33, 36]] + ) # A materialized column is used in the sharding key expression and `insert_allow_materialized_columns` set to 0. 
def test_materialized_column_disallow_insert_materialized(): - node1.query("CREATE TABLE dist ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 100, z Int32 MATERIALIZED x + y) ENGINE = Distributed('test_cluster', currentDatabase(), local, y)") - node1.query("CREATE TABLE local ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 200, z Int32 MATERIALIZED x - y) ENGINE = MergeTree() ORDER BY y") + node1.query( + "CREATE TABLE dist ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 100, z Int32 MATERIALIZED x + y) ENGINE = Distributed('test_cluster', currentDatabase(), local, y)" + ) + node1.query( + "CREATE TABLE local ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 200, z Int32 MATERIALIZED x - y) ENGINE = MergeTree() ORDER BY y" + ) for insert_sync in [0, 1]: - settings = {'insert_distributed_sync': insert_sync, 'insert_allow_materialized_columns': 0} - + settings = { + "insert_distributed_sync": insert_sync, + "insert_allow_materialized_columns": 0, + } + # INSERT INTO TABLE dist (x) node1.query("TRUNCATE TABLE local ON CLUSTER 'test_cluster'") - node1.query("INSERT INTO TABLE dist (x) VALUES (1), (2), (3), (4)", settings=settings) + node1.query( + "INSERT INTO TABLE dist (x) VALUES (1), (2), (3), (4)", settings=settings + ) node1.query("SYSTEM FLUSH DISTRIBUTED dist") - assert node1.query("SELECT x, y, z FROM local") == TSV([[2, 202, -200], [4, 204, -200]]) - assert node2.query("SELECT x, y, z FROM local") == TSV([[1, 201, -200], [3, 203, -200]]) - assert node1.query("SELECT x, y, z FROM dist") == TSV([[2, 202, -200], [4, 204, -200], [1, 201, -200], [3, 203, -200]]) + assert node1.query("SELECT x, y, z FROM local") == TSV( + [[2, 202, -200], [4, 204, -200]] + ) + assert node2.query("SELECT x, y, z FROM local") == TSV( + [[1, 201, -200], [3, 203, -200]] + ) + assert node1.query("SELECT x, y, z FROM dist") == TSV( + [[2, 202, -200], [4, 204, -200], [1, 201, -200], [3, 203, -200]] + ) # INSERT INTO TABLE dist (x, y) node1.query("TRUNCATE TABLE local ON CLUSTER 'test_cluster'") expected_error = "Cannot insert column y, because it is MATERIALIZED column" - assert expected_error in node1.query_and_get_error("INSERT INTO TABLE dist (x, y) VALUES (1, 11), (2, 22), (3, 33)", settings=settings) + assert expected_error in node1.query_and_get_error( + "INSERT INTO TABLE dist (x, y) VALUES (1, 11), (2, 22), (3, 33)", + settings=settings, + ) # Almost the same as the previous test `test_materialized_column_disallow_insert_materialized`, but the sharding key has different values. 
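To make the expected row placement in the test below easier to follow, here is a minimal sketch of the routing its assertions imply (illustration only; it assumes two equal-weight shards with an even sharding-key value landing on node1, which is consistent with the results asserted in these tests):

# The Distributed table computes the sharding key from its own MATERIALIZED
# expression (y = x + 101 in the test below) and routes each row by it.
def target_shard(x):
    y = x + 101
    return "node1" if y % 2 == 0 else "node2"

assert [target_shard(x) for x in (1, 2, 3, 4)] == ["node1", "node2", "node1", "node2"]

The local tables then recompute y and z from their own MATERIALIZED expressions, which is consistent with the local values asserted below (y = x + 200, z = -200).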
def test_materialized_column_disallow_insert_materialized_different_shards(): - node1.query("CREATE TABLE dist ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 101, z Int32 MATERIALIZED x + y) ENGINE = Distributed('test_cluster', currentDatabase(), local, y)") - node1.query("CREATE TABLE local ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 200, z Int32 MATERIALIZED x - y) ENGINE = MergeTree() ORDER BY y") + node1.query( + "CREATE TABLE dist ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 101, z Int32 MATERIALIZED x + y) ENGINE = Distributed('test_cluster', currentDatabase(), local, y)" + ) + node1.query( + "CREATE TABLE local ON CLUSTER 'test_cluster' (x Int32, y Int32 MATERIALIZED x + 200, z Int32 MATERIALIZED x - y) ENGINE = MergeTree() ORDER BY y" + ) for insert_sync in [0, 1]: - settings = {'insert_distributed_sync': insert_sync, 'insert_allow_materialized_columns': 0} - + settings = { + "insert_distributed_sync": insert_sync, + "insert_allow_materialized_columns": 0, + } + # INSERT INTO TABLE dist (x) node1.query("TRUNCATE TABLE local ON CLUSTER 'test_cluster'") - node1.query("INSERT INTO TABLE dist (x) VALUES (1), (2), (3), (4)", settings=settings) + node1.query( + "INSERT INTO TABLE dist (x) VALUES (1), (2), (3), (4)", settings=settings + ) node1.query("SYSTEM FLUSH DISTRIBUTED dist") - assert node1.query("SELECT x, y, z FROM local") == TSV([[1, 201, -200], [3, 203, -200]]) - assert node2.query("SELECT x, y, z FROM local") == TSV([[2, 202, -200], [4, 204, -200]]) - assert node1.query("SELECT x, y, z FROM dist") == TSV([[1, 201, -200], [3, 203, -200], [2, 202, -200], [4, 204, -200]]) + assert node1.query("SELECT x, y, z FROM local") == TSV( + [[1, 201, -200], [3, 203, -200]] + ) + assert node2.query("SELECT x, y, z FROM local") == TSV( + [[2, 202, -200], [4, 204, -200]] + ) + assert node1.query("SELECT x, y, z FROM dist") == TSV( + [[1, 201, -200], [3, 203, -200], [2, 202, -200], [4, 204, -200]] + ) diff --git a/tests/integration/test_sql_user_defined_functions_on_cluster/test.py b/tests/integration/test_sql_user_defined_functions_on_cluster/test.py index d5c74a99622..c940998ec42 100644 --- a/tests/integration/test_sql_user_defined_functions_on_cluster/test.py +++ b/tests/integration/test_sql_user_defined_functions_on_cluster/test.py @@ -2,9 +2,15 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -ch1 = cluster.add_instance('ch1', main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True) -ch2 = cluster.add_instance('ch2', main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True) -ch3 = cluster.add_instance('ch3', main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True) +ch1 = cluster.add_instance( + "ch1", main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True +) +ch2 = cluster.add_instance( + "ch2", main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True +) +ch3 = cluster.add_instance( + "ch3", main_configs=["configs/config.d/clusters.xml"], with_zookeeper=True +) @pytest.fixture(scope="module", autouse=True) @@ -18,17 +24,31 @@ def started_cluster(): def test_sql_user_defined_functions_on_cluster(): - assert "Unknown function test_function" in ch1.query_and_get_error("SELECT test_function(1);") - assert "Unknown function test_function" in ch2.query_and_get_error("SELECT test_function(1);") - assert "Unknown function test_function" in ch3.query_and_get_error("SELECT test_function(1);") + assert "Unknown function test_function" in 
ch1.query_and_get_error( + "SELECT test_function(1);" + ) + assert "Unknown function test_function" in ch2.query_and_get_error( + "SELECT test_function(1);" + ) + assert "Unknown function test_function" in ch3.query_and_get_error( + "SELECT test_function(1);" + ) - ch1.query_with_retry("CREATE FUNCTION test_function ON CLUSTER 'cluster' AS x -> x + 1;") + ch1.query_with_retry( + "CREATE FUNCTION test_function ON CLUSTER 'cluster' AS x -> x + 1;" + ) assert ch1.query("SELECT test_function(1);") == "2\n" assert ch2.query("SELECT test_function(1);") == "2\n" assert ch3.query("SELECT test_function(1);") == "2\n" ch2.query_with_retry("DROP FUNCTION test_function ON CLUSTER 'cluster'") - assert "Unknown function test_function" in ch1.query_and_get_error("SELECT test_function(1);") - assert "Unknown function test_function" in ch2.query_and_get_error("SELECT test_function(1);") - assert "Unknown function test_function" in ch3.query_and_get_error("SELECT test_function(1);") + assert "Unknown function test_function" in ch1.query_and_get_error( + "SELECT test_function(1);" + ) + assert "Unknown function test_function" in ch2.query_and_get_error( + "SELECT test_function(1);" + ) + assert "Unknown function test_function" in ch3.query_and_get_error( + "SELECT test_function(1);" + ) diff --git a/tests/integration/test_ssl_cert_authentication/test.py b/tests/integration/test_ssl_cert_authentication/test.py index eceb4d10ae5..74bd08e9b35 100644 --- a/tests/integration/test_ssl_cert_authentication/test.py +++ b/tests/integration/test_ssl_cert_authentication/test.py @@ -5,14 +5,22 @@ import ssl import os.path HTTPS_PORT = 8443 -NODE_IP = '10.5.172.77' # It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf). -NODE_IP_WITH_HTTPS_PORT = NODE_IP + ':' + str(HTTPS_PORT) +NODE_IP = "10.5.172.77" # It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf). 
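The comment above says the node must run at this IP because 'server-cert.pem' requires it; a small sketch of how one could inspect that locally (illustration only; it assumes the third-party cryptography package, version 3.1+, and the certs/ directory referenced in this file; the helper name is made up):

# Hypothetical helper that lists the IP addresses in the server certificate's
# subjectAltName; TLS hostname/IP verification against this list is what ties
# the node to the fixed IP used in these tests.
import os.path
from cryptography import x509

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

def server_cert_ips(path=os.path.join(SCRIPT_DIR, "certs", "server-cert.pem")):
    with open(path, "rb") as f:
        cert = x509.load_pem_x509_certificate(f.read())
    san = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName)
    return [str(ip) for ip in san.value.get_values_for_type(x509.IPAddress)]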
+NODE_IP_WITH_HTTPS_PORT = NODE_IP + ":" + str(HTTPS_PORT) SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('node', ipv4_address=NODE_IP, - main_configs=['configs/ssl_config.xml', 'certs/server-key.pem', 'certs/server-cert.pem', 'certs/ca-cert.pem'], - user_configs=["configs/users_with_ssl_auth.xml"]) +instance = cluster.add_instance( + "node", + ipv4_address=NODE_IP, + main_configs=[ + "configs/ssl_config.xml", + "certs/server-key.pem", + "certs/server-cert.pem", + "certs/ca-cert.pem", + ], + user_configs=["configs/users_with_ssl_auth.xml"], +) @pytest.fixture(scope="module", autouse=True) @@ -27,41 +35,57 @@ def started_cluster(): def get_ssl_context(cert_name): context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) - context.load_verify_locations(cafile=f'{SCRIPT_DIR}/certs/ca-cert.pem') + context.load_verify_locations(cafile=f"{SCRIPT_DIR}/certs/ca-cert.pem") if cert_name: - context.load_cert_chain(f'{SCRIPT_DIR}/certs/{cert_name}-cert.pem', f'{SCRIPT_DIR}/certs/{cert_name}-key.pem') + context.load_cert_chain( + f"{SCRIPT_DIR}/certs/{cert_name}-cert.pem", + f"{SCRIPT_DIR}/certs/{cert_name}-key.pem", + ) context.verify_mode = ssl.CERT_REQUIRED context.check_hostname = True return context -def execute_query_https(query, user, enable_ssl_auth=True, cert_name=None, password=None): - url = f'https://{NODE_IP_WITH_HTTPS_PORT}/?query={urllib.parse.quote(query)}' +def execute_query_https( + query, user, enable_ssl_auth=True, cert_name=None, password=None +): + url = f"https://{NODE_IP_WITH_HTTPS_PORT}/?query={urllib.parse.quote(query)}" request = urllib.request.Request(url) - request.add_header('X-ClickHouse-User', user) + request.add_header("X-ClickHouse-User", user) if enable_ssl_auth: - request.add_header('X-ClickHouse-SSL-Certificate-Auth', 'on') + request.add_header("X-ClickHouse-SSL-Certificate-Auth", "on") if password: - request.add_header('X-ClickHouse-Key', password) - response = urllib.request.urlopen(request, context=get_ssl_context(cert_name)).read() - return response.decode('utf-8') + request.add_header("X-ClickHouse-Key", password) + response = urllib.request.urlopen( + request, context=get_ssl_context(cert_name) + ).read() + return response.decode("utf-8") def test_https(): - assert execute_query_https("SELECT currentUser()", user="john", cert_name='client1') == "john\n" - assert execute_query_https("SELECT currentUser()", user="lucy", cert_name='client2') == "lucy\n" - assert execute_query_https("SELECT currentUser()", user="lucy", cert_name='client3') == "lucy\n" + assert ( + execute_query_https("SELECT currentUser()", user="john", cert_name="client1") + == "john\n" + ) + assert ( + execute_query_https("SELECT currentUser()", user="lucy", cert_name="client2") + == "lucy\n" + ) + assert ( + execute_query_https("SELECT currentUser()", user="lucy", cert_name="client3") + == "lucy\n" + ) def test_https_wrong_cert(): # Wrong certificate: different user's certificate with pytest.raises(Exception) as err: - execute_query_https("SELECT currentUser()", user="john", cert_name='client2') + execute_query_https("SELECT currentUser()", user="john", cert_name="client2") assert "HTTP Error 403" in str(err.value) # Wrong certificate: self-signed certificate. with pytest.raises(Exception) as err: - execute_query_https("SELECT currentUser()", user="john", cert_name='wrong') + execute_query_https("SELECT currentUser()", user="john", cert_name="wrong") assert "unknown ca" in str(err.value) # No certificate. 
@@ -71,47 +95,141 @@ def test_https_wrong_cert(): # No header enabling SSL authentication. with pytest.raises(Exception) as err: - execute_query_https("SELECT currentUser()", user="john", enable_ssl_auth=False, cert_name='client1') + execute_query_https( + "SELECT currentUser()", + user="john", + enable_ssl_auth=False, + cert_name="client1", + ) def test_https_non_ssl_auth(): # Users with non-SSL authentication are allowed, in this case we can skip sending a client certificate at all (because "verificationMode" is set to "relaxed"). - #assert execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False) == "peter\n" - assert execute_query_https("SELECT currentUser()", user="jane", enable_ssl_auth=False, password='qwe123') == "jane\n" + # assert execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False) == "peter\n" + assert ( + execute_query_https( + "SELECT currentUser()", + user="jane", + enable_ssl_auth=False, + password="qwe123", + ) + == "jane\n" + ) # But we still can send a certificate if we want. - assert execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False, cert_name='client1') == "peter\n" - assert execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False, cert_name='client2') == "peter\n" - assert execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False, cert_name='client3') == "peter\n" - - assert execute_query_https("SELECT currentUser()", user="jane", enable_ssl_auth=False, password='qwe123', cert_name='client1') == "jane\n" - assert execute_query_https("SELECT currentUser()", user="jane", enable_ssl_auth=False, password='qwe123', cert_name='client2') == "jane\n" - assert execute_query_https("SELECT currentUser()", user="jane", enable_ssl_auth=False, password='qwe123', cert_name='client3') == "jane\n" + assert ( + execute_query_https( + "SELECT currentUser()", + user="peter", + enable_ssl_auth=False, + cert_name="client1", + ) + == "peter\n" + ) + assert ( + execute_query_https( + "SELECT currentUser()", + user="peter", + enable_ssl_auth=False, + cert_name="client2", + ) + == "peter\n" + ) + assert ( + execute_query_https( + "SELECT currentUser()", + user="peter", + enable_ssl_auth=False, + cert_name="client3", + ) + == "peter\n" + ) + + assert ( + execute_query_https( + "SELECT currentUser()", + user="jane", + enable_ssl_auth=False, + password="qwe123", + cert_name="client1", + ) + == "jane\n" + ) + assert ( + execute_query_https( + "SELECT currentUser()", + user="jane", + enable_ssl_auth=False, + password="qwe123", + cert_name="client2", + ) + == "jane\n" + ) + assert ( + execute_query_https( + "SELECT currentUser()", + user="jane", + enable_ssl_auth=False, + password="qwe123", + cert_name="client3", + ) + == "jane\n" + ) # However if we send a certificate it must not be wrong. 
with pytest.raises(Exception) as err: - execute_query_https("SELECT currentUser()", user="peter", enable_ssl_auth=False, cert_name='wrong') + execute_query_https( + "SELECT currentUser()", + user="peter", + enable_ssl_auth=False, + cert_name="wrong", + ) assert "unknown ca" in str(err.value) with pytest.raises(Exception) as err: - execute_query_https("SELECT currentUser()", user="jane", enable_ssl_auth=False, password='qwe123', cert_name='wrong') + execute_query_https( + "SELECT currentUser()", + user="jane", + enable_ssl_auth=False, + password="qwe123", + cert_name="wrong", + ) assert "unknown ca" in str(err.value) def test_create_user(): instance.query("CREATE USER emma IDENTIFIED WITH ssl_certificate CN 'client3'") - assert execute_query_https("SELECT currentUser()", user="emma", cert_name='client3') == "emma\n" - assert instance.query("SHOW CREATE USER emma") == "CREATE USER emma IDENTIFIED WITH ssl_certificate CN \\'client3\\'\n" + assert ( + execute_query_https("SELECT currentUser()", user="emma", cert_name="client3") + == "emma\n" + ) + assert ( + instance.query("SHOW CREATE USER emma") + == "CREATE USER emma IDENTIFIED WITH ssl_certificate CN \\'client3\\'\n" + ) instance.query("ALTER USER emma IDENTIFIED WITH ssl_certificate CN 'client2'") - assert execute_query_https("SELECT currentUser()", user="emma", cert_name='client2') == "emma\n" - assert instance.query("SHOW CREATE USER emma") == "CREATE USER emma IDENTIFIED WITH ssl_certificate CN \\'client2\\'\n" + assert ( + execute_query_https("SELECT currentUser()", user="emma", cert_name="client2") + == "emma\n" + ) + assert ( + instance.query("SHOW CREATE USER emma") + == "CREATE USER emma IDENTIFIED WITH ssl_certificate CN \\'client2\\'\n" + ) with pytest.raises(Exception) as err: - execute_query_https("SELECT currentUser()", user="emma", cert_name='client3') + execute_query_https("SELECT currentUser()", user="emma", cert_name="client3") assert "HTTP Error 403" in str(err.value) - assert instance.query("SHOW CREATE USER lucy") == "CREATE USER lucy IDENTIFIED WITH ssl_certificate CN \\'client2\\', \\'client3\\'\n" + assert ( + instance.query("SHOW CREATE USER lucy") + == "CREATE USER lucy IDENTIFIED WITH ssl_certificate CN \\'client2\\', \\'client3\\'\n" + ) - assert instance.query("SELECT name, auth_type, auth_params FROM system.users WHERE name IN ['emma', 'lucy'] ORDER BY name") ==\ - "emma\tssl_certificate\t{\"common_names\":[\"client2\"]}\n"\ - "lucy\tssl_certificate\t{\"common_names\":[\"client2\",\"client3\"]}\n" + assert ( + instance.query( + "SELECT name, auth_type, auth_params FROM system.users WHERE name IN ['emma', 'lucy'] ORDER BY name" + ) + == 'emma\tssl_certificate\t{"common_names":["client2"]}\n' + 'lucy\tssl_certificate\t{"common_names":["client2","client3"]}\n' + ) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index 4e848dc2915..7f340424ccf 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -5,7 +5,7 @@ from helpers.cluster import ClickHouseCluster from pyhdfs import HdfsClient cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_hdfs=True) +node1 = cluster.add_instance("node1", with_hdfs=True) @pytest.fixture(scope="module") @@ -16,11 +16,13 @@ def started_cluster(): finally: cluster.shutdown() + def test_read_write_storage(started_cluster): hdfs_api = started_cluster.hdfs_api node1.query("drop table if exists SimpleHDFSStorage SYNC") node1.query( - "create table 
SimpleHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/simple_storage', 'TSV')") + "create table SimpleHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/simple_storage', 'TSV')" + ) node1.query("insert into SimpleHDFSStorage values (1, 'Mark', 72.53)") assert hdfs_api.read_data("/simple_storage") == "1\tMark\t72.53\n" assert node1.query("select * from SimpleHDFSStorage") == "1\tMark\t72.53\n" @@ -30,13 +32,17 @@ def test_read_write_storage_with_globs(started_cluster): hdfs_api = started_cluster.hdfs_api node1.query( - "create table HDFSStorageWithRange (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage{1..5}', 'TSV')") + "create table HDFSStorageWithRange (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage{1..5}', 'TSV')" + ) node1.query( - "create table HDFSStorageWithEnum (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage{1,2,3,4,5}', 'TSV')") + "create table HDFSStorageWithEnum (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage{1,2,3,4,5}', 'TSV')" + ) node1.query( - "create table HDFSStorageWithQuestionMark (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage?', 'TSV')") + "create table HDFSStorageWithQuestionMark (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage?', 'TSV')" + ) node1.query( - "create table HDFSStorageWithAsterisk (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage*', 'TSV')") + "create table HDFSStorageWithAsterisk (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage*', 'TSV')" + ) for i in ["1", "2", "3"]: hdfs_api.write_data("/storage" + i, i + "\tMark\t72.53\n") @@ -72,23 +78,28 @@ def test_read_write_storage_with_globs(started_cluster): def test_read_write_table(started_cluster): hdfs_api = started_cluster.hdfs_api - data = "1\tSerialize\t555.222\n2\tData\t777.333\n" hdfs_api.write_data("/simple_table_function", data) assert hdfs_api.read_data("/simple_table_function") == data - assert node1.query( - "select * from hdfs('hdfs://hdfs1:9000/simple_table_function', 'TSV', 'id UInt64, text String, number Float64')") == data + assert ( + node1.query( + "select * from hdfs('hdfs://hdfs1:9000/simple_table_function', 'TSV', 'id UInt64, text String, number Float64')" + ) + == data + ) def test_write_table(started_cluster): hdfs_api = started_cluster.hdfs_api - node1.query( - "create table OtherHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')") - node1.query("insert into OtherHDFSStorage values (10, 'tomas', 55.55), (11, 'jack', 32.54)") + "create table OtherHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/other_storage', 'TSV')" + ) + node1.query( + "insert into OtherHDFSStorage values (10, 'tomas', 55.55), (11, 'jack', 32.54)" + ) result = "10\ttomas\t55.55\n11\tjack\t32.54\n" assert hdfs_api.read_data("/other_storage") == result @@ -98,117 +109,154 @@ def test_write_table(started_cluster): def test_bad_hdfs_uri(started_cluster): try: node1.query( - "create table BadStorage1 (id UInt32, name String, weight Float64) ENGINE = HDFS('hads:hgsdfs100500:9000/other_storage', 'TSV')") + "create table BadStorage1 (id UInt32, name String, weight Float64) ENGINE = HDFS('hads:hgsdfs100500:9000/other_storage', 'TSV')" + ) except Exception as ex: print(ex) assert "Bad hdfs url" in 
str(ex) try: node1.query( - "create table BadStorage2 (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs100500:9000/other_storage', 'TSV')") + "create table BadStorage2 (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs100500:9000/other_storage', 'TSV')" + ) except Exception as ex: print(ex) assert "Unable to create builder to connect to HDFS" in str(ex) try: node1.query( - "create table BadStorage3 (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/<>', 'TSV')") + "create table BadStorage3 (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/<>', 'TSV')" + ) except Exception as ex: print(ex) assert "Unable to open HDFS file" in str(ex) + @pytest.mark.timeout(800) def test_globs_in_read_table(started_cluster): hdfs_api = started_cluster.hdfs_api - some_data = "1\tSerialize\t555.222\n2\tData\t777.333\n" globs_dir = "/dir_for_test_with_globs/" - files = ["dir1/dir_dir/file1", "dir2/file2", "simple_table_function", "dir/file", "some_dir/dir1/file", - "some_dir/dir2/file", "some_dir/file", "table1_function", "table2_function", "table3_function"] + files = [ + "dir1/dir_dir/file1", + "dir2/file2", + "simple_table_function", + "dir/file", + "some_dir/dir1/file", + "some_dir/dir2/file", + "some_dir/file", + "table1_function", + "table2_function", + "table3_function", + ] for filename in files: hdfs_api.write_data(globs_dir + filename, some_data) - test_requests = [("dir{1..5}/dir_dir/file1", 1, 1), - ("*_table_functio?", 1, 1), - ("dir/fil?", 1, 1), - ("table{3..8}_function", 1, 1), - ("table{2..8}_function", 2, 2), - ("dir/*", 1, 1), - ("dir/*?*?*?*?*", 1, 1), - ("dir/*?*?*?*?*?*", 0, 0), - ("some_dir/*/file", 2, 1), - ("some_dir/dir?/*", 2, 1), - ("*/*/*", 3, 2), - ("?", 0, 0)] + test_requests = [ + ("dir{1..5}/dir_dir/file1", 1, 1), + ("*_table_functio?", 1, 1), + ("dir/fil?", 1, 1), + ("table{3..8}_function", 1, 1), + ("table{2..8}_function", 2, 2), + ("dir/*", 1, 1), + ("dir/*?*?*?*?*", 1, 1), + ("dir/*?*?*?*?*?*", 0, 0), + ("some_dir/*/file", 2, 1), + ("some_dir/dir?/*", 2, 1), + ("*/*/*", 3, 2), + ("?", 0, 0), + ] for pattern, paths_amount, files_amount in test_requests: - inside_table_func = "'hdfs://hdfs1:9000" + globs_dir + pattern + "', 'TSV', 'id UInt64, text String, number Float64'" + inside_table_func = ( + "'hdfs://hdfs1:9000" + + globs_dir + + pattern + + "', 'TSV', 'id UInt64, text String, number Float64'" + ) print("inside_table_func ", inside_table_func) - assert node1.query("select * from hdfs(" + inside_table_func + ")") == paths_amount * some_data - assert node1.query("select count(distinct _path) from hdfs(" + inside_table_func + ")").rstrip() == str( - paths_amount) - assert node1.query("select count(distinct _file) from hdfs(" + inside_table_func + ")").rstrip() == str( - files_amount) + assert ( + node1.query("select * from hdfs(" + inside_table_func + ")") + == paths_amount * some_data + ) + assert node1.query( + "select count(distinct _path) from hdfs(" + inside_table_func + ")" + ).rstrip() == str(paths_amount) + assert node1.query( + "select count(distinct _file) from hdfs(" + inside_table_func + ")" + ).rstrip() == str(files_amount) def test_read_write_gzip_table(started_cluster): hdfs_api = started_cluster.hdfs_api - data = "1\tHello Jessica\t555.222\n2\tI rolled a joint\t777.333\n" hdfs_api.write_gzip_data("/simple_table_function.gz", data) assert hdfs_api.read_gzip_data("/simple_table_function.gz") == data - assert node1.query( - "select * from 
hdfs('hdfs://hdfs1:9000/simple_table_function.gz', 'TSV', 'id UInt64, text String, number Float64')") == data + assert ( + node1.query( + "select * from hdfs('hdfs://hdfs1:9000/simple_table_function.gz', 'TSV', 'id UInt64, text String, number Float64')" + ) + == data + ) def test_read_write_gzip_table_with_parameter_gzip(started_cluster): hdfs_api = started_cluster.hdfs_api - data = "1\tHello Jessica\t555.222\n2\tI rolled a joint\t777.333\n" hdfs_api.write_gzip_data("/simple_table_function", data) assert hdfs_api.read_gzip_data("/simple_table_function") == data - assert node1.query( - "select * from hdfs('hdfs://hdfs1:9000/simple_table_function', 'TSV', 'id UInt64, text String, number Float64', 'gzip')") == data + assert ( + node1.query( + "select * from hdfs('hdfs://hdfs1:9000/simple_table_function', 'TSV', 'id UInt64, text String, number Float64', 'gzip')" + ) + == data + ) def test_read_write_table_with_parameter_none(started_cluster): hdfs_api = started_cluster.hdfs_api - data = "1\tHello Jessica\t555.222\n2\tI rolled a joint\t777.333\n" hdfs_api.write_data("/simple_table_function.gz", data) assert hdfs_api.read_data("/simple_table_function.gz") == data - assert node1.query( - "select * from hdfs('hdfs://hdfs1:9000/simple_table_function.gz', 'TSV', 'id UInt64, text String, number Float64', 'none')") == data + assert ( + node1.query( + "select * from hdfs('hdfs://hdfs1:9000/simple_table_function.gz', 'TSV', 'id UInt64, text String, number Float64', 'none')" + ) + == data + ) def test_read_write_gzip_table_with_parameter_auto_gz(started_cluster): hdfs_api = started_cluster.hdfs_api - data = "1\tHello Jessica\t555.222\n2\tI rolled a joint\t777.333\n" hdfs_api.write_gzip_data("/simple_table_function.gz", data) assert hdfs_api.read_gzip_data("/simple_table_function.gz") == data - assert node1.query( - "select * from hdfs('hdfs://hdfs1:9000/simple_table_function.gz', 'TSV', 'id UInt64, text String, number Float64', 'auto')") == data + assert ( + node1.query( + "select * from hdfs('hdfs://hdfs1:9000/simple_table_function.gz', 'TSV', 'id UInt64, text String, number Float64', 'auto')" + ) + == data + ) def test_write_gz_storage(started_cluster): hdfs_api = started_cluster.hdfs_api - node1.query( - "create table GZHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage.gz', 'TSV')") + "create table GZHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/storage.gz', 'TSV')" + ) node1.query("insert into GZHDFSStorage values (1, 'Mark', 72.53)") assert hdfs_api.read_gzip_data("/storage.gz") == "1\tMark\t72.53\n" assert node1.query("select * from GZHDFSStorage") == "1\tMark\t72.53\n" @@ -217,9 +265,9 @@ def test_write_gz_storage(started_cluster): def test_write_gzip_storage(started_cluster): hdfs_api = started_cluster.hdfs_api - node1.query( - "create table GZIPHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/gzip_storage', 'TSV', 'gzip')") + "create table GZIPHDFSStorage (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/gzip_storage', 'TSV', 'gzip')" + ) node1.query("insert into GZIPHDFSStorage values (1, 'Mark', 72.53)") assert hdfs_api.read_gzip_data("/gzip_storage") == "1\tMark\t72.53\n" assert node1.query("select * from GZIPHDFSStorage") == "1\tMark\t72.53\n" @@ -228,19 +276,26 @@ def test_write_gzip_storage(started_cluster): def test_virtual_columns(started_cluster): hdfs_api = started_cluster.hdfs_api - node1.query("create table virtual_cols (id UInt32) ENGINE = 
HDFS('hdfs://hdfs1:9000/file*', 'TSV')") + node1.query( + "create table virtual_cols (id UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/file*', 'TSV')" + ) hdfs_api.write_data("/file1", "1\n") hdfs_api.write_data("/file2", "2\n") hdfs_api.write_data("/file3", "3\n") expected = "1\tfile1\thdfs://hdfs1:9000//file1\n2\tfile2\thdfs://hdfs1:9000//file2\n3\tfile3\thdfs://hdfs1:9000//file3\n" - assert node1.query("select id, _file as file_name, _path as file_path from virtual_cols order by id") == expected + assert ( + node1.query( + "select id, _file as file_name, _path as file_path from virtual_cols order by id" + ) + == expected + ) def test_read_files_with_spaces(started_cluster): hdfs_api = started_cluster.hdfs_api fs = HdfsClient(hosts=started_cluster.hdfs_ip) - dir = '/test_spaces' + dir = "/test_spaces" exists = fs.exists(dir) if exists: fs.delete(dir, recursive=True) @@ -250,16 +305,18 @@ def test_read_files_with_spaces(started_cluster): hdfs_api.write_data(f"{dir}/test test test 2.txt", "2\n") hdfs_api.write_data(f"{dir}/test test test 3.txt", "3\n") - node1.query(f"create table test (id UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{dir}/test*', 'TSV')") + node1.query( + f"create table test (id UInt32) ENGINE = HDFS('hdfs://hdfs1:9000/{dir}/test*', 'TSV')" + ) assert node1.query("select * from test order by id") == "1\n2\n3\n" fs.delete(dir, recursive=True) - def test_truncate_table(started_cluster): hdfs_api = started_cluster.hdfs_api node1.query( - "create table test_truncate (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/tr', 'TSV')") + "create table test_truncate (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/tr', 'TSV')" + ) node1.query("insert into test_truncate values (1, 'Mark', 72.53)") assert hdfs_api.read_data("/tr") == "1\tMark\t72.53\n" assert node1.query("select * from test_truncate") == "1\tMark\t72.53\n" @@ -277,38 +334,60 @@ def test_partition_by(started_cluster): values = "(1, 2, 3), (3, 2, 1), (1, 3, 2)" table_function = f"hdfs('hdfs://hdfs1:9000/{file_name}', 'TSV', '{table_format}')" - node1.query(f"insert into table function {table_function} PARTITION BY {partition_by} values {values}") - result = node1.query(f"select * from hdfs('hdfs://hdfs1:9000/test_1', 'TSV', '{table_format}')") - assert(result.strip() == "3\t2\t1") - result = node1.query(f"select * from hdfs('hdfs://hdfs1:9000/test_2', 'TSV', '{table_format}')") - assert(result.strip() == "1\t3\t2") - result = node1.query(f"select * from hdfs('hdfs://hdfs1:9000/test_3', 'TSV', '{table_format}')") - assert(result.strip() == "1\t2\t3") + node1.query( + f"insert into table function {table_function} PARTITION BY {partition_by} values {values}" + ) + result = node1.query( + f"select * from hdfs('hdfs://hdfs1:9000/test_1', 'TSV', '{table_format}')" + ) + assert result.strip() == "3\t2\t1" + result = node1.query( + f"select * from hdfs('hdfs://hdfs1:9000/test_2', 'TSV', '{table_format}')" + ) + assert result.strip() == "1\t3\t2" + result = node1.query( + f"select * from hdfs('hdfs://hdfs1:9000/test_3', 'TSV', '{table_format}')" + ) + assert result.strip() == "1\t2\t3" file_name = "test2_{_partition_id}" - node1.query(f"create table p(column1 UInt32, column2 UInt32, column3 UInt32) engine = HDFS('hdfs://hdfs1:9000/{file_name}', 'TSV') partition by column3") + node1.query( + f"create table p(column1 UInt32, column2 UInt32, column3 UInt32) engine = HDFS('hdfs://hdfs1:9000/{file_name}', 'TSV') partition by column3" + ) node1.query(f"insert into p values {values}") - result = 
node1.query(f"select * from hdfs('hdfs://hdfs1:9000/test2_1', 'TSV', '{table_format}')") - assert(result.strip() == "3\t2\t1") - result = node1.query(f"select * from hdfs('hdfs://hdfs1:9000/test2_2', 'TSV', '{table_format}')") - assert(result.strip() == "1\t3\t2") - result = node1.query(f"select * from hdfs('hdfs://hdfs1:9000/test2_3', 'TSV', '{table_format}')") - assert(result.strip() == "1\t2\t3") + result = node1.query( + f"select * from hdfs('hdfs://hdfs1:9000/test2_1', 'TSV', '{table_format}')" + ) + assert result.strip() == "3\t2\t1" + result = node1.query( + f"select * from hdfs('hdfs://hdfs1:9000/test2_2', 'TSV', '{table_format}')" + ) + assert result.strip() == "1\t3\t2" + result = node1.query( + f"select * from hdfs('hdfs://hdfs1:9000/test2_3', 'TSV', '{table_format}')" + ) + assert result.strip() == "1\t2\t3" def test_seekable_formats(started_cluster): hdfs_api = started_cluster.hdfs_api - table_function = f"hdfs('hdfs://hdfs1:9000/parquet', 'Parquet', 'a Int32, b String')" - node1.query(f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000)") + table_function = ( + f"hdfs('hdfs://hdfs1:9000/parquet', 'Parquet', 'a Int32, b String')" + ) + node1.query( + f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000)" + ) result = node1.query(f"SELECT count() FROM {table_function}") - assert(int(result) == 5000000) + assert int(result) == 5000000 table_function = f"hdfs('hdfs://hdfs1:9000/orc', 'ORC', 'a Int32, b String')" - node1.query(f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000)") + node1.query( + f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000)" + ) result = node1.query(f"SELECT count() FROM {table_function}") - assert(int(result) == 5000000) + assert int(result) == 5000000 def test_read_table_with_default(started_cluster): @@ -319,31 +398,41 @@ def test_read_table_with_default(started_cluster): assert hdfs_api.read_data("/simple_table_function") == data output = "n\tm\n100\t200\n" - assert node1.query( - "select * from hdfs('hdfs://hdfs1:9000/simple_table_function', 'TSVWithNames', 'n UInt32, m UInt32 DEFAULT n * 2') FORMAT TSVWithNames") == output + assert ( + node1.query( + "select * from hdfs('hdfs://hdfs1:9000/simple_table_function', 'TSVWithNames', 'n UInt32, m UInt32 DEFAULT n * 2') FORMAT TSVWithNames" + ) + == output + ) def test_schema_inference(started_cluster): - node1.query(f"insert into table function hdfs('hdfs://hdfs1:9000/native', 'Native', 'a Int32, b String') SELECT number, randomString(100) FROM numbers(5000000)") + node1.query( + f"insert into table function hdfs('hdfs://hdfs1:9000/native', 'Native', 'a Int32, b String') SELECT number, randomString(100) FROM numbers(5000000)" + ) result = node1.query(f"desc hdfs('hdfs://hdfs1:9000/native', 'Native')") assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n" - result = node1.query(f"select count(*) from hdfs('hdfs://hdfs1:9000/native', 'Native')") - assert(int(result) == 5000000) + result = node1.query( + f"select count(*) from hdfs('hdfs://hdfs1:9000/native', 'Native')" + ) + assert int(result) == 5000000 - node1.query(f"create table schema_inference engine=HDFS('hdfs://hdfs1:9000/native', 'Native')") + node1.query( + f"create table schema_inference engine=HDFS('hdfs://hdfs1:9000/native', 'Native')" + ) result = node1.query(f"desc schema_inference") assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n" 
result = node1.query(f"select count(*) from schema_inference") - assert(int(result) == 5000000) + assert int(result) == 5000000 def test_hdfsCluster(started_cluster): hdfs_api = started_cluster.hdfs_api fs = HdfsClient(hosts=started_cluster.hdfs_ip) - dir = '/test_hdfsCluster' + dir = "/test_hdfsCluster" exists = fs.exists(dir) if exists: fs.delete(dir, recursive=True) @@ -352,31 +441,43 @@ def test_hdfsCluster(started_cluster): hdfs_api.write_data("/test_hdfsCluster/file2", "2\n") hdfs_api.write_data("/test_hdfsCluster/file3", "3\n") - actual = node1.query("select id, _file as file_name, _path as file_path from hdfs('hdfs://hdfs1:9000/test_hdfsCluster/file*', 'TSV', 'id UInt32') order by id") + actual = node1.query( + "select id, _file as file_name, _path as file_path from hdfs('hdfs://hdfs1:9000/test_hdfsCluster/file*', 'TSV', 'id UInt32') order by id" + ) expected = "1\tfile1\thdfs://hdfs1:9000/test_hdfsCluster/file1\n2\tfile2\thdfs://hdfs1:9000/test_hdfsCluster/file2\n3\tfile3\thdfs://hdfs1:9000/test_hdfsCluster/file3\n" assert actual == expected - actual = node1.query("select id, _file as file_name, _path as file_path from hdfsCluster('test_cluster_two_shards', 'hdfs://hdfs1:9000/test_hdfsCluster/file*', 'TSV', 'id UInt32') order by id") + actual = node1.query( + "select id, _file as file_name, _path as file_path from hdfsCluster('test_cluster_two_shards', 'hdfs://hdfs1:9000/test_hdfsCluster/file*', 'TSV', 'id UInt32') order by id" + ) expected = "1\tfile1\thdfs://hdfs1:9000/test_hdfsCluster/file1\n2\tfile2\thdfs://hdfs1:9000/test_hdfsCluster/file2\n3\tfile3\thdfs://hdfs1:9000/test_hdfsCluster/file3\n" assert actual == expected fs.delete(dir, recursive=True) + def test_hdfs_directory_not_exist(started_cluster): - ddl ="create table HDFSStorageWithNotExistDir (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/data/not_eixst', 'TSV')"; + ddl = "create table HDFSStorageWithNotExistDir (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://hdfs1:9000/data/not_eixst', 'TSV')" node1.query(ddl) assert "" == node1.query("select * from HDFSStorageWithNotExistDir") + def test_overwrite(started_cluster): hdfs_api = started_cluster.hdfs_api table_function = f"hdfs('hdfs://hdfs1:9000/data', 'Parquet', 'a Int32, b String')" node1.query(f"create table test_overwrite as {table_function}") - node1.query(f"insert into test_overwrite select number, randomString(100) from numbers(5)") - node1.query_and_get_error(f"insert into test_overwrite select number, randomString(100) FROM numbers(10)") - node1.query(f"insert into test_overwrite select number, randomString(100) from numbers(10) settings hdfs_truncate_on_insert=1") + node1.query( + f"insert into test_overwrite select number, randomString(100) from numbers(5)" + ) + node1.query_and_get_error( + f"insert into test_overwrite select number, randomString(100) FROM numbers(10)" + ) + node1.query( + f"insert into test_overwrite select number, randomString(100) from numbers(10) settings hdfs_truncate_on_insert=1" + ) result = node1.query(f"select count() from test_overwrite") - assert(int(result) == 10) + assert int(result) == 10 def test_multiple_inserts(started_cluster): @@ -384,54 +485,76 @@ def test_multiple_inserts(started_cluster): table_function = f"hdfs('hdfs://hdfs1:9000/data_multiple_inserts', 'Parquet', 'a Int32, b String')" node1.query(f"create table test_multiple_inserts as {table_function}") - node1.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(10)") - node1.query(f"insert 
into test_multiple_inserts select number, randomString(100) from numbers(20) settings hdfs_create_new_file_on_insert=1") - node1.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings hdfs_create_new_file_on_insert=1") + node1.query( + f"insert into test_multiple_inserts select number, randomString(100) from numbers(10)" + ) + node1.query( + f"insert into test_multiple_inserts select number, randomString(100) from numbers(20) settings hdfs_create_new_file_on_insert=1" + ) + node1.query( + f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings hdfs_create_new_file_on_insert=1" + ) result = node1.query(f"select count() from test_multiple_inserts") - assert(int(result) == 60) + assert int(result) == 60 result = node1.query(f"drop table test_multiple_inserts") table_function = f"hdfs('hdfs://hdfs1:9000/data_multiple_inserts.gz', 'Parquet', 'a Int32, b String')" node1.query(f"create table test_multiple_inserts as {table_function}") - node1.query(f"insert into test_multiple_inserts select number, randomString(100) FROM numbers(10)") - node1.query(f"insert into test_multiple_inserts select number, randomString(100) FROM numbers(20) settings hdfs_create_new_file_on_insert=1") - node1.query(f"insert into test_multiple_inserts select number, randomString(100) FROM numbers(30) settings hdfs_create_new_file_on_insert=1") + node1.query( + f"insert into test_multiple_inserts select number, randomString(100) FROM numbers(10)" + ) + node1.query( + f"insert into test_multiple_inserts select number, randomString(100) FROM numbers(20) settings hdfs_create_new_file_on_insert=1" + ) + node1.query( + f"insert into test_multiple_inserts select number, randomString(100) FROM numbers(30) settings hdfs_create_new_file_on_insert=1" + ) result = node1.query(f"select count() from test_multiple_inserts") - assert(int(result) == 60) + assert int(result) == 60 + - def test_format_detection(started_cluster): - node1.query(f"create table arrow_table (x UInt64) engine=HDFS('hdfs://hdfs1:9000/data.arrow')") + node1.query( + f"create table arrow_table (x UInt64) engine=HDFS('hdfs://hdfs1:9000/data.arrow')" + ) node1.query(f"insert into arrow_table select 1") result = node1.query(f"select * from hdfs('hdfs://hdfs1:9000/data.arrow')") - assert(int(result) == 1) + assert int(result) == 1 def test_schema_inference_with_globs(started_cluster): - node1.query(f"insert into table function hdfs('hdfs://hdfs1:9000/data1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL") - node1.query(f"insert into table function hdfs('hdfs://hdfs1:9000/data2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0") - - result = node1.query(f"desc hdfs('hdfs://hdfs1:9000/data*.jsoncompacteachrow')") - assert(result.strip() == 'c1\tNullable(Float64)') + node1.query( + f"insert into table function hdfs('hdfs://hdfs1:9000/data1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" + ) + node1.query( + f"insert into table function hdfs('hdfs://hdfs1:9000/data2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0" + ) - result = node1.query(f"select * from hdfs('hdfs://hdfs1:9000/data*.jsoncompacteachrow')") - assert(sorted(result.split()) == ['0', '\\N']) + result = node1.query(f"desc hdfs('hdfs://hdfs1:9000/data*.jsoncompacteachrow')") + assert result.strip() == "c1\tNullable(Float64)" + + result = node1.query( + f"select * from hdfs('hdfs://hdfs1:9000/data*.jsoncompacteachrow')" + 
) + assert sorted(result.split()) == ["0", "\\N"] def test_insert_select_schema_inference(started_cluster): - node1.query(f"insert into table function hdfs('hdfs://hdfs1:9000/test.native.zst') select toUInt64(1) as x") + node1.query( + f"insert into table function hdfs('hdfs://hdfs1:9000/test.native.zst') select toUInt64(1) as x" + ) result = node1.query(f"desc hdfs('hdfs://hdfs1:9000/test.native.zst')") - assert(result.strip() == 'x\tUInt64') + assert result.strip() == "x\tUInt64" result = node1.query(f"select * from hdfs('hdfs://hdfs1:9000/test.native.zst')") - assert(int(result) == 1) + assert int(result) == 1 -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_storage_kafka/kafka_pb2.py b/tests/integration/test_storage_kafka/kafka_pb2.py index a9dcab1a85a..d29bc7e8541 100644 --- a/tests/integration/test_storage_kafka/kafka_pb2.py +++ b/tests/integration/test_storage_kafka/kafka_pb2.py @@ -3,7 +3,7 @@ import sys -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')) +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -14,58 +14,80 @@ from google.protobuf import symbol_database as _symbol_database _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( - name='clickhouse_path/format_schemas/kafka.proto', - package='', - syntax='proto3', + name="clickhouse_path/format_schemas/kafka.proto", + package="", + syntax="proto3", serialized_pb=_b( - '\n*clickhouse_path/format_schemas/kafka.proto\"*\n\x0cKeyValuePair\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\tb\x06proto3') + '\n*clickhouse_path/format_schemas/kafka.proto"*\n\x0cKeyValuePair\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\tb\x06proto3' + ), ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _KEYVALUEPAIR = _descriptor.Descriptor( - name='KeyValuePair', - full_name='KeyValuePair', + name="KeyValuePair", + full_name="KeyValuePair", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='KeyValuePair.key', index=0, - number=1, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), + name="key", + full_name="KeyValuePair.key", + index=0, + number=1, + type=4, + cpp_type=4, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), _descriptor.FieldDescriptor( - name='value', full_name='KeyValuePair.value', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ + name="value", + full_name="KeyValuePair.value", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + ), ], + extensions=[], nested_types=[], - enum_types=[ - ], + 
enum_types=[], options=None, is_extendable=False, - syntax='proto3', + syntax="proto3", extension_ranges=[], - oneofs=[ - ], + oneofs=[], serialized_start=46, serialized_end=88, ) -DESCRIPTOR.message_types_by_name['KeyValuePair'] = _KEYVALUEPAIR +DESCRIPTOR.message_types_by_name["KeyValuePair"] = _KEYVALUEPAIR -KeyValuePair = _reflection.GeneratedProtocolMessageType('KeyValuePair', (_message.Message,), dict( - DESCRIPTOR=_KEYVALUEPAIR, - __module__='clickhouse_path.format_schemas.kafka_pb2' - # @@protoc_insertion_point(class_scope:KeyValuePair) -)) +KeyValuePair = _reflection.GeneratedProtocolMessageType( + "KeyValuePair", + (_message.Message,), + dict( + DESCRIPTOR=_KEYVALUEPAIR, + __module__="clickhouse_path.format_schemas.kafka_pb2" + # @@protoc_insertion_point(class_scope:KeyValuePair) + ), +) _sym_db.RegisterMessage(KeyValuePair) # @@protoc_insertion_point(module_scope) diff --git a/tests/integration/test_storage_kafka/message_with_repeated_pb2.py b/tests/integration/test_storage_kafka/message_with_repeated_pb2.py index 69702307e7f..b0755a121ae 100644 --- a/tests/integration/test_storage_kafka/message_with_repeated_pb2.py +++ b/tests/integration/test_storage_kafka/message_with_repeated_pb2.py @@ -2,177 +2,311 @@ # source: clickhouse_path/format_schemas/message_with_repeated.proto import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) + +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - - DESCRIPTOR = _descriptor.FileDescriptor( - name='clickhouse_path/format_schemas/message_with_repeated.proto', - package='', - syntax='proto3', - serialized_options=_b('H\001'), - serialized_pb=_b('\n:clickhouse_path/format_schemas/message_with_repeated.proto\"t\n\x07Message\x12\x0c\n\x04tnow\x18\x01 \x01(\r\x12\x0e\n\x06server\x18\x02 \x01(\t\x12\r\n\x05\x63lien\x18\x03 \x01(\t\x12\r\n\x05sPort\x18\x04 \x01(\r\x12\r\n\x05\x63Port\x18\x05 \x01(\r\x12\x0e\n\x01r\x18\x06 \x03(\x0b\x32\x03.dd\x12\x0e\n\x06method\x18\x07 \x01(\t\"J\n\x02\x64\x64\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05\x63lass\x18\x02 \x01(\r\x12\x0c\n\x04type\x18\x03 \x01(\r\x12\x0b\n\x03ttl\x18\x04 \x01(\x04\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\x42\x02H\x01\x62\x06proto3') + name="clickhouse_path/format_schemas/message_with_repeated.proto", + package="", + syntax="proto3", + serialized_options=_b("H\001"), + serialized_pb=_b( + '\n:clickhouse_path/format_schemas/message_with_repeated.proto"t\n\x07Message\x12\x0c\n\x04tnow\x18\x01 \x01(\r\x12\x0e\n\x06server\x18\x02 \x01(\t\x12\r\n\x05\x63lien\x18\x03 \x01(\t\x12\r\n\x05sPort\x18\x04 \x01(\r\x12\r\n\x05\x63Port\x18\x05 \x01(\r\x12\x0e\n\x01r\x18\x06 \x03(\x0b\x32\x03.dd\x12\x0e\n\x06method\x18\x07 \x01(\t"J\n\x02\x64\x64\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05\x63lass\x18\x02 \x01(\r\x12\x0c\n\x04type\x18\x03 \x01(\r\x12\x0b\n\x03ttl\x18\x04 \x01(\x04\x12\x0c\n\x04\x64\x61ta\x18\x05 \x01(\x0c\x42\x02H\x01\x62\x06proto3' + ), ) - - _MESSAGE = _descriptor.Descriptor( - name='Message', - full_name='Message', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tnow', full_name='Message.tnow', index=0, - number=1, type=13, cpp_type=3, label=1, - 
has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='server', full_name='Message.server', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='clien', full_name='Message.clien', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='sPort', full_name='Message.sPort', index=3, - number=4, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='cPort', full_name='Message.cPort', index=4, - number=5, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='r', full_name='Message.r', index=5, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='method', full_name='Message.method', index=6, - number=7, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=62, - serialized_end=178, + name="Message", + full_name="Message", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="tnow", + full_name="Message.tnow", + index=0, + number=1, + type=13, + cpp_type=3, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="server", + full_name="Message.server", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="clien", + full_name="Message.clien", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + 
_descriptor.FieldDescriptor( + name="sPort", + full_name="Message.sPort", + index=3, + number=4, + type=13, + cpp_type=3, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="cPort", + full_name="Message.cPort", + index=4, + number=5, + type=13, + cpp_type=3, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="r", + full_name="Message.r", + index=5, + number=6, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="method", + full_name="Message.method", + index=6, + number=7, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=62, + serialized_end=178, ) _DD = _descriptor.Descriptor( - name='dd', - full_name='dd', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='dd.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='class', full_name='dd.class', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='type', full_name='dd.type', index=2, - number=3, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='ttl', full_name='dd.ttl', index=3, - number=4, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='data', full_name='dd.data', index=4, - number=5, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=180, - serialized_end=254, + name="dd", + full_name="dd", + 
filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="dd.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="class", + full_name="dd.class", + index=1, + number=2, + type=13, + cpp_type=3, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="type", + full_name="dd.type", + index=2, + number=3, + type=13, + cpp_type=3, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="ttl", + full_name="dd.ttl", + index=3, + number=4, + type=4, + cpp_type=4, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="data", + full_name="dd.data", + index=4, + number=5, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=180, + serialized_end=254, ) -_MESSAGE.fields_by_name['r'].message_type = _DD -DESCRIPTOR.message_types_by_name['Message'] = _MESSAGE -DESCRIPTOR.message_types_by_name['dd'] = _DD +_MESSAGE.fields_by_name["r"].message_type = _DD +DESCRIPTOR.message_types_by_name["Message"] = _MESSAGE +DESCRIPTOR.message_types_by_name["dd"] = _DD _sym_db.RegisterFileDescriptor(DESCRIPTOR) -Message = _reflection.GeneratedProtocolMessageType('Message', (_message.Message,), dict( - DESCRIPTOR = _MESSAGE, - __module__ = 'clickhouse_path.format_schemas.message_with_repeated_pb2' - # @@protoc_insertion_point(class_scope:Message) - )) +Message = _reflection.GeneratedProtocolMessageType( + "Message", + (_message.Message,), + dict( + DESCRIPTOR=_MESSAGE, + __module__="clickhouse_path.format_schemas.message_with_repeated_pb2" + # @@protoc_insertion_point(class_scope:Message) + ), +) _sym_db.RegisterMessage(Message) -dd = _reflection.GeneratedProtocolMessageType('dd', (_message.Message,), dict( - DESCRIPTOR = _DD, - __module__ = 'clickhouse_path.format_schemas.message_with_repeated_pb2' - # @@protoc_insertion_point(class_scope:dd) - )) +dd = _reflection.GeneratedProtocolMessageType( + "dd", + (_message.Message,), + dict( + DESCRIPTOR=_DD, + __module__="clickhouse_path.format_schemas.message_with_repeated_pb2" + # @@protoc_insertion_point(class_scope:dd) + ), +) _sym_db.RegisterMessage(dd) diff --git a/tests/integration/test_storage_kafka/social_pb2.py b/tests/integration/test_storage_kafka/social_pb2.py index eeba5efc8b1..429572a0b45 100644 --- a/tests/integration/test_storage_kafka/social_pb2.py +++ 
b/tests/integration/test_storage_kafka/social_pb2.py @@ -6,69 +6,89 @@ from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - - DESCRIPTOR = _descriptor.FileDescriptor( - name='social.proto', - package='', - syntax='proto3', - serialized_options=None, - serialized_pb=b'\n\x0csocial.proto\"+\n\x04User\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x11\n\ttimestamp\x18\x02 \x01(\x05\x62\x06proto3' + name="social.proto", + package="", + syntax="proto3", + serialized_options=None, + serialized_pb=b'\n\x0csocial.proto"+\n\x04User\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x11\n\ttimestamp\x18\x02 \x01(\x05\x62\x06proto3', ) - - _USER = _descriptor.Descriptor( - name='User', - full_name='User', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='username', full_name='User.username', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='timestamp', full_name='User.timestamp', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=16, - serialized_end=59, + name="User", + full_name="User", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="username", + full_name="User.username", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="timestamp", + full_name="User.timestamp", + index=1, + number=2, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=16, + serialized_end=59, ) -DESCRIPTOR.message_types_by_name['User'] = _USER +DESCRIPTOR.message_types_by_name["User"] = _USER _sym_db.RegisterFileDescriptor(DESCRIPTOR) -User = _reflection.GeneratedProtocolMessageType('User', (_message.Message,), { - 'DESCRIPTOR' : _USER, - '__module__' : 'social_pb2' - # @@protoc_insertion_point(class_scope:User) - }) +User = _reflection.GeneratedProtocolMessageType( + "User", + (_message.Message,), + { + "DESCRIPTOR": _USER, + "__module__": "social_pb2" + # @@protoc_insertion_point(class_scope:User) + }, +) _sym_db.RegisterMessage(User) diff --git a/tests/integration/test_storage_kafka/test.py 
b/tests/integration/test_storage_kafka/test.py index 7a6f27ffa0a..e451e15a5d6 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -13,7 +13,9 @@ import math import avro.schema import avro.io import avro.datafile -from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient +from confluent_kafka.avro.cached_schema_registry_client import ( + CachedSchemaRegistryClient, +) from confluent_kafka.avro.serializer.message_serializer import MessageSerializer import kafka.errors @@ -43,26 +45,33 @@ from . import message_with_repeated_pb2 # TODO: add test for SELECT LIMIT is working. cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', - main_configs=['configs/kafka.xml', 'configs/named_collection.xml'], - user_configs=['configs/users.xml'], - with_kafka=True, - with_zookeeper=True, # For Replicated Table - macros={"kafka_broker":"kafka1", - "kafka_topic_old":"old", - "kafka_group_name_old":"old", - "kafka_topic_new":"new", - "kafka_group_name_new":"new", - "kafka_client_id":"instance", - "kafka_format_json_each_row":"JSONEachRow"}, - clickhouse_path_dir='clickhouse_path') +instance = cluster.add_instance( + "instance", + main_configs=["configs/kafka.xml", "configs/named_collection.xml"], + user_configs=["configs/users.xml"], + with_kafka=True, + with_zookeeper=True, # For Replicated Table + macros={ + "kafka_broker": "kafka1", + "kafka_topic_old": "old", + "kafka_group_name_old": "old", + "kafka_topic_new": "new", + "kafka_group_name_new": "new", + "kafka_client_id": "instance", + "kafka_format_json_each_row": "JSONEachRow", + }, + clickhouse_path_dir="clickhouse_path", +) def get_kafka_producer(port, serializer, retries): errors = [] for _ in range(retries): try: - producer = KafkaProducer(bootstrap_servers="localhost:{}".format(port), value_serializer=serializer) + producer = KafkaProducer( + bootstrap_servers="localhost:{}".format(port), + value_serializer=serializer, + ) logging.debug("Kafka Connection establised: localhost:{}".format(port)) return producer except Exception as e: @@ -71,12 +80,30 @@ def get_kafka_producer(port, serializer, retries): raise Exception("Connection not establised, {}".format(errors)) + def producer_serializer(x): return x.encode() if isinstance(x, str) else x -def kafka_create_topic(admin_client, topic_name, num_partitions=1, replication_factor=1, max_retries=50, config=None): - logging.debug(f"Kafka create topic={topic_name}, num_partitions={num_partitions}, replication_factor={replication_factor}") - topics_list = [NewTopic(name=topic_name, num_partitions=num_partitions, replication_factor=replication_factor, topic_configs=config)] + +def kafka_create_topic( + admin_client, + topic_name, + num_partitions=1, + replication_factor=1, + max_retries=50, + config=None, +): + logging.debug( + f"Kafka create topic={topic_name}, num_partitions={num_partitions}, replication_factor={replication_factor}" + ) + topics_list = [ + NewTopic( + name=topic_name, + num_partitions=num_partitions, + replication_factor=replication_factor, + topic_configs=config, + ) + ] retries = 0 while True: try: @@ -91,6 +118,7 @@ def kafka_create_topic(admin_client, topic_name, num_partitions=1, replication_f else: raise + def kafka_delete_topic(admin_client, topic, max_retries=50): result = admin_client.delete_topics([topic]) for (topic, e) in result.topic_error_codes: @@ -111,19 +139,31 @@ def kafka_delete_topic(admin_client, topic, max_retries=50): if retries > max_retries: raise 
Exception(f"Failed to delete topics {topic}, {result}") + def kafka_produce(kafka_cluster, topic, messages, timestamp=None, retries=15): - logging.debug("kafka_produce server:{}:{} topic:{}".format("localhost", kafka_cluster.kafka_port, topic)) - producer = get_kafka_producer(kafka_cluster.kafka_port, producer_serializer, retries) + logging.debug( + "kafka_produce server:{}:{} topic:{}".format( + "localhost", kafka_cluster.kafka_port, topic + ) + ) + producer = get_kafka_producer( + kafka_cluster.kafka_port, producer_serializer, retries + ) for message in messages: producer.send(topic=topic, value=message, timestamp_ms=timestamp) producer.flush() + ## just to ensure the python client / producer is working properly def kafka_producer_send_heartbeat_msg(max_retries=50): - kafka_produce(kafka_cluster, 'test_heartbeat_topic', ['test'], retries=max_retries) + kafka_produce(kafka_cluster, "test_heartbeat_topic", ["test"], retries=max_retries) -def kafka_consume(kafka_cluster, topic, needDecode = True, timestamp = 0): - consumer = KafkaConsumer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), auto_offset_reset="earliest") + +def kafka_consume(kafka_cluster, topic, needDecode=True, timestamp=0): + consumer = KafkaConsumer( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), + auto_offset_reset="earliest", + ) consumer.subscribe(topics=(topic)) for toppar, messages in list(consumer.poll(5000).items()): if toppar.topic == topic: @@ -136,22 +176,31 @@ def kafka_consume(kafka_cluster, topic, needDecode = True, timestamp = 0): consumer.unsubscribe() consumer.close() + def kafka_produce_protobuf_messages(kafka_cluster, topic, start_index, num_messages): - data = b'' + data = b"" for i in range(start_index, start_index + num_messages): msg = kafka_pb2.KeyValuePair() msg.key = i msg.value = str(i) serialized_msg = msg.SerializeToString() data = data + _VarintBytes(len(serialized_msg)) + serialized_msg - producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer) + producer = KafkaProducer( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), + value_serializer=producer_serializer, + ) producer.send(topic=topic, value=data) producer.flush() logging.debug(("Produced {} messages for topic {}".format(num_messages, topic))) -def kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, topic, start_index, num_messages): - data = '' - producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + +def kafka_produce_protobuf_messages_no_delimeters( + kafka_cluster, topic, start_index, num_messages +): + data = "" + producer = KafkaProducer( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) for i in range(start_index, start_index + num_messages): msg = kafka_pb2.KeyValuePair() msg.key = i @@ -161,37 +210,43 @@ def kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, topic, start_in producer.flush() logging.debug("Produced {} messages for topic {}".format(num_messages, topic)) -def kafka_produce_protobuf_social(kafka_cluster,topic, start_index, num_messages): - data = b'' + +def kafka_produce_protobuf_social(kafka_cluster, topic, start_index, num_messages): + data = b"" for i in range(start_index, start_index + num_messages): msg = social_pb2.User() - msg.username='John Doe {}'.format(i) - msg.timestamp=1000000+i + msg.username = "John Doe {}".format(i) + msg.timestamp = 1000000 + i serialized_msg = msg.SerializeToString() data = 
data + _VarintBytes(len(serialized_msg)) + serialized_msg - producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer) + producer = KafkaProducer( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), + value_serializer=producer_serializer, + ) producer.send(topic=topic, value=data) producer.flush() logging.debug(("Produced {} messages for topic {}".format(num_messages, topic))) + def avro_message(value): - schema = avro.schema.make_avsc_object({ - 'name': 'row', - 'type': 'record', - 'fields': [ - {'name': 'id', 'type': 'long'}, - {'name': 'blockNo', 'type': 'int'}, - {'name': 'val1', 'type': 'string'}, - {'name': 'val2', 'type': 'float'}, - {'name': 'val3', 'type': 'int'} - ] - }) + schema = avro.schema.make_avsc_object( + { + "name": "row", + "type": "record", + "fields": [ + {"name": "id", "type": "long"}, + {"name": "blockNo", "type": "int"}, + {"name": "val1", "type": "string"}, + {"name": "val2", "type": "float"}, + {"name": "val3", "type": "int"}, + ], + } + ) bytes_writer = io.BytesIO() # writer = avro.io.DatumWriter(schema) # encoder = avro.io.BinaryEncoder(bytes_writer) # writer.write(value, encoder) - # DataFileWrite seems to be mandatory to get schema encoded writer = avro.datafile.DataFileWriter(bytes_writer, avro.io.DatumWriter(), schema) if isinstance(value, list): @@ -206,64 +261,78 @@ def avro_message(value): bytes_writer.close() return raw_bytes + def avro_confluent_message(schema_registry_client, value): # type: (CachedSchemaRegistryClient, dict) -> str serializer = MessageSerializer(schema_registry_client) - schema = avro.schema.make_avsc_object({ - 'name': 'row', - 'type': 'record', - 'fields': [ - {'name': 'id', 'type': 'long'}, - {'name': 'blockNo', 'type': 'int'}, - {'name': 'val1', 'type': 'string'}, - {'name': 'val2', 'type': 'float'}, - {'name': 'val3', 'type': 'int'} - ] - }) - return serializer.encode_record_with_schema('test_subject', schema, value) + schema = avro.schema.make_avsc_object( + { + "name": "row", + "type": "record", + "fields": [ + {"name": "id", "type": "long"}, + {"name": "blockNo", "type": "int"}, + {"name": "val1", "type": "string"}, + {"name": "val2", "type": "float"}, + {"name": "val3", "type": "int"}, + ], + } + ) + return serializer.encode_record_with_schema("test_subject", schema, value) + # Tests + def test_kafka_settings_old_syntax(kafka_cluster): - assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro", - ignore_error=True)) == TSV('''kafka_broker kafka1 + assert TSV( + instance.query( + "SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro", + ignore_error=True, + ) + ) == TSV( + """kafka_broker kafka1 kafka_client_id instance kafka_format_json_each_row JSONEachRow kafka_group_name_new new kafka_group_name_old old kafka_topic_new new kafka_topic_old old -''') +""" + ) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n') SETTINGS kafka_commit_on_select = 1; - ''') + """ + ) # Don't insert malformed messages since old settings syntax # doesn't support skipping of broken messages. 
messages = [] for i in range(50): - messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce(kafka_cluster, 'old', messages) + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, "old", messages) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.kafka', ignore_error=True) + result += instance.query("SELECT * FROM test.kafka", ignore_error=True) if kafka_check_result(result): break kafka_check_result(result, True) - members = describe_consumer_group(kafka_cluster, 'old') - assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka' + members = describe_consumer_group(kafka_cluster, "old") + assert members[0]["client_id"] == "ClickHouse-instance-test-kafka" # text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose")) def test_kafka_settings_new_syntax(kafka_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = '{kafka_broker}:19092', @@ -274,44 +343,58 @@ def test_kafka_settings_new_syntax(kafka_cluster): kafka_commit_on_select = 1, kafka_client_id = '{kafka_client_id} test 1234', kafka_skip_broken_messages = 1; - ''') + """ + ) messages = [] for i in range(25): - messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce(kafka_cluster, 'new', messages) + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, "new", messages) # Insert couple of malformed messages. - kafka_produce(kafka_cluster, 'new', ['}{very_broken_message,']) - kafka_produce(kafka_cluster, 'new', ['}another{very_broken_message,']) + kafka_produce(kafka_cluster, "new", ["}{very_broken_message,"]) + kafka_produce(kafka_cluster, "new", ["}another{very_broken_message,"]) messages = [] for i in range(25, 50): - messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce(kafka_cluster, 'new', messages) + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, "new", messages) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.kafka', ignore_error=True) + result += instance.query("SELECT * FROM test.kafka", ignore_error=True) if kafka_check_result(result): break kafka_check_result(result, True) - members = describe_consumer_group(kafka_cluster, 'new') - assert members[0]['client_id'] == 'instance test 1234' + members = describe_consumer_group(kafka_cluster, "new") + assert members[0]["client_id"] == "instance test 1234" def test_kafka_json_as_string(kafka_cluster): - kafka_produce(kafka_cluster, 'kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }', - '{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}']) + kafka_produce( + kafka_cluster, + "kafka_json_as_string", + [ + '{"t": 123, "e": {"x": "woof"} }', + "", + '{"t": 124, "e": {"x": "test"} }', + '{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}', + ], + ) # 'tombstone' record (null value) = marker of deleted record - producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer) - producer.send(topic='kafka_json_as_string', key='xxx') + producer = KafkaProducer( + bootstrap_servers="localhost:{}".format(cluster.kafka_port), + 
value_serializer=producer_serializer, + key_serializer=producer_serializer, + ) + producer.send(topic="kafka_json_as_string", key="xxx") producer.flush() - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (field String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -320,22 +403,28 @@ def test_kafka_json_as_string(kafka_cluster): kafka_commit_on_select = 1, kafka_format = 'JSONAsString', kafka_flush_interval_ms=1000; - ''') + """ + ) - result = instance.query('SELECT * FROM test.kafka;') - expected = '''\ + result = instance.query("SELECT * FROM test.kafka;") + expected = """\ {"t": 123, "e": {"x": "woof"} } {"t": 124, "e": {"x": "test"} } {"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"} -''' +""" assert TSV(result) == TSV(expected) assert instance.contains_in_log( - "Parsing of message (topic: kafka_json_as_string, partition: 0, offset: [0-9]*) return no rows") + "Parsing of message (topic: kafka_json_as_string, partition: 0, offset: [0-9]*) return no rows" + ) def test_kafka_formats(kafka_cluster): - schema_registry_client = CachedSchemaRegistryClient('http://localhost:{}'.format(kafka_cluster.schema_registry_port)) - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + schema_registry_client = CachedSchemaRegistryClient( + "http://localhost:{}".format(kafka_cluster.schema_registry_port) + ) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) # data was dumped from clickhouse itself in a following manner # clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g' @@ -343,25 +432,25 @@ def test_kafka_formats(kafka_cluster): all_formats = { ## Text formats ## # dumped with clickhouse-client ... 
| perl -pe 's/\n/\\n/; s/\t/\\t/g;' - 'JSONEachRow': { - 'data_sample': [ + "JSONEachRow": { + "data_sample": [ '{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', '{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', '{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', ], - 'supports_empty_value': True, + "supports_empty_value": True, }, # JSONAsString doesn't fit to that test, and tested separately - 'JSONCompactEachRow': { - 'data_sample': [ + "JSONCompactEachRow": { + "data_sample": [ '["0", 0, "AM", 0.5, 1]\n', '["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n', '["0", 0, "AM", 0.5, 1]\n', ], - 'supports_empty_value': True, + "supports_empty_value": True, }, - 'JSONCompactEachRowWithNamesAndTypes': { - 'data_sample': [ + "JSONCompactEachRowWithNamesAndTypes": { + "data_sample": [ '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n', '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n', '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n', @@ -372,11 +461,11 @@ def test_kafka_formats(kafka_cluster): # /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse ], }, - 'TSKV': { - 'data_sample': [ - 'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', - 
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', - 'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', + "TSKV": { + "data_sample": [ + "id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n", + "id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n", + "id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n", # '' # On empty message exception: Unexpected end of stream while reading key name from TSKV format # /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string, std::__1::allocator >&) @ 0x1df8c098 in /usr/bin/clickhouse @@ -384,24 +473,24 @@ def test_kafka_formats(kafka_cluster): # /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse ], }, - 'CSV': { - 'data_sample': [ + "CSV": { + "data_sample": [ '0,0,"AM",0.5,1\n', '1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n', '0,0,"AM",0.5,1\n', ], - 'supports_empty_value': True, + "supports_empty_value": True, }, - 'TSV': { - 'data_sample': [ - '0\t0\tAM\t0.5\t1\n', - '1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', - '0\t0\tAM\t0.5\t1\n', + "TSV": { + "data_sample": [ + "0\t0\tAM\t0.5\t1\n", + "1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n", + "0\t0\tAM\t0.5\t1\n", ], - 'supports_empty_value': True, + "supports_empty_value": True, }, - 'CSVWithNames': { - 'data_sample': [ + "CSVWithNames": { + "data_sample": [ '"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n', 
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n', '"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n', @@ -415,27 +504,27 @@ def test_kafka_formats(kafka_cluster): # /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse ], }, - 'Values': { - 'data_sample': [ + "Values": { + "data_sample": [ "(0,0,'AM',0.5,1)", "(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)", "(0,0,'AM',0.5,1)", ], - 'supports_empty_value': True, + "supports_empty_value": True, }, - 'TSVWithNames': { - 'data_sample': [ - 'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n', - 'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', - 'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n', + "TSVWithNames": { + "data_sample": [ + "id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n", + "id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n", + "id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n", ], - 'supports_empty_value': True, + "supports_empty_value": True, }, - 'TSVWithNamesAndTypes': { - 'data_sample': [ - 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n', - 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', - 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n', + "TSVWithNamesAndTypes": { + "data_sample": [ + "id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n", + "id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n", + "id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n", # '', # On empty message exception happens: Cannot parse input: expected '\n' at end of stream. 
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse @@ -445,23 +534,23 @@ def test_kafka_formats(kafka_cluster): # /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse ], }, - 'CustomSeparated' : { - 'data_sample' : [ - '0\t0\tAM\t0.5\t1\n', - '1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', - '0\t0\tAM\t0.5\t1\n', + "CustomSeparated": { + "data_sample": [ + "0\t0\tAM\t0.5\t1\n", + "1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n", + "0\t0\tAM\t0.5\t1\n", ], }, - 'Template' : { - 'data_sample' : [ + "Template": { + "data_sample": [ + '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', + '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', - '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', - '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', ], - 'extra_settings': ", format_template_row='template_row.format'" + "extra_settings": ", format_template_row='template_row.format'", }, - 'Regexp': { - 'data_sample': [ + "Regexp": { + "data_sample": [ '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 
= 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)', @@ -469,17 +558,16 @@ def test_kafka_formats(kafka_cluster): # On empty message exception happens: Line "" doesn't match the regexp.: (at row 1) # /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector::mutable_ptr, std::__1::allocator::mutable_ptr > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse ], - 'extra_settings': r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'" + "extra_settings": r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'", }, - ## BINARY FORMATS # dumped with # clickhouse-client ... | xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g' - 'Native': { - 'data_sample': [ - b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01', - b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01', - b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01', + 
"Native": { + "data_sample": [ + b"\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01", + b"\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01", + b"\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01", # '' # On empty message exception happens: DB::Exception: Attempt to read after eof # /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse @@ -491,21 +579,21 @@ def test_kafka_formats(kafka_cluster): # /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse ], }, - 'MsgPack': { - 'data_sample': [ - b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01', - b'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01', - b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01', + "MsgPack": { + "data_sample": [ + b"\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01", + 
b"\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01", + b"\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01", # '' # On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1) # coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170 ], }, - 'RowBinary': { - 'data_sample': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', - b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', + "RowBinary": { + "data_sample": [ + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", + b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", # '' # On empty message exception happens: DB::Exception: Cannot read all data. Bytes read: 0. 
Bytes expected: 8. # /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse @@ -515,11 +603,11 @@ def test_kafka_formats(kafka_cluster): # /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector::mutable_ptr, std::__1::allocator::mutable_ptr > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse ], }, - 'RowBinaryWithNamesAndTypes': { - 'data_sample': [ - b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', - b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', - b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', + "RowBinaryWithNamesAndTypes": { + "data_sample": [ + b"\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", + 
b"\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", + b"\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", # '' # !!! On empty message segfault: Address not mapped to object # /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse @@ -530,11 +618,11 @@ def test_kafka_formats(kafka_cluster): # /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector::mutable_ptr, std::__1::allocator::mutable_ptr > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse ], }, - 'Protobuf': { - 'data_sample': [ - b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01', - b'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01', - b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01', + "Protobuf": { + "data_sample": [ + b"\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01", + 
b"\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01", + b"\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01", # '' # On empty message exception: Attempt to read after eof # /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 0x15c9699b in /usr/bin/clickhouse @@ -543,96 +631,122 @@ def test_kafka_formats(kafka_cluster): # /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector::mutable_ptr, std::__1::allocator::mutable_ptr > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse # /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse ], - 'extra_settings': ", kafka_schema='test:TestMessage'" + "extra_settings": ", kafka_schema='test:TestMessage'", }, - 'ORC': { - 'data_sample': [ - b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x
08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', - b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', - 
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', + "ORC": { + "data_sample": [ + 
b"\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18", + 
b"\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18", + 
b"\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18", # '' # On empty message exception: IOError: File size too small, Stack trace (when copying this message, always include the lines below): # /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse # /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse ], }, - 'CapnProto': { - 'data_sample': [ - b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00', - 
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00', - b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00', + "CapnProto": { + "data_sample": [ + b"\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00", + 
b"\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00", + b"\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00", # '' # On empty message exception: Cannot read all data. Bytes read: 0. Bytes expected: 4. 
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse # /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse # /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector::mutable_ptr, std::__1::allocator::mutable_ptr > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse ], - 'extra_settings': ", kafka_schema='test:TestRecordStruct'" + "extra_settings": ", kafka_schema='test:TestRecordStruct'", }, - 'Parquet' : { - 'data_sample': [ - b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\
x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31', - b'\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\
x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31', - 
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31', + "Parquet": { + "data_sample": [ + b"\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x6
1\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31", + b"\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1
e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31", + 
b"\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31", ], }, - 'AvroConfluent': { - 'data_sample': [ - avro_confluent_message(schema_registry_client, - {'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}), - - b''.join([avro_confluent_message(schema_registry_client, - {'id': id, 'blockNo': 0, 'val1': str('AM'), - 'val2': 0.5, "val3": 1}) for id in range(1, 16)]), - - avro_confluent_message(schema_registry_client, - {'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}), + "AvroConfluent": { + "data_sample": [ + avro_confluent_message( + schema_registry_client, + {"id": 0, "blockNo": 0, "val1": str("AM"), "val2": 0.5, "val3": 1}, + ), + b"".join( + [ + avro_confluent_message( + schema_registry_client, + { + "id": id, + "blockNo": 0, + "val1": str("AM"), + "val2": 0.5, + "val3": 1, + }, + ) + for id in range(1, 16) + ] + ), + avro_confluent_message( + schema_registry_client, + {"id": 0, "blockNo": 0, "val1": str("AM"), "val2": 0.5, "val3": 1}, + ), ], - 'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format( - kafka_cluster.schema_registry_host, - 8081 + "extra_settings": ", format_avro_schema_registry_url='http://{}:{}'".format( + kafka_cluster.schema_registry_host, 8081 ), - 'supports_empty_value': True, + "supports_empty_value": True, }, - 'Avro': { + "Avro": { # It seems impossible to send more than one avro file per a message # because of nature of Avro: blocks go one after another - 'data_sample': [ - avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}), - - avro_message([{'id': id, 'blockNo': 0, 'val1': str('AM'), - 'val2': 0.5, "val3": 1} for id in range(1, 16)]), - - avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}), + "data_sample": [ + avro_message( + {"id": 0, "blockNo": 0, "val1": str("AM"), "val2": 0.5, "val3": 1} + ), + avro_message( + [ + { + "id": id, + "blockNo": 0, + "val1": str("AM"), + "val2": 0.5, + "val3": 1, + } + for id in range(1, 16) + ] + ), + avro_message( + {"id": 0, "blockNo": 0, "val1": str("AM"), "val2": 0.5, "val3": 1} + ), ], - 'supports_empty_value': False, + "supports_empty_value": False, }, - 'Arrow' : { - 'data_sample' : [ - 
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31', - b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31', - 
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31', + "Arrow": { + "data_sample": [ + b"\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31", + 
b"\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\
x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31", + 
b"\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31", ], }, - 'ArrowStream' : { - 'data_sample' : [ - b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00', - b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0
b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00', - b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00', + "ArrowStream": { + "data_sample": [ + b"\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00", + 
b"\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\
x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00", + b"\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00", ], }, } for format_name, 
format_opts in list(all_formats.items()): - logging.debug(('Set up {}'.format(format_name))) - topic_name = 'format_tests_{}'.format(format_name) - data_sample = format_opts['data_sample'] + logging.debug(("Set up {}".format(format_name))) + topic_name = "format_tests_{}".format(format_name) + data_sample = format_opts["data_sample"] data_prefix = [] # prepend empty value when supported - if format_opts.get('supports_empty_value', False): - data_prefix = data_prefix + [''] + if format_opts.get("supports_empty_value", False): + data_prefix = data_prefix + [""] kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.kafka_{format_name}; CREATE TABLE test.kafka_{format_name} ( @@ -652,18 +766,30 @@ def test_kafka_formats(kafka_cluster): CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name}; - '''.format(topic_name=topic_name, format_name=format_name, - extra_settings=format_opts.get('extra_settings') or '')) + """.format( + topic_name=topic_name, + format_name=format_name, + extra_settings=format_opts.get("extra_settings") or "", + ) + ) - instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*format_tests_', repetitions=len(all_formats.keys()), look_behind_lines=12000) + instance.wait_for_log_line( + "kafka.*Committed offset [0-9]+.*format_tests_", + repetitions=len(all_formats.keys()), + look_behind_lines=12000, + ) for format_name, format_opts in list(all_formats.items()): - logging.debug(('Checking {}'.format(format_name))) - topic_name = f'format_tests_{format_name}' + logging.debug(("Checking {}".format(format_name))) + topic_name = f"format_tests_{format_name}" # shift offsets by 1 if format supports empty value - offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2] - result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name)) - expected = '''\ + offsets = ( + [1, 2, 3] if format_opts.get("supports_empty_value", False) else [0, 1, 2] + ) + result = instance.query( + "SELECT * FROM test.kafka_{format_name}_mv;".format(format_name=format_name) + ) + expected = """\ 0 0 AM 0.5 1 {topic_name} 0 {offset_0} 1 0 AM 0.5 1 {topic_name} 0 {offset_1} 2 0 AM 0.5 1 {topic_name} 0 {offset_1} @@ -681,13 +807,21 @@ def test_kafka_formats(kafka_cluster): 14 0 AM 0.5 1 {topic_name} 0 {offset_1} 15 0 AM 0.5 1 {topic_name} 0 {offset_1} 0 0 AM 0.5 1 {topic_name} 0 {offset_2} -'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2]) - assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name) +""".format( + topic_name=topic_name, + offset_0=offsets[0], + offset_1=offsets[1], + offset_2=offsets[2], + ) + assert TSV(result) == TSV(expected), "Proper result for format: {}".format( + format_name + ) kafka_delete_topic(admin_client, topic_name) + # Since everything is async and shaky when receiving messages from Kafka, # we may want to try and check results multiple times in a loop. 
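
The comment above captures the pattern the rest of these tests rely on: consumption from Kafka is asynchronous, so a check is re-run in a loop until it matches the reference data or a deadline passes, rather than asserted once. A minimal sketch of that polling idiom, assuming hypothetical `fetch` and `matches` callables rather than the helpers defined in this test file:

    import time

    def poll_until(fetch, matches, timeout_s=60, interval_s=1):
        # Re-query until the predicate passes; fail loudly on timeout so the
        # test does not hang forever waiting for messages that never arrive.
        deadline = time.monotonic() + timeout_s
        result = fetch()
        while not matches(result):
            if time.monotonic() > deadline:
                raise AssertionError(f"no matching result within {timeout_s}s: {result!r}")
            time.sleep(interval_s)
            result = fetch()
        return result
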
-def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'): +def kafka_check_result(result, check=False, ref_file="test_kafka_json.reference"): fpath = p.join(p.dirname(__file__), ref_file) with open(fpath) as reference: if check: @@ -708,7 +842,7 @@ def decode_avro(message): # https://stackoverflow.com/a/57692111/1555175 def describe_consumer_group(kafka_cluster, name): - client = BrokerConnection('localhost', kafka_cluster.kafka_port, socket.AF_INET) + client = BrokerConnection("localhost", kafka_cluster.kafka_port, socket.AF_INET) client.connect_blocking() list_members_in_groups = DescribeGroupsRequest_v1(groups=[name]) @@ -717,25 +851,35 @@ def describe_consumer_group(kafka_cluster, name): for resp, f in client.recv(): f.success(resp) - (error_code, group_id, state, protocol_type, protocol, members) = future.value.groups[0] + ( + error_code, + group_id, + state, + protocol_type, + protocol, + members, + ) = future.value.groups[0] res = [] for member in members: (member_id, client_id, client_host, member_metadata, member_assignment) = member member_info = {} - member_info['member_id'] = member_id - member_info['client_id'] = client_id - member_info['client_host'] = client_host + member_info["member_id"] = member_id + member_info["client_id"] = client_id + member_info["client_host"] = client_host member_topics_assignment = [] - for (topic, partitions) in MemberAssignment.decode(member_assignment).assignment: - member_topics_assignment.append({'topic': topic, 'partitions': partitions}) - member_info['assignment'] = member_topics_assignment + for (topic, partitions) in MemberAssignment.decode( + member_assignment + ).assignment: + member_topics_assignment.append({"topic": topic, "partitions": partitions}) + member_info["assignment"] = member_topics_assignment res.append(member_info) return res # Fixtures + @pytest.fixture(scope="module") def kafka_cluster(): try: @@ -749,7 +893,7 @@ def kafka_cluster(): @pytest.fixture(autouse=True) def kafka_setup_teardown(): - instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;') + instance.query("DROP DATABASE IF EXISTS test; CREATE DATABASE test;") # logging.debug("kafka is available - running test") yield # run test @@ -757,10 +901,18 @@ def kafka_setup_teardown(): # Tests def test_kafka_issue11308(kafka_cluster): # Check that matview does respect Kafka SETTINGS - kafka_produce(kafka_cluster, 'issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }', - '{"t": 124, "e": {"x": "test"} }']) + kafka_produce( + kafka_cluster, + "issue11308", + [ + '{"t": 123, "e": {"x": "woof"} }', + '{"t": 123, "e": {"x": "woof"} }', + '{"t": 124, "e": {"x": "test"} }', + ], + ) - instance.query(''' + instance.query( + """ CREATE TABLE test.persistent_kafka ( time UInt64, some_string String @@ -783,31 +935,39 @@ def test_kafka_issue11308(kafka_cluster): `t` AS `time`, `e.x` AS `some_string` FROM test.kafka; - ''') + """ + ) - while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3: + while int(instance.query("SELECT count() FROM test.persistent_kafka")) < 3: time.sleep(1) - result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;') + result = instance.query("SELECT * FROM test.persistent_kafka ORDER BY time;") - instance.query(''' + instance.query( + """ DROP TABLE test.persistent_kafka; DROP TABLE test.persistent_kafka_mv; - ''') + """ + ) - expected = '''\ + expected = """\ 123 woof 123 woof 124 test -''' +""" assert TSV(result) == TSV(expected) def 
test_kafka_issue4116(kafka_cluster): # Check that format_csv_delimiter parameter works now - as part of all available format settings. - kafka_produce(kafka_cluster, 'issue4116', ['1|foo', '2|bar', '42|answer', '100|multi\n101|row\n103|message']) + kafka_produce( + kafka_cluster, + "issue4116", + ["1|foo", "2|bar", "42|answer", "100|multi\n101|row\n103|message"], + ) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (a UInt64, b String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -817,28 +977,32 @@ def test_kafka_issue4116(kafka_cluster): kafka_format = 'CSV', kafka_row_delimiter = '\\n', format_csv_delimiter = '|'; - ''') + """ + ) - result = instance.query('SELECT * FROM test.kafka ORDER BY a;') + result = instance.query("SELECT * FROM test.kafka ORDER BY a;") - expected = '''\ + expected = """\ 1 foo 2 bar 42 answer 100 multi 101 row 103 message -''' +""" assert TSV(result) == TSV(expected) def test_kafka_consumer_hang(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_name = "consumer_hang" kafka_create_topic(admin_client, topic_name, num_partitions=8) - instance.query(f''' + instance.query( + f""" DROP TABLE IF EXISTS test.kafka; DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; @@ -852,26 +1016,29 @@ def test_kafka_consumer_hang(kafka_cluster): kafka_num_consumers = 8; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory(); CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; - ''') + """ + ) - instance.wait_for_log_line('kafka.*Stalled', repetitions=20) + instance.wait_for_log_line("kafka.*Stalled", repetitions=20) # This should trigger heartbeat fail, # which will trigger REBALANCE_IN_PROGRESS, # and which can lead to consumer hang. 
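
For context, the "heartbeat fail" described above is provoked simply by freezing the broker container for longer than the consumer's session timeout; the `pause_container`/`unpause_container` calls that follow come from the integration-test harness, but a rough standalone equivalent (assuming a Docker container actually named `kafka1`, which is an assumption made for illustration) might look like:

    import subprocess
    import time

    def freeze_broker(container="kafka1", seconds=60):
        # While the container is paused it answers no heartbeats, so the group
        # coordinator eventually evicts the consumer and starts a rebalance.
        subprocess.check_call(["docker", "pause", container])
        try:
            time.sleep(seconds)
        finally:
            subprocess.check_call(["docker", "unpause", container])
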
- kafka_cluster.pause_container('kafka1') - instance.wait_for_log_line('heartbeat error') - kafka_cluster.unpause_container('kafka1') + kafka_cluster.pause_container("kafka1") + instance.wait_for_log_line("heartbeat error") + kafka_cluster.unpause_container("kafka1") # logging.debug("Attempt to drop") - instance.query('DROP TABLE test.kafka') + instance.query("DROP TABLE test.kafka") # kafka_cluster.open_bash_shell('instance') - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) # original problem appearance was a sequence of the following messages in librdkafka logs: # BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever) @@ -879,10 +1046,18 @@ def test_kafka_consumer_hang(kafka_cluster): # from a user perspective: we expect no hanging 'drop' queries # 'dr'||'op' to avoid self matching - assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0 + assert ( + int( + instance.query( + "select count() from system.processes where position(lower(query),'dr'||'op')>0" + ) + ) + == 0 + ) # cleanup unread messages so kafka will not wait reading consumers to delete topic - instance.query(f''' + instance.query( + f""" CREATE TABLE test.kafka (key UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -891,21 +1066,25 @@ def test_kafka_consumer_hang(kafka_cluster): kafka_group_name = '{topic_name}', kafka_format = 'JSONEachRow', kafka_num_consumers = 8; - ''') + """ + ) - num_read = int(instance.query('SELECT count() FROM test.kafka')) + num_read = int(instance.query("SELECT count() FROM test.kafka")) logging.debug(f"read {num_read} from {topic_name} before delete") - instance.query('DROP TABLE test.kafka') + instance.query("DROP TABLE test.kafka") kafka_delete_topic(admin_client, topic_name) def test_kafka_consumer_hang2(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_name = "consumer_hang2" kafka_create_topic(admin_client, topic_name) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.kafka; CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -923,14 +1102,15 @@ def test_kafka_consumer_hang2(kafka_cluster): kafka_commit_on_select = 1, kafka_group_name = 'consumer_hang2', kafka_format = 'JSONEachRow'; - ''') + """ + ) # first consumer subscribe the topic, try to poll some data, and go to rest - instance.query('SELECT * FROM test.kafka') + instance.query("SELECT * FROM test.kafka") # second consumer do the same leading to rebalance in the first # consumer, try to poll some data - instance.query('SELECT * FROM test.kafka2') + instance.query("SELECT * FROM test.kafka2") # echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn & # kafka_cluster.open_bash_shell('instance') @@ -939,22 +1119,30 @@ def test_kafka_consumer_hang2(kafka_cluster): # one of those queries was failing because of # https://github.com/edenhill/librdkafka/issues/2077 # https://github.com/edenhill/librdkafka/issues/2898 - instance.query('DROP TABLE test.kafka') - instance.query('DROP TABLE test.kafka2') + instance.query("DROP TABLE test.kafka") + instance.query("DROP TABLE test.kafka2") # from a user perspective: we expect no hanging 'drop' queries # 'dr'||'op' to avoid self matching - assert 
int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0 + assert ( + int( + instance.query( + "select count() from system.processes where position(lower(query),'dr'||'op')>0" + ) + ) + == 0 + ) kafka_delete_topic(admin_client, topic_name) def test_kafka_csv_with_delimiter(kafka_cluster): messages = [] for i in range(50): - messages.append('{i}, {i}'.format(i=i)) - kafka_produce(kafka_cluster, 'csv', messages) + messages.append("{i}, {i}".format(i=i)) + kafka_produce(kafka_cluster, "csv", messages) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -962,11 +1150,12 @@ def test_kafka_csv_with_delimiter(kafka_cluster): kafka_commit_on_select = 1, kafka_group_name = 'csv', kafka_format = 'CSV'; - ''') + """ + ) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.kafka', ignore_error=True) + result += instance.query("SELECT * FROM test.kafka", ignore_error=True) if kafka_check_result(result): break @@ -976,10 +1165,11 @@ def test_kafka_csv_with_delimiter(kafka_cluster): def test_kafka_tsv_with_delimiter(kafka_cluster): messages = [] for i in range(50): - messages.append('{i}\t{i}'.format(i=i)) - kafka_produce(kafka_cluster, 'tsv', messages) + messages.append("{i}\t{i}".format(i=i)) + kafka_produce(kafka_cluster, "tsv", messages) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -987,11 +1177,12 @@ def test_kafka_tsv_with_delimiter(kafka_cluster): kafka_commit_on_select = 1, kafka_group_name = 'tsv', kafka_format = 'TSV'; - ''') + """ + ) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.kafka', ignore_error=True) + result += instance.query("SELECT * FROM test.kafka", ignore_error=True) if kafka_check_result(result): break @@ -999,11 +1190,14 @@ def test_kafka_tsv_with_delimiter(kafka_cluster): def test_kafka_select_empty(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_name = "empty" kafka_create_topic(admin_client, topic_name) - instance.query(f''' + instance.query( + f""" CREATE TABLE test.kafka (key UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -1012,24 +1206,26 @@ def test_kafka_select_empty(kafka_cluster): kafka_group_name = '{topic_name}', kafka_format = 'TSV', kafka_row_delimiter = '\\n'; - ''') + """ + ) - assert int(instance.query('SELECT count() FROM test.kafka')) == 0 + assert int(instance.query("SELECT count() FROM test.kafka")) == 0 kafka_delete_topic(admin_client, topic_name) def test_kafka_json_without_delimiter(kafka_cluster): - messages = '' + messages = "" for i in range(25): - messages += json.dumps({'key': i, 'value': i}) + '\n' - kafka_produce(kafka_cluster, 'json', [messages]) + messages += json.dumps({"key": i, "value": i}) + "\n" + kafka_produce(kafka_cluster, "json", [messages]) - messages = '' + messages = "" for i in range(25, 50): - messages += json.dumps({'key': i, 'value': i}) + '\n' - kafka_produce(kafka_cluster, 'json', [messages]) + messages += json.dumps({"key": i, "value": i}) + "\n" + kafka_produce(kafka_cluster, "json", [messages]) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value 
UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -1037,11 +1233,12 @@ def test_kafka_json_without_delimiter(kafka_cluster): kafka_group_name = 'json', kafka_commit_on_select = 1, kafka_format = 'JSONEachRow'; - ''') + """ + ) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.kafka', ignore_error=True) + result += instance.query("SELECT * FROM test.kafka", ignore_error=True) if kafka_check_result(result): break @@ -1049,11 +1246,12 @@ def test_kafka_json_without_delimiter(kafka_cluster): def test_kafka_protobuf(kafka_cluster): - kafka_produce_protobuf_messages(kafka_cluster, 'pb', 0, 20) - kafka_produce_protobuf_messages(kafka_cluster, 'pb', 20, 1) - kafka_produce_protobuf_messages(kafka_cluster, 'pb', 21, 29) + kafka_produce_protobuf_messages(kafka_cluster, "pb", 0, 20) + kafka_produce_protobuf_messages(kafka_cluster, "pb", 20, 1) + kafka_produce_protobuf_messages(kafka_cluster, "pb", 21, 29) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -1062,11 +1260,12 @@ def test_kafka_protobuf(kafka_cluster): kafka_format = 'Protobuf', kafka_commit_on_select = 1, kafka_schema = 'kafka.proto:KeyValuePair'; - ''') + """ + ) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.kafka', ignore_error=True) + result += instance.query("SELECT * FROM test.kafka", ignore_error=True) if kafka_check_result(result): break @@ -1074,12 +1273,19 @@ def test_kafka_protobuf(kafka_cluster): def test_kafka_string_field_on_first_position_in_protobuf(kafka_cluster): -# https://github.com/ClickHouse/ClickHouse/issues/12615 - kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 0, 20) - kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 20, 1) - kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 21, 29) + # https://github.com/ClickHouse/ClickHouse/issues/12615 + kafka_produce_protobuf_social( + kafka_cluster, "string_field_on_first_position_in_protobuf", 0, 20 + ) + kafka_produce_protobuf_social( + kafka_cluster, "string_field_on_first_position_in_protobuf", 20, 1 + ) + kafka_produce_protobuf_social( + kafka_cluster, "string_field_on_first_position_in_protobuf", 21, 29 + ) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka ( username String, timestamp Int32 @@ -1091,10 +1297,11 @@ SETTINGS kafka_format = 'Protobuf', kafka_commit_on_select = 1, kafka_schema = 'social:User'; - ''') + """ + ) - result = instance.query('SELECT * FROM test.kafka', ignore_error=True) - expected = '''\ + result = instance.query("SELECT * FROM test.kafka", ignore_error=True) + expected = """\ John Doe 0 1000000 John Doe 1 1000001 John Doe 2 1000002 @@ -1145,11 +1352,13 @@ John Doe 46 1000046 John Doe 47 1000047 John Doe 48 1000048 John Doe 49 1000049 -''' +""" assert TSV(result) == TSV(expected) + def test_kafka_protobuf_no_delimiter(kafka_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -1158,21 +1367,29 @@ def test_kafka_protobuf_no_delimiter(kafka_cluster): kafka_format = 'ProtobufSingle', kafka_commit_on_select = 1, kafka_schema = 'kafka.proto:KeyValuePair'; - ''') + """ + ) - kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 0, 20) - 
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 20, 1) - kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 21, 29) + kafka_produce_protobuf_messages_no_delimeters( + kafka_cluster, "pb_no_delimiter", 0, 20 + ) + kafka_produce_protobuf_messages_no_delimeters( + kafka_cluster, "pb_no_delimiter", 20, 1 + ) + kafka_produce_protobuf_messages_no_delimeters( + kafka_cluster, "pb_no_delimiter", 21, 29 + ) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.kafka', ignore_error=True) + result += instance.query("SELECT * FROM test.kafka", ignore_error=True) if kafka_check_result(result): break kafka_check_result(result, True) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka_writer (key UInt64, value String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -1181,26 +1398,29 @@ def test_kafka_protobuf_no_delimiter(kafka_cluster): kafka_format = 'ProtobufSingle', kafka_commit_on_select = 1, kafka_schema = 'kafka.proto:KeyValuePair'; - ''') + """ + ) - instance.query("INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')") + instance.query( + "INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')" + ) time.sleep(1) result = instance.query("SELECT * FROM test.kafka ORDER BY key", ignore_error=True) - expected = '''\ + expected = """\ 13 Friday 42 Answer to the Ultimate Question of Life, the Universe, and Everything 110 just a number -''' +""" assert TSV(result) == TSV(expected) - def test_kafka_materialized_view(kafka_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -1215,38 +1435,44 @@ def test_kafka_materialized_view(kafka_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; - ''') + """ + ) messages = [] for i in range(50): - messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce(kafka_cluster, 'mv', messages) + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, "mv", messages) while True: - result = instance.query('SELECT * FROM test.view') + result = instance.query("SELECT * FROM test.view") if kafka_check_result(result): break - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) kafka_check_result(result, True) def test_kafka_recreate_kafka_table(kafka_cluster): - ''' - Checks that materialized view work properly after dropping and recreating the Kafka table. - ''' + """ + Checks that materialized view work properly after dropping and recreating the Kafka table. 
+ """ # line for backporting: # admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092") - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_name = "recreate_kafka_table" kafka_create_topic(admin_client, topic_name, num_partitions=6) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -1264,22 +1490,30 @@ def test_kafka_recreate_kafka_table(kafka_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; - ''') + """ + ) messages = [] for i in range(120): - messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce(kafka_cluster,'recreate_kafka_table', messages) + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, "recreate_kafka_table", messages) - instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100) + instance.wait_for_log_line( + "kafka.*Committed offset [0-9]+.*recreate_kafka_table", + repetitions=6, + look_behind_lines=100, + ) - instance.query(''' + instance.query( + """ DROP TABLE test.kafka; - ''') + """ + ) - kafka_produce(kafka_cluster,'recreate_kafka_table', messages) + kafka_produce(kafka_cluster, "recreate_kafka_table", messages) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -1289,17 +1523,24 @@ def test_kafka_recreate_kafka_table(kafka_cluster): kafka_num_consumers = 6, kafka_flush_interval_ms = 1000, kafka_skip_broken_messages = 1048577; - ''') + """ + ) - instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100) + instance.wait_for_log_line( + "kafka.*Committed offset [0-9]+.*recreate_kafka_table", + repetitions=6, + look_behind_lines=100, + ) # data was not flushed yet (it will be flushed 7.5 sec after creating MV) assert int(instance.query("SELECT count() FROM test.view")) == 240 - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) kafka_delete_topic(admin_client, topic_name) @@ -1326,28 +1567,33 @@ def test_librdkafka_compression(kafka_cluster): """ - supported_compression_types = ['gzip', 'snappy', 'lz4', 'zstd', 'uncompressed'] + supported_compression_types = ["gzip", "snappy", "lz4", "zstd", "uncompressed"] messages = [] expected = [] - value = 'foobarbaz'*10 + value = "foobarbaz" * 10 number_of_messages = 50 for i in range(number_of_messages): - messages.append(json.dumps({'key': i, 'value': value})) - expected.append(f'{i}\t{value}') + messages.append(json.dumps({"key": i, "value": value})) + expected.append(f"{i}\t{value}") - expected = '\n'.join(expected) + expected = "\n".join(expected) for compression_type in supported_compression_types: - logging.debug(('Check compression {}'.format(compression_type))) + logging.debug(("Check compression {}".format(compression_type))) - topic_name = 'test_librdkafka_compression_{}'.format(compression_type) - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + topic_name = "test_librdkafka_compression_{}".format(compression_type) + admin_client = KafkaAdminClient( + 
bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) - kafka_create_topic(admin_client, topic_name, config={'compression.type': compression_type}) + kafka_create_topic( + admin_client, topic_name, config={"compression.type": compression_type} + ) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -1357,22 +1603,26 @@ def test_librdkafka_compression(kafka_cluster): kafka_flush_interval_ms = 1000; CREATE MATERIALIZED VIEW test.consumer Engine=Log AS SELECT * FROM test.kafka; - '''.format(topic_name=topic_name) ) + """.format( + topic_name=topic_name + ) + ) kafka_produce(kafka_cluster, topic_name, messages) instance.wait_for_log_line("Committed offset {}".format(number_of_messages)) - result = instance.query('SELECT * FROM test.consumer') + result = instance.query("SELECT * FROM test.consumer") assert TSV(result) == TSV(expected) - instance.query('DROP TABLE test.kafka SYNC') - instance.query('DROP TABLE test.consumer SYNC') + instance.query("DROP TABLE test.kafka SYNC") + instance.query("DROP TABLE test.consumer SYNC") kafka_delete_topic(admin_client, topic_name) def test_kafka_materialized_view_with_subquery(kafka_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -1387,28 +1637,32 @@ def test_kafka_materialized_view_with_subquery(kafka_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM (SELECT * FROM test.kafka); - ''') + """ + ) messages = [] for i in range(50): - messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce(kafka_cluster, 'mvsq', messages) + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, "mvsq", messages) while True: - result = instance.query('SELECT * FROM test.view') + result = instance.query("SELECT * FROM test.view") if kafka_check_result(result): break - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) kafka_check_result(result, True) def test_kafka_many_materialized_views(kafka_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view1; DROP TABLE IF EXISTS test.view2; DROP TABLE IF EXISTS test.consumer1; @@ -1430,25 +1684,28 @@ def test_kafka_many_materialized_views(kafka_cluster): SELECT * FROM test.kafka; CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS SELECT * FROM test.kafka; - ''') + """ + ) messages = [] for i in range(50): - messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce(kafka_cluster, 'mmv', messages) + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, "mmv", messages) while True: - result1 = instance.query('SELECT * FROM test.view1') - result2 = instance.query('SELECT * FROM test.view2') + result1 = instance.query("SELECT * FROM test.view1") + result2 = instance.query("SELECT * FROM test.view2") if kafka_check_result(result1) and kafka_check_result(result2): break - instance.query(''' + instance.query( + """ DROP TABLE test.consumer1; DROP TABLE test.consumer2; DROP TABLE test.view1; DROP TABLE test.view2; - ''') + """ + ) kafka_check_result(result1, True) kafka_check_result(result2, True) @@ -1458,10 +1715,14 @@ def test_kafka_flush_on_big_message(kafka_cluster): # Create batchs of messages of size ~100Kb kafka_messages = 1000 batch_messages = 1000 - messages = 
[json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)] - kafka_produce(kafka_cluster, 'flush', messages) + messages = [ + json.dumps({"key": i, "value": "x" * 100}) * batch_messages + for i in range(kafka_messages) + ] + kafka_produce(kafka_cluster, "flush", messages) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value String) @@ -1476,42 +1737,52 @@ def test_kafka_flush_on_big_message(kafka_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; - ''') + """ + ) - client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) received = False while not received: try: - offsets = client.list_consumer_group_offsets('flush') + offsets = client.list_consumer_group_offsets("flush") for topic, offset in list(offsets.items()): - if topic.topic == 'flush' and offset.offset == kafka_messages: + if topic.topic == "flush" and offset.offset == kafka_messages: received = True break except kafka.errors.GroupCoordinatorNotAvailableError: continue while True: - result = instance.query('SELECT count() FROM test.view') + result = instance.query("SELECT count() FROM test.view") if int(result) == kafka_messages * batch_messages: break - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) - assert int(result) == kafka_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result) == kafka_messages * batch_messages + ), "ClickHouse lost some messages: {}".format(result) def test_kafka_virtual_columns(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_config = { # default retention, since predefined timestamp_ms is used. - 'retention.ms': '-1', + "retention.ms": "-1", } kafka_create_topic(admin_client, "virt1", config=topic_config) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -1519,38 +1790,43 @@ def test_kafka_virtual_columns(kafka_cluster): kafka_group_name = 'virt1', kafka_commit_on_select = 1, kafka_format = 'JSONEachRow'; - ''') + """ + ) - messages = '' + messages = "" for i in range(25): - messages += json.dumps({'key': i, 'value': i}) + '\n' - kafka_produce(kafka_cluster, 'virt1', [messages], 0) + messages += json.dumps({"key": i, "value": i}) + "\n" + kafka_produce(kafka_cluster, "virt1", [messages], 0) - messages = '' + messages = "" for i in range(25, 50): - messages += json.dumps({'key': i, 'value': i}) + '\n' - kafka_produce(kafka_cluster, 'virt1', [messages], 0) + messages += json.dumps({"key": i, "value": i}) + "\n" + kafka_produce(kafka_cluster, "virt1", [messages], 0) - result = '' + result = "" while True: result += instance.query( - '''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''', - ignore_error=True) - if kafka_check_result(result, False, 'test_kafka_virtual1.reference'): + """SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? 
'0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka""", + ignore_error=True, + ) + if kafka_check_result(result, False, "test_kafka_virtual1.reference"): break - kafka_check_result(result, True, 'test_kafka_virtual1.reference') + kafka_check_result(result, True, "test_kafka_virtual1.reference") def test_kafka_virtual_columns_with_materialized_view(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_config = { # default retention, since predefined timestamp_ms is used. - 'retention.ms': '-1', + "retention.ms": "-1", } kafka_create_topic(admin_client, "virt2", config=topic_config) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -1565,31 +1841,38 @@ def test_kafka_virtual_columns_with_materialized_view(kafka_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka; - ''') + """ + ) messages = [] for i in range(50): - messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce(kafka_cluster, 'virt2', messages, 0) + messages.append(json.dumps({"key": i, "value": i})) + kafka_produce(kafka_cluster, "virt2", messages, 0) - sql = 'SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view ORDER BY kafka_key, key' + sql = "SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view ORDER BY kafka_key, key" result = instance.query(sql) iterations = 0 - while not kafka_check_result(result, False, 'test_kafka_virtual2.reference') and iterations < 10: + while ( + not kafka_check_result(result, False, "test_kafka_virtual2.reference") + and iterations < 10 + ): time.sleep(3) iterations += 1 result = instance.query(sql) - kafka_check_result(result, True, 'test_kafka_virtual2.reference') + kafka_check_result(result, True, "test_kafka_virtual2.reference") - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) def test_kafka_insert(kafka_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -1598,35 +1881,37 @@ def test_kafka_insert(kafka_cluster): kafka_format = 'TSV', kafka_commit_on_select = 1, kafka_row_delimiter = '\\n'; - ''') + """ + ) values = [] for i in range(50): values.append("({i}, {i})".format(i=i)) - values = ','.join(values) + values = ",".join(values) while True: try: instance.query("INSERT INTO test.kafka VALUES {}".format(values)) break except QueryRuntimeException as e: - if 'Local: Timed out.' in str(e): + if "Local: Timed out." 
in str(e): continue else: raise messages = [] while True: - messages.extend(kafka_consume(kafka_cluster, 'insert1')) + messages.extend(kafka_consume(kafka_cluster, "insert1")) if len(messages) == 50: break - result = '\n'.join(messages) + result = "\n".join(messages) kafka_check_result(result, True) def test_kafka_produce_consume(kafka_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -1641,7 +1926,8 @@ def test_kafka_produce_consume(kafka_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; - ''') + """ + ) messages_num = 10000 @@ -1649,14 +1935,14 @@ def test_kafka_produce_consume(kafka_cluster): values = [] for i in range(messages_num): values.append("({i}, {i})".format(i=i)) - values = ','.join(values) + values = ",".join(values) while True: try: instance.query("INSERT INTO test.kafka VALUES {}".format(values)) break except QueryRuntimeException as e: - if 'Local: Timed out.' in str(e): + if "Local: Timed out." in str(e): continue else: raise @@ -1670,24 +1956,29 @@ def test_kafka_produce_consume(kafka_cluster): thread.start() while True: - result = instance.query('SELECT count() FROM test.view') + result = instance.query("SELECT count() FROM test.view") time.sleep(1) if int(result) == messages_num * threads_num: break - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) for thread in threads: thread.join() - assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result) == messages_num * threads_num + ), "ClickHouse lost some messages: {}".format(result) def test_kafka_commit_on_block_write(kafka_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -1703,7 +1994,8 @@ def test_kafka_commit_on_block_write(kafka_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; - ''') + """ + ) cancel = threading.Event() @@ -1713,23 +2005,26 @@ def test_kafka_commit_on_block_write(kafka_cluster): while not cancel.is_set(): messages = [] for _ in range(101): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 - kafka_produce(kafka_cluster, 'block', messages) + kafka_produce(kafka_cluster, "block", messages) kafka_thread = threading.Thread(target=produce) kafka_thread.start() - while int(instance.query('SELECT count() FROM test.view')) == 0: + while int(instance.query("SELECT count() FROM test.view")) == 0: time.sleep(1) cancel.set() - instance.query(''' + instance.query( + """ DROP TABLE test.kafka; - ''') + """ + ) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -1738,34 +2033,40 @@ def test_kafka_commit_on_block_write(kafka_cluster): kafka_format = 'JSONEachRow', kafka_max_block_size = 100, kafka_row_delimiter = '\\n'; - ''') + """ + ) - while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]: + while int(instance.query("SELECT uniqExact(key) FROM test.view")) < i[0]: time.sleep(1) - result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view')) + result = int(instance.query("SELECT count() == uniqExact(key) FROM 
test.view")) - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) kafka_thread.join() - assert result == 1, 'Messages from kafka get duplicated!' + assert result == 1, "Messages from kafka get duplicated!" def test_kafka_virtual_columns2(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_config = { # default retention, since predefined timestamp_ms is used. - 'retention.ms': '-1', + "retention.ms": "-1", } kafka_create_topic(admin_client, "virt2_0", num_partitions=2, config=topic_config) kafka_create_topic(admin_client, "virt2_1", num_partitions=2, config=topic_config) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -1776,40 +2077,101 @@ def test_kafka_virtual_columns2(kafka_cluster): CREATE MATERIALIZED VIEW test.view Engine=Log AS SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka; - ''') + """ + ) - producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer) + producer = KafkaProducer( + bootstrap_servers="localhost:{}".format(cluster.kafka_port), + value_serializer=producer_serializer, + key_serializer=producer_serializer, + ) - producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001, - headers=[('content-encoding', b'base64')]) - producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002, - headers=[('empty_value', b''), ('', b'empty name'), ('', b''), ('repetition', b'1'), ('repetition', b'2')]) + producer.send( + topic="virt2_0", + value=json.dumps({"value": 1}), + partition=0, + key="k1", + timestamp_ms=1577836801001, + headers=[("content-encoding", b"base64")], + ) + producer.send( + topic="virt2_0", + value=json.dumps({"value": 2}), + partition=0, + key="k2", + timestamp_ms=1577836802002, + headers=[ + ("empty_value", b""), + ("", b"empty name"), + ("", b""), + ("repetition", b"1"), + ("repetition", b"2"), + ], + ) producer.flush() - producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003, - headers=[('b', b'b'), ('a', b'a')]) - producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004, - headers=[('a', b'a'), ('b', b'b')]) + producer.send( + topic="virt2_0", + value=json.dumps({"value": 3}), + partition=1, + key="k3", + timestamp_ms=1577836803003, + headers=[("b", b"b"), ("a", b"a")], + ) + producer.send( + topic="virt2_0", + value=json.dumps({"value": 4}), + partition=1, + key="k4", + timestamp_ms=1577836804004, + headers=[("a", b"a"), ("b", b"b")], + ) producer.flush() - producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005) - producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006) + producer.send( + topic="virt2_1", + value=json.dumps({"value": 5}), + partition=0, + key="k5", + timestamp_ms=1577836805005, + ) + producer.send( + topic="virt2_1", + value=json.dumps({"value": 6}), + partition=0, + key="k6", + 
timestamp_ms=1577836806006, + ) producer.flush() - producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007) - producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008) + producer.send( + topic="virt2_1", + value=json.dumps({"value": 7}), + partition=1, + key="k7", + timestamp_ms=1577836807007, + ) + producer.send( + topic="virt2_1", + value=json.dumps({"value": 8}), + partition=1, + key="k8", + timestamp_ms=1577836808008, + ) producer.flush() - instance.wait_for_log_line('kafka.*Committed offset 2.*virt2_[01]', repetitions=4, look_behind_lines=6000) + instance.wait_for_log_line( + "kafka.*Committed offset 2.*virt2_[01]", repetitions=4, look_behind_lines=6000 + ) - members = describe_consumer_group(kafka_cluster, 'virt2') + members = describe_consumer_group(kafka_cluster, "virt2") # pprint.pprint(members) # members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0' # members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1' result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True) - expected = '''\ + expected = """\ 1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64'] 2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2'] 3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a'] @@ -1818,26 +2180,32 @@ def test_kafka_virtual_columns2(kafka_cluster): 6 k6 virt2_1 0 1 1577836806 1577836806006 [] [] 7 k7 virt2_1 1 0 1577836807 1577836807007 [] [] 8 k8 virt2_1 1 1 1577836808 1577836808008 [] [] -''' +""" assert TSV(result) == TSV(expected) - instance.query(''' + instance.query( + """ DROP TABLE test.kafka; DROP TABLE test.view; - ''') + """ + ) kafka_delete_topic(admin_client, "virt2_0") kafka_delete_topic(admin_client, "virt2_1") instance.rotate_logs() + def test_kafka_produce_key_timestamp(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_name = "insert3" kafka_create_topic(admin_client, topic_name) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC')) @@ -1858,18 +2226,29 @@ def test_kafka_produce_key_timestamp(kafka_cluster): CREATE MATERIALIZED VIEW test.view Engine=Log AS SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka; - ''') + """ + ) - instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1, 1, 'k1', 1577836801)) - instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2, 2, 'k2', 1577836802)) instance.query( - "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3, 3, - 'k3', - 1577836803, - 4, 4, - 'k4', - 1577836804)) - instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5, 5, 'k5', 1577836805)) + "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format( + 1, 1, "k1", 1577836801 + ) + ) + instance.query( + "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format( + 2, 2, "k2", 1577836802 + ) + ) + instance.query( + "INSERT INTO 
test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format( + 3, 3, "k3", 1577836803, 4, 4, "k4", 1577836804 + ) + ) + instance.query( + "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format( + 5, 5, "k5", 1577836805 + ) + ) instance.wait_for_log_line("Committed offset 5") @@ -1877,13 +2256,13 @@ def test_kafka_produce_key_timestamp(kafka_cluster): # logging.debug(result) - expected = '''\ + expected = """\ 1 1 k1 1577836801 k1 insert3 0 0 1577836801 2 2 k2 1577836802 k2 insert3 0 1 1577836802 3 3 k3 1577836803 k3 insert3 0 2 1577836803 4 4 k4 1577836804 k4 insert3 0 3 1577836804 5 5 k5 1577836805 k5 insert3 0 4 1577836805 -''' +""" assert TSV(result) == TSV(expected) @@ -1891,14 +2270,17 @@ def test_kafka_produce_key_timestamp(kafka_cluster): def test_kafka_insert_avro(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_config = { # default retention, since predefined timestamp_ms is used. - 'retention.ms': '-1', + "retention.ms": "-1", } kafka_create_topic(admin_client, "avro1", config=topic_config) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.kafka; CREATE TABLE test.kafka (key UInt64, value UInt64, _timestamp DateTime('UTC')) ENGINE = Kafka @@ -1907,20 +2289,26 @@ def test_kafka_insert_avro(kafka_cluster): kafka_group_name = 'avro1', kafka_commit_on_select = 1, kafka_format = 'Avro'; - ''') + """ + ) - - instance.query("INSERT INTO test.kafka select number*10 as key, number*100 as value, 1636505534 as _timestamp from numbers(4) SETTINGS output_format_avro_rows_in_file = 2, output_format_avro_codec = 'deflate'") + instance.query( + "INSERT INTO test.kafka select number*10 as key, number*100 as value, 1636505534 as _timestamp from numbers(4) SETTINGS output_format_avro_rows_in_file = 2, output_format_avro_codec = 'deflate'" + ) messages = [] while True: - messages.extend(kafka_consume(kafka_cluster, 'avro1', needDecode = False, timestamp = 1636505534)) + messages.extend( + kafka_consume( + kafka_cluster, "avro1", needDecode=False, timestamp=1636505534 + ) + ) if len(messages) == 2: break - result = '' + result = "" for a_message in messages: - result += decode_avro(a_message) + '\n' + result += decode_avro(a_message) + "\n" expected_result = """{'key': 0, 'value': 0, '_timestamp': 1636505534} {'key': 10, 'value': 100, '_timestamp': 1636505534} @@ -1929,19 +2317,22 @@ def test_kafka_insert_avro(kafka_cluster): {'key': 30, 'value': 300, '_timestamp': 1636505534} """ - assert (result == expected_result) + assert result == expected_result def test_kafka_produce_consume_avro(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_name = "insert_avro" kafka_create_topic(admin_client, topic_name) num_rows = 75 - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.kafka; DROP TABLE IF EXISTS test.kafka_writer; @@ -1963,27 +2354,41 @@ def test_kafka_produce_consume_avro(kafka_cluster): CREATE MATERIALIZED VIEW test.view Engine=Log AS SELECT key, value FROM test.kafka; - ''') + """ + ) - instance.query("INSERT INTO test.kafka_writer select number*10 as key, number*100 as value from numbers({num_rows}) SETTINGS 
output_format_avro_rows_in_file = 7".format(num_rows=num_rows)) + instance.query( + "INSERT INTO test.kafka_writer select number*10 as key, number*100 as value from numbers({num_rows}) SETTINGS output_format_avro_rows_in_file = 7".format( + num_rows=num_rows + ) + ) - instance.wait_for_log_line("Committed offset {offset}".format(offset=math.ceil(num_rows/7))) + instance.wait_for_log_line( + "Committed offset {offset}".format(offset=math.ceil(num_rows / 7)) + ) - expected_num_rows = instance.query("SELECT COUNT(1) FROM test.view", ignore_error=True) - assert (int(expected_num_rows) == num_rows) + expected_num_rows = instance.query( + "SELECT COUNT(1) FROM test.view", ignore_error=True + ) + assert int(expected_num_rows) == num_rows - expected_max_key = instance.query("SELECT max(key) FROM test.view", ignore_error=True) - assert (int(expected_max_key) == (num_rows - 1) * 10) + expected_max_key = instance.query( + "SELECT max(key) FROM test.view", ignore_error=True + ) + assert int(expected_max_key) == (num_rows - 1) * 10 kafka_delete_topic(admin_client, topic_name) def test_kafka_flush_by_time(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_name = "flush_by_time" kafka_create_topic(admin_client, topic_name) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; @@ -2001,40 +2406,45 @@ def test_kafka_flush_by_time(kafka_cluster): CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3)) ENGINE = MergeTree() ORDER BY key; - ''') + """ + ) cancel = threading.Event() def produce(): while not cancel.is_set(): messages = [] - messages.append(json.dumps({'key': 0, 'value': 0})) - kafka_produce(kafka_cluster, 'flush_by_time', messages) + messages.append(json.dumps({"key": 0, "value": 0})) + kafka_produce(kafka_cluster, "flush_by_time", messages) time.sleep(0.8) kafka_thread = threading.Thread(target=produce) kafka_thread.start() - instance.query(''' + instance.query( + """ CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; - ''') + """ + ) time.sleep(18) - result = instance.query('SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view') + result = instance.query("SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view") cancel.set() kafka_thread.join() # kafka_cluster.open_bash_shell('instance') - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) - assert TSV(result) == TSV('1 1') + assert TSV(result) == TSV("1 1") kafka_delete_topic(admin_client, topic_name) @@ -2044,13 +2454,14 @@ def test_kafka_flush_by_block_size(kafka_cluster): def produce(): while not cancel.is_set(): messages = [] - messages.append(json.dumps({'key': 0, 'value': 0})) - kafka_produce(kafka_cluster, 'flush_by_block_size', messages) + messages.append(json.dumps({"key": 0, "value": 0})) + kafka_produce(kafka_cluster, "flush_by_block_size", messages) kafka_thread = threading.Thread(target=produce) kafka_thread.start() - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; @@ -2071,11 +2482,15 @@ def test_kafka_flush_by_block_size(kafka_cluster): CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; - ''') + """ + ) # Wait for Kafka engine to consume this data - while 1 != int(instance.query( - 
"SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")): + while 1 != int( + instance.query( + "SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'" + ) + ): time.sleep(0.5) cancel.set() @@ -2085,23 +2500,30 @@ def test_kafka_flush_by_block_size(kafka_cluster): result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'") # logging.debug(result) - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) # 100 = first poll should return 100 messages (and rows) # not waiting for stream_flush_interval_ms - assert int( - result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!' + assert ( + int(result) == 100 + ), "Messages from kafka should be flushed when block of size kafka_max_block_size is formed!" + def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_name = "topic_with_multiple_partitions2" kafka_create_topic(admin_client, topic_name, num_partitions=10) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -2117,7 +2539,8 @@ def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka; - ''') + """ + ) messages = [] count = 0 @@ -2125,27 +2548,30 @@ def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster): rows = [] for dummy_row in range(random.randrange(3, 10)): count = count + 1 - rows.append(json.dumps({'key': count, 'value': count})) + rows.append(json.dumps({"key": count, "value": count})) messages.append("\n".join(rows)) - kafka_produce(kafka_cluster, 'topic_with_multiple_partitions2', messages) + kafka_produce(kafka_cluster, "topic_with_multiple_partitions2", messages) - instance.wait_for_log_line('kafka.*Stalled', repetitions=5) + instance.wait_for_log_line("kafka.*Stalled", repetitions=5) - result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view') + result = instance.query("SELECT count(), uniqExact(key), max(key) FROM test.view") logging.debug(result) - assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count)) + assert TSV(result) == TSV("{0}\t{0}\t{0}".format(count)) - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) kafka_delete_topic(admin_client, topic_name) def test_kafka_rebalance(kafka_cluster): NUMBER_OF_CONSURRENT_CONSUMERS = 11 - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.destination; CREATE TABLE test.destination ( key UInt64, @@ -2159,13 +2585,16 @@ def test_kafka_rebalance(kafka_cluster): ) ENGINE = MergeTree() ORDER BY key; - ''') + """ + ) # kafka_cluster.open_bash_shell('instance') # time.sleep(2) - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_name = "topic_with_multiple_partitions" kafka_create_topic(admin_client, topic_name, num_partitions=11) @@ -2177,18 +2606,21 @@ def test_kafka_rebalance(kafka_cluster): while not 
cancel.is_set(): messages = [] for _ in range(59): - messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]})) + messages.append( + json.dumps({"key": msg_index[0], "value": msg_index[0]}) + ) msg_index[0] += 1 - kafka_produce(kafka_cluster, 'topic_with_multiple_partitions', messages) + kafka_produce(kafka_cluster, "topic_with_multiple_partitions", messages) kafka_thread = threading.Thread(target=produce) kafka_thread.start() for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS): - table_name = 'kafka_consumer{}'.format(consumer_index) + table_name = "kafka_consumer{}".format(consumer_index) logging.debug(("Setting up {}".format(table_name))) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.{0}; DROP TABLE IF EXISTS test.{0}_mv; CREATE TABLE test.{0} (key UInt64, value UInt64) @@ -2210,29 +2642,50 @@ def test_kafka_rebalance(kafka_cluster): _timestamp, '{0}' as _consumed_by FROM test.{0}; - '''.format(table_name)) + """.format( + table_name + ) + ) # kafka_cluster.open_bash_shell('instance') # Waiting for test.kafka_consumerX to start consume ... - instance.wait_for_log_line('kafka_consumer{}.*Polled offset [0-9]+'.format(consumer_index)) + instance.wait_for_log_line( + "kafka_consumer{}.*Polled offset [0-9]+".format(consumer_index) + ) cancel.set() # I leave last one working by intent (to finish consuming after all rebalances) for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS - 1): logging.debug(("Dropping test.kafka_consumer{}".format(consumer_index))) - instance.query('DROP TABLE IF EXISTS test.kafka_consumer{} SYNC'.format(consumer_index)) + instance.query( + "DROP TABLE IF EXISTS test.kafka_consumer{} SYNC".format(consumer_index) + ) # logging.debug(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')) # kafka_cluster.open_bash_shell('instance') while 1: - messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination')) + messages_consumed = int( + instance.query("SELECT uniqExact(key) FROM test.destination") + ) if messages_consumed >= msg_index[0]: break time.sleep(1) - logging.debug(("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed, msg_index[0]))) + logging.debug( + ( + "Waiting for finishing consuming (have {}, should be {})".format( + messages_consumed, msg_index[0] + ) + ) + ) - logging.debug((instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))) + logging.debug( + ( + instance.query( + "SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination" + ) + ) + ) # Some queries to debug... 
# SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1) @@ -2254,31 +2707,40 @@ def test_kafka_rebalance(kafka_cluster): # # select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = ''; - result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination')) + result = int( + instance.query("SELECT count() == uniqExact(key) FROM test.destination") + ) for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS): logging.debug(("kafka_consumer{}".format(consumer_index))) - table_name = 'kafka_consumer{}'.format(consumer_index) - instance.query(''' + table_name = "kafka_consumer{}".format(consumer_index) + instance.query( + """ DROP TABLE IF EXISTS test.{0}; DROP TABLE IF EXISTS test.{0}_mv; - '''.format(table_name)) + """.format( + table_name + ) + ) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.destination; - ''') + """ + ) kafka_thread.join() - assert result == 1, 'Messages from kafka get duplicated!' + assert result == 1, "Messages from kafka get duplicated!" kafka_delete_topic(admin_client, topic_name) def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster): - messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)] - kafka_produce(kafka_cluster, 'no_holes_when_write_suffix_failed', messages) + messages = [json.dumps({"key": j + 1, "value": "x" * 300}) for j in range(22)] + kafka_produce(kafka_cluster, "no_holes_when_write_suffix_failed", messages) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.consumer; @@ -2294,41 +2756,49 @@ def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster): CREATE TABLE test.view (key UInt64, value String) ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1') ORDER BY key; - ''') + """ + ) # init PartitionManager (it starts container) earlier pm = PartitionManager() - instance.query(''' + instance.query( + """ CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka WHERE NOT sleepEachRow(0.25); - ''') + """ + ) instance.wait_for_log_line("Polled batch of 20 messages") # the tricky part here is that disconnect should happen after write prefix, but before write suffix # we have 0.25 (sleepEachRow) * 20 ( Rows ) = 5 sec window after "Polled batch of 20 messages" # while materialized view is working to inject zookeeper failure pm.drop_instance_zk_connections(instance) - instance.wait_for_log_line("Error.*(session has been expired|Connection loss).*while pushing to view") + instance.wait_for_log_line( + "Error.*(session has been expired|Connection loss).*while pushing to view" + ) pm.heal_all() instance.wait_for_log_line("Committed offset 22") - result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view') + result = instance.query("SELECT count(), uniqExact(key), max(key) FROM test.view") logging.debug(result) # kafka_cluster.open_bash_shell('instance') - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) - assert TSV(result) == TSV('22\t22\t22') + assert TSV(result) == TSV("22\t22\t22") def test_exception_from_destructor(kafka_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -2336,35 +2806,45 @@ def 
test_exception_from_destructor(kafka_cluster): kafka_group_name = '', kafka_commit_on_select = 1, kafka_format = 'JSONEachRow'; - ''') - instance.query_and_get_error(''' + """ + ) + instance.query_and_get_error( + """ SELECT * FROM test.kafka; - ''') - instance.query(''' + """ + ) + instance.query( + """ DROP TABLE test.kafka; - ''') + """ + ) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', kafka_topic_list = 'xyz', kafka_group_name = '', kafka_format = 'JSONEachRow'; - ''') - instance.query(''' + """ + ) + instance.query( + """ DROP TABLE test.kafka; - ''') + """ + ) # kafka_cluster.open_bash_shell('instance') - assert TSV(instance.query('SELECT 1')) == TSV('1') + assert TSV(instance.query("SELECT 1")) == TSV("1") def test_commits_of_unprocessed_messages_on_drop(kafka_cluster): - messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)] - kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages) + messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(1)] + kafka_produce(kafka_cluster, "commits_of_unprocessed_messages_on_drop", messages) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.destination SYNC; CREATE TABLE test.destination ( key UInt64, @@ -2398,10 +2878,11 @@ def test_commits_of_unprocessed_messages_on_drop(kafka_cluster): _partition, _timestamp FROM test.kafka; - ''') + """ + ) # Waiting for test.kafka_consumer to start consume - instance.wait_for_log_line('Committed offset [0-9]+') + instance.wait_for_log_line("Committed offset [0-9]+") cancel = threading.Event() @@ -2411,20 +2892,25 @@ def test_commits_of_unprocessed_messages_on_drop(kafka_cluster): while not cancel.is_set(): messages = [] for _ in range(113): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 - kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages) + kafka_produce( + kafka_cluster, "commits_of_unprocessed_messages_on_drop", messages + ) time.sleep(0.5) kafka_thread = threading.Thread(target=produce) kafka_thread.start() time.sleep(4) - instance.query(''' + instance.query( + """ DROP TABLE test.kafka SYNC; - ''') + """ + ) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -2433,31 +2919,37 @@ def test_commits_of_unprocessed_messages_on_drop(kafka_cluster): kafka_format = 'JSONEachRow', kafka_max_block_size = 10000, kafka_flush_interval_ms = 1000; - ''') + """ + ) cancel.set() - instance.wait_for_log_line('kafka.*Stalled', repetitions=5) + instance.wait_for_log_line("kafka.*Stalled", repetitions=5) # kafka_cluster.open_bash_shell('instance') # SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key; - result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination') + result = instance.query( + "SELECT count(), uniqExact(key), max(key) FROM test.destination" + ) logging.debug(result) - instance.query(''' + instance.query( + """ DROP TABLE test.kafka_consumer SYNC; DROP TABLE test.destination SYNC; - ''') + """ + ) kafka_thread.join() - assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0] - 1)), 'Missing data!' + assert TSV(result) == TSV("{0}\t{0}\t{0}".format(i[0] - 1)), "Missing data!" 
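# --- Illustrative sketch (editor's note), not part of the patch itself. ---
# The tests in these hunks repeatedly call a kafka_produce(kafka_cluster, topic, messages)
# helper that is defined earlier in this module, outside the quoted hunks. Below is a
# hedged reconstruction of what such a helper can look like on top of kafka-python's
# KafkaProducer; the real helper's exact signature and its producer_serializer may differ.
from kafka import KafkaProducer


def kafka_produce_sketch(kafka_cluster, topic, messages):
    # messages arrive as ready-made JSON strings (json.dumps(...)), so a plain
    # str -> bytes serializer is enough; kafka-python sends raw bytes on the wire.
    producer = KafkaProducer(
        bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port),
        value_serializer=lambda v: v.encode("utf-8"),
    )
    for message in messages:
        producer.send(topic=topic, value=message)
    # flush() blocks until the buffered records are delivered, so a test can rely on
    # the messages being visible to the Kafka engine once this call returns.
    producer.flush()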
def test_bad_reschedule(kafka_cluster): - messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)] - kafka_produce(kafka_cluster, 'test_bad_reschedule', messages) + messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(20000)] + kafka_produce(kafka_cluster, "test_bad_reschedule", messages) - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -2478,18 +2970,27 @@ def test_bad_reschedule(kafka_cluster): _partition, _timestamp FROM test.kafka; - ''') + """ + ) instance.wait_for_log_line("Committed offset 20000") - assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8 + assert ( + int( + instance.query( + "SELECT max(consume_ts) - min(consume_ts) FROM test.destination" + ) + ) + < 8 + ) def test_kafka_duplicates_when_commit_failed(kafka_cluster): - messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)] - kafka_produce(kafka_cluster, 'duplicates_when_commit_failed', messages) + messages = [json.dumps({"key": j + 1, "value": "x" * 300}) for j in range(22)] + kafka_produce(kafka_cluster, "duplicates_when_commit_failed", messages) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view SYNC; DROP TABLE IF EXISTS test.consumer SYNC; @@ -2505,45 +3006,52 @@ def test_kafka_duplicates_when_commit_failed(kafka_cluster): CREATE TABLE test.view (key UInt64, value String) ENGINE = MergeTree() ORDER BY key; - ''') + """ + ) - instance.query(''' + instance.query( + """ CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka WHERE NOT sleepEachRow(0.25); - ''') + """ + ) instance.wait_for_log_line("Polled batch of 20 messages") # the tricky part here is that disconnect should happen after write prefix, but before we do commit # we have 0.25 (sleepEachRow) * 20 ( Rows ) = 5 sec window after "Polled batch of 20 messages" # while materialized view is working to inject zookeeper failure - kafka_cluster.pause_container('kafka1') + kafka_cluster.pause_container("kafka1") # if we restore the connection too fast (<30sec) librdkafka will not report any timeout # (alternative is to decrease the default session timeouts for librdkafka) # # when the delay is too long (>50sec) broker will decide to remove us from the consumer group, # and will start answering "Broker: Unknown member" - instance.wait_for_log_line("Exception during commit attempt: Local: Waiting for coordinator", timeout=45) + instance.wait_for_log_line( + "Exception during commit attempt: Local: Waiting for coordinator", timeout=45 + ) instance.wait_for_log_line("All commit attempts failed", look_behind_lines=500) - kafka_cluster.unpause_container('kafka1') + kafka_cluster.unpause_container("kafka1") # kafka_cluster.open_bash_shell('instance') instance.wait_for_log_line("Committed offset 22") - result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view') + result = instance.query("SELECT count(), uniqExact(key), max(key) FROM test.view") logging.debug(result) - instance.query(''' + instance.query( + """ DROP TABLE test.consumer SYNC; DROP TABLE test.view SYNC; - ''') + """ + ) # After https://github.com/edenhill/librdkafka/issues/2631 # timeout triggers rebalance, making further commits to the topic after getting back online # impossible. So we have a duplicate in that scenario, but we report that situation properly. 
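    # (Editor's note, hedged reading of the expected numbers below: the first polled batch
    #  of ~20 rows reaches test.view before the broker is paused, its offset commit fails,
    #  and after the rebalance the consumer re-reads the topic from the last committed
    #  offset, i.e. all 22 messages again — so count() == 20 + 22 == 42 while
    #  uniqExact(key) and max(key) stay at 22.)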
- assert TSV(result) == TSV('42\t22\t22') + assert TSV(result) == TSV("42\t22\t22") # if we came to partition end we will repeat polling until reaching kafka_max_block_size or flush_interval @@ -2553,7 +3061,8 @@ def test_kafka_duplicates_when_commit_failed(kafka_cluster): # easier to understand, so let's keep it as is for now. # also we can came to eof because we drained librdkafka internal queue too fast def test_premature_flush_on_eof(kafka_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -2573,17 +3082,19 @@ def test_premature_flush_on_eof(kafka_cluster): ) ENGINE = MergeTree() ORDER BY key; - ''') + """ + ) # messages created here will be consumed immedeately after MV creation # reaching topic EOF. # But we should not do flush immedeately after reaching EOF, because # next poll can return more data, and we should respect kafka_flush_interval_ms # and try to form bigger block - messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)] - kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages) + messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(1)] + kafka_produce(kafka_cluster, "premature_flush_on_eof", messages) - instance.query(''' + instance.query( + """ CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS SELECT key, @@ -2594,7 +3105,8 @@ def test_premature_flush_on_eof(kafka_cluster): _partition, _timestamp FROM test.kafka; - ''') + """ + ) # all subscriptions/assignments done during select, so it start sending data to test.destination # immediately after creation of MV @@ -2603,7 +3115,7 @@ def test_premature_flush_on_eof(kafka_cluster): instance.wait_for_log_line("Stalled") # produce more messages after delay - kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages) + kafka_produce(kafka_cluster, "premature_flush_on_eof", messages) # data was not flushed yet (it will be flushed 7.5 sec after creating MV) assert int(instance.query("SELECT count() FROM test.destination")) == 0 @@ -2611,22 +3123,27 @@ def test_premature_flush_on_eof(kafka_cluster): instance.wait_for_log_line("Committed offset 2") # it should be single part, i.e. 
single insert - result = instance.query('SELECT _part, count() FROM test.destination group by _part') - assert TSV(result) == TSV('all_1_1_0\t2') + result = instance.query( + "SELECT _part, count() FROM test.destination group by _part" + ) + assert TSV(result) == TSV("all_1_1_0\t2") - instance.query(''' + instance.query( + """ DROP TABLE test.kafka_consumer; DROP TABLE test.destination; - ''') + """ + ) def test_kafka_unavailable(kafka_cluster): - messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)] - kafka_produce(kafka_cluster, 'test_bad_reschedule', messages) + messages = [json.dumps({"key": j + 1, "value": j + 1}) for j in range(20000)] + kafka_produce(kafka_cluster, "test_bad_reschedule", messages) - kafka_cluster.pause_container('kafka1') + kafka_cluster.pause_container("kafka1") - instance.query(''' + instance.query( + """ CREATE TABLE test.test_bad_reschedule (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -2647,16 +3164,19 @@ def test_kafka_unavailable(kafka_cluster): _partition, _timestamp FROM test.test_bad_reschedule; - ''') + """ + ) instance.query("SELECT * FROM test.test_bad_reschedule") instance.query("SELECT count() FROM test.destination_unavailable") # enough to trigger issue time.sleep(30) - kafka_cluster.unpause_container('kafka1') + kafka_cluster.unpause_container("kafka1") - while int(instance.query("SELECT count() FROM test.destination_unavailable")) < 20000: + while ( + int(instance.query("SELECT count() FROM test.destination_unavailable")) < 20000 + ): print("Waiting for consume") time.sleep(1) @@ -2666,7 +3186,8 @@ def test_kafka_issue14202(kafka_cluster): INSERT INTO Kafka Engine from an empty SELECT sub query was leading to failure """ - instance.query(''' + instance.query( + """ CREATE TABLE test.empty_table ( dt Date, some_string String @@ -2681,20 +3202,25 @@ def test_kafka_issue14202(kafka_cluster): kafka_topic_list = 'issue14202', kafka_group_name = 'issue14202', kafka_format = 'JSONEachRow'; - ''') + """ + ) instance.query( - 'INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )') + "INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )" + ) # check instance is alive - assert TSV(instance.query('SELECT 1')) == TSV('1') - instance.query(''' + assert TSV(instance.query("SELECT 1")) == TSV("1") + instance.query( + """ DROP TABLE test.empty_table; DROP TABLE test.kafka_q; - ''') + """ + ) def test_kafka_csv_with_thread_per_consumer(kafka_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092', @@ -2705,26 +3231,30 @@ def test_kafka_csv_with_thread_per_consumer(kafka_cluster): kafka_num_consumers = 4, kafka_commit_on_select = 1, kafka_thread_per_consumer = 1; - ''') + """ + ) messages = [] for i in range(50): - messages.append('{i}, {i}'.format(i=i)) - kafka_produce(kafka_cluster, 'csv_with_thread_per_consumer', messages) + messages.append("{i}, {i}".format(i=i)) + kafka_produce(kafka_cluster, "csv_with_thread_per_consumer", messages) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.kafka', ignore_error=True) + result += instance.query("SELECT * FROM test.kafka", ignore_error=True) if kafka_check_result(result): break kafka_check_result(result, True) + def random_string(size=8): - return ''.join(random.choices(string.ascii_uppercase + 
string.digits, k=size)) + return "".join(random.choices(string.ascii_uppercase + string.digits, k=size)) + def test_kafka_engine_put_errors_to_stream(kafka_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.kafka; DROP TABLE IF EXISTS test.kafka_data; DROP TABLE IF EXISTS test.kafka_errors; @@ -2750,54 +3280,63 @@ def test_kafka_engine_put_errors_to_stream(kafka_cluster): _raw_message AS raw, _error AS error FROM test.kafka WHERE length(_error) > 0; - ''') + """ + ) messages = [] for i in range(128): if i % 2 == 0: - messages.append(json.dumps({'i': i, 's': random_string(8)})) + messages.append(json.dumps({"i": i, "s": random_string(8)})) else: # Unexpected json content for table test.kafka. - messages.append(json.dumps({'i': 'n_' + random_string(4), 's': random_string(8)})) + messages.append( + json.dumps({"i": "n_" + random_string(4), "s": random_string(8)}) + ) - kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream', messages) + kafka_produce(kafka_cluster, "kafka_engine_put_errors_to_stream", messages) instance.wait_for_log_line("Committed offset 128") - assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('64') - assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64') + assert TSV(instance.query("SELECT count() FROM test.kafka_data")) == TSV("64") + assert TSV(instance.query("SELECT count() FROM test.kafka_errors")) == TSV("64") - instance.query(''' + instance.query( + """ DROP TABLE test.kafka; DROP TABLE test.kafka_data; DROP TABLE test.kafka_errors; - ''') + """ + ) + def gen_normal_json(): return '{"i":1000, "s":"ABC123abc"}' + def gen_malformed_json(): return '{"i":"n1000", "s":"1000"}' -def gen_message_with_jsons(jsons = 10, malformed = 0): + +def gen_message_with_jsons(jsons=10, malformed=0): s = io.StringIO() # we don't care on which position error will be added # (we skip whole broken message), but we need to be # sure that at least one error will be added, # otherwise test will fail. 
- error_pos = random.randint(0,jsons-1) + error_pos = random.randint(0, jsons - 1) - for i in range (jsons): + for i in range(jsons): if malformed and i == error_pos: s.write(gen_malformed_json()) else: s.write(gen_normal_json()) - s.write(' ') + s.write(" ") return s.getvalue() def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.kafka; DROP TABLE IF EXISTS test.kafka_data; DROP TABLE IF EXISTS test.kafka_errors; @@ -2824,7 +3363,8 @@ def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_clus _raw_message AS raw, _error AS error FROM test.kafka WHERE length(_error) > 0; - ''') + """ + ) messages = [] for i in range(128): @@ -2833,205 +3373,215 @@ def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_clus else: messages.append(gen_message_with_jsons(10, 0)) - kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream_with_random_malformed_json', messages) + kafka_produce( + kafka_cluster, + "kafka_engine_put_errors_to_stream_with_random_malformed_json", + messages, + ) instance.wait_for_log_line("Committed offset 128") # 64 good messages, each containing 10 rows - assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('640') + assert TSV(instance.query("SELECT count() FROM test.kafka_data")) == TSV("640") # 64 bad messages, each containing some broken row - assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64') + assert TSV(instance.query("SELECT count() FROM test.kafka_errors")) == TSV("64") - instance.query(''' + instance.query( + """ DROP TABLE test.kafka; DROP TABLE test.kafka_data; DROP TABLE test.kafka_errors; - ''') + """ + ) + def test_kafka_formats_with_broken_message(kafka_cluster): # data was dumped from clickhouse itself in a following manner # clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g' - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) all_formats = { ## Text formats ## # dumped with clickhouse-client ... 
| perl -pe 's/\n/\\n/; s/\t/\\t/g;' - 'JSONEachRow': { - 'data_sample': [ + "JSONEachRow": { + "data_sample": [ '{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', '{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', '{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n', # broken message '{"id":"0","blockNo":"BAD","val1":"AM","val2":0.5,"val3":1}', ], - 'expected':'''{"raw_message":"{\\"id\\":\\"0\\",\\"blockNo\\":\\"BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}': (while reading the value of key blockNo)"}''', - 'supports_empty_value': True, - 'printable': True, + "expected": """{"raw_message":"{\\"id\\":\\"0\\",\\"blockNo\\":\\"BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}': (while reading the value of key blockNo)"}""", + "supports_empty_value": True, + "printable": True, }, # JSONAsString doesn't fit to that test, and tested separately - 'JSONCompactEachRow': { - 'data_sample': [ + "JSONCompactEachRow": { + "data_sample": [ '["0", 0, "AM", 0.5, 1]\n', '["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n', '["0", 0, "AM", 0.5, 1]\n', # broken message '["0", "BAD", "AM", 0.5, 1]', ], - 'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse input: expected '\\"' before: 'BAD\\", \\"AM\\", 0.5, 1]': (while reading the value of key blockNo)"}''', - 'supports_empty_value': True, - 'printable':True, + "expected": """{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse input: expected '\\"' before: 'BAD\\", \\"AM\\", 0.5, 1]': (while reading the value of key blockNo)"}""", + "supports_empty_value": True, + "printable": True, }, - 'JSONCompactEachRowWithNamesAndTypes': { - 'data_sample': [ + "JSONCompactEachRowWithNamesAndTypes": { + "data_sample": [ '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n', '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 
1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n', '["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n', # broken message '["0", "BAD", "AM", 0.5, 1]', ], - 'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse JSON string: expected opening quote"}''', - 'printable':True, + "expected": """{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse JSON string: expected opening quote"}""", + "printable": True, }, - 'TSKV': { - 'data_sample': [ - 'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', - 'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', - 'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n', + "TSKV": { + "data_sample": [ + "id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n", + "id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n", + "id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n", # broken message - 'id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n', + "id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n", ], - 'expected':'{"raw_message":"id=0\\tblockNo=BAD\\tval1=AM\\tval2=0.5\\tval3=1\\n","error":"Found garbage after field in TSKV format: blockNo: (at row 1)\\n"}', - 'printable':True, + "expected": '{"raw_message":"id=0\\tblockNo=BAD\\tval1=AM\\tval2=0.5\\tval3=1\\n","error":"Found garbage after field in TSKV format: blockNo: (at row 1)\\n"}', + "printable": True, }, - 'CSV': { - 'data_sample': [ + "CSV": { + "data_sample": [ '0,0,"AM",0.5,1\n', '1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n', '0,0,"AM",0.5,1\n', # broken message '0,"BAD","AM",0.5,1\n', ], - 'expected':'''{"raw_message":"0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''', - 'printable':True, - 'supports_empty_value': True, + "expected": """{"raw_message":"0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could 
not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}""", + "printable": True, + "supports_empty_value": True, }, - 'TSV': { - 'data_sample': [ - '0\t0\tAM\t0.5\t1\n', - '1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', - '0\t0\tAM\t0.5\t1\n', + "TSV": { + "data_sample": [ + "0\t0\tAM\t0.5\t1\n", + "1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n", + "0\t0\tAM\t0.5\t1\n", # broken message - '0\tBAD\tAM\t0.5\t1\n', + "0\tBAD\tAM\t0.5\t1\n", ], - 'expected':'''{"raw_message":"0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''', - 'supports_empty_value': True, - 'printable':True, + "expected": """{"raw_message":"0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}""", + "supports_empty_value": True, + "printable": True, }, - 'CSVWithNames': { - 'data_sample': [ + "CSVWithNames": { + "data_sample": [ '"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n', '"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n', '"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n', # broken message '"id","blockNo","val1","val2","val3"\n0,"BAD","AM",0.5,1\n', ], - 'expected':'''{"raw_message":"\\"id\\",\\"blockNo\\",\\"val1\\",\\"val2\\",\\"val3\\"\\n0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''', - 'printable':True, + "expected": """{"raw_message":"\\"id\\",\\"blockNo\\",\\"val1\\",\\"val2\\",\\"val3\\"\\n0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}""", + "printable": True, }, - 'Values': { - 'data_sample': [ + "Values": { + "data_sample": [ "(0,0,'AM',0.5,1)", "(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)", "(0,0,'AM',0.5,1)", # broken message "(0,'BAD','AM',0.5,1)", ], - 'expected':r'''{"raw_message":"(0,'BAD','AM',0.5,1)","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. 
Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception"}''', - 'supports_empty_value': True, - 'printable':True, + "expected": r"""{"raw_message":"(0,'BAD','AM',0.5,1)","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception"}""", + "supports_empty_value": True, + "printable": True, }, - 'TSVWithNames': { - 'data_sample': [ - 'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n', - 'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', - 'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n', + "TSVWithNames": { + "data_sample": [ + "id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n", + "id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n", + "id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n", # broken message - 'id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n', + "id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n", ], - 'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''', - 'supports_empty_value': True, - 'printable':True, + "expected": """{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}""", + "supports_empty_value": True, + "printable": True, }, - 'TSVWithNamesAndTypes': { - 'data_sample': [ - 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n', - 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n', - 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n', + "TSVWithNamesAndTypes": { + "data_sample": [ + "id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n", + "id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n", + "id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n", # broken message - 'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n', + 
"id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n", ], - 'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\nInt64\\tUInt16\\tString\\tFloat32\\tUInt8\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''', - 'printable':True, + "expected": """{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\nInt64\\tUInt16\\tString\\tFloat32\\tUInt8\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}""", + "printable": True, }, - 'Native': { - 'data_sample': [ - b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01', - b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01', - b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01', + "Native": { + "data_sample": [ + b"\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01", + 
b"\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01", + b"\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01", # broken message - b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x53\x74\x72\x69\x6e\x67\x03\x42\x41\x44\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01', + b"\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x53\x74\x72\x69\x6e\x67\x03\x42\x41\x44\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01", ], - 'expected':'''{"raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801","error":"Cannot convert: String to UInt16"}''', - 'printable':False, + "expected": """{"raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801","error":"Cannot convert: String to UInt16"}""", + "printable": False, }, - 'RowBinary': { - 'data_sample': [ - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', - 
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', - b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', + "RowBinary": { + "data_sample": [ + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", + b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", # broken message - b'\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01', + b"\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01", ], - 'expected':'{"raw_message":"00000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}', - 'printable':False, + "expected": '{"raw_message":"00000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. 
Bytes expected: 65.: (at row 1)\\n"}', + "printable": False, }, - 'RowBinaryWithNamesAndTypes': { - 'data_sample': [ - b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', - b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', - b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01', + "RowBinaryWithNamesAndTypes": { + "data_sample": [ + b"\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", + 
b"\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", + b"\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01", # broken message - b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x53\x74\x72\x69\x6e\x67\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01', + b"\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x53\x74\x72\x69\x6e\x67\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01", ], - 'expected':'{"raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01","error":"Type of \'blockNo\' must be UInt16, not String"}', - 'printable':False, + "expected": '{"raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01","error":"Type of \'blockNo\' must be UInt16, not String"}', + "printable": False, }, - 'ORC': { - 'data_sample': [ - 
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', - 
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', - 
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', + "ORC": { + "data_sample": [ + 
b"\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18", + 
b"\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18", + 
b"\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18", # broken message - 
b'\x4f\x52\x43\x0a\x0b\x0a\x03\x00\x00\x00\x12\x04\x08\x01\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x29\x0a\x04\x00\x00\x00\x00\x12\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\xff\x80\xff\x80\xff\x00\xff\x80\xff\x03\x42\x41\x44\xff\x80\xff\x02\x41\x4d\xff\x80\x00\x00\x00\x3f\xff\x80\xff\x01\x0a\x06\x08\x06\x10\x00\x18\x0d\x0a\x06\x08\x06\x10\x01\x18\x17\x0a\x06\x08\x06\x10\x02\x18\x14\x0a\x06\x08\x06\x10\x03\x18\x14\x0a\x06\x08\x06\x10\x04\x18\x2b\x0a\x06\x08\x06\x10\x05\x18\x17\x0a\x06\x08\x00\x10\x00\x18\x02\x0a\x06\x08\x00\x10\x01\x18\x02\x0a\x06\x08\x01\x10\x01\x18\x02\x0a\x06\x08\x00\x10\x02\x18\x02\x0a\x06\x08\x02\x10\x02\x18\x02\x0a\x06\x08\x01\x10\x02\x18\x03\x0a\x06\x08\x00\x10\x03\x18\x02\x0a\x06\x08\x02\x10\x03\x18\x02\x0a\x06\x08\x01\x10\x03\x18\x02\x0a\x06\x08\x00\x10\x04\x18\x02\x0a\x06\x08\x01\x10\x04\x18\x04\x0a\x06\x08\x00\x10\x05\x18\x02\x0a\x06\x08\x01\x10\x05\x18\x02\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x1a\x03\x47\x4d\x54\x0a\x59\x0a\x04\x08\x01\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x08\x03\x10\xec\x02\x1a\x0c\x08\x03\x10\x8e\x01\x18\x1d\x20\xc1\x01\x28\x01\x22\x2e\x08\x0c\x12\x05\x01\x02\x03\x04\x05\x1a\x02\x69\x64\x1a\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x1a\x04\x76\x61\x6c\x31\x1a\x04\x76\x61\x6c\x32\x1a\x04\x76\x61\x6c\x33\x20\x00\x28\x00\x30\x00\x22\x08\x08\x04\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x05\x20\x00\x28\x00\x30\x00\x22\x08\x08\x01\x20\x00\x28\x00\x30\x00\x30\x01\x3a\x04\x08\x01\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x3a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x40\x90\x4e\x48\x01\x08\xd5\x01\x10\x00\x18\x80\x80\x04\x22\x02\x00\x0b\x28\x5b\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18', + 
b"\x4f\x52\x43\x0a\x0b\x0a\x03\x00\x00\x00\x12\x04\x08\x01\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x29\x0a\x04\x00\x00\x00\x00\x12\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\xff\x80\xff\x80\xff\x00\xff\x80\xff\x03\x42\x41\x44\xff\x80\xff\x02\x41\x4d\xff\x80\x00\x00\x00\x3f\xff\x80\xff\x01\x0a\x06\x08\x06\x10\x00\x18\x0d\x0a\x06\x08\x06\x10\x01\x18\x17\x0a\x06\x08\x06\x10\x02\x18\x14\x0a\x06\x08\x06\x10\x03\x18\x14\x0a\x06\x08\x06\x10\x04\x18\x2b\x0a\x06\x08\x06\x10\x05\x18\x17\x0a\x06\x08\x00\x10\x00\x18\x02\x0a\x06\x08\x00\x10\x01\x18\x02\x0a\x06\x08\x01\x10\x01\x18\x02\x0a\x06\x08\x00\x10\x02\x18\x02\x0a\x06\x08\x02\x10\x02\x18\x02\x0a\x06\x08\x01\x10\x02\x18\x03\x0a\x06\x08\x00\x10\x03\x18\x02\x0a\x06\x08\x02\x10\x03\x18\x02\x0a\x06\x08\x01\x10\x03\x18\x02\x0a\x06\x08\x00\x10\x04\x18\x02\x0a\x06\x08\x01\x10\x04\x18\x04\x0a\x06\x08\x00\x10\x05\x18\x02\x0a\x06\x08\x01\x10\x05\x18\x02\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x1a\x03\x47\x4d\x54\x0a\x59\x0a\x04\x08\x01\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x08\x03\x10\xec\x02\x1a\x0c\x08\x03\x10\x8e\x01\x18\x1d\x20\xc1\x01\x28\x01\x22\x2e\x08\x0c\x12\x05\x01\x02\x03\x04\x05\x1a\x02\x69\x64\x1a\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x1a\x04\x76\x61\x6c\x31\x1a\x04\x76\x61\x6c\x32\x1a\x04\x76\x61\x6c\x33\x20\x00\x28\x00\x30\x00\x22\x08\x08\x04\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x05\x20\x00\x28\x00\x30\x00\x22\x08\x08\x01\x20\x00\x28\x00\x30\x00\x30\x01\x3a\x04\x08\x01\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x3a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x40\x90\x4e\x48\x01\x08\xd5\x01\x10\x00\x18\x80\x80\x04\x22\x02\x00\x0b\x28\x5b\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18", ], - 
'expected':r'''{"raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception."}''', - 'printable':False, - } + "expected": r"""{"raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. 
Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception."}""", + "printable": False, + }, } - topic_name_prefix = 'format_tests_4_stream_' + topic_name_prefix = "format_tests_4_stream_" for format_name, format_opts in list(all_formats.items()): - logging.debug(f'Set up {format_name}') + logging.debug(f"Set up {format_name}") topic_name = f"{topic_name_prefix}{format_name}" - data_sample = format_opts['data_sample'] + data_sample = format_opts["data_sample"] data_prefix = [] - raw_message = '_raw_message' + raw_message = "_raw_message" # prepend empty value when supported - if format_opts.get('supports_empty_value', False): - data_prefix = data_prefix + [''] - if format_opts.get('printable', False) == False: - raw_message = 'hex(_raw_message)' + if format_opts.get("supports_empty_value", False): + data_prefix = data_prefix + [""] + if format_opts.get("printable", False) == False: + raw_message = "hex(_raw_message)" kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.kafka_{format_name}; CREATE TABLE test.kafka_{format_name} ( @@ -3057,16 +3607,27 @@ def test_kafka_formats_with_broken_message(kafka_cluster): CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv Engine=Log AS SELECT {raw_message} as raw_message, _error as error, _topic as topic, _partition as partition, _offset as offset FROM test.kafka_{format_name} WHERE length(_error) > 0; - '''.format(topic_name=topic_name, format_name=format_name, raw_message=raw_message, - extra_settings=format_opts.get('extra_settings') or '')) + """.format( + topic_name=topic_name, + format_name=format_name, + raw_message=raw_message, + extra_settings=format_opts.get("extra_settings") or "", + ) + ) for format_name, format_opts in list(all_formats.items()): - logging.debug('Checking {format_name}') + logging.debug("Checking {format_name}") topic_name = f"{topic_name_prefix}{format_name}" # shift offsets by 1 if format supports empty value - offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2] - result = instance.query('SELECT * FROM test.kafka_data_{format_name}_mv;'.format(format_name=format_name)) - expected = '''\ + offsets = ( + [1, 2, 3] if format_opts.get("supports_empty_value", False) else [0, 1, 2] + ) + result = instance.query( + "SELECT * FROM test.kafka_data_{format_name}_mv;".format( + format_name=format_name + ) + ) + expected = """\ 0 0 AM 0.5 1 {topic_name} 0 {offset_0} 1 0 AM 0.5 1 {topic_name} 0 {offset_1} 2 0 AM 0.5 1 {topic_name} 0 {offset_1} @@ -3084,19 +3645,37 @@ def test_kafka_formats_with_broken_message(kafka_cluster): 14 0 AM 0.5 1 {topic_name} 0 {offset_1} 15 0 AM 0.5 1 {topic_name} 0 {offset_1} 0 0 AM 0.5 1 {topic_name} 0 {offset_2} -'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2]) +""".format( + topic_name=topic_name, + offset_0=offsets[0], + offset_1=offsets[1], + offset_2=offsets[2], + ) # print(('Checking result\n {result} \n expected \n {expected}\n'.format(result=str(result), expected=str(expected)))) - assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name) - errors_result = ast.literal_eval(instance.query('SELECT raw_message, error FROM test.kafka_errors_{format_name}_mv format JSONEachRow'.format(format_name=format_name))) - errors_expected = ast.literal_eval(format_opts['expected']) + assert TSV(result) == TSV(expected), "Proper result for format: 
{}".format( + format_name + ) + errors_result = ast.literal_eval( + instance.query( + "SELECT raw_message, error FROM test.kafka_errors_{format_name}_mv format JSONEachRow".format( + format_name=format_name + ) + ) + ) + errors_expected = ast.literal_eval(format_opts["expected"]) # print(errors_result.strip()) # print(errors_expected.strip()) - assert errors_result['raw_message'] == errors_expected['raw_message'], 'Proper raw_message for format: {}'.format(format_name) + assert ( + errors_result["raw_message"] == errors_expected["raw_message"] + ), "Proper raw_message for format: {}".format(format_name) # Errors text can change, just checking prefixes - assert errors_expected['error'] in errors_result['error'], 'Proper error for format: {}'.format(format_name) + assert ( + errors_expected["error"] in errors_result["error"] + ), "Proper error for format: {}".format(format_name) kafka_delete_topic(admin_client, topic_name) -def wait_for_new_data(table_name, prev_count = 0, max_retries = 120): + +def wait_for_new_data(table_name, prev_count=0, max_retries=120): retries = 0 while True: new_count = int(instance.query("SELECT count() FROM {}".format(table_name))) @@ -3109,15 +3688,19 @@ def wait_for_new_data(table_name, prev_count = 0, max_retries = 120): if retries > max_retries: raise Exception("No new data :(") + def test_kafka_consumer_failover(kafka_cluster): # for backporting: # admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092") - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) topic_name = "kafka_consumer_failover" kafka_create_topic(admin_client, topic_name, num_partitions=2) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.kafka; DROP TABLE IF EXISTS test.kafka2; @@ -3167,85 +3750,157 @@ def test_kafka_consumer_failover(kafka_cluster): CREATE MATERIALIZED VIEW test.kafka3_mv TO test.destination AS SELECT key, value, 'kafka3' as _consumed_by FROM test.kafka3; - ''') + """ + ) - - producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer) + producer = KafkaProducer( + bootstrap_servers="localhost:{}".format(cluster.kafka_port), + value_serializer=producer_serializer, + key_serializer=producer_serializer, + ) ## all 3 attached, 2 working - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':1,'value': 1}), partition=0) - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':1,'value': 1}), partition=1) + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 1, "value": 1}), + partition=0, + ) + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 1, "value": 1}), + partition=1, + ) producer.flush() - prev_count = wait_for_new_data('test.destination') + prev_count = wait_for_new_data("test.destination") ## 2 attached, 2 working - instance.query('DETACH TABLE test.kafka') - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':2,'value': 2}), partition=0) - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':2,'value': 2}), partition=1) + instance.query("DETACH TABLE test.kafka") + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 2, "value": 2}), + partition=0, + ) + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 2, "value": 
2}), + partition=1, + ) producer.flush() - prev_count = wait_for_new_data('test.destination', prev_count) + prev_count = wait_for_new_data("test.destination", prev_count) ## 1 attached, 1 working - instance.query('DETACH TABLE test.kafka2') - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':3,'value': 3}), partition=0) - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':3,'value': 3}), partition=1) + instance.query("DETACH TABLE test.kafka2") + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 3, "value": 3}), + partition=0, + ) + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 3, "value": 3}), + partition=1, + ) producer.flush() - prev_count = wait_for_new_data('test.destination', prev_count) + prev_count = wait_for_new_data("test.destination", prev_count) ## 2 attached, 2 working - instance.query('ATTACH TABLE test.kafka') - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':4,'value': 4}), partition=0) - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':4,'value': 4}), partition=1) + instance.query("ATTACH TABLE test.kafka") + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 4, "value": 4}), + partition=0, + ) + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 4, "value": 4}), + partition=1, + ) producer.flush() - prev_count = wait_for_new_data('test.destination', prev_count) + prev_count = wait_for_new_data("test.destination", prev_count) ## 1 attached, 1 working - instance.query('DETACH TABLE test.kafka3') - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':5,'value': 5}), partition=0) - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':5,'value': 5}), partition=1) + instance.query("DETACH TABLE test.kafka3") + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 5, "value": 5}), + partition=0, + ) + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 5, "value": 5}), + partition=1, + ) producer.flush() - prev_count = wait_for_new_data('test.destination', prev_count) + prev_count = wait_for_new_data("test.destination", prev_count) ## 2 attached, 2 working - instance.query('ATTACH TABLE test.kafka2') - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':6,'value': 6}), partition=0) - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':6,'value': 6}), partition=1) + instance.query("ATTACH TABLE test.kafka2") + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 6, "value": 6}), + partition=0, + ) + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 6, "value": 6}), + partition=1, + ) producer.flush() - prev_count = wait_for_new_data('test.destination', prev_count) + prev_count = wait_for_new_data("test.destination", prev_count) ## 3 attached, 2 working - instance.query('ATTACH TABLE test.kafka3') - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':7,'value': 7}), partition=0) - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':7,'value': 7}), partition=1) + instance.query("ATTACH TABLE test.kafka3") + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 7, "value": 7}), + partition=0, + ) + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 7, "value": 7}), + partition=1, + ) producer.flush() - prev_count = 
wait_for_new_data('test.destination', prev_count) + prev_count = wait_for_new_data("test.destination", prev_count) ## 2 attached, same 2 working - instance.query('DETACH TABLE test.kafka3') - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':8,'value': 8}), partition=0) - producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':8,'value': 8}), partition=1) + instance.query("DETACH TABLE test.kafka3") + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 8, "value": 8}), + partition=0, + ) + producer.send( + topic="kafka_consumer_failover", + value=json.dumps({"key": 8, "value": 8}), + partition=1, + ) producer.flush() - prev_count = wait_for_new_data('test.destination', prev_count) + prev_count = wait_for_new_data("test.destination", prev_count) kafka_delete_topic(admin_client, topic_name) def test_kafka_predefined_configuration(kafka_cluster): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) - topic_name = 'conf' + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + topic_name = "conf" kafka_create_topic(admin_client, topic_name) messages = [] for i in range(50): - messages.append('{i}, {i}'.format(i=i)) + messages.append("{i}, {i}".format(i=i)) kafka_produce(kafka_cluster, topic_name, messages) - instance.query(f''' + instance.query( + f""" CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka(kafka1, kafka_format='CSV'); - ''') + """ + ) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.kafka', ignore_error=True) + result += instance.query("SELECT * FROM test.kafka", ignore_error=True) if kafka_check_result(result): break kafka_check_result(result, True) @@ -3256,33 +3911,40 @@ def test_issue26643(kafka_cluster): # for backporting: # admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092") - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) - producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer) + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + producer = KafkaProducer( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), + value_serializer=producer_serializer, + ) topic_list = [] - topic_list.append(NewTopic(name="test_issue26643", num_partitions=4, replication_factor=1)) + topic_list.append( + NewTopic(name="test_issue26643", num_partitions=4, replication_factor=1) + ) admin_client.create_topics(new_topics=topic_list, validate_only=False) msg = message_with_repeated_pb2.Message( tnow=1629000000, - server='server1', - clien='host1', + server="server1", + clien="host1", sPort=443, cPort=50000, r=[ - message_with_repeated_pb2.dd(name='1', type=444, ttl=123123, data=b'adsfasd'), - message_with_repeated_pb2.dd(name='2') + message_with_repeated_pb2.dd( + name="1", type=444, ttl=123123, data=b"adsfasd" + ), + message_with_repeated_pb2.dd(name="2"), ], - method='GET' + method="GET", ) - data = b'' + data = b"" serialized_msg = msg.SerializeToString() data = data + _VarintBytes(len(serialized_msg)) + serialized_msg - msg = message_with_repeated_pb2.Message( - tnow=1629000002 - ) + msg = message_with_repeated_pb2.Message(tnow=1629000002) serialized_msg = msg.SerializeToString() data = data + _VarintBytes(len(serialized_msg)) + serialized_msg @@ -3293,7 +3955,8 @@ def 
test_issue26643(kafka_cluster): producer.send(topic="test_issue26643", value=data) producer.flush() - instance.query(''' + instance.query( + """ CREATE TABLE IF NOT EXISTS test.test_queue ( `tnow` UInt32, @@ -3354,22 +4017,23 @@ def test_issue26643(kafka_cluster): a.`r.data` AS `r.data`, a.method AS method FROM test.test_queue AS a; - ''') + """ + ) instance.wait_for_log_line("Committed offset") - result = instance.query('SELECT * FROM test.log') + result = instance.query("SELECT * FROM test.log") - expected = '''\ + expected = """\ 2021-08-15 07:00:00 server1 443 50000 ['1','2'] [0,0] [444,0] [123123,0] ['adsfasd',''] GET 2021-08-15 07:00:02 0 0 [] [] [] [] [] 2021-08-15 07:00:02 0 0 [] [] [] [] [] -''' +""" assert TSV(result) == TSV(expected) # kafka_cluster.open_bash_shell('instance') -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_storage_kerberized_hdfs/test.py b/tests/integration/test_storage_kerberized_hdfs/test.py index d06f971557b..fb00403b952 100644 --- a/tests/integration/test_storage_kerberized_hdfs/test.py +++ b/tests/integration/test_storage_kerberized_hdfs/test.py @@ -7,7 +7,13 @@ from helpers.cluster import ClickHouseCluster import subprocess cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_kerberized_hdfs=True, user_configs=[], main_configs=['configs/hdfs.xml']) +node1 = cluster.add_instance( + "node1", + with_kerberized_hdfs=True, + user_configs=[], + main_configs=["configs/hdfs.xml"], +) + @pytest.fixture(scope="module") def started_cluster(): @@ -32,13 +38,18 @@ def test_read_table(started_cluster): api_read = hdfs_api.read_data("/simple_table_function") assert api_read == data - select_read = node1.query("select * from hdfs('hdfs://kerberizedhdfs1:9010/simple_table_function', 'TSV', 'id UInt64, text String, number Float64')") + select_read = node1.query( + "select * from hdfs('hdfs://kerberizedhdfs1:9010/simple_table_function', 'TSV', 'id UInt64, text String, number Float64')" + ) assert select_read == data + def test_read_write_storage(started_cluster): hdfs_api = started_cluster.hdfs_api - node1.query("create table SimpleHDFSStorage2 (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://kerberizedhdfs1:9010/simple_storage1', 'TSV')") + node1.query( + "create table SimpleHDFSStorage2 (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://kerberizedhdfs1:9010/simple_storage1', 'TSV')" + ) node1.query("insert into SimpleHDFSStorage2 values (1, 'Mark', 72.53)") api_read = hdfs_api.read_data("/simple_storage1") @@ -47,12 +58,15 @@ def test_read_write_storage(started_cluster): select_read = node1.query("select * from SimpleHDFSStorage2") assert select_read == "1\tMark\t72.53\n" + def test_write_storage_not_expired(started_cluster): hdfs_api = started_cluster.hdfs_api - node1.query("create table SimpleHDFSStorageNotExpired (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://kerberizedhdfs1:9010/simple_storage_not_expired', 'TSV')") + node1.query( + "create table SimpleHDFSStorageNotExpired (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://kerberizedhdfs1:9010/simple_storage_not_expired', 'TSV')" + ) - time.sleep(15) # wait for ticket expiration + time.sleep(15) # wait for ticket expiration node1.query("insert into SimpleHDFSStorageNotExpired values (1, 'Mark', 72.53)") api_read = hdfs_api.read_data("/simple_storage_not_expired") @@ -61,18 +75,28 @@ def 
test_write_storage_not_expired(started_cluster): select_read = node1.query("select * from SimpleHDFSStorageNotExpired") assert select_read == "1\tMark\t72.53\n" + def test_two_users(started_cluster): hdfs_api = started_cluster.hdfs_api - node1.query("create table HDFSStorOne (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://kerberizedhdfs1:9010/storage_user_one', 'TSV')") + node1.query( + "create table HDFSStorOne (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://kerberizedhdfs1:9010/storage_user_one', 'TSV')" + ) node1.query("insert into HDFSStorOne values (1, 'Real', 86.00)") - node1.query("create table HDFSStorTwo (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://suser@kerberizedhdfs1:9010/user/specuser/storage_user_two', 'TSV')") + node1.query( + "create table HDFSStorTwo (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://suser@kerberizedhdfs1:9010/user/specuser/storage_user_two', 'TSV')" + ) node1.query("insert into HDFSStorTwo values (1, 'Ideal', 74.00)") - select_read_1 = node1.query("select * from hdfs('hdfs://kerberizedhdfs1:9010/user/specuser/storage_user_two', 'TSV', 'id UInt64, text String, number Float64')") + select_read_1 = node1.query( + "select * from hdfs('hdfs://kerberizedhdfs1:9010/user/specuser/storage_user_two', 'TSV', 'id UInt64, text String, number Float64')" + ) + + select_read_2 = node1.query( + "select * from hdfs('hdfs://suser@kerberizedhdfs1:9010/storage_user_one', 'TSV', 'id UInt64, text String, number Float64')" + ) - select_read_2 = node1.query("select * from hdfs('hdfs://suser@kerberizedhdfs1:9010/storage_user_one', 'TSV', 'id UInt64, text String, number Float64')") def test_read_table_expired(started_cluster): hdfs_api = started_cluster.hdfs_api @@ -80,35 +104,49 @@ def test_read_table_expired(started_cluster): data = "1\tSerialize\t555.222\n2\tData\t777.333\n" hdfs_api.write_data("/simple_table_function_relogin", data) - started_cluster.pause_container('hdfskerberos') + started_cluster.pause_container("hdfskerberos") time.sleep(15) try: - select_read = node1.query("select * from hdfs('hdfs://reloginuser&kerberizedhdfs1:9010/simple_table_function', 'TSV', 'id UInt64, text String, number Float64')") + select_read = node1.query( + "select * from hdfs('hdfs://reloginuser&kerberizedhdfs1:9010/simple_table_function', 'TSV', 'id UInt64, text String, number Float64')" + ) assert False, "Exception have to be thrown" except Exception as ex: assert "DB::Exception: kinit failure:" in str(ex) - started_cluster.unpause_container('hdfskerberos') + started_cluster.unpause_container("hdfskerberos") + def test_prohibited(started_cluster): - node1.query("create table HDFSStorTwoProhibited (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://suser@kerberizedhdfs1:9010/storage_user_two_prohibited', 'TSV')") + node1.query( + "create table HDFSStorTwoProhibited (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://suser@kerberizedhdfs1:9010/storage_user_two_prohibited', 'TSV')" + ) try: node1.query("insert into HDFSStorTwoProhibited values (1, 'SomeOne', 74.00)") assert False, "Exception have to be thrown" except Exception as ex: - assert "Unable to open HDFS file: /storage_user_two_prohibited error: Permission denied: user=specuser, access=WRITE" in str(ex) + assert ( + "Unable to open HDFS file: /storage_user_two_prohibited error: Permission denied: user=specuser, access=WRITE" + in str(ex) + ) + def test_cache_path(started_cluster): - node1.query("create table HDFSStorCachePath (id UInt32, name String, 
weight Float64) ENGINE = HDFS('hdfs://dedicatedcachepath@kerberizedhdfs1:9010/storage_dedicated_cache_path', 'TSV')") + node1.query( + "create table HDFSStorCachePath (id UInt32, name String, weight Float64) ENGINE = HDFS('hdfs://dedicatedcachepath@kerberizedhdfs1:9010/storage_dedicated_cache_path', 'TSV')" + ) try: node1.query("insert into HDFSStorCachePath values (1, 'FatMark', 92.53)") assert False, "Exception have to be thrown" except Exception as ex: - assert "DB::Exception: hadoop.security.kerberos.ticket.cache.path cannot be set per user" in str(ex) + assert ( + "DB::Exception: hadoop.security.kerberos.ticket.cache.path cannot be set per user" + in str(ex) + ) -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_storage_kerberized_kafka/test.py b/tests/integration/test_storage_kerberized_kafka/test.py index 567a9b7184d..f4aea059c05 100644 --- a/tests/integration/test_storage_kerberized_kafka/test.py +++ b/tests/integration/test_storage_kerberized_kafka/test.py @@ -20,20 +20,27 @@ from kafka.protocol.group import MemberAssignment import socket cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', - main_configs=['configs/kafka.xml'], - user_configs=['configs/users.xml'], - with_kerberized_kafka=True, - clickhouse_path_dir="clickhouse_path") +instance = cluster.add_instance( + "instance", + main_configs=["configs/kafka.xml"], + user_configs=["configs/users.xml"], + with_kerberized_kafka=True, + clickhouse_path_dir="clickhouse_path", +) + def producer_serializer(x): return x.encode() if isinstance(x, str) else x + def get_kafka_producer(port, serializer): errors = [] for _ in range(15): try: - producer = KafkaProducer(bootstrap_servers="localhost:{}".format(port), value_serializer=serializer) + producer = KafkaProducer( + bootstrap_servers="localhost:{}".format(port), + value_serializer=serializer, + ) logging.debug("Kafka Connection establised: localhost:{}".format(port)) return producer except Exception as e: @@ -42,9 +49,16 @@ def get_kafka_producer(port, serializer): raise Exception("Connection not establised, {}".format(errors)) + def kafka_produce(kafka_cluster, topic, messages, timestamp=None): - logging.debug("kafka_produce server:{}:{} topic:{}".format("localhost", kafka_cluster.kerberized_kafka_port, topic)) - producer = get_kafka_producer(kafka_cluster.kerberized_kafka_port, producer_serializer) + logging.debug( + "kafka_produce server:{}:{} topic:{}".format( + "localhost", kafka_cluster.kerberized_kafka_port, topic + ) + ) + producer = get_kafka_producer( + kafka_cluster.kerberized_kafka_port, producer_serializer + ) for message in messages: producer.send(topic=topic, value=message, timestamp_ms=timestamp) producer.flush() @@ -52,13 +66,16 @@ def kafka_produce(kafka_cluster, topic, messages, timestamp=None): # Fixtures + @pytest.fixture(scope="module") def kafka_cluster(): try: cluster.start() if instance.is_debug_build(): # https://github.com/ClickHouse/ClickHouse/issues/27651 - pytest.skip("librdkafka calls system function for kinit which does not pass harmful check in debug build") + pytest.skip( + "librdkafka calls system function for kinit which does not pass harmful check in debug build" + ) yield cluster finally: cluster.shutdown() @@ -66,15 +83,27 @@ def kafka_cluster(): @pytest.fixture(autouse=True) def kafka_setup_teardown(): - instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;') + 
instance.query("DROP DATABASE IF EXISTS test; CREATE DATABASE test;") yield # run test + # Tests -def test_kafka_json_as_string(kafka_cluster): - kafka_produce(kafka_cluster, 'kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }', '{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}']) - instance.query(''' +def test_kafka_json_as_string(kafka_cluster): + kafka_produce( + kafka_cluster, + "kafka_json_as_string", + [ + '{"t": 123, "e": {"x": "woof"} }', + "", + '{"t": 124, "e": {"x": "test"} }', + '{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}', + ], + ) + + instance.query( + """ CREATE TABLE test.kafka (field String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kerberized_kafka1:19092', @@ -83,24 +112,29 @@ def test_kafka_json_as_string(kafka_cluster): kafka_group_name = 'kafka_json_as_string', kafka_format = 'JSONAsString', kafka_flush_interval_ms=1000; - ''') + """ + ) time.sleep(3) - result = instance.query('SELECT * FROM test.kafka;') - expected = '''\ + result = instance.query("SELECT * FROM test.kafka;") + expected = """\ {"t": 123, "e": {"x": "woof"} } {"t": 124, "e": {"x": "test"} } {"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"} -''' +""" assert TSV(result) == TSV(expected) - assert instance.contains_in_log("Parsing of message (topic: kafka_json_as_string, partition: 0, offset: 1) return no rows") + assert instance.contains_in_log( + "Parsing of message (topic: kafka_json_as_string, partition: 0, offset: 1) return no rows" + ) + def test_kafka_json_as_string_no_kdc(kafka_cluster): # When the test is run alone (not preceded by any other kerberized kafka test), # we need a ticket to # assert instance.contains_in_log("Ticket expired") - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka_no_kdc_warm_up (field String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kerberized_kafka1:19092', @@ -109,16 +143,27 @@ def test_kafka_json_as_string_no_kdc(kafka_cluster): kafka_commit_on_select = 1, kafka_format = 'JSONAsString', kafka_flush_interval_ms=1000; - ''') + """ + ) - instance.query('SELECT * FROM test.kafka_no_kdc_warm_up;') + instance.query("SELECT * FROM test.kafka_no_kdc_warm_up;") - kafka_produce(kafka_cluster, 'kafka_json_as_string_no_kdc', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }', '{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}']) + kafka_produce( + kafka_cluster, + "kafka_json_as_string_no_kdc", + [ + '{"t": 123, "e": {"x": "woof"} }', + "", + '{"t": 124, "e": {"x": "test"} }', + '{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}', + ], + ) - kafka_cluster.pause_container('kafka_kerberos') - time.sleep(45) # wait for ticket expiration + kafka_cluster.pause_container("kafka_kerberos") + time.sleep(45) # wait for ticket expiration - instance.query(''' + instance.query( + """ CREATE TABLE test.kafka_no_kdc (field String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kerberized_kafka1:19092', @@ -127,13 +172,13 @@ def test_kafka_json_as_string_no_kdc(kafka_cluster): kafka_commit_on_select = 1, kafka_format = 'JSONAsString', kafka_flush_interval_ms=1000; - ''') + """ + ) - result = instance.query('SELECT * FROM test.kafka_no_kdc;') - expected = '' - - kafka_cluster.unpause_container('kafka_kerberos') + result = instance.query("SELECT * FROM test.kafka_no_kdc;") + expected = "" + 
kafka_cluster.unpause_container("kafka_kerberos") assert TSV(result) == TSV(expected) assert instance.contains_in_log("StorageKafka (kafka_no_kdc): Nothing to commit") @@ -141,7 +186,7 @@ def test_kafka_json_as_string_no_kdc(kafka_cluster): assert instance.contains_in_log("Kerberos ticket refresh failed") -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_storage_mongodb/test.py b/tests/integration/test_storage_mongodb/test.py index 67b5b42b1ec..76713ea7f3b 100644 --- a/tests/integration/test_storage_mongodb/test.py +++ b/tests/integration/test_storage_mongodb/test.py @@ -10,10 +10,15 @@ from helpers.cluster import ClickHouseCluster def started_cluster(request): try: cluster = ClickHouseCluster(__file__) - node = cluster.add_instance('node', - main_configs=["configs_secure/config.d/ssl_conf.xml", "configs/named_collections.xml"], - with_mongo=True, - with_mongo_secure=request.param) + node = cluster.add_instance( + "node", + main_configs=[ + "configs_secure/config.d/ssl_conf.xml", + "configs/named_collections.xml", + ], + with_mongo=True, + with_mongo_secure=request.param, + ) cluster.start() yield cluster finally: @@ -21,76 +26,95 @@ def started_cluster(request): def get_mongo_connection(started_cluster, secure=False, with_credentials=True): - connection_str = '' + connection_str = "" if with_credentials: - connection_str = 'mongodb://root:clickhouse@localhost:{}'.format(started_cluster.mongo_port) + connection_str = "mongodb://root:clickhouse@localhost:{}".format( + started_cluster.mongo_port + ) else: - connection_str = 'mongodb://localhost:{}'.format(started_cluster.mongo_no_cred_port) + connection_str = "mongodb://localhost:{}".format( + started_cluster.mongo_no_cred_port + ) if secure: - connection_str += '/?tls=true&tlsAllowInvalidCertificates=true' + connection_str += "/?tls=true&tlsAllowInvalidCertificates=true" return pymongo.MongoClient(connection_str) -@pytest.mark.parametrize('started_cluster', [False], indirect=['started_cluster']) +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_simple_select(started_cluster): mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection['test'] - db.add_user('root', 'clickhouse') - simple_mongo_table = db['simple_table'] + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] data = [] for i in range(0, 100): - data.append({'key': i, 'data': hex(i * i)}) + data.append({"key": i, "data": hex(i * i)}) simple_mongo_table.insert_many(data) - node = started_cluster.instances['node'] + node = started_cluster.instances["node"] node.query( - "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')") + "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')" + ) - assert node.query("SELECT COUNT() FROM simple_mongo_table") == '100\n' - assert node.query("SELECT sum(key) FROM simple_mongo_table") == str(sum(range(0, 100))) + '\n' + assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" + assert ( + node.query("SELECT sum(key) FROM simple_mongo_table") + == str(sum(range(0, 100))) + "\n" + ) - assert node.query("SELECT data from simple_mongo_table where key = 42") == hex(42 * 42) + '\n' + assert ( + node.query("SELECT data from 
simple_mongo_table where key = 42") + == hex(42 * 42) + "\n" + ) node.query("DROP TABLE simple_mongo_table") simple_mongo_table.drop() -@pytest.mark.parametrize('started_cluster', [False], indirect=['started_cluster']) +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_complex_data_type(started_cluster): mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection['test'] - db.add_user('root', 'clickhouse') - incomplete_mongo_table = db['complex_table'] + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + incomplete_mongo_table = db["complex_table"] data = [] for i in range(0, 100): - data.append({'key': i, 'data': hex(i * i), 'dict': {'a': i, 'b': str(i)}}) + data.append({"key": i, "data": hex(i * i), "dict": {"a": i, "b": str(i)}}) incomplete_mongo_table.insert_many(data) - node = started_cluster.instances['node'] + node = started_cluster.instances["node"] node.query( - "CREATE TABLE incomplete_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse')") + "CREATE TABLE incomplete_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse')" + ) - assert node.query("SELECT COUNT() FROM incomplete_mongo_table") == '100\n' - assert node.query("SELECT sum(key) FROM incomplete_mongo_table") == str(sum(range(0, 100))) + '\n' + assert node.query("SELECT COUNT() FROM incomplete_mongo_table") == "100\n" + assert ( + node.query("SELECT sum(key) FROM incomplete_mongo_table") + == str(sum(range(0, 100))) + "\n" + ) - assert node.query("SELECT data from incomplete_mongo_table where key = 42") == hex(42 * 42) + '\n' + assert ( + node.query("SELECT data from incomplete_mongo_table where key = 42") + == hex(42 * 42) + "\n" + ) node.query("DROP TABLE incomplete_mongo_table") incomplete_mongo_table.drop() -@pytest.mark.parametrize('started_cluster', [False], indirect=['started_cluster']) +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_incorrect_data_type(started_cluster): mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection['test'] - db.add_user('root', 'clickhouse') - strange_mongo_table = db['strange_table'] + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + strange_mongo_table = db["strange_table"] data = [] for i in range(0, 100): - data.append({'key': i, 'data': hex(i * i), 'aaaa': 'Hello'}) + data.append({"key": i, "data": hex(i * i), "aaaa": "Hello"}) strange_mongo_table.insert_many(data) - node = started_cluster.instances['node'] + node = started_cluster.instances["node"] node.query( - "CREATE TABLE strange_mongo_table(key String, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')") + "CREATE TABLE strange_mongo_table(key String, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')" + ) with pytest.raises(QueryRuntimeException): node.query("SELECT COUNT() FROM strange_mongo_table") @@ -99,7 +123,8 @@ def test_incorrect_data_type(started_cluster): node.query("SELECT uniq(key) FROM strange_mongo_table") node.query( - "CREATE TABLE strange_mongo_table2(key UInt64, data String, bbbb String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')") + "CREATE TABLE strange_mongo_table2(key UInt64, data String, bbbb String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')" + ) with 
pytest.raises(QueryRuntimeException): node.query("SELECT bbbb FROM strange_mongo_table2") @@ -107,79 +132,102 @@ def test_incorrect_data_type(started_cluster): node.query("DROP TABLE strange_mongo_table2") strange_mongo_table.drop() -@pytest.mark.parametrize('started_cluster', [True], indirect=['started_cluster']) + +@pytest.mark.parametrize("started_cluster", [True], indirect=["started_cluster"]) def test_secure_connection(started_cluster): mongo_connection = get_mongo_connection(started_cluster, secure=True) - db = mongo_connection['test'] - db.add_user('root', 'clickhouse') - simple_mongo_table = db['simple_table'] + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] data = [] for i in range(0, 100): - data.append({'key': i, 'data': hex(i * i)}) + data.append({"key": i, "data": hex(i * i)}) simple_mongo_table.insert_many(data) - node = started_cluster.instances['node'] + node = started_cluster.instances["node"] node.query( - "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'ssl=true')") + "CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'ssl=true')" + ) - assert node.query("SELECT COUNT() FROM simple_mongo_table") == '100\n' - assert node.query("SELECT sum(key) FROM simple_mongo_table") == str(sum(range(0, 100))) + '\n' + assert node.query("SELECT COUNT() FROM simple_mongo_table") == "100\n" + assert ( + node.query("SELECT sum(key) FROM simple_mongo_table") + == str(sum(range(0, 100))) + "\n" + ) - assert node.query("SELECT data from simple_mongo_table where key = 42") == hex(42 * 42) + '\n' + assert ( + node.query("SELECT data from simple_mongo_table where key = 42") + == hex(42 * 42) + "\n" + ) node.query("DROP TABLE simple_mongo_table") simple_mongo_table.drop() -@pytest.mark.parametrize('started_cluster', [False], indirect=['started_cluster']) + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_predefined_connection_configuration(started_cluster): mongo_connection = get_mongo_connection(started_cluster) - db = mongo_connection['test'] - db.add_user('root', 'clickhouse') - simple_mongo_table = db['simple_table'] + db = mongo_connection["test"] + db.add_user("root", "clickhouse") + simple_mongo_table = db["simple_table"] data = [] for i in range(0, 100): - data.append({'key': i, 'data': hex(i * i)}) + data.append({"key": i, "data": hex(i * i)}) simple_mongo_table.insert_many(data) - node = started_cluster.instances['node'] - node.query("create table simple_mongo_table(key UInt64, data String) engine = MongoDB(mongo1)") - assert node.query("SELECT count() FROM simple_mongo_table") == '100\n' + node = started_cluster.instances["node"] + node.query( + "create table simple_mongo_table(key UInt64, data String) engine = MongoDB(mongo1)" + ) + assert node.query("SELECT count() FROM simple_mongo_table") == "100\n" simple_mongo_table.drop() -@pytest.mark.parametrize('started_cluster', [False], indirect=['started_cluster']) + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_no_credentials(started_cluster): mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) - db = mongo_connection['test'] - simple_mongo_table = db['simple_table'] + db = mongo_connection["test"] + simple_mongo_table = db["simple_table"] data = [] for i in range(0, 100): - 
data.append({'key': i, 'data': hex(i * i)}) + data.append({"key": i, "data": hex(i * i)}) simple_mongo_table.insert_many(data) - node = started_cluster.instances['node'] - node.query("create table simple_mongo_table_2(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', '', '')") - assert node.query("SELECT count() FROM simple_mongo_table_2") == '100\n' + node = started_cluster.instances["node"] + node.query( + "create table simple_mongo_table_2(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', '', '')" + ) + assert node.query("SELECT count() FROM simple_mongo_table_2") == "100\n" simple_mongo_table.drop() -@pytest.mark.parametrize('started_cluster', [False], indirect=['started_cluster']) + +@pytest.mark.parametrize("started_cluster", [False], indirect=["started_cluster"]) def test_auth_source(started_cluster): mongo_connection = get_mongo_connection(started_cluster, with_credentials=False) - admin_db = mongo_connection['admin'] - admin_db.add_user('root', 'clickhouse', roles=[{ 'role': "userAdminAnyDatabase", 'db': "admin" }, "readWriteAnyDatabase"]) - simple_mongo_table = admin_db['simple_table'] + admin_db = mongo_connection["admin"] + admin_db.add_user( + "root", + "clickhouse", + roles=[{"role": "userAdminAnyDatabase", "db": "admin"}, "readWriteAnyDatabase"], + ) + simple_mongo_table = admin_db["simple_table"] data = [] for i in range(0, 50): - data.append({'key': i, 'data': hex(i * i)}) + data.append({"key": i, "data": hex(i * i)}) simple_mongo_table.insert_many(data) - db = mongo_connection['test'] - simple_mongo_table = db['simple_table'] + db = mongo_connection["test"] + simple_mongo_table = db["simple_table"] data = [] for i in range(0, 100): - data.append({'key': i, 'data': hex(i * i)}) + data.append({"key": i, "data": hex(i * i)}) simple_mongo_table.insert_many(data) - node = started_cluster.instances['node'] - node.query("create table simple_mongo_table_fail(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse')") + node = started_cluster.instances["node"] + node.query( + "create table simple_mongo_table_fail(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse')" + ) node.query_and_get_error("SELECT count() FROM simple_mongo_table_fail") - node.query("create table simple_mongo_table_ok(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', 'authSource=admin')") - assert node.query("SELECT count() FROM simple_mongo_table_ok") == '100\n' + node.query( + "create table simple_mongo_table_ok(key UInt64, data String) engine = MongoDB('mongo2:27017', 'test', 'simple_table', 'root', 'clickhouse', 'authSource=admin')" + ) + assert node.query("SELECT count() FROM simple_mongo_table_ok") == "100\n" simple_mongo_table.drop() diff --git a/tests/integration/test_storage_mysql/test.py b/tests/integration/test_storage_mysql/test.py index 713a8793f48..34ef17327f9 100644 --- a/tests/integration/test_storage_mysql/test.py +++ b/tests/integration/test_storage_mysql/test.py @@ -10,9 +10,20 @@ from helpers.client import QueryRuntimeException cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/named_collections.xml'], with_mysql=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_mysql_cluster=True) -node3 = cluster.add_instance('node3', 
main_configs=['configs/remote_servers.xml'], user_configs=['configs/users.xml'], with_mysql=True) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml", "configs/named_collections.xml"], + with_mysql=True, +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_mysql_cluster=True +) +node3 = cluster.add_instance( + "node3", + main_configs=["configs/remote_servers.xml"], + user_configs=["configs/users.xml"], + with_mysql=True, +) create_table_sql_template = """ CREATE TABLE `clickhouse`.`{}` ( @@ -28,18 +39,24 @@ drop_table_sql_template = """ DROP TABLE IF EXISTS `clickhouse`.`{}`; """ + def get_mysql_conn(started_cluster, host): - conn = pymysql.connect(user='root', password='clickhouse', host=host, port=started_cluster.mysql_port) + conn = pymysql.connect( + user="root", password="clickhouse", host=host, port=started_cluster.mysql_port + ) return conn + def create_mysql_table(conn, tableName): with conn.cursor() as cursor: cursor.execute(create_table_sql_template.format(tableName)) + def drop_mysql_table(conn, tableName): with conn.cursor() as cursor: cursor.execute(drop_table_sql_template.format(tableName)) + def create_mysql_db(conn, name): with conn.cursor() as cursor: cursor.execute("DROP DATABASE IF EXISTS {}".format(name)) @@ -52,11 +69,11 @@ def started_cluster(): cluster.start() conn = get_mysql_conn(cluster, cluster.mysql_ip) - create_mysql_db(conn, 'clickhouse') + create_mysql_db(conn, "clickhouse") ## create mysql db and table conn1 = get_mysql_conn(cluster, cluster.mysql2_ip) - create_mysql_db(conn1, 'clickhouse') + create_mysql_db(conn1, "clickhouse") yield cluster finally: @@ -64,274 +81,424 @@ def started_cluster(): def test_many_connections(started_cluster): - table_name = 'test_many_connections' - node1.query(f'DROP TABLE IF EXISTS {table_name}') + table_name = "test_many_connections" + node1.query(f"DROP TABLE IF EXISTS {table_name}") conn = get_mysql_conn(started_cluster, cluster.mysql_ip) drop_mysql_table(conn, table_name) create_mysql_table(conn, table_name) - node1.query(''' + node1.query( + """ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse'); -'''.format(table_name, table_name)) +""".format( + table_name, table_name + ) + ) - node1.query("INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format(table_name)) + node1.query( + "INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format( + table_name + ) + ) query = "SELECT count() FROM (" - for i in range (24): + for i in range(24): query += "SELECT id FROM {t} UNION ALL " query += "SELECT id FROM {t})" - assert node1.query(query.format(t=table_name)) == '250\n' + assert node1.query(query.format(t=table_name)) == "250\n" drop_mysql_table(conn, table_name) conn.close() def test_insert_select(started_cluster): - table_name = 'test_insert_select' - node1.query(f'DROP TABLE IF EXISTS {table_name}') + table_name = "test_insert_select" + node1.query(f"DROP TABLE IF EXISTS {table_name}") conn = get_mysql_conn(started_cluster, cluster.mysql_ip) drop_mysql_table(conn, table_name) create_mysql_table(conn, table_name) - - node1.query(''' + node1.query( + """ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse'); -'''.format(table_name, table_name)) +""".format( + table_name, table_name + ) + ) 
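# --- Illustrative sketch (not part of the patch): seeding the MySQL side of such a test with
# pymysql before pointing a ClickHouse MySQL() table at it. The credentials and the column
# set are assumptions mirroring the fixtures above, not the repository's helpers.
import pymysql

def seed_mysql_table(host, port, table_name, rows=10):
    conn = pymysql.connect(
        user="root", password="clickhouse", host=host, port=port, autocommit=True
    )
    with conn.cursor() as cursor:
        cursor.execute("CREATE DATABASE IF NOT EXISTS clickhouse")
        cursor.execute(
            f"CREATE TABLE IF NOT EXISTS clickhouse.`{table_name}` "
            "(id INT PRIMARY KEY, name VARCHAR(50), age INT, money INT)"
        )
        for i in range(rows):
            cursor.execute(
                f"INSERT INTO clickhouse.`{table_name}` (id, name, age, money) VALUES (%s, %s, %s, %s)",
                (i, f"name_{i}", 0, 0),
            )
    conn.close()

# seed_mysql_table("127.0.0.1", 3306, "test_insert_select")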
node1.query( "INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format( - table_name)) - assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == '10000' - assert node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == '30000' + table_name + ) + ) + assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == "10000" + assert ( + node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == "30000" + ) conn.close() def test_replace_select(started_cluster): - table_name = 'test_replace_select' - node1.query(f'DROP TABLE IF EXISTS {table_name}') + table_name = "test_replace_select" + node1.query(f"DROP TABLE IF EXISTS {table_name}") conn = get_mysql_conn(started_cluster, cluster.mysql_ip) drop_mysql_table(conn, table_name) create_mysql_table(conn, table_name) - node1.query(''' + node1.query( + """ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse', 1); -'''.format(table_name, table_name)) +""".format( + table_name, table_name + ) + ) node1.query( "INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format( - table_name)) + table_name + ) + ) node1.query( "INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format( - table_name)) - assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == '10000' - assert node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == '30000' + table_name + ) + ) + assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == "10000" + assert ( + node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == "30000" + ) conn.close() def test_insert_on_duplicate_select(started_cluster): - table_name = 'test_insert_on_duplicate_select' - node1.query(f'DROP TABLE IF EXISTS {table_name}') + table_name = "test_insert_on_duplicate_select" + node1.query(f"DROP TABLE IF EXISTS {table_name}") conn = get_mysql_conn(started_cluster, cluster.mysql_ip) drop_mysql_table(conn, table_name) create_mysql_table(conn, table_name) - node1.query(''' + node1.query( + """ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse', 0, 'update money = money + values(money)'); -'''.format(table_name, table_name)) +""".format( + table_name, table_name + ) + ) node1.query( "INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format( - table_name)) + table_name + ) + ) node1.query( "INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format( - table_name)) - assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == '10000' - assert node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == '60000' + table_name + ) + ) + assert node1.query("SELECT count() FROM {}".format(table_name)).rstrip() == "10000" + assert ( + node1.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == "60000" + ) conn.close() def test_where(started_cluster): - table_name = 'test_where' - node1.query(f'DROP TABLE IF EXISTS {table_name}') + table_name = "test_where" + node1.query(f"DROP TABLE IF EXISTS {table_name}") conn = get_mysql_conn(started_cluster, cluster.mysql_ip) drop_mysql_table(conn, table_name) create_mysql_table(conn, 
table_name) - node1.query(''' + node1.query( + """ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse'); -'''.format(table_name, table_name)) +""".format( + table_name, table_name + ) + ) node1.query( "INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000) ".format( - table_name)) - assert node1.query("SELECT count() FROM {} WHERE name LIKE '%name_%'".format(table_name)).rstrip() == '10000' - assert node1.query("SELECT count() FROM {} WHERE name NOT LIKE '%tmp_%'".format(table_name)).rstrip() == '10000' - assert node1.query("SELECT count() FROM {} WHERE money IN (1, 2, 3)".format(table_name)).rstrip() == '10000' - assert node1.query("SELECT count() FROM {} WHERE money IN (1, 2, 4, 5, 6)".format(table_name)).rstrip() == '0' - assert node1.query( - "SELECT count() FROM {} WHERE money NOT IN (1, 2, 4, 5, 6)".format(table_name)).rstrip() == '10000' - assert node1.query( - "SELECT count() FROM {} WHERE name LIKE concat('name_', toString(1))".format(table_name)).rstrip() == '1' + table_name + ) + ) + assert ( + node1.query( + "SELECT count() FROM {} WHERE name LIKE '%name_%'".format(table_name) + ).rstrip() + == "10000" + ) + assert ( + node1.query( + "SELECT count() FROM {} WHERE name NOT LIKE '%tmp_%'".format(table_name) + ).rstrip() + == "10000" + ) + assert ( + node1.query( + "SELECT count() FROM {} WHERE money IN (1, 2, 3)".format(table_name) + ).rstrip() + == "10000" + ) + assert ( + node1.query( + "SELECT count() FROM {} WHERE money IN (1, 2, 4, 5, 6)".format(table_name) + ).rstrip() + == "0" + ) + assert ( + node1.query( + "SELECT count() FROM {} WHERE money NOT IN (1, 2, 4, 5, 6)".format( + table_name + ) + ).rstrip() + == "10000" + ) + assert ( + node1.query( + "SELECT count() FROM {} WHERE name LIKE concat('name_', toString(1))".format( + table_name + ) + ).rstrip() + == "1" + ) conn.close() def test_table_function(started_cluster): conn = get_mysql_conn(started_cluster, cluster.mysql_ip) - drop_mysql_table(conn, 'table_function') - create_mysql_table(conn, 'table_function') - table_function = "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format('table_function') - assert node1.query("SELECT count() FROM {}".format(table_function)).rstrip() == '0' + drop_mysql_table(conn, "table_function") + create_mysql_table(conn, "table_function") + table_function = ( + "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format( + "table_function" + ) + ) + assert node1.query("SELECT count() FROM {}".format(table_function)).rstrip() == "0" node1.query( "INSERT INTO {} (id, name, money) select number, concat('name_', toString(number)), 3 from numbers(10000)".format( - 'TABLE FUNCTION ' + table_function)) - assert node1.query("SELECT count() FROM {}".format(table_function)).rstrip() == '10000' - assert node1.query("SELECT sum(c) FROM (" - "SELECT count() as c FROM {} WHERE id % 3 == 0" - " UNION ALL SELECT count() as c FROM {} WHERE id % 3 == 1" - " UNION ALL SELECT count() as c FROM {} WHERE id % 3 == 2)".format(table_function, - table_function, - table_function)).rstrip() == '10000' - assert node1.query("SELECT sum(`money`) FROM {}".format(table_function)).rstrip() == '30000' - node1.query("INSERT INTO {} (id, name, age, money) SELECT id + 100000, name, age, money FROM {}".format( - 'TABLE FUNCTION ' + table_function, table_function)) - assert node1.query("SELECT sum(`money`) FROM {}".format(table_function)).rstrip() == '60000' + 
"TABLE FUNCTION " + table_function + ) + ) + assert ( + node1.query("SELECT count() FROM {}".format(table_function)).rstrip() == "10000" + ) + assert ( + node1.query( + "SELECT sum(c) FROM (" + "SELECT count() as c FROM {} WHERE id % 3 == 0" + " UNION ALL SELECT count() as c FROM {} WHERE id % 3 == 1" + " UNION ALL SELECT count() as c FROM {} WHERE id % 3 == 2)".format( + table_function, table_function, table_function + ) + ).rstrip() + == "10000" + ) + assert ( + node1.query("SELECT sum(`money`) FROM {}".format(table_function)).rstrip() + == "30000" + ) + node1.query( + "INSERT INTO {} (id, name, age, money) SELECT id + 100000, name, age, money FROM {}".format( + "TABLE FUNCTION " + table_function, table_function + ) + ) + assert ( + node1.query("SELECT sum(`money`) FROM {}".format(table_function)).rstrip() + == "60000" + ) conn.close() def test_binary_type(started_cluster): conn = get_mysql_conn(started_cluster, cluster.mysql_ip) - drop_mysql_table(conn, 'binary_type') + drop_mysql_table(conn, "binary_type") with conn.cursor() as cursor: - cursor.execute("CREATE TABLE clickhouse.binary_type (id INT PRIMARY KEY, data BINARY(16) NOT NULL)") - table_function = "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format('binary_type') - node1.query("INSERT INTO {} VALUES (42, 'clickhouse')".format('TABLE FUNCTION ' + table_function)) - assert node1.query("SELECT * FROM {}".format(table_function)) == '42\tclickhouse\\0\\0\\0\\0\\0\\0\n' + cursor.execute( + "CREATE TABLE clickhouse.binary_type (id INT PRIMARY KEY, data BINARY(16) NOT NULL)" + ) + table_function = ( + "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format( + "binary_type" + ) + ) + node1.query( + "INSERT INTO {} VALUES (42, 'clickhouse')".format( + "TABLE FUNCTION " + table_function + ) + ) + assert ( + node1.query("SELECT * FROM {}".format(table_function)) + == "42\tclickhouse\\0\\0\\0\\0\\0\\0\n" + ) def test_enum_type(started_cluster): - table_name = 'test_enum_type' - node1.query(f'DROP TABLE IF EXISTS {table_name}') + table_name = "test_enum_type" + node1.query(f"DROP TABLE IF EXISTS {table_name}") conn = get_mysql_conn(started_cluster, cluster.mysql_ip) drop_mysql_table(conn, table_name) create_mysql_table(conn, table_name) - node1.query(''' + node1.query( + """ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, source Enum8('IP' = 1, 'URL' = 2)) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse', 1); -'''.format(table_name, table_name)) - node1.query("INSERT INTO {} (id, name, age, money, source) VALUES (1, 'name', 0, 0, 'URL')".format(table_name)) - assert node1.query("SELECT source FROM {} LIMIT 1".format(table_name)).rstrip() == 'URL' +""".format( + table_name, table_name + ) + ) + node1.query( + "INSERT INTO {} (id, name, age, money, source) VALUES (1, 'name', 0, 0, 'URL')".format( + table_name + ) + ) + assert ( + node1.query("SELECT source FROM {} LIMIT 1".format(table_name)).rstrip() + == "URL" + ) conn.close() - def test_mysql_distributed(started_cluster): - table_name = 'test_replicas' + table_name = "test_replicas" conn1 = get_mysql_conn(started_cluster, started_cluster.mysql_ip) conn2 = get_mysql_conn(started_cluster, started_cluster.mysql2_ip) conn3 = get_mysql_conn(started_cluster, started_cluster.mysql3_ip) conn4 = get_mysql_conn(started_cluster, started_cluster.mysql4_ip) - create_mysql_db(conn1, 'clickhouse') - create_mysql_db(conn2, 'clickhouse') - create_mysql_db(conn3, 'clickhouse') - create_mysql_db(conn4, 'clickhouse') + 
create_mysql_db(conn1, "clickhouse") + create_mysql_db(conn2, "clickhouse") + create_mysql_db(conn3, "clickhouse") + create_mysql_db(conn4, "clickhouse") create_mysql_table(conn1, table_name) create_mysql_table(conn2, table_name) create_mysql_table(conn3, table_name) create_mysql_table(conn4, table_name) - node2.query('DROP TABLE IF EXISTS test_replicas') + node2.query("DROP TABLE IF EXISTS test_replicas") # Storage with with 3 replicas - node2.query(''' + node2.query( + """ CREATE TABLE test_replicas (id UInt32, name String, age UInt32, money UInt32) - ENGINE = MySQL('mysql{2|3|4}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''') + ENGINE = MySQL('mysql{2|3|4}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); """ + ) # Fill remote tables with different data to be able to check nodes = [node1, node2, node2, node2] for i in range(1, 5): - nodes[i-1].query('DROP TABLE IF EXISTS test_replica{}'.format(i)) - nodes[i-1].query(''' + nodes[i - 1].query("DROP TABLE IF EXISTS test_replica{}".format(i)) + nodes[i - 1].query( + """ CREATE TABLE test_replica{} (id UInt32, name String, age UInt32, money UInt32) - ENGINE = MySQL('mysql{}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse');'''.format(i, 57 if i==1 else i)) - nodes[i-1].query("INSERT INTO test_replica{} (id, name) SELECT number, 'host{}' from numbers(10) ".format(i, i)) + ENGINE = MySQL('mysql{}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse');""".format( + i, 57 if i == 1 else i + ) + ) + nodes[i - 1].query( + "INSERT INTO test_replica{} (id, name) SELECT number, 'host{}' from numbers(10) ".format( + i, i + ) + ) # test multiple ports parsing - result = node2.query('''SELECT DISTINCT(name) FROM mysql('mysql{57|2|3}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''') - assert(result == 'host1\n' or result == 'host2\n' or result == 'host3\n') - result = node2.query('''SELECT DISTINCT(name) FROM mysql('mysql57:3306|mysql2:3306|mysql3:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''') - assert(result == 'host1\n' or result == 'host2\n' or result == 'host3\n') + result = node2.query( + """SELECT DISTINCT(name) FROM mysql('mysql{57|2|3}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); """ + ) + assert result == "host1\n" or result == "host2\n" or result == "host3\n" + result = node2.query( + """SELECT DISTINCT(name) FROM mysql('mysql57:3306|mysql2:3306|mysql3:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); """ + ) + assert result == "host1\n" or result == "host2\n" or result == "host3\n" # check all replicas are traversed query = "SELECT * FROM (" - for i in range (3): + for i in range(3): query += "SELECT name FROM test_replicas UNION DISTINCT " query += "SELECT name FROM test_replicas)" result = node2.query(query) - assert(result == 'host2\nhost3\nhost4\n') + assert result == "host2\nhost3\nhost4\n" # Storage with with two shards, each has 2 replicas - node2.query('DROP TABLE IF EXISTS test_shards') + node2.query("DROP TABLE IF EXISTS test_shards") - node2.query(''' + node2.query( + """ CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) - ENGINE = ExternalDistributed('MySQL', 'mysql{57|2}:3306,mysql{3|4}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); ''') + ENGINE = ExternalDistributed('MySQL', 'mysql{57|2}:3306,mysql{3|4}:3306', 'clickhouse', 'test_replicas', 'root', 'clickhouse'); """ + ) # Check only one replica in each shard is used result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY 
name") - assert(result == 'host1\nhost3\n') + assert result == "host1\nhost3\n" # check all replicas are traversed query = "SELECT name FROM (" - for i in range (3): + for i in range(3): query += "SELECT name FROM test_shards UNION DISTINCT " query += "SELECT name FROM test_shards) ORDER BY name" result = node2.query(query) - assert(result == 'host1\nhost2\nhost3\nhost4\n') + assert result == "host1\nhost2\nhost3\nhost4\n" # disconnect mysql57 - started_cluster.pause_container('mysql57') + started_cluster.pause_container("mysql57") result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY name") - started_cluster.unpause_container('mysql57') - assert(result == 'host2\nhost4\n' or result == 'host3\nhost4\n') + started_cluster.unpause_container("mysql57") + assert result == "host2\nhost4\n" or result == "host3\nhost4\n" def test_external_settings(started_cluster): - table_name = 'test_external_settings' - node1.query(f'DROP TABLE IF EXISTS {table_name}') + table_name = "test_external_settings" + node1.query(f"DROP TABLE IF EXISTS {table_name}") conn = get_mysql_conn(started_cluster, started_cluster.mysql_ip) drop_mysql_table(conn, table_name) create_mysql_table(conn, table_name) - node3.query(f'DROP TABLE IF EXISTS {table_name}') - node3.query(''' + node3.query(f"DROP TABLE IF EXISTS {table_name}") + node3.query( + """ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse'); -'''.format(table_name, table_name)) +""".format( + table_name, table_name + ) + ) node3.query( "INSERT INTO {}(id, name, money) select number, concat('name_', toString(number)), 3 from numbers(100) ".format( - table_name)) - assert node3.query("SELECT count() FROM {}".format(table_name)).rstrip() == '100' - assert node3.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == '300' - node3.query("select value from system.settings where name = 'max_block_size' FORMAT TSV") == "2\n" - node3.query("select value from system.settings where name = 'external_storage_max_read_rows' FORMAT TSV") == "0\n" - assert node3.query("SELECT COUNT(DISTINCT blockNumber()) FROM {} FORMAT TSV".format(table_name)) == '50\n' + table_name + ) + ) + assert node3.query("SELECT count() FROM {}".format(table_name)).rstrip() == "100" + assert node3.query("SELECT sum(money) FROM {}".format(table_name)).rstrip() == "300" + node3.query( + "select value from system.settings where name = 'max_block_size' FORMAT TSV" + ) == "2\n" + node3.query( + "select value from system.settings where name = 'external_storage_max_read_rows' FORMAT TSV" + ) == "0\n" + assert ( + node3.query( + "SELECT COUNT(DISTINCT blockNumber()) FROM {} FORMAT TSV".format(table_name) + ) + == "50\n" + ) conn.close() def test_settings_connection_wait_timeout(started_cluster): - table_name = 'test_settings_connection_wait_timeout' - node1.query(f'DROP TABLE IF EXISTS {table_name}') + table_name = "test_settings_connection_wait_timeout" + node1.query(f"DROP TABLE IF EXISTS {table_name}") wait_timeout = 2 conn = get_mysql_conn(started_cluster, cluster.mysql_ip) drop_mysql_table(conn, table_name) create_mysql_table(conn, table_name) - node1.query(''' + node1.query( + """ CREATE TABLE {} ( id UInt32, @@ -341,10 +508,16 @@ def test_settings_connection_wait_timeout(started_cluster): ) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse') SETTINGS connection_wait_timeout={}, connection_pool_size=1 - '''.format(table_name, table_name, wait_timeout) + """.format( + 
table_name, table_name, wait_timeout + ) ) - node1.query("INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format(table_name)) + node1.query( + "INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format( + table_name + ) + ) def worker(): node1.query("SELECT sleepEachRow(1) FROM {}".format(table_name)) @@ -356,7 +529,10 @@ def test_settings_connection_wait_timeout(started_cluster): time.sleep(1) started = time.time() - with pytest.raises(QueryRuntimeException, match=r"Exception: mysqlxx::Pool is full \(connection_wait_timeout is exceeded\)"): + with pytest.raises( + QueryRuntimeException, + match=r"Exception: mysqlxx::Pool is full \(connection_wait_timeout is exceeded\)", + ): node1.query("SELECT sleepEachRow(1) FROM {}".format(table_name)) ended = time.time() assert (ended - started) >= wait_timeout @@ -369,70 +545,98 @@ def test_settings_connection_wait_timeout(started_cluster): def test_predefined_connection_configuration(started_cluster): conn = get_mysql_conn(started_cluster, started_cluster.mysql_ip) - table_name = 'test_table' + table_name = "test_table" drop_mysql_table(conn, table_name) create_mysql_table(conn, table_name) - node1.query(''' + node1.query( + """ DROP TABLE IF EXISTS test_table; CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32) ENGINE MySQL(mysql1); - ''') - node1.query("INSERT INTO test_table (id, name, money) select number, toString(number), number from numbers(100)") - assert (node1.query(f"SELECT count() FROM test_table").rstrip() == '100') + """ + ) + node1.query( + "INSERT INTO test_table (id, name, money) select number, toString(number), number from numbers(100)" + ) + assert node1.query(f"SELECT count() FROM test_table").rstrip() == "100" - node1.query(''' + node1.query( + """ DROP TABLE IF EXISTS test_table; CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32) ENGINE MySQL(mysql1, replace_query=1); - ''') - node1.query("INSERT INTO test_table (id, name, money) select number, toString(number), number from numbers(100)") - node1.query("INSERT INTO test_table (id, name, money) select number, toString(number), number from numbers(100)") - assert (node1.query(f"SELECT count() FROM test_table").rstrip() == '100') + """ + ) + node1.query( + "INSERT INTO test_table (id, name, money) select number, toString(number), number from numbers(100)" + ) + node1.query( + "INSERT INTO test_table (id, name, money) select number, toString(number), number from numbers(100)" + ) + assert node1.query(f"SELECT count() FROM test_table").rstrip() == "100" - node1.query_and_get_error(''' + node1.query_and_get_error( + """ DROP TABLE IF EXISTS test_table; CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32) ENGINE MySQL(mysql1, query=1); - ''') - node1.query_and_get_error(''' + """ + ) + node1.query_and_get_error( + """ DROP TABLE IF EXISTS test_table; CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32) ENGINE MySQL(mysql1, replace_query=1, on_duplicate_clause='kek'); - ''') - node1.query_and_get_error(''' + """ + ) + node1.query_and_get_error( + """ DROP TABLE IF EXISTS test_table; CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32) ENGINE MySQL(fff); - ''') - node1.query_and_get_error(''' + """ + ) + node1.query_and_get_error( + """ DROP TABLE IF EXISTS test_table; CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32) ENGINE MySQL(mysql2); - ''') + """ + ) - 
node1.query(''' + node1.query( + """ DROP TABLE IF EXISTS test_table; CREATE TABLE test_table (id UInt32, name String, age UInt32, money UInt32) ENGINE MySQL(mysql3, port=3306); - ''') - assert (node1.query(f"SELECT count() FROM test_table").rstrip() == '100') + """ + ) + assert node1.query(f"SELECT count() FROM test_table").rstrip() == "100" - assert 'Connection pool cannot have zero size' in node1.query_and_get_error("SELECT count() FROM mysql(mysql1, table='test_table', connection_pool_size=0)") - assert 'Connection pool cannot have zero size' in node1.query_and_get_error("SELECT count() FROM mysql(mysql4)") - assert int(node1.query("SELECT count() FROM mysql(mysql4, connection_pool_size=1)")) == 100 + assert "Connection pool cannot have zero size" in node1.query_and_get_error( + "SELECT count() FROM mysql(mysql1, table='test_table', connection_pool_size=0)" + ) + assert "Connection pool cannot have zero size" in node1.query_and_get_error( + "SELECT count() FROM mysql(mysql4)" + ) + assert ( + int(node1.query("SELECT count() FROM mysql(mysql4, connection_pool_size=1)")) + == 100 + ) # Regression for (k, v) IN ((k, v)) def test_mysql_in(started_cluster): - table_name = 'test_mysql_in' - node1.query(f'DROP TABLE IF EXISTS {table_name}') + table_name = "test_mysql_in" + node1.query(f"DROP TABLE IF EXISTS {table_name}") conn = get_mysql_conn(started_cluster, cluster.mysql_ip) drop_mysql_table(conn, table_name) create_mysql_table(conn, table_name) - node1.query(''' + node1.query( + """ CREATE TABLE {} ( id UInt32, @@ -441,52 +645,94 @@ def test_mysql_in(started_cluster): money UInt32 ) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse') - '''.format(table_name, table_name) + """.format( + table_name, table_name + ) ) - node1.query("INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format(table_name)) + node1.query( + "INSERT INTO {} (id, name) SELECT number, concat('name_', toString(number)) from numbers(10) ".format( + table_name + ) + ) node1.query("SELECT * FROM {} WHERE (id) IN (1)".format(table_name)) node1.query("SELECT * FROM {} WHERE (id) IN (1, 2)".format(table_name)) - node1.query("SELECT * FROM {} WHERE (id, name) IN ((1, 'name_1'))".format(table_name)) - node1.query("SELECT * FROM {} WHERE (id, name) IN ((1, 'name_1'),(1, 'name_1'))".format(table_name)) + node1.query( + "SELECT * FROM {} WHERE (id, name) IN ((1, 'name_1'))".format(table_name) + ) + node1.query( + "SELECT * FROM {} WHERE (id, name) IN ((1, 'name_1'),(1, 'name_1'))".format( + table_name + ) + ) drop_mysql_table(conn, table_name) conn.close() + def test_mysql_null(started_cluster): - table_name = 'test_mysql_in' - node1.query(f'DROP TABLE IF EXISTS {table_name}') + table_name = "test_mysql_in" + node1.query(f"DROP TABLE IF EXISTS {table_name}") conn = get_mysql_conn(started_cluster, cluster.mysql_ip) drop_mysql_table(conn, table_name) with conn.cursor() as cursor: - cursor.execute(""" + cursor.execute( + """ CREATE TABLE `clickhouse`.`{}` ( `id` int(11) NOT NULL, `money` int NULL default NULL, PRIMARY KEY (`id`)) ENGINE=InnoDB; - """.format(table_name)) + """.format( + table_name + ) + ) - node1.query(''' + node1.query( + """ CREATE TABLE {} ( id UInt32, money Nullable(UInt32) ) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse') - '''.format(table_name, table_name) + """.format( + table_name, table_name + ) ) - node1.query("INSERT INTO {} (id, money) SELECT number, if(number%2, NULL, 1) from numbers(10) ".format(table_name)) + 
node1.query( + "INSERT INTO {} (id, money) SELECT number, if(number%2, NULL, 1) from numbers(10) ".format( + table_name + ) + ) - assert int(node1.query("SELECT count() FROM {} WHERE money IS NULL SETTINGS external_table_strict_query=1".format(table_name))) == 5 - assert int(node1.query("SELECT count() FROM {} WHERE money IS NOT NULL SETTINGS external_table_strict_query=1".format(table_name))) == 5 + assert ( + int( + node1.query( + "SELECT count() FROM {} WHERE money IS NULL SETTINGS external_table_strict_query=1".format( + table_name + ) + ) + ) + == 5 + ) + assert ( + int( + node1.query( + "SELECT count() FROM {} WHERE money IS NOT NULL SETTINGS external_table_strict_query=1".format( + table_name + ) + ) + ) + == 5 + ) drop_mysql_table(conn, table_name) conn.close() -if __name__ == '__main__': +if __name__ == "__main__": with contextmanager(started_cluster)() as cluster: for name, instance in list(cluster.instances.items()): print(name, instance.ip_address) diff --git a/tests/integration/test_storage_postgresql/test.py b/tests/integration/test_storage_postgresql/test.py index 79b4bfc89e1..8366ca5dc25 100644 --- a/tests/integration/test_storage_postgresql/test.py +++ b/tests/integration/test_storage_postgresql/test.py @@ -6,8 +6,12 @@ from helpers.cluster import ClickHouseCluster from helpers.postgres_utility import get_postgres_conn cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/named_collections.xml'], with_postgres=True) -node2 = cluster.add_instance('node2', main_configs=['configs/named_collections.xml'], with_postgres_cluster=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/named_collections.xml"], with_postgres=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/named_collections.xml"], with_postgres_cluster=True +) @pytest.fixture(scope="module") @@ -20,6 +24,7 @@ def started_cluster(): finally: cluster.shutdown() + @pytest.fixture(autouse=True) def setup_teardown(): print("PostgreSQL is available - running test") @@ -27,56 +32,73 @@ def setup_teardown(): node1.query("DROP DATABASE test") node1.query("CREATE DATABASE test") + def test_postgres_select_insert(started_cluster): cursor = started_cluster.postgres_conn.cursor() - table_name = 'test_many' - table = f'''postgresql('{started_cluster.postgres_ip}:{started_cluster.postgres_port}', 'postgres', '{table_name}', 'postgres', 'mysecretpassword')''' - cursor.execute(f'DROP TABLE IF EXISTS {table_name}') - cursor.execute(f'CREATE TABLE {table_name} (a integer, b text, c integer)') + table_name = "test_many" + table = f"""postgresql('{started_cluster.postgres_ip}:{started_cluster.postgres_port}', 'postgres', '{table_name}', 'postgres', 'mysecretpassword')""" + cursor.execute(f"DROP TABLE IF EXISTS {table_name}") + cursor.execute(f"CREATE TABLE {table_name} (a integer, b text, c integer)") - result = node1.query(f''' + result = node1.query( + f""" INSERT INTO TABLE FUNCTION {table} - SELECT number, concat('name_', toString(number)), 3 from numbers(10000)''') + SELECT number, concat('name_', toString(number)), 3 from numbers(10000)""" + ) check1 = f"SELECT count() FROM {table}" check2 = f"SELECT Sum(c) FROM {table}" check3 = f"SELECT count(c) FROM {table} WHERE a % 2 == 0" check4 = f"SELECT count() FROM {table} WHERE b LIKE concat('name_', toString(1))" - assert (node1.query(check1)).rstrip() == '10000' - assert (node1.query(check2)).rstrip() == '30000' - assert (node1.query(check3)).rstrip() == '5000' - assert (node1.query(check4)).rstrip() 
== '1' + assert (node1.query(check1)).rstrip() == "10000" + assert (node1.query(check2)).rstrip() == "30000" + assert (node1.query(check3)).rstrip() == "5000" + assert (node1.query(check4)).rstrip() == "1" # Triggers issue https://github.com/ClickHouse/ClickHouse/issues/26088 # for i in range(1, 1000): # assert (node1.query(check1)).rstrip() == '10000', f"Failed on {i}" - cursor.execute(f'DROP TABLE {table_name} ') + cursor.execute(f"DROP TABLE {table_name} ") def test_postgres_conversions(started_cluster): cursor = started_cluster.postgres_conn.cursor() - cursor.execute(f'DROP TABLE IF EXISTS test_types') - cursor.execute(f'DROP TABLE IF EXISTS test_array_dimensions') + cursor.execute(f"DROP TABLE IF EXISTS test_types") + cursor.execute(f"DROP TABLE IF EXISTS test_array_dimensions") cursor.execute( - '''CREATE TABLE test_types ( + """CREATE TABLE test_types ( a smallint, b integer, c bigint, d real, e double precision, f serial, g bigserial, - h timestamp, i date, j decimal(5, 3), k numeric, l boolean)''') - node1.query(''' + h timestamp, i date, j decimal(5, 3), k numeric, l boolean)""" + ) + node1.query( + """ INSERT INTO TABLE FUNCTION postgresql('postgres1:5432', 'postgres', 'test_types', 'postgres', 'mysecretpassword') VALUES - (-32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 2147483647, 9223372036854775807, '2000-05-12 12:12:12.012345', '2000-05-12', 22.222, 22.222, 1)''') - result = node1.query(''' - SELECT a, b, c, d, e, f, g, h, i, j, toDecimal128(k, 3), l FROM postgresql('postgres1:5432', 'postgres', 'test_types', 'postgres', 'mysecretpassword')''') - assert(result == '-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12.012345\t2000-05-12\t22.222\t22.222\t1\n') - - cursor.execute("INSERT INTO test_types (l) VALUES (TRUE), (true), ('yes'), ('y'), ('1');") - cursor.execute("INSERT INTO test_types (l) VALUES (FALSE), (false), ('no'), ('off'), ('0');") - expected = "1\n1\n1\n1\n1\n1\n0\n0\n0\n0\n0\n" - result = node1.query('''SELECT l FROM postgresql('postgres1:5432', 'postgres', 'test_types', 'postgres', 'mysecretpassword')''') - assert(result == expected) + (-32768, -2147483648, -9223372036854775808, 1.12345, 1.1234567890, 2147483647, 9223372036854775807, '2000-05-12 12:12:12.012345', '2000-05-12', 22.222, 22.222, 1)""" + ) + result = node1.query( + """ + SELECT a, b, c, d, e, f, g, h, i, j, toDecimal128(k, 3), l FROM postgresql('postgres1:5432', 'postgres', 'test_types', 'postgres', 'mysecretpassword')""" + ) + assert ( + result + == "-32768\t-2147483648\t-9223372036854775808\t1.12345\t1.123456789\t2147483647\t9223372036854775807\t2000-05-12 12:12:12.012345\t2000-05-12\t22.222\t22.222\t1\n" + ) cursor.execute( - '''CREATE TABLE IF NOT EXISTS test_array_dimensions + "INSERT INTO test_types (l) VALUES (TRUE), (true), ('yes'), ('y'), ('1');" + ) + cursor.execute( + "INSERT INTO test_types (l) VALUES (FALSE), (false), ('no'), ('off'), ('0');" + ) + expected = "1\n1\n1\n1\n1\n1\n0\n0\n0\n0\n0\n" + result = node1.query( + """SELECT l FROM postgresql('postgres1:5432', 'postgres', 'test_types', 'postgres', 'mysecretpassword')""" + ) + assert result == expected + + cursor.execute( + """CREATE TABLE IF NOT EXISTS test_array_dimensions ( a Date[] NOT NULL, -- Date b Timestamp[] NOT NULL, -- DateTime64(6) @@ -88,24 +110,29 @@ def test_postgres_conversions(started_cluster): h Integer[][][], -- Nullable(Int32) i Char(2)[][][][], -- Nullable(String) k Char(2)[] -- Nullable(String) - )''') + )""" + ) - result = 
node1.query(''' - DESCRIBE TABLE postgresql('postgres1:5432', 'postgres', 'test_array_dimensions', 'postgres', 'mysecretpassword')''') - expected = ('a\tArray(Date)\t\t\t\t\t\n' + - 'b\tArray(DateTime64(6))\t\t\t\t\t\n' + - 'c\tArray(Array(Float32))\t\t\t\t\t\n' + - 'd\tArray(Array(Float64))\t\t\t\t\t\n' + - 'e\tArray(Array(Array(Decimal(5, 5))))\t\t\t\t\t\n' + - 'f\tArray(Array(Array(Int32)))\t\t\t\t\t\n' + - 'g\tArray(Array(Array(Array(Array(String)))))\t\t\t\t\t\n' + - 'h\tArray(Array(Array(Nullable(Int32))))\t\t\t\t\t\n' + - 'i\tArray(Array(Array(Array(Nullable(String)))))\t\t\t\t\t\n' + - 'k\tArray(Nullable(String))' - ) - assert(result.rstrip() == expected) + result = node1.query( + """ + DESCRIBE TABLE postgresql('postgres1:5432', 'postgres', 'test_array_dimensions', 'postgres', 'mysecretpassword')""" + ) + expected = ( + "a\tArray(Date)\t\t\t\t\t\n" + + "b\tArray(DateTime64(6))\t\t\t\t\t\n" + + "c\tArray(Array(Float32))\t\t\t\t\t\n" + + "d\tArray(Array(Float64))\t\t\t\t\t\n" + + "e\tArray(Array(Array(Decimal(5, 5))))\t\t\t\t\t\n" + + "f\tArray(Array(Array(Int32)))\t\t\t\t\t\n" + + "g\tArray(Array(Array(Array(Array(String)))))\t\t\t\t\t\n" + + "h\tArray(Array(Array(Nullable(Int32))))\t\t\t\t\t\n" + + "i\tArray(Array(Array(Array(Nullable(String)))))\t\t\t\t\t\n" + + "k\tArray(Nullable(String))" + ) + assert result.rstrip() == expected - node1.query("INSERT INTO TABLE FUNCTION postgresql('postgres1:5432', 'postgres', 'test_array_dimensions', 'postgres', 'mysecretpassword') " + node1.query( + "INSERT INTO TABLE FUNCTION postgresql('postgres1:5432', 'postgres', 'test_array_dimensions', 'postgres', 'mysecretpassword') " "VALUES (" "['2000-05-12', '2000-05-12'], " "['2000-05-12 12:12:12.012345', '2000-05-12 12:12:12.012345'], " @@ -117,135 +144,179 @@ def test_postgres_conversions(started_cluster): "[[[1, NULL], [NULL, 1]], [[NULL, NULL], [NULL, NULL]], [[4, 4], [5, 5]]], " "[[[[NULL]]]], " "[]" - ")") + ")" + ) - result = node1.query(''' - SELECT * FROM postgresql('postgres1:5432', 'postgres', 'test_array_dimensions', 'postgres', 'mysecretpassword')''') + result = node1.query( + """ + SELECT * FROM postgresql('postgres1:5432', 'postgres', 'test_array_dimensions', 'postgres', 'mysecretpassword')""" + ) expected = ( - "['2000-05-12','2000-05-12']\t" + - "['2000-05-12 12:12:12.012345','2000-05-12 12:12:12.012345']\t" + - "[[1.12345],[1.12345],[1.12345]]\t" + - "[[1.1234567891],[1.1234567891],[1.1234567891]]\t" + - "[[[0.11111,0.11111]],[[0.22222,0.22222]],[[0.33333,0.33333]]]\t" + "['2000-05-12','2000-05-12']\t" + + "['2000-05-12 12:12:12.012345','2000-05-12 12:12:12.012345']\t" + + "[[1.12345],[1.12345],[1.12345]]\t" + + "[[1.1234567891],[1.1234567891],[1.1234567891]]\t" + + "[[[0.11111,0.11111]],[[0.22222,0.22222]],[[0.33333,0.33333]]]\t" "[[[1,1],[1,1]],[[3,3],[3,3]],[[4,4],[5,5]]]\t" "[[[[['winx','winx','winx']]]]]\t" "[[[1,NULL],[NULL,1]],[[NULL,NULL],[NULL,NULL]],[[4,4],[5,5]]]\t" "[[[[NULL]]]]\t" "[]\n" - ) - assert(result == expected) + ) + assert result == expected - cursor.execute(f'DROP TABLE test_types') - cursor.execute(f'DROP TABLE test_array_dimensions') + cursor.execute(f"DROP TABLE test_types") + cursor.execute(f"DROP TABLE test_array_dimensions") def test_non_default_scema(started_cluster): - node1.query('DROP TABLE IF EXISTS test_pg_table_schema') - node1.query('DROP TABLE IF EXISTS test_pg_table_schema_with_dots') + node1.query("DROP TABLE IF EXISTS test_pg_table_schema") + node1.query("DROP TABLE IF EXISTS test_pg_table_schema_with_dots") cursor = 
started_cluster.postgres_conn.cursor() - cursor.execute('DROP SCHEMA IF EXISTS test_schema CASCADE') + cursor.execute("DROP SCHEMA IF EXISTS test_schema CASCADE") cursor.execute('DROP SCHEMA IF EXISTS "test.nice.schema" CASCADE') - cursor.execute('CREATE SCHEMA test_schema') - cursor.execute('CREATE TABLE test_schema.test_table (a integer)') - cursor.execute('INSERT INTO test_schema.test_table SELECT i FROM generate_series(0, 99) as t(i)') + cursor.execute("CREATE SCHEMA test_schema") + cursor.execute("CREATE TABLE test_schema.test_table (a integer)") + cursor.execute( + "INSERT INTO test_schema.test_table SELECT i FROM generate_series(0, 99) as t(i)" + ) - node1.query(''' + node1.query( + """ CREATE TABLE test.test_pg_table_schema (a UInt32) ENGINE PostgreSQL('postgres1:5432', 'postgres', 'test_table', 'postgres', 'mysecretpassword', 'test_schema'); - ''') + """ + ) - result = node1.query('SELECT * FROM test.test_pg_table_schema') - expected = node1.query('SELECT number FROM numbers(100)') - assert(result == expected) + result = node1.query("SELECT * FROM test.test_pg_table_schema") + expected = node1.query("SELECT number FROM numbers(100)") + assert result == expected - table_function = '''postgresql('postgres1:5432', 'postgres', 'test_table', 'postgres', 'mysecretpassword', 'test_schema')''' - result = node1.query(f'SELECT * FROM {table_function}') - assert(result == expected) + table_function = """postgresql('postgres1:5432', 'postgres', 'test_table', 'postgres', 'mysecretpassword', 'test_schema')""" + result = node1.query(f"SELECT * FROM {table_function}") + assert result == expected cursor.execute('''CREATE SCHEMA "test.nice.schema"''') - cursor.execute('''CREATE TABLE "test.nice.schema"."test.nice.table" (a integer)''') - cursor.execute('INSERT INTO "test.nice.schema"."test.nice.table" SELECT i FROM generate_series(0, 99) as t(i)') + cursor.execute("""CREATE TABLE "test.nice.schema"."test.nice.table" (a integer)""") + cursor.execute( + 'INSERT INTO "test.nice.schema"."test.nice.table" SELECT i FROM generate_series(0, 99) as t(i)' + ) - node1.query(''' + node1.query( + """ CREATE TABLE test.test_pg_table_schema_with_dots (a UInt32) ENGINE PostgreSQL('postgres1:5432', 'postgres', 'test.nice.table', 'postgres', 'mysecretpassword', 'test.nice.schema'); - ''') - result = node1.query('SELECT * FROM test.test_pg_table_schema_with_dots') - assert(result == expected) + """ + ) + result = node1.query("SELECT * FROM test.test_pg_table_schema_with_dots") + assert result == expected - cursor.execute('INSERT INTO "test_schema"."test_table" SELECT i FROM generate_series(100, 199) as t(i)') - result = node1.query(f'SELECT * FROM {table_function}') - expected = node1.query('SELECT number FROM numbers(200)') - assert(result == expected) + cursor.execute( + 'INSERT INTO "test_schema"."test_table" SELECT i FROM generate_series(100, 199) as t(i)' + ) + result = node1.query(f"SELECT * FROM {table_function}") + expected = node1.query("SELECT number FROM numbers(200)") + assert result == expected - cursor.execute('DROP SCHEMA test_schema CASCADE') + cursor.execute("DROP SCHEMA test_schema CASCADE") cursor.execute('DROP SCHEMA "test.nice.schema" CASCADE') - node1.query('DROP TABLE test.test_pg_table_schema') - node1.query('DROP TABLE test.test_pg_table_schema_with_dots') + node1.query("DROP TABLE test.test_pg_table_schema") + node1.query("DROP TABLE test.test_pg_table_schema_with_dots") def test_concurrent_queries(started_cluster): - conn = get_postgres_conn(started_cluster.postgres_ip, 
started_cluster.postgres_port, database=False) + conn = get_postgres_conn( + started_cluster.postgres_ip, started_cluster.postgres_port, database=False + ) cursor = conn.cursor() - database_name = 'concurrent_test' + database_name = "concurrent_test" - cursor.execute(f'DROP DATABASE IF EXISTS {database_name}') - cursor.execute(f'CREATE DATABASE {database_name}') - conn = get_postgres_conn(started_cluster.postgres_ip, started_cluster.postgres_port, database=True, database_name=database_name) + cursor.execute(f"DROP DATABASE IF EXISTS {database_name}") + cursor.execute(f"CREATE DATABASE {database_name}") + conn = get_postgres_conn( + started_cluster.postgres_ip, + started_cluster.postgres_port, + database=True, + database_name=database_name, + ) cursor = conn.cursor() - cursor.execute('CREATE TABLE test_table (key integer, value integer)') + cursor.execute("CREATE TABLE test_table (key integer, value integer)") - node1.query(f''' + node1.query( + f""" CREATE TABLE test.test_table (key UInt32, value UInt32) ENGINE = PostgreSQL(postgres1, database='{database_name}', table='test_table') - ''') + """ + ) - node1.query(f''' + node1.query( + f""" CREATE TABLE test.stat (numbackends UInt32, datname String) ENGINE = PostgreSQL(postgres1, database='{database_name}', table='pg_stat_database') - ''') + """ + ) def node_select(_): for i in range(20): - result = node1.query("SELECT * FROM test.test_table", user='default') + result = node1.query("SELECT * FROM test.test_table", user="default") def node_insert(_): for i in range(20): - result = node1.query("INSERT INTO test.test_table SELECT number, number FROM numbers(1000)", user='default') + result = node1.query( + "INSERT INTO test.test_table SELECT number, number FROM numbers(1000)", + user="default", + ) def node_insert_select(_): for i in range(20): - result = node1.query("INSERT INTO test.test_table SELECT number, number FROM numbers(1000)", user='default') - result = node1.query("SELECT * FROM test.test_table LIMIT 100", user='default') + result = node1.query( + "INSERT INTO test.test_table SELECT number, number FROM numbers(1000)", + user="default", + ) + result = node1.query( + "SELECT * FROM test.test_table LIMIT 100", user="default" + ) busy_pool = Pool(30) p = busy_pool.map_async(node_select, range(30)) p.wait() - count = int(node1.query(f"SELECT numbackends FROM test.stat WHERE datname = '{database_name}'")) + count = int( + node1.query( + f"SELECT numbackends FROM test.stat WHERE datname = '{database_name}'" + ) + ) print(count) - assert(count <= 18) + assert count <= 18 busy_pool = Pool(30) p = busy_pool.map_async(node_insert, range(30)) p.wait() - count = int(node1.query(f"SELECT numbackends FROM test.stat WHERE datname = '{database_name}'")) + count = int( + node1.query( + f"SELECT numbackends FROM test.stat WHERE datname = '{database_name}'" + ) + ) print(count) - assert(count <= 18) + assert count <= 18 busy_pool = Pool(30) p = busy_pool.map_async(node_insert_select, range(30)) p.wait() - count = int(node1.query(f"SELECT numbackends FROM test.stat WHERE datname = '{database_name}'")) + count = int( + node1.query( + f"SELECT numbackends FROM test.stat WHERE datname = '{database_name}'" + ) + ) print(count) - assert(count <= 18) + assert count <= 18 - node1.query('DROP TABLE test.test_table;') - node1.query('DROP TABLE test.stat;') + node1.query("DROP TABLE test.test_table;") + node1.query("DROP TABLE test.stat;") def test_postgres_distributed(started_cluster): @@ -256,82 +327,106 @@ def test_postgres_distributed(started_cluster): 
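# --- Illustrative sketch (not part of the patch): the thread-pool pattern used by
# test_concurrent_queries above. Pool here is multiprocessing.dummy.Pool (threads, not
# processes); the query is replaced by a stub so the sketch runs without a server.
import time
from multiprocessing.dummy import Pool

def fake_query(_):
    # Stand-in for node1.query(...); sleeps to simulate a query holding a connection.
    time.sleep(0.1)
    return 1

busy_pool = Pool(30)
result = busy_pool.map_async(fake_query, range(30))
result.wait()                    # block until all 30 workers finish
assert sum(result.get()) == 30   # every simulated query completed
busy_pool.close()
busy_pool.join()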
cursors = [cursor0, cursor1, cursor2, cursor3] for i in range(4): - cursors[i].execute('DROP TABLE IF EXISTS test_replicas') - cursors[i].execute('CREATE TABLE test_replicas (id Integer, name Text)') - cursors[i].execute(f"""INSERT INTO test_replicas select i, 'host{i+1}' from generate_series(0, 99) as t(i);"""); + cursors[i].execute("DROP TABLE IF EXISTS test_replicas") + cursors[i].execute("CREATE TABLE test_replicas (id Integer, name Text)") + cursors[i].execute( + f"""INSERT INTO test_replicas select i, 'host{i+1}' from generate_series(0, 99) as t(i);""" + ) # test multiple ports parsing - result = node2.query('''SELECT DISTINCT(name) FROM postgresql('postgres{1|2|3}:5432', 'postgres', 'test_replicas', 'postgres', 'mysecretpassword'); ''') - assert(result == 'host1\n' or result == 'host2\n' or result == 'host3\n') - result = node2.query('''SELECT DISTINCT(name) FROM postgresql('postgres2:5431|postgres3:5432', 'postgres', 'test_replicas', 'postgres', 'mysecretpassword'); ''') - assert(result == 'host3\n' or result == 'host2\n') + result = node2.query( + """SELECT DISTINCT(name) FROM postgresql('postgres{1|2|3}:5432', 'postgres', 'test_replicas', 'postgres', 'mysecretpassword'); """ + ) + assert result == "host1\n" or result == "host2\n" or result == "host3\n" + result = node2.query( + """SELECT DISTINCT(name) FROM postgresql('postgres2:5431|postgres3:5432', 'postgres', 'test_replicas', 'postgres', 'mysecretpassword'); """ + ) + assert result == "host3\n" or result == "host2\n" # Create storage with with 3 replicas - node2.query('DROP TABLE IF EXISTS test_replicas') - node2.query(''' + node2.query("DROP TABLE IF EXISTS test_replicas") + node2.query( + """ CREATE TABLE test_replicas (id UInt32, name String) - ENGINE = PostgreSQL('postgres{2|3|4}:5432', 'postgres', 'test_replicas', 'postgres', 'mysecretpassword'); ''') + ENGINE = PostgreSQL('postgres{2|3|4}:5432', 'postgres', 'test_replicas', 'postgres', 'mysecretpassword'); """ + ) # Check all replicas are traversed query = "SELECT name FROM (" - for i in range (3): + for i in range(3): query += "SELECT name FROM test_replicas UNION DISTINCT " query += "SELECT name FROM test_replicas) ORDER BY name" result = node2.query(query) - assert(result == 'host2\nhost3\nhost4\n') + assert result == "host2\nhost3\nhost4\n" # Create storage with with two two shards, each has 2 replicas - node2.query('DROP TABLE IF EXISTS test_shards') + node2.query("DROP TABLE IF EXISTS test_shards") - node2.query(''' + node2.query( + """ CREATE TABLE test_shards (id UInt32, name String, age UInt32, money UInt32) - ENGINE = ExternalDistributed('PostgreSQL', 'postgres{1|2}:5432,postgres{3|4}:5432', 'postgres', 'test_replicas', 'postgres', 'mysecretpassword'); ''') + ENGINE = ExternalDistributed('PostgreSQL', 'postgres{1|2}:5432,postgres{3|4}:5432', 'postgres', 'test_replicas', 'postgres', 'mysecretpassword'); """ + ) # Check only one replica in each shard is used result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY name") - assert(result == 'host1\nhost3\n') + assert result == "host1\nhost3\n" - node2.query(''' + node2.query( + """ CREATE TABLE test_shards2 (id UInt32, name String, age UInt32, money UInt32) - ENGINE = ExternalDistributed('PostgreSQL', postgres4, description='postgres{1|2}:5432,postgres{3|4}:5432'); ''') + ENGINE = ExternalDistributed('PostgreSQL', postgres4, description='postgres{1|2}:5432,postgres{3|4}:5432'); """ + ) result = node2.query("SELECT DISTINCT(name) FROM test_shards2 ORDER BY name") - assert(result == 'host1\nhost3\n') + 
assert result == "host1\nhost3\n" # Check all replicas are traversed query = "SELECT name FROM (" - for i in range (3): + for i in range(3): query += "SELECT name FROM test_shards UNION DISTINCT " query += "SELECT name FROM test_shards) ORDER BY name" result = node2.query(query) - assert(result == 'host1\nhost2\nhost3\nhost4\n') + assert result == "host1\nhost2\nhost3\nhost4\n" # Disconnect postgres1 - started_cluster.pause_container('postgres1') + started_cluster.pause_container("postgres1") result = node2.query("SELECT DISTINCT(name) FROM test_shards ORDER BY name") - started_cluster.unpause_container('postgres1') - assert(result == 'host2\nhost4\n' or result == 'host3\nhost4\n') - node2.query('DROP TABLE test_shards') - node2.query('DROP TABLE test_replicas') + started_cluster.unpause_container("postgres1") + assert result == "host2\nhost4\n" or result == "host3\nhost4\n" + node2.query("DROP TABLE test_shards") + node2.query("DROP TABLE test_replicas") def test_datetime_with_timezone(started_cluster): cursor = started_cluster.postgres_conn.cursor() cursor.execute("DROP TABLE IF EXISTS test_timezone") node1.query("DROP TABLE IF EXISTS test.test_timezone") - cursor.execute("CREATE TABLE test_timezone (ts timestamp without time zone, ts_z timestamp with time zone)") - cursor.execute("insert into test_timezone select '2014-04-04 20:00:00', '2014-04-04 20:00:00'::timestamptz at time zone 'America/New_York';") + cursor.execute( + "CREATE TABLE test_timezone (ts timestamp without time zone, ts_z timestamp with time zone)" + ) + cursor.execute( + "insert into test_timezone select '2014-04-04 20:00:00', '2014-04-04 20:00:00'::timestamptz at time zone 'America/New_York';" + ) cursor.execute("select * from test_timezone") result = cursor.fetchall()[0] - logging.debug(f'{result[0]}, {str(result[1])[:-6]}') - node1.query("create table test.test_timezone ( ts DateTime, ts_z DateTime('America/New_York')) ENGINE PostgreSQL('postgres1:5432', 'postgres', 'test_timezone', 'postgres', 'mysecretpassword');") - assert(node1.query("select ts from test.test_timezone").strip() == str(result[0])) + logging.debug(f"{result[0]}, {str(result[1])[:-6]}") + node1.query( + "create table test.test_timezone ( ts DateTime, ts_z DateTime('America/New_York')) ENGINE PostgreSQL('postgres1:5432', 'postgres', 'test_timezone', 'postgres', 'mysecretpassword');" + ) + assert node1.query("select ts from test.test_timezone").strip() == str(result[0]) # [:-6] because 2014-04-04 16:00:00+00:00 -> 2014-04-04 16:00:00 - assert(node1.query("select ts_z from test.test_timezone").strip() == str(result[1])[:-6]) - assert(node1.query("select * from test.test_timezone") == "2014-04-04 20:00:00\t2014-04-04 16:00:00\n") + assert ( + node1.query("select ts_z from test.test_timezone").strip() + == str(result[1])[:-6] + ) + assert ( + node1.query("select * from test.test_timezone") + == "2014-04-04 20:00:00\t2014-04-04 16:00:00\n" + ) cursor.execute("DROP TABLE test_timezone") node1.query("DROP TABLE test.test_timezone") @@ -340,121 +435,170 @@ def test_postgres_ndim(started_cluster): cursor = started_cluster.postgres_conn.cursor() cursor.execute("DROP TABLE IF EXISTS arr1, arr2") - cursor.execute('CREATE TABLE arr1 (a Integer[])') + cursor.execute("CREATE TABLE arr1 (a Integer[])") cursor.execute("INSERT INTO arr1 SELECT '{{1}, {2}}'") # The point is in creating a table via 'as select *', in postgres att_ndim will not be correct in this case. 
- cursor.execute('CREATE TABLE arr2 AS SELECT * FROM arr1') - cursor.execute("SELECT attndims AS dims FROM pg_attribute WHERE attrelid = 'arr2'::regclass; ") + cursor.execute("CREATE TABLE arr2 AS SELECT * FROM arr1") + cursor.execute( + "SELECT attndims AS dims FROM pg_attribute WHERE attrelid = 'arr2'::regclass; " + ) result = cursor.fetchall()[0] - assert(int(result[0]) == 0) + assert int(result[0]) == 0 - result = node1.query('''SELECT toTypeName(a) FROM postgresql('postgres1:5432', 'postgres', 'arr2', 'postgres', 'mysecretpassword')''') - assert(result.strip() == "Array(Array(Nullable(Int32)))") + result = node1.query( + """SELECT toTypeName(a) FROM postgresql('postgres1:5432', 'postgres', 'arr2', 'postgres', 'mysecretpassword')""" + ) + assert result.strip() == "Array(Array(Nullable(Int32)))" cursor.execute("DROP TABLE arr1, arr2") def test_postgres_on_conflict(started_cluster): cursor = started_cluster.postgres_conn.cursor() - table = 'test_conflict' - cursor.execute(f'DROP TABLE IF EXISTS {table}') - cursor.execute(f'CREATE TABLE {table} (a integer PRIMARY KEY, b text, c integer)') + table = "test_conflict" + cursor.execute(f"DROP TABLE IF EXISTS {table}") + cursor.execute(f"CREATE TABLE {table} (a integer PRIMARY KEY, b text, c integer)") - node1.query(''' + node1.query( + """ CREATE TABLE test.test_conflict (a UInt32, b String, c Int32) ENGINE PostgreSQL('postgres1:5432', 'postgres', 'test_conflict', 'postgres', 'mysecretpassword', '', 'ON CONFLICT DO NOTHING'); - ''') - node1.query(f''' INSERT INTO test.{table} SELECT number, concat('name_', toString(number)), 3 from numbers(100)''') - node1.query(f''' INSERT INTO test.{table} SELECT number, concat('name_', toString(number)), 4 from numbers(100)''') + """ + ) + node1.query( + f""" INSERT INTO test.{table} SELECT number, concat('name_', toString(number)), 3 from numbers(100)""" + ) + node1.query( + f""" INSERT INTO test.{table} SELECT number, concat('name_', toString(number)), 4 from numbers(100)""" + ) check1 = f"SELECT count() FROM test.{table}" - assert (node1.query(check1)).rstrip() == '100' + assert (node1.query(check1)).rstrip() == "100" - table_func = f'''postgresql('{started_cluster.postgres_ip}:{started_cluster.postgres_port}', 'postgres', '{table}', 'postgres', 'mysecretpassword', '', 'ON CONFLICT DO NOTHING')''' - node1.query(f'''INSERT INTO TABLE FUNCTION {table_func} SELECT number, concat('name_', toString(number)), 3 from numbers(100)''') - node1.query(f'''INSERT INTO TABLE FUNCTION {table_func} SELECT number, concat('name_', toString(number)), 3 from numbers(100)''') + table_func = f"""postgresql('{started_cluster.postgres_ip}:{started_cluster.postgres_port}', 'postgres', '{table}', 'postgres', 'mysecretpassword', '', 'ON CONFLICT DO NOTHING')""" + node1.query( + f"""INSERT INTO TABLE FUNCTION {table_func} SELECT number, concat('name_', toString(number)), 3 from numbers(100)""" + ) + node1.query( + f"""INSERT INTO TABLE FUNCTION {table_func} SELECT number, concat('name_', toString(number)), 3 from numbers(100)""" + ) check1 = f"SELECT count() FROM test.{table}" - assert (node1.query(check1)).rstrip() == '100' + assert (node1.query(check1)).rstrip() == "100" - cursor.execute(f'DROP TABLE {table} ') + cursor.execute(f"DROP TABLE {table} ") def test_predefined_connection_configuration(started_cluster): cursor = started_cluster.postgres_conn.cursor() - cursor.execute(f'DROP TABLE IF EXISTS test_table') - cursor.execute(f'CREATE TABLE test_table (a integer PRIMARY KEY, b integer)') + cursor.execute(f"DROP TABLE IF 
EXISTS test_table") + cursor.execute(f"CREATE TABLE test_table (a integer PRIMARY KEY, b integer)") - node1.query(''' + node1.query( + """ DROP TABLE IF EXISTS test.test_table; CREATE TABLE test.test_table (a UInt32, b Int32) ENGINE PostgreSQL(postgres1); - ''') - node1.query(f''' INSERT INTO test.test_table SELECT number, number from numbers(100)''') - assert (node1.query(f"SELECT count() FROM test.test_table").rstrip() == '100') + """ + ) + node1.query( + f""" INSERT INTO test.test_table SELECT number, number from numbers(100)""" + ) + assert node1.query(f"SELECT count() FROM test.test_table").rstrip() == "100" - node1.query(''' + node1.query( + """ DROP TABLE test.test_table; CREATE TABLE test.test_table (a UInt32, b Int32) ENGINE PostgreSQL(postgres1, on_conflict='ON CONFLICT DO NOTHING'); - ''') - node1.query(f''' INSERT INTO test.test_table SELECT number, number from numbers(100)''') - node1.query(f''' INSERT INTO test.test_table SELECT number, number from numbers(100)''') - assert (node1.query(f"SELECT count() FROM test.test_table").rstrip() == '100') + """ + ) + node1.query( + f""" INSERT INTO test.test_table SELECT number, number from numbers(100)""" + ) + node1.query( + f""" INSERT INTO test.test_table SELECT number, number from numbers(100)""" + ) + assert node1.query(f"SELECT count() FROM test.test_table").rstrip() == "100" - node1.query('DROP TABLE test.test_table;') - node1.query_and_get_error(''' + node1.query("DROP TABLE test.test_table;") + node1.query_and_get_error( + """ CREATE TABLE test.test_table (a UInt32, b Int32) ENGINE PostgreSQL(postgres1, 'ON CONFLICT DO NOTHING'); - ''') - node1.query_and_get_error(''' + """ + ) + node1.query_and_get_error( + """ CREATE TABLE test.test_table (a UInt32, b Int32) ENGINE PostgreSQL(postgres2); - ''') - node1.query_and_get_error(''' + """ + ) + node1.query_and_get_error( + """ CREATE TABLE test.test_table (a UInt32, b Int32) ENGINE PostgreSQL(unknown_collection); - ''') + """ + ) - node1.query(''' + node1.query( + """ CREATE TABLE test.test_table (a UInt32, b Int32) ENGINE PostgreSQL(postgres1, port=5432, database='postgres', table='test_table'); - ''') - assert (node1.query(f"SELECT count() FROM test.test_table").rstrip() == '100') + """ + ) + assert node1.query(f"SELECT count() FROM test.test_table").rstrip() == "100" - node1.query(''' + node1.query( + """ DROP TABLE test.test_table; CREATE TABLE test.test_table (a UInt32, b Int32) ENGINE PostgreSQL(postgres3, port=5432); - ''') - assert (node1.query(f"SELECT count() FROM test.test_table").rstrip() == '100') + """ + ) + assert node1.query(f"SELECT count() FROM test.test_table").rstrip() == "100" - assert (node1.query(f"SELECT count() FROM postgresql(postgres1)").rstrip() == '100') - node1.query("INSERT INTO TABLE FUNCTION postgresql(postgres1, on_conflict='ON CONFLICT DO NOTHING') SELECT number, number from numbers(100)") - assert (node1.query(f"SELECT count() FROM postgresql(postgres1)").rstrip() == '100') + assert node1.query(f"SELECT count() FROM postgresql(postgres1)").rstrip() == "100" + node1.query( + "INSERT INTO TABLE FUNCTION postgresql(postgres1, on_conflict='ON CONFLICT DO NOTHING') SELECT number, number from numbers(100)" + ) + assert node1.query(f"SELECT count() FROM postgresql(postgres1)").rstrip() == "100" - cursor.execute('DROP SCHEMA IF EXISTS test_schema CASCADE') - cursor.execute('CREATE SCHEMA test_schema') - cursor.execute('CREATE TABLE test_schema.test_table (a integer)') - node1.query("INSERT INTO TABLE FUNCTION postgresql(postgres1, schema='test_schema', 
on_conflict='ON CONFLICT DO NOTHING') SELECT number from numbers(200)") - assert (node1.query(f"SELECT count() FROM postgresql(postgres1, schema='test_schema')").rstrip() == '200') + cursor.execute("DROP SCHEMA IF EXISTS test_schema CASCADE") + cursor.execute("CREATE SCHEMA test_schema") + cursor.execute("CREATE TABLE test_schema.test_table (a integer)") + node1.query( + "INSERT INTO TABLE FUNCTION postgresql(postgres1, schema='test_schema', on_conflict='ON CONFLICT DO NOTHING') SELECT number from numbers(200)" + ) + assert ( + node1.query( + f"SELECT count() FROM postgresql(postgres1, schema='test_schema')" + ).rstrip() + == "200" + ) - cursor.execute('DROP SCHEMA test_schema CASCADE') - cursor.execute(f'DROP TABLE test_table ') + cursor.execute("DROP SCHEMA test_schema CASCADE") + cursor.execute(f"DROP TABLE test_table ") def test_where_false(started_cluster): cursor = started_cluster.postgres_conn.cursor() cursor.execute("DROP TABLE IF EXISTS test") - cursor.execute('CREATE TABLE test (a Integer)') + cursor.execute("CREATE TABLE test (a Integer)") cursor.execute("INSERT INTO test SELECT 1") - result = node1.query("SELECT count() FROM postgresql('postgres1:5432', 'postgres', 'test', 'postgres', 'mysecretpassword') WHERE 1=0") - assert(int(result) == 0) - result = node1.query("SELECT count() FROM postgresql('postgres1:5432', 'postgres', 'test', 'postgres', 'mysecretpassword') WHERE 0") - assert(int(result) == 0) - result = node1.query("SELECT count() FROM postgresql('postgres1:5432', 'postgres', 'test', 'postgres', 'mysecretpassword') WHERE 1=1") - assert(int(result) == 1) + result = node1.query( + "SELECT count() FROM postgresql('postgres1:5432', 'postgres', 'test', 'postgres', 'mysecretpassword') WHERE 1=0" + ) + assert int(result) == 0 + result = node1.query( + "SELECT count() FROM postgresql('postgres1:5432', 'postgres', 'test', 'postgres', 'mysecretpassword') WHERE 0" + ) + assert int(result) == 0 + result = node1.query( + "SELECT count() FROM postgresql('postgres1:5432', 'postgres', 'test', 'postgres', 'mysecretpassword') WHERE 1=1" + ) + assert int(result) == 1 cursor.execute("DROP TABLE test") @@ -465,7 +609,7 @@ def test_datetime64(started_cluster): cursor.execute("insert into test select '1960-01-01 20:00:00';") result = node1.query("select * from postgresql(postgres1, table='test')") - assert(result.strip() == '1960-01-01 20:00:00.000000') + assert result.strip() == "1960-01-01 20:00:00.000000" def test_uuid(started_cluster): @@ -475,11 +619,13 @@ def test_uuid(started_cluster): cursor.execute("""CREATE EXTENSION IF NOT EXISTS "uuid-ossp";""") cursor.execute("insert into test select uuid_generate_v1();") - result = node1.query("select toTypeName(u) from postgresql(postgres1, table='test')") - assert(result.strip() == 'Nullable(UUID)') + result = node1.query( + "select toTypeName(u) from postgresql(postgres1, table='test')" + ) + assert result.strip() == "Nullable(UUID)" -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_storage_postgresql_replica/test.py b/tests/integration/test_storage_postgresql_replica/test.py index 4602d567b46..e51a9335a65 100644 --- a/tests/integration/test_storage_postgresql_replica/test.py +++ b/tests/integration/test_storage_postgresql_replica/test.py @@ -11,7 +11,12 @@ from helpers.test_tools import TSV import threading cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', 
main_configs=['configs/log_conf.xml'], with_postgres=True, stay_alive=True) +instance = cluster.add_instance( + "instance", + main_configs=["configs/log_conf.xml"], + with_postgres=True, + stay_alive=True, +) postgres_table_template = """ CREATE TABLE IF NOT EXISTS {} ( @@ -19,44 +24,63 @@ postgres_table_template = """ """ queries = [ - 'INSERT INTO postgresql_replica select i, i from generate_series(0, 10000) as t(i);', - 'DELETE FROM postgresql_replica WHERE (value*value) % 3 = 0;', - 'UPDATE postgresql_replica SET value = value + 125 WHERE key % 2 = 0;', + "INSERT INTO postgresql_replica select i, i from generate_series(0, 10000) as t(i);", + "DELETE FROM postgresql_replica WHERE (value*value) % 3 = 0;", + "UPDATE postgresql_replica SET value = value + 125 WHERE key % 2 = 0;", "UPDATE postgresql_replica SET key=key+20000 WHERE key%2=0", - 'INSERT INTO postgresql_replica select i, i from generate_series(40000, 50000) as t(i);', - 'DELETE FROM postgresql_replica WHERE key % 10 = 0;', - 'UPDATE postgresql_replica SET value = value + 101 WHERE key % 2 = 1;', + "INSERT INTO postgresql_replica select i, i from generate_series(40000, 50000) as t(i);", + "DELETE FROM postgresql_replica WHERE key % 10 = 0;", + "UPDATE postgresql_replica SET value = value + 101 WHERE key % 2 = 1;", "UPDATE postgresql_replica SET key=key+80000 WHERE key%2=1", - 'DELETE FROM postgresql_replica WHERE value % 2 = 0;', - 'UPDATE postgresql_replica SET value = value + 2000 WHERE key % 5 = 0;', - 'INSERT INTO postgresql_replica select i, i from generate_series(200000, 250000) as t(i);', - 'DELETE FROM postgresql_replica WHERE value % 3 = 0;', - 'UPDATE postgresql_replica SET value = value * 2 WHERE key % 3 = 0;', + "DELETE FROM postgresql_replica WHERE value % 2 = 0;", + "UPDATE postgresql_replica SET value = value + 2000 WHERE key % 5 = 0;", + "INSERT INTO postgresql_replica select i, i from generate_series(200000, 250000) as t(i);", + "DELETE FROM postgresql_replica WHERE value % 3 = 0;", + "UPDATE postgresql_replica SET value = value * 2 WHERE key % 3 = 0;", "UPDATE postgresql_replica SET key=key+500000 WHERE key%2=1", - 'INSERT INTO postgresql_replica select i, i from generate_series(1000000, 1050000) as t(i);', - 'DELETE FROM postgresql_replica WHERE value % 9 = 2;', + "INSERT INTO postgresql_replica select i, i from generate_series(1000000, 1050000) as t(i);", + "DELETE FROM postgresql_replica WHERE value % 9 = 2;", "UPDATE postgresql_replica SET key=key+10000000", - 'UPDATE postgresql_replica SET value = value + 2 WHERE key % 3 = 1;', - 'DELETE FROM postgresql_replica WHERE value%5 = 0;' - ] + "UPDATE postgresql_replica SET value = value + 2 WHERE key % 3 = 1;", + "DELETE FROM postgresql_replica WHERE value%5 = 0;", +] @pytest.mark.timeout(30) -def check_tables_are_synchronized(table_name, order_by='key', postgres_database='postgres_database'): - expected = instance.query('select * from {}.{} order by {};'.format(postgres_database, table_name, order_by)) - result = instance.query('select * from test.{} order by {};'.format(table_name, order_by)) +def check_tables_are_synchronized( + table_name, order_by="key", postgres_database="postgres_database" +): + expected = instance.query( + "select * from {}.{} order by {};".format( + postgres_database, table_name, order_by + ) + ) + result = instance.query( + "select * from test.{} order by {};".format(table_name, order_by) + ) while result != expected: time.sleep(0.5) - result = instance.query('select * from test.{} order by {};'.format(table_name, order_by)) + result = 
instance.query( + "select * from test.{} order by {};".format(table_name, order_by) + ) - assert(result == expected) + assert result == expected -def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name='postgres_database'): + +def get_postgres_conn( + ip, port, database=False, auto_commit=True, database_name="postgres_database" +): if database == True: - conn_string = "host={} port={} dbname='{}' user='postgres' password='mysecretpassword'".format(ip, port, database_name) + conn_string = "host={} port={} dbname='{}' user='postgres' password='mysecretpassword'".format( + ip, port, database_name + ) else: - conn_string = "host={} port={} user='postgres' password='mysecretpassword'".format(ip, port) + conn_string = ( + "host={} port={} user='postgres' password='mysecretpassword'".format( + ip, port + ) + ) conn = psycopg2.connect(conn_string) if auto_commit: @@ -64,29 +88,43 @@ def get_postgres_conn(ip, port, database=False, auto_commit=True, database_name= conn.autocommit = True return conn + def create_postgres_db(cursor, name): cursor.execute("CREATE DATABASE {}".format(name)) -def create_clickhouse_postgres_db(ip, port, name='postgres_database'): - instance.query(''' + +def create_clickhouse_postgres_db(ip, port, name="postgres_database"): + instance.query( + """ CREATE DATABASE {} - ENGINE = PostgreSQL('{}:{}', '{}', 'postgres', 'mysecretpassword')'''.format(name, ip, port, name)) + ENGINE = PostgreSQL('{}:{}', '{}', 'postgres', 'mysecretpassword')""".format( + name, ip, port, name + ) + ) + def create_materialized_table(ip, port): - instance.query(''' + instance.query( + """ CREATE TABLE test.postgresql_replica (key UInt64, value UInt64) ENGINE = MaterializedPostgreSQL( '{}:{}', 'postgres_database', 'postgresql_replica', 'postgres', 'mysecretpassword') - PRIMARY KEY key; '''.format(ip, port)) + PRIMARY KEY key; """.format( + ip, port + ) + ) + def create_postgres_table(cursor, table_name, replica_identity_full=False): cursor.execute("DROP TABLE IF EXISTS {}".format(table_name)) cursor.execute(postgres_table_template.format(table_name)) if replica_identity_full: - cursor.execute('ALTER TABLE {} REPLICA IDENTITY FULL;'.format(table_name)) + cursor.execute("ALTER TABLE {} REPLICA IDENTITY FULL;".format(table_name)) -def postgresql_replica_check_result(result, check=False, ref_file='test_postgresql_replica.reference'): +def postgresql_replica_check_result( + result, check=False, ref_file="test_postgresql_replica.reference" +): fpath = p.join(p.dirname(__file__), ref_file) with open(fpath) as reference: if check: @@ -99,14 +137,14 @@ def postgresql_replica_check_result(result, check=False, ref_file='test_postgres def started_cluster(): try: cluster.start() - conn = get_postgres_conn(ip=cluster.postgres_ip, - port=cluster.postgres_port) + conn = get_postgres_conn(ip=cluster.postgres_ip, port=cluster.postgres_port) cursor = conn.cursor() - create_postgres_db(cursor, 'postgres_database') - create_clickhouse_postgres_db(ip=cluster.postgres_ip, - port=cluster.postgres_port) + create_postgres_db(cursor, "postgres_database") + create_clickhouse_postgres_db( + ip=cluster.postgres_ip, port=cluster.postgres_port + ) - instance.query('CREATE DATABASE test') + instance.query("CREATE DATABASE test") yield cluster finally: @@ -115,486 +153,601 @@ def started_cluster(): @pytest.mark.timeout(320) def test_initial_load_from_snapshot(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = 
get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + create_postgres_table(cursor, "postgresql_replica") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)" + ) - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") while postgresql_replica_check_result(result) == False: time.sleep(0.2) - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") - cursor.execute('DROP TABLE postgresql_replica;') + cursor.execute("DROP TABLE postgresql_replica;") postgresql_replica_check_result(result, True) @pytest.mark.timeout(320) def test_no_connection_at_startup(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + create_postgres_table(cursor, "postgresql_replica") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)" + ) - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) time.sleep(3) - instance.query('DETACH TABLE test.postgresql_replica') - started_cluster.pause_container('postgres1') + instance.query("DETACH TABLE test.postgresql_replica") + started_cluster.pause_container("postgres1") - instance.query('ATTACH TABLE test.postgresql_replica') + instance.query("ATTACH TABLE test.postgresql_replica") time.sleep(3) - started_cluster.unpause_container('postgres1') + started_cluster.unpause_container("postgres1") - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") while int(result) == 0: - time.sleep(0.5); - result = instance.query('SELECT count() FROM test.postgresql_replica;') + time.sleep(0.5) + result = instance.query("SELECT count() FROM test.postgresql_replica;") - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') - cursor.execute('DROP TABLE postgresql_replica;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") + cursor.execute("DROP TABLE postgresql_replica;") postgresql_replica_check_result(result, True) @pytest.mark.timeout(320) def test_detach_attach_is_ok(started_cluster): - conn = 
get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + create_postgres_table(cursor, "postgresql_replica") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)" + ) - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) == 0): + result = instance.query("SELECT count() FROM test.postgresql_replica;") + while int(result) == 0: time.sleep(0.2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") postgresql_replica_check_result(result, True) - instance.query('DETACH TABLE test.postgresql_replica') - instance.query('ATTACH TABLE test.postgresql_replica') + instance.query("DETACH TABLE test.postgresql_replica") + instance.query("ATTACH TABLE test.postgresql_replica") - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") while postgresql_replica_check_result(result) == False: time.sleep(0.5) - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") - cursor.execute('DROP TABLE postgresql_replica;') + cursor.execute("DROP TABLE postgresql_replica;") postgresql_replica_check_result(result, True) @pytest.mark.timeout(320) def test_replicating_insert_queries(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); + create_postgres_table(cursor, "postgresql_replica") - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(10)") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(10)" + ) - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) != 10): + result = instance.query("SELECT count() FROM test.postgresql_replica;") + while int(result) != 10: time.sleep(0.2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + 
result = instance.query("SELECT count() FROM test.postgresql_replica;") - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 10 + number, 10 + number from numbers(10)") - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 20 + number, 20 + number from numbers(10)") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT 10 + number, 10 + number from numbers(10)" + ) + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT 20 + number, 20 + number from numbers(10)" + ) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) != 30): + result = instance.query("SELECT count() FROM test.postgresql_replica;") + while int(result) != 30: time.sleep(0.2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 30 + number, 30 + number from numbers(10)") - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 40 + number, 40 + number from numbers(10)") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT 30 + number, 30 + number from numbers(10)" + ) + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT 40 + number, 40 + number from numbers(10)" + ) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) != 50): + result = instance.query("SELECT count() FROM test.postgresql_replica;") + while int(result) != 50: time.sleep(0.2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') - cursor.execute('DROP TABLE postgresql_replica;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") + cursor.execute("DROP TABLE postgresql_replica;") postgresql_replica_check_result(result, True) @pytest.mark.timeout(320) def test_replicating_delete_queries(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); + create_postgres_table(cursor, "postgresql_replica") - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)" + ) - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") while postgresql_replica_check_result(result) == False: time.sleep(0.2) - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 
50 + number, 50 + number from numbers(50)") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, 50 + number from numbers(50)" + ) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") while int(result) != 100: time.sleep(0.5) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") - cursor.execute('DELETE FROM postgresql_replica WHERE key > 49;') + cursor.execute("DELETE FROM postgresql_replica WHERE key > 49;") - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") while postgresql_replica_check_result(result) == False: time.sleep(0.5) - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") - cursor.execute('DROP TABLE postgresql_replica;') + cursor.execute("DROP TABLE postgresql_replica;") postgresql_replica_check_result(result, True) @pytest.mark.timeout(320) def test_replicating_update_queries(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); + create_postgres_table(cursor, "postgresql_replica") - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number + 10 from numbers(50)") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number + 10 from numbers(50)" + ) - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) != 50): + result = instance.query("SELECT count() FROM test.postgresql_replica;") + while int(result) != 50: time.sleep(0.2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") - cursor.execute('UPDATE postgresql_replica SET value = value - 10;') + cursor.execute("UPDATE postgresql_replica SET value = value - 10;") - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") while postgresql_replica_check_result(result) == False: time.sleep(0.5) - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") - cursor.execute('DROP TABLE postgresql_replica;') + cursor.execute("DROP TABLE postgresql_replica;") postgresql_replica_check_result(result, True) @pytest.mark.timeout(320) def test_resume_from_written_version(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + 
database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number + 10 from numbers(50)") + create_postgres_table(cursor, "postgresql_replica") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number + 10 from numbers(50)" + ) - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) != 50): + result = instance.query("SELECT count() FROM test.postgresql_replica;") + while int(result) != 50: time.sleep(0.2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, 50 + number from numbers(50)") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT 50 + number, 50 + number from numbers(50)" + ) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) != 100): + result = instance.query("SELECT count() FROM test.postgresql_replica;") + while int(result) != 100: time.sleep(0.2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") - instance.query('DETACH TABLE test.postgresql_replica') + instance.query("DETACH TABLE test.postgresql_replica") - cursor.execute('DELETE FROM postgresql_replica WHERE key > 49;') - cursor.execute('UPDATE postgresql_replica SET value = value - 10;') + cursor.execute("DELETE FROM postgresql_replica WHERE key > 49;") + cursor.execute("UPDATE postgresql_replica SET value = value - 10;") - instance.query('ATTACH TABLE test.postgresql_replica') + instance.query("ATTACH TABLE test.postgresql_replica") - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") while postgresql_replica_check_result(result) == False: time.sleep(0.5) - result = instance.query('SELECT * FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;") - cursor.execute('DROP TABLE postgresql_replica;') + cursor.execute("DROP TABLE postgresql_replica;") postgresql_replica_check_result(result, True) @pytest.mark.timeout(320) def test_many_replication_messages(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000)") + create_postgres_table(cursor, "postgresql_replica") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000)" + ) - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - 
port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) != 100000): + result = instance.query("SELECT count() FROM test.postgresql_replica;") + while int(result) != 100000: time.sleep(0.2) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") print("SYNC OK") - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000, 100000)") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000, 100000)" + ) - result = instance.query('SELECT count() FROM test.postgresql_replica;') - while (int(result) != 200000): + result = instance.query("SELECT count() FROM test.postgresql_replica;") + while int(result) != 200000: time.sleep(1) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") print("INSERT OK") - result = instance.query('SELECT key FROM test.postgresql_replica ORDER BY key;') + result = instance.query("SELECT key FROM test.postgresql_replica ORDER BY key;") expected = instance.query("SELECT number from numbers(200000)") - assert(result == expected) + assert result == expected - cursor.execute('UPDATE postgresql_replica SET value = key + 1 WHERE key < 100000;') + cursor.execute("UPDATE postgresql_replica SET value = key + 1 WHERE key < 100000;") - result = instance.query('SELECT key FROM test.postgresql_replica WHERE value = key + 1 ORDER BY key;') + result = instance.query( + "SELECT key FROM test.postgresql_replica WHERE value = key + 1 ORDER BY key;" + ) expected = instance.query("SELECT number from numbers(100000)") - while (result != expected): + while result != expected: time.sleep(1) - result = instance.query('SELECT key FROM test.postgresql_replica WHERE value = key + 1 ORDER BY key;') + result = instance.query( + "SELECT key FROM test.postgresql_replica WHERE value = key + 1 ORDER BY key;" + ) print("UPDATE OK") - cursor.execute('DELETE FROM postgresql_replica WHERE key % 2 = 1;') - cursor.execute('DELETE FROM postgresql_replica WHERE key != value;') + cursor.execute("DELETE FROM postgresql_replica WHERE key % 2 = 1;") + cursor.execute("DELETE FROM postgresql_replica WHERE key != value;") - result = instance.query('SELECT count() FROM (SELECT * FROM test.postgresql_replica);') - while (int(result) != 50000): + result = instance.query( + "SELECT count() FROM (SELECT * FROM test.postgresql_replica);" + ) + while int(result) != 50000: time.sleep(1) - result = instance.query('SELECT count() FROM (SELECT * FROM test.postgresql_replica);') + result = instance.query( + "SELECT count() FROM (SELECT * FROM test.postgresql_replica);" + ) print("DELETE OK") - cursor.execute('DROP TABLE postgresql_replica;') + cursor.execute("DROP TABLE postgresql_replica;") @pytest.mark.timeout(320) def test_connection_loss(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); - instance.query("INSERT INTO 
postgres_database.postgresql_replica SELECT number, number from numbers(50)") + create_postgres_table(cursor, "postgresql_replica") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)" + ) - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) i = 50 while i < 100000: - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT {} + number, number from numbers(10000)".format(i)) + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT {} + number, number from numbers(10000)".format( + i + ) + ) i += 10000 - started_cluster.pause_container('postgres1') + started_cluster.pause_container("postgres1") - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") print(int(result)) time.sleep(6) - started_cluster.unpause_container('postgres1') + started_cluster.unpause_container("postgres1") - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") while int(result) < 100050: time.sleep(1) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") - cursor.execute('DROP TABLE postgresql_replica;') - assert(int(result) == 100050) + cursor.execute("DROP TABLE postgresql_replica;") + assert int(result) == 100050 @pytest.mark.timeout(320) def test_clickhouse_restart(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)") + create_postgres_table(cursor, "postgresql_replica") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(50)" + ) - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) i = 50 while i < 100000: - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT {} + number, number from numbers(10000)".format(i)) + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT {} + number, number from numbers(10000)".format( + i + ) + ) i += 10000 instance.restart_clickhouse() - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") while int(result) < 100050: time.sleep(1) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") - cursor.execute('DROP TABLE postgresql_replica;') + cursor.execute("DROP TABLE postgresql_replica;") print(result) - assert(int(result) == 
100050) + assert int(result) == 100050 def test_rename_table(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); + create_postgres_table(cursor, "postgresql_replica") - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(25)") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(25)" + ) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") while int(result) != 25: time.sleep(0.5) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") - instance.query('RENAME TABLE test.postgresql_replica TO test.postgresql_replica_renamed') - assert(int(instance.query('SELECT count() FROM test.postgresql_replica_renamed;')) == 25) + instance.query( + "RENAME TABLE test.postgresql_replica TO test.postgresql_replica_renamed" + ) + assert ( + int(instance.query("SELECT count() FROM test.postgresql_replica_renamed;")) + == 25 + ) - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(25, 25)") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(25, 25)" + ) - result = instance.query('SELECT count() FROM test.postgresql_replica_renamed;') + result = instance.query("SELECT count() FROM test.postgresql_replica_renamed;") while int(result) != 50: time.sleep(0.5) - result = instance.query('SELECT count() FROM test.postgresql_replica_renamed;') + result = instance.query("SELECT count() FROM test.postgresql_replica_renamed;") - result = instance.query('SELECT * FROM test.postgresql_replica_renamed ORDER BY key;') + result = instance.query( + "SELECT * FROM test.postgresql_replica_renamed ORDER BY key;" + ) postgresql_replica_check_result(result, True) - cursor.execute('DROP TABLE postgresql_replica;') - instance.query('DROP TABLE IF EXISTS test.postgresql_replica_renamed') + cursor.execute("DROP TABLE postgresql_replica;") + instance.query("DROP TABLE IF EXISTS test.postgresql_replica_renamed") def test_virtual_columns(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); + create_postgres_table(cursor, "postgresql_replica") - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) - 
instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(10)") - result = instance.query('SELECT count() FROM test.postgresql_replica;') + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(10)" + ) + result = instance.query("SELECT count() FROM test.postgresql_replica;") while int(result) != 10: time.sleep(0.5) - result = instance.query('SELECT count() FROM test.postgresql_replica;') + result = instance.query("SELECT count() FROM test.postgresql_replica;") # just check that it works, no check with `expected` becuase _version is taken as LSN, which will be different each time. - result = instance.query('SELECT key, value, _sign, _version FROM test.postgresql_replica;') + result = instance.query( + "SELECT key, value, _sign, _version FROM test.postgresql_replica;" + ) print(result) - cursor.execute('DROP TABLE postgresql_replica;') + cursor.execute("DROP TABLE postgresql_replica;") def test_abrupt_connection_loss_while_heavy_replication(started_cluster): instance.query("DROP DATABASE IF EXISTS test_database") - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); + create_postgres_table(cursor, "postgresql_replica") - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) for i in range(len(queries)): query = queries[i] cursor.execute(query) - print('query {}'.format(query)) + print("query {}".format(query)) - started_cluster.pause_container('postgres1') + started_cluster.pause_container("postgres1") result = instance.query("SELECT count() FROM test.postgresql_replica") - print(result) # Just debug + print(result) # Just debug - started_cluster.unpause_container('postgres1') + started_cluster.unpause_container("postgres1") - check_tables_are_synchronized('postgresql_replica'); + check_tables_are_synchronized("postgresql_replica") result = instance.query("SELECT count() FROM test.postgresql_replica") - print(result) # Just debug + print(result) # Just debug def test_abrupt_server_restart_while_heavy_replication(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); + create_postgres_table(cursor, "postgresql_replica") - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port) + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) for query in queries: cursor.execute(query) - print('query {}'.format(query)) + print("query {}".format(query)) instance.restart_clickhouse() result = instance.query("SELECT count() FROM test.postgresql_replica") - print(result) # Just debug + print(result) # Just debug 
- check_tables_are_synchronized('postgresql_replica'); + check_tables_are_synchronized("postgresql_replica") result = instance.query("SELECT count() FROM test.postgresql_replica") - print(result) # Just debug + print(result) # Just debug def test_drop_table_immediately(started_cluster): - conn = get_postgres_conn(ip=started_cluster.postgres_ip, - port=started_cluster.postgres_port, - database=True) + conn = get_postgres_conn( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + database=True, + ) cursor = conn.cursor() - create_postgres_table(cursor, 'postgresql_replica'); - instance.query("INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000)") + create_postgres_table(cursor, "postgresql_replica") + instance.query( + "INSERT INTO postgres_database.postgresql_replica SELECT number, number from numbers(100000)" + ) - instance.query('DROP TABLE IF EXISTS test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) - instance.query('DROP TABLE test.postgresql_replica') - create_materialized_table(ip=started_cluster.postgres_ip, port=started_cluster.postgres_port) - check_tables_are_synchronized('postgresql_replica'); - instance.query('DROP TABLE test.postgresql_replica') + instance.query("DROP TABLE IF EXISTS test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) + instance.query("DROP TABLE test.postgresql_replica") + create_materialized_table( + ip=started_cluster.postgres_ip, port=started_cluster.postgres_port + ) + check_tables_are_synchronized("postgresql_replica") + instance.query("DROP TABLE test.postgresql_replica") -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_storage_rabbitmq/rabbitmq_pb2.py b/tests/integration/test_storage_rabbitmq/rabbitmq_pb2.py index 6abc087dc75..df5c29adc6d 100644 --- a/tests/integration/test_storage_rabbitmq/rabbitmq_pb2.py +++ b/tests/integration/test_storage_rabbitmq/rabbitmq_pb2.py @@ -12,60 +12,85 @@ from google.protobuf import symbol_database as _symbol_database _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( - name='clickhouse_path/format_schemas/rabbitmq.proto', - package='', - syntax='proto3', + name="clickhouse_path/format_schemas/rabbitmq.proto", + package="", + syntax="proto3", serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_pb=b'\n-clickhouse_path/format_schemas/rabbitmq.proto\"+\n\rKeyValueProto\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\tb\x06proto3' + serialized_pb=b'\n-clickhouse_path/format_schemas/rabbitmq.proto"+\n\rKeyValueProto\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\tb\x06proto3', ) _KEYVALUEPROTO = _descriptor.Descriptor( - name='KeyValueProto', - full_name='KeyValueProto', + name="KeyValueProto", + full_name="KeyValueProto", filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='KeyValueProto.key', index=0, - number=1, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + name="key", + 
full_name="KeyValueProto.key", + index=0, + number=1, + type=4, + cpp_type=4, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), _descriptor.FieldDescriptor( - name='value', full_name='KeyValueProto.value', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ + name="value", + full_name="KeyValueProto.value", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), ], + extensions=[], nested_types=[], - enum_types=[ - ], + enum_types=[], serialized_options=None, is_extendable=False, - syntax='proto3', + syntax="proto3", extension_ranges=[], - oneofs=[ - ], + oneofs=[], serialized_start=49, serialized_end=92, ) -DESCRIPTOR.message_types_by_name['KeyValueProto'] = _KEYVALUEPROTO +DESCRIPTOR.message_types_by_name["KeyValueProto"] = _KEYVALUEPROTO _sym_db.RegisterFileDescriptor(DESCRIPTOR) -KeyValueProto = _reflection.GeneratedProtocolMessageType('KeyValueProto', (_message.Message,), { - 'DESCRIPTOR': _KEYVALUEPROTO, - '__module__': 'clickhouse_path.format_schemas.rabbitmq_pb2' - # @@protoc_insertion_point(class_scope:KeyValueProto) -}) +KeyValueProto = _reflection.GeneratedProtocolMessageType( + "KeyValueProto", + (_message.Message,), + { + "DESCRIPTOR": _KEYVALUEPROTO, + "__module__": "clickhouse_path.format_schemas.rabbitmq_pb2" + # @@protoc_insertion_point(class_scope:KeyValueProto) + }, +) _sym_db.RegisterMessage(KeyValueProto) # @@protoc_insertion_point(module_scope) diff --git a/tests/integration/test_storage_rabbitmq/test.py b/tests/integration/test_storage_rabbitmq/test.py index a3d99159cb2..d5011607556 100644 --- a/tests/integration/test_storage_rabbitmq/test.py +++ b/tests/integration/test_storage_rabbitmq/test.py @@ -18,16 +18,22 @@ from helpers.test_tools import TSV from . 
import rabbitmq_pb2 cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', - main_configs=['configs/rabbitmq.xml', 'configs/macros.xml', 'configs/named_collection.xml'], - user_configs=['configs/users.xml'], - with_rabbitmq=True) +instance = cluster.add_instance( + "instance", + main_configs=[ + "configs/rabbitmq.xml", + "configs/macros.xml", + "configs/named_collection.xml", + ], + user_configs=["configs/users.xml"], + with_rabbitmq=True, +) # Helpers -def rabbitmq_check_result(result, check=False, ref_file='test_rabbitmq_json.reference'): +def rabbitmq_check_result(result, check=False, ref_file="test_rabbitmq_json.reference"): fpath = p.join(p.dirname(__file__), ref_file) with open(fpath) as reference: if check: @@ -35,6 +41,7 @@ def rabbitmq_check_result(result, check=False, ref_file='test_rabbitmq_json.refe else: return TSV(result) == TSV(reference) + def wait_rabbitmq_to_start(rabbitmq_docker_id, timeout=180): start = time.time() while time.time() - start < timeout: @@ -47,26 +54,28 @@ def wait_rabbitmq_to_start(rabbitmq_docker_id, timeout=180): logging.debug("Can't connect to RabbitMQ " + str(ex)) time.sleep(0.5) + def kill_rabbitmq(rabbitmq_id): - p = subprocess.Popen(('docker', 'stop', rabbitmq_id), stdout=subprocess.PIPE) + p = subprocess.Popen(("docker", "stop", rabbitmq_id), stdout=subprocess.PIPE) p.communicate() return p.returncode == 0 def revive_rabbitmq(rabbitmq_id): - p = subprocess.Popen(('docker', 'start', rabbitmq_id), stdout=subprocess.PIPE) + p = subprocess.Popen(("docker", "start", rabbitmq_id), stdout=subprocess.PIPE) p.communicate() wait_rabbitmq_to_start(rabbitmq_id) # Fixtures + @pytest.fixture(scope="module") def rabbitmq_cluster(): try: cluster.start() logging.debug("rabbitmq_id is {}".format(instance.cluster.rabbitmq_docker_id)) - instance.query('CREATE DATABASE test') + instance.query("CREATE DATABASE test") yield cluster @@ -78,14 +87,16 @@ def rabbitmq_cluster(): def rabbitmq_setup_teardown(): print("RabbitMQ is available - running test") yield # run test - instance.query('DROP DATABASE test NO DELAY') - instance.query('CREATE DATABASE test') + instance.query("DROP DATABASE test NO DELAY") + instance.query("CREATE DATABASE test") # Tests + def test_rabbitmq_select(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = '{}:5672', @@ -93,27 +104,34 @@ def test_rabbitmq_select(rabbitmq_cluster): rabbitmq_commit_on_select = 1, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; - '''.format(rabbitmq_cluster.rabbitmq_host)) + """.format( + rabbitmq_cluster.rabbitmq_host + ) + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for i in range(50): - messages.append(json.dumps({'key': i, 'value': i})) + messages.append(json.dumps({"key": i, "value": i})) for message in messages: - channel.basic_publish(exchange='select', routing_key='', body=message) + channel.basic_publish(exchange="select", routing_key="", body=message) connection.close() # The order of messages in select * from test.rabbitmq is not 
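The test.py changes above and below are purely mechanical restyling: single quotes become double quotes, long call sites are broken out to one argument per line with a trailing comma, and `.format()` arguments move onto their own lines. This matches the defaults of the `black` formatter (an inference on my part; the patch itself does not name the tool). A minimal way to check whether a file already conforms, assuming `black` is installed:

```python
# Assumption: black with default settings produced this formatting; the patch does
# not state the tool. --check --diff reports what would change without rewriting.
import subprocess
import sys

result = subprocess.run(
    [
        sys.executable,
        "-m",
        "black",
        "--check",
        "--diff",
        "tests/integration/test_storage_rabbitmq/test.py",
    ],
    capture_output=True,
    text=True,
)
print(result.stdout or "already formatted")
```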
guaranteed, so sleep to collect everything in one select time.sleep(1) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True) + result += instance.query( + "SELECT * FROM test.rabbitmq ORDER BY key", ignore_error=True + ) if rabbitmq_check_result(result): break @@ -121,7 +139,8 @@ def test_rabbitmq_select(rabbitmq_cluster): def test_rabbitmq_select_empty(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = '{}:5672', @@ -129,47 +148,58 @@ def test_rabbitmq_select_empty(rabbitmq_cluster): rabbitmq_commit_on_select = 1, rabbitmq_format = 'TSV', rabbitmq_row_delimiter = '\\n'; - '''.format(rabbitmq_cluster.rabbitmq_host)) + """.format( + rabbitmq_cluster.rabbitmq_host + ) + ) - assert int(instance.query('SELECT count() FROM test.rabbitmq')) == 0 + assert int(instance.query("SELECT count() FROM test.rabbitmq")) == 0 def test_rabbitmq_json_without_delimiter(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = '{}:5672', rabbitmq_commit_on_select = 1, rabbitmq_exchange_name = 'json', rabbitmq_format = 'JSONEachRow' - '''.format(rabbitmq_cluster.rabbitmq_host)) + """.format( + rabbitmq_cluster.rabbitmq_host + ) + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() - messages = '' + messages = "" for i in range(25): - messages += json.dumps({'key': i, 'value': i}) + '\n' + messages += json.dumps({"key": i, "value": i}) + "\n" all_messages = [messages] for message in all_messages: - channel.basic_publish(exchange='json', routing_key='', body=message) + channel.basic_publish(exchange="json", routing_key="", body=message) - messages = '' + messages = "" for i in range(25, 50): - messages += json.dumps({'key': i, 'value': i}) + '\n' + messages += json.dumps({"key": i, "value": i}) + "\n" all_messages = [messages] for message in all_messages: - channel.basic_publish(exchange='json', routing_key='', body=message) + channel.basic_publish(exchange="json", routing_key="", body=message) connection.close() time.sleep(1) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True) + result += instance.query( + "SELECT * FROM test.rabbitmq ORDER BY key", ignore_error=True + ) if rabbitmq_check_result(result): break @@ -177,7 +207,8 @@ def test_rabbitmq_json_without_delimiter(rabbitmq_cluster): def test_rabbitmq_csv_with_delimiter(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -185,26 +216,31 @@ def test_rabbitmq_csv_with_delimiter(rabbitmq_cluster): rabbitmq_commit_on_select = 1, rabbitmq_format = 'CSV', rabbitmq_row_delimiter = '\\n'; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', 
credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for i in range(50): - messages.append('{i}, {i}'.format(i=i)) + messages.append("{i}, {i}".format(i=i)) for message in messages: - channel.basic_publish(exchange='csv', routing_key='', body=message) + channel.basic_publish(exchange="csv", routing_key="", body=message) connection.close() time.sleep(1) - result = '' + result = "" while True: - result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True) + result += instance.query( + "SELECT * FROM test.rabbitmq ORDER BY key", ignore_error=True + ) if rabbitmq_check_result(result): break @@ -212,7 +248,8 @@ def test_rabbitmq_csv_with_delimiter(rabbitmq_cluster): def test_rabbitmq_tsv_with_delimiter(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -226,24 +263,27 @@ def test_rabbitmq_tsv_with_delimiter(rabbitmq_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.rabbitmq; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for i in range(50): - messages.append('{i}\t{i}'.format(i=i)) + messages.append("{i}\t{i}".format(i=i)) for message in messages: - channel.basic_publish(exchange='tsv', routing_key='', body=message) + channel.basic_publish(exchange="tsv", routing_key="", body=message) connection.close() - result = '' + result = "" while True: - result = instance.query('SELECT * FROM test.view ORDER BY key') + result = instance.query("SELECT * FROM test.view ORDER BY key") if rabbitmq_check_result(result): break @@ -251,31 +291,37 @@ def test_rabbitmq_tsv_with_delimiter(rabbitmq_cluster): def test_rabbitmq_macros(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = '{rabbitmq_host}:{rabbitmq_port}', rabbitmq_commit_on_select = 1, rabbitmq_exchange_name = '{rabbitmq_exchange_name}', rabbitmq_format = '{rabbitmq_format}' - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() - message = '' + message = "" for i in range(50): - message += json.dumps({'key': i, 'value': i}) + '\n' - channel.basic_publish(exchange='macro', routing_key='', body=message) + message += json.dumps({"key": i, "value": i}) + "\n" + channel.basic_publish(exchange="macro", routing_key="", body=message) connection.close() time.sleep(1) - result = 
'' + result = "" while True: - result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True) + result += instance.query( + "SELECT * FROM test.rabbitmq ORDER BY key", ignore_error=True + ) if rabbitmq_check_result(result): break @@ -283,7 +329,8 @@ def test_rabbitmq_macros(rabbitmq_cluster): def test_rabbitmq_materialized_view(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -301,25 +348,28 @@ def test_rabbitmq_materialized_view(rabbitmq_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS SELECT * FROM test.rabbitmq group by (key, value); - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for i in range(50): - messages.append(json.dumps({'key': i, 'value': i})) + messages.append(json.dumps({"key": i, "value": i})) for message in messages: - channel.basic_publish(exchange='mv', routing_key='', body=message) + channel.basic_publish(exchange="mv", routing_key="", body=message) time_limit_sec = 60 deadline = time.monotonic() + time_limit_sec while time.monotonic() < deadline: - result = instance.query('SELECT * FROM test.view ORDER BY key') - if (rabbitmq_check_result(result)): + result = instance.query("SELECT * FROM test.view ORDER BY key") + if rabbitmq_check_result(result): break rabbitmq_check_result(result, True) @@ -327,8 +377,8 @@ def test_rabbitmq_materialized_view(rabbitmq_cluster): deadline = time.monotonic() + time_limit_sec while time.monotonic() < deadline: - result = instance.query('SELECT * FROM test.view2 ORDER BY key') - if (rabbitmq_check_result(result)): + result = instance.query("SELECT * FROM test.view2 ORDER BY key") + if rabbitmq_check_result(result): break rabbitmq_check_result(result, True) @@ -336,7 +386,8 @@ def test_rabbitmq_materialized_view(rabbitmq_cluster): def test_rabbitmq_materialized_view_with_subquery(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -348,21 +399,24 @@ def test_rabbitmq_materialized_view_with_subquery(rabbitmq_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM (SELECT * FROM test.rabbitmq); - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for i in range(50): - messages.append(json.dumps({'key': i, 'value': i})) + messages.append(json.dumps({"key": i, "value": i})) for message in messages: - channel.basic_publish(exchange='mvsq', routing_key='', body=message) + channel.basic_publish(exchange="mvsq", routing_key="", body=message) while 
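Nearly every test above and below repeats the same shape: create a RabbitMQ-engine table plus a materialized view into MergeTree, publish JSON rows with pika, then poll ClickHouse until everything has been consumed. A condensed sketch of that pattern under assumed names (`publish_and_wait` is hypothetical; `query` stands in for the harness's `instance.query`):

```python
# Sketch only: names are assumptions, credentials mirror the ones the tests use.
import json
import time

import pika


def publish_and_wait(query, host, port, exchange, rows, timeout=60):
    credentials = pika.PlainCredentials("root", "clickhouse")
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host, port, "/", credentials)
    )
    channel = connection.channel()

    for i in range(rows):
        channel.basic_publish(
            exchange=exchange,
            routing_key="",
            body=json.dumps({"key": i, "value": i}),
        )
    connection.close()

    # Consumption is asynchronous, so poll the target table until the count matches.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if int(query("SELECT count() FROM test.view")) == rows:
            return True
        time.sleep(1)
    return False
```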
True: - result = instance.query('SELECT * FROM test.view ORDER BY key') + result = instance.query("SELECT * FROM test.view ORDER BY key") if rabbitmq_check_result(result): break @@ -371,7 +425,8 @@ def test_rabbitmq_materialized_view_with_subquery(rabbitmq_cluster): def test_rabbitmq_many_materialized_views(rabbitmq_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view1; DROP TABLE IF EXISTS test.view2; DROP TABLE IF EXISTS test.consumer1; @@ -392,31 +447,36 @@ def test_rabbitmq_many_materialized_views(rabbitmq_cluster): SELECT * FROM test.rabbitmq; CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS SELECT * FROM test.rabbitmq; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for i in range(50): - messages.append(json.dumps({'key': i, 'value': i})) + messages.append(json.dumps({"key": i, "value": i})) for message in messages: - channel.basic_publish(exchange='mmv', routing_key='', body=message) + channel.basic_publish(exchange="mmv", routing_key="", body=message) while True: - result1 = instance.query('SELECT * FROM test.view1 ORDER BY key') - result2 = instance.query('SELECT * FROM test.view2 ORDER BY key') + result1 = instance.query("SELECT * FROM test.view1 ORDER BY key") + result2 = instance.query("SELECT * FROM test.view2 ORDER BY key") if rabbitmq_check_result(result1) and rabbitmq_check_result(result2): break - instance.query(''' + instance.query( + """ DROP TABLE test.consumer1; DROP TABLE test.consumer2; DROP TABLE test.view1; DROP TABLE test.view2; - ''') + """ + ) connection.close() rabbitmq_check_result(result1, True) @@ -425,7 +485,8 @@ def test_rabbitmq_many_materialized_views(rabbitmq_cluster): @pytest.mark.skip(reason="clichouse_path with rabbitmq.proto fails to be exported") def test_rabbitmq_protobuf(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value String) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -437,43 +498,46 @@ def test_rabbitmq_protobuf(rabbitmq_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.rabbitmq; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() - data = '' + data = "" for i in range(0, 20): msg = rabbitmq_pb2.KeyValueProto() msg.key = i msg.value = str(i) serialized_msg = msg.SerializeToString() data = data + _VarintBytes(len(serialized_msg)) + serialized_msg - channel.basic_publish(exchange='pb', routing_key='', body=data) - data = '' + channel.basic_publish(exchange="pb", routing_key="", body=data) + data = "" for i in range(20, 21): msg = rabbitmq_pb2.KeyValueProto() msg.key = i msg.value = str(i) serialized_msg = msg.SerializeToString() data = data + 
_VarintBytes(len(serialized_msg)) + serialized_msg - channel.basic_publish(exchange='pb', routing_key='', body=data) - data = '' + channel.basic_publish(exchange="pb", routing_key="", body=data) + data = "" for i in range(21, 50): msg = rabbitmq_pb2.KeyValueProto() msg.key = i msg.value = str(i) serialized_msg = msg.SerializeToString() data = data + _VarintBytes(len(serialized_msg)) + serialized_msg - channel.basic_publish(exchange='pb', routing_key='', body=data) + channel.basic_publish(exchange="pb", routing_key="", body=data) connection.close() - result = '' + result = "" while True: - result = instance.query('SELECT * FROM test.view ORDER BY key') + result = instance.query("SELECT * FROM test.view ORDER BY key") if rabbitmq_check_result(result): break @@ -484,14 +548,20 @@ def test_rabbitmq_big_message(rabbitmq_cluster): # Create batchs of messages of size ~100Kb rabbitmq_messages = 1000 batch_messages = 1000 - messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(rabbitmq_messages)] + messages = [ + json.dumps({"key": i, "value": "x" * 100}) * batch_messages + for i in range(rabbitmq_messages) + ] - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value String) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -502,26 +572,30 @@ def test_rabbitmq_big_message(rabbitmq_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.rabbitmq; - ''') + """ + ) for message in messages: - channel.basic_publish(exchange='big', routing_key='', body=message) + channel.basic_publish(exchange="big", routing_key="", body=message) while True: - result = instance.query('SELECT count() FROM test.view') + result = instance.query("SELECT count() FROM test.view") if int(result) == batch_messages * rabbitmq_messages: break connection.close() - assert int(result) == rabbitmq_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result) == rabbitmq_messages * batch_messages + ), "ClickHouse lost some messages: {}".format(result) def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): NUM_CONSUMERS = 10 NUM_QUEUES = 10 - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -536,13 +610,16 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT *, _channel_id AS channel_id FROM test.rabbitmq; - ''') + """ + ) i = [0] messages_num = 10000 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) def produce(): 
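`test_rabbitmq_protobuf` builds each message body by hand as a sequence of length-delimited frames: every serialized `KeyValueProto` is prefixed with a varint holding its size, and the frames are concatenated. A small sketch of that framing follows; `rabbitmq_pb2` is the generated module from the previous file, and the `unpack` side is added here purely for illustration using protobuf's internal `_DecodeVarint32` helper (the test itself only encodes):

```python
# Sketch of the varint length-delimited framing used in test_rabbitmq_protobuf.
from google.protobuf.internal.decoder import _DecodeVarint32
from google.protobuf.internal.encoder import _VarintBytes

import rabbitmq_pb2  # assumed importable; the test uses `from . import rabbitmq_pb2`


def pack(pairs):
    data = b""
    for key, value in pairs:
        msg = rabbitmq_pb2.KeyValueProto()
        msg.key = key
        msg.value = value
        serialized = msg.SerializeToString()
        data += _VarintBytes(len(serialized)) + serialized
    return data


def unpack(data):
    pos, out = 0, []
    while pos < len(data):
        size, pos = _DecodeVarint32(data, pos)
        msg = rabbitmq_pb2.KeyValueProto()
        msg.ParseFromString(data[pos : pos + size])
        pos += size
        out.append((msg.key, msg.value))
    return out


body = pack([(i, str(i)) for i in range(20)])
assert unpack(body) == [(i, str(i)) for i in range(20)]
```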
connection = pika.BlockingConnection(parameters) @@ -550,14 +627,18 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): messages = [] for _ in range(messages_num): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 current = 0 for message in messages: current += 1 mes_id = str(current) - channel.basic_publish(exchange='test_sharding', routing_key='', - properties=pika.BasicProperties(message_id=mes_id), body=message) + channel.basic_publish( + exchange="test_sharding", + routing_key="", + properties=pika.BasicProperties(message_id=mes_id), + body=message, + ) connection.close() threads = [] @@ -569,9 +650,9 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): time.sleep(random.uniform(0, 1)) thread.start() - result1 = '' + result1 = "" while True: - result1 = instance.query('SELECT count() FROM test.view') + result1 = instance.query("SELECT count() FROM test.view") time.sleep(1) if int(result1) == messages_num * threads_num: break @@ -581,7 +662,9 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): for thread in threads: thread.join() - assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result1) == messages_num * threads_num + ), "ClickHouse lost some messages: {}".format(result) assert int(result2) == 10 @@ -589,7 +672,8 @@ def test_rabbitmq_mv_combo(rabbitmq_cluster): NUM_MV = 5 NUM_CONSUMERS = 4 - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -599,10 +683,12 @@ def test_rabbitmq_mv_combo(rabbitmq_cluster): rabbitmq_num_queues = 5, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; - ''') + """ + ) for mv_id in range(NUM_MV): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.combo_{0}; DROP TABLE IF EXISTS test.combo_{0}_mv; CREATE TABLE test.combo_{0} (key UInt64, value UInt64) @@ -610,15 +696,20 @@ def test_rabbitmq_mv_combo(rabbitmq_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.combo_{0}_mv TO test.combo_{0} AS SELECT * FROM test.rabbitmq; - '''.format(mv_id)) + """.format( + mv_id + ) + ) time.sleep(2) i = [0] messages_num = 10000 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) def produce(): connection = pika.BlockingConnection(parameters) @@ -626,11 +717,15 @@ def test_rabbitmq_mv_combo(rabbitmq_cluster): messages = [] for _ in range(messages_num): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 for msg_id in range(messages_num): - channel.basic_publish(exchange='combo', routing_key='', - properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id]) + channel.basic_publish( + exchange="combo", + routing_key="", + properties=pika.BasicProperties(message_id=str(msg_id)), + body=messages[msg_id], + ) connection.close() threads = [] @@ -645,7 +740,9 @@ def test_rabbitmq_mv_combo(rabbitmq_cluster): while True: result = 0 for mv_id in range(NUM_MV): - result += int(instance.query('SELECT count() FROM 
test.combo_{0}'.format(mv_id))) + result += int( + instance.query("SELECT count() FROM test.combo_{0}".format(mv_id)) + ) if int(result) == messages_num * threads_num * NUM_MV: break time.sleep(1) @@ -654,16 +751,23 @@ def test_rabbitmq_mv_combo(rabbitmq_cluster): thread.join() for mv_id in range(NUM_MV): - instance.query(''' + instance.query( + """ DROP TABLE test.combo_{0}_mv; DROP TABLE test.combo_{0}; - '''.format(mv_id)) + """.format( + mv_id + ) + ) - assert int(result) == messages_num * threads_num * NUM_MV, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result) == messages_num * threads_num * NUM_MV + ), "ClickHouse lost some messages: {}".format(result) def test_rabbitmq_insert(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -672,28 +776,31 @@ def test_rabbitmq_insert(rabbitmq_cluster): rabbitmq_routing_key_list = 'insert1', rabbitmq_format = 'TSV', rabbitmq_row_delimiter = '\\n'; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) consumer_connection = pika.BlockingConnection(parameters) consumer = consumer_connection.channel() - result = consumer.queue_declare(queue='') + result = consumer.queue_declare(queue="") queue_name = result.method.queue - consumer.queue_bind(exchange='insert', queue=queue_name, routing_key='insert1') + consumer.queue_bind(exchange="insert", queue=queue_name, routing_key="insert1") values = [] for i in range(50): values.append("({i}, {i})".format(i=i)) - values = ','.join(values) + values = ",".join(values) while True: try: instance.query("INSERT INTO test.rabbitmq VALUES {}".format(values)) break except QueryRuntimeException as e: - if 'Local: Timed out.' in str(e): + if "Local: Timed out." 
in str(e): continue else: raise @@ -703,19 +810,20 @@ def test_rabbitmq_insert(rabbitmq_cluster): def onReceived(channel, method, properties, body): i = 0 insert_messages.append(body.decode()) - if (len(insert_messages) == 50): + if len(insert_messages) == 50: channel.stop_consuming() consumer.basic_consume(onReceived, queue_name) consumer.start_consuming() consumer_connection.close() - result = '\n'.join(insert_messages) + result = "\n".join(insert_messages) rabbitmq_check_result(result, True) def test_rabbitmq_insert_headers_exchange(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -724,29 +832,36 @@ def test_rabbitmq_insert_headers_exchange(rabbitmq_cluster): rabbitmq_routing_key_list = 'test=insert,topic=headers', rabbitmq_format = 'TSV', rabbitmq_row_delimiter = '\\n'; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) consumer_connection = pika.BlockingConnection(parameters) consumer = consumer_connection.channel() - result = consumer.queue_declare(queue='') + result = consumer.queue_declare(queue="") queue_name = result.method.queue - consumer.queue_bind(exchange='insert_headers', queue=queue_name, routing_key="", - arguments={'x-match': 'all', 'test': 'insert', 'topic': 'headers'}) + consumer.queue_bind( + exchange="insert_headers", + queue=queue_name, + routing_key="", + arguments={"x-match": "all", "test": "insert", "topic": "headers"}, + ) values = [] for i in range(50): values.append("({i}, {i})".format(i=i)) - values = ','.join(values) + values = ",".join(values) while True: try: instance.query("INSERT INTO test.rabbitmq VALUES {}".format(values)) break except QueryRuntimeException as e: - if 'Local: Timed out.' in str(e): + if "Local: Timed out." in str(e): continue else: raise @@ -756,19 +871,20 @@ def test_rabbitmq_insert_headers_exchange(rabbitmq_cluster): def onReceived(channel, method, properties, body): i = 0 insert_messages.append(body.decode()) - if (len(insert_messages) == 50): + if len(insert_messages) == 50: channel.stop_consuming() consumer.basic_consume(onReceived, queue_name) consumer.start_consuming() consumer_connection.close() - result = '\n'.join(insert_messages) + result = "\n".join(insert_messages) rabbitmq_check_result(result, True) def test_rabbitmq_many_inserts(rabbitmq_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.rabbitmq_many; DROP TABLE IF EXISTS test.rabbitmq_consume; DROP TABLE IF EXISTS test.view_many; @@ -789,21 +905,24 @@ def test_rabbitmq_many_inserts(rabbitmq_cluster): rabbitmq_routing_key_list = 'insert2', rabbitmq_format = 'TSV', rabbitmq_row_delimiter = '\\n'; - ''') + """ + ) messages_num = 10000 values = [] for i in range(messages_num): values.append("({i}, {i})".format(i=i)) - values = ','.join(values) + values = ",".join(values) def insert(): while True: try: - instance.query("INSERT INTO test.rabbitmq_many VALUES {}".format(values)) + instance.query( + "INSERT INTO test.rabbitmq_many VALUES {}".format(values) + ) break except QueryRuntimeException as e: - if 'Local: Timed out.' in str(e): + if "Local: Timed out." 
in str(e): continue else: raise @@ -816,36 +935,43 @@ def test_rabbitmq_many_inserts(rabbitmq_cluster): time.sleep(random.uniform(0, 1)) thread.start() - instance.query(''' + instance.query( + """ CREATE TABLE test.view_many (key UInt64, value UInt64) ENGINE = MergeTree ORDER BY key; CREATE MATERIALIZED VIEW test.consumer_many TO test.view_many AS SELECT * FROM test.rabbitmq_consume; - ''') + """ + ) for thread in threads: thread.join() while True: - result = instance.query('SELECT count() FROM test.view_many') + result = instance.query("SELECT count() FROM test.view_many") print(result, messages_num * threads_num) if int(result) == messages_num * threads_num: break time.sleep(1) - instance.query(''' + instance.query( + """ DROP TABLE test.rabbitmq_consume; DROP TABLE test.rabbitmq_many; DROP TABLE test.consumer_many; DROP TABLE test.view_many; - ''') + """ + ) - assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result) == messages_num * threads_num + ), "ClickHouse lost some messages: {}".format(result) def test_rabbitmq_overloaded_insert(rabbitmq_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view_overload; DROP TABLE IF EXISTS test.consumer_overload; DROP TABLE IF EXISTS test.rabbitmq_consume; @@ -875,7 +1001,8 @@ def test_rabbitmq_overloaded_insert(rabbitmq_cluster): SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3; CREATE MATERIALIZED VIEW test.consumer_overload TO test.view_overload AS SELECT * FROM test.rabbitmq_consume; - ''') + """ + ) messages_num = 100000 @@ -883,14 +1010,16 @@ def test_rabbitmq_overloaded_insert(rabbitmq_cluster): values = [] for i in range(messages_num): values.append("({i}, {i})".format(i=i)) - values = ','.join(values) + values = ",".join(values) while True: try: - instance.query("INSERT INTO test.rabbitmq_overload VALUES {}".format(values)) + instance.query( + "INSERT INTO test.rabbitmq_overload VALUES {}".format(values) + ) break except QueryRuntimeException as e: - if 'Local: Timed out.' in str(e): + if "Local: Timed out." 
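The insert-path tests (`test_rabbitmq_insert`, `test_rabbitmq_insert_headers_exchange`, `test_rabbitmq_many_inserts`, and the overload test) all wrap the `INSERT` in the same retry loop, tolerating only the local-timeout error while the broker connection warms up. A stripped-down sketch with assumed names (`query` stands in for `instance.query`, and the exception type is passed in rather than importing the harness's `QueryRuntimeException`):

```python
# Sketch only: helper name and parameters are assumptions; the retry condition is the
# one the tests use ("Local: Timed out." is retried, anything else is re-raised).
def insert_with_retry(query, table, rows, exception_type=Exception):
    values = ",".join("({i}, {i})".format(i=i) for i in range(rows))
    while True:
        try:
            query("INSERT INTO {} VALUES {}".format(table, values))
            return
        except exception_type as e:
            if "Local: Timed out." in str(e):
                continue
            raise
```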
in str(e): continue else: raise @@ -904,37 +1033,44 @@ def test_rabbitmq_overloaded_insert(rabbitmq_cluster): thread.start() while True: - result = instance.query('SELECT count() FROM test.view_overload') + result = instance.query("SELECT count() FROM test.view_overload") time.sleep(1) if int(result) == messages_num * threads_num: break - instance.query(''' + instance.query( + """ DROP TABLE test.consumer_overload; DROP TABLE test.view_overload; DROP TABLE test.rabbitmq_consume; DROP TABLE test.rabbitmq_overload; - ''') + """ + ) for thread in threads: thread.join() - assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result) == messages_num * threads_num + ), "ClickHouse lost some messages: {}".format(result) def test_rabbitmq_direct_exchange(rabbitmq_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.destination; CREATE TABLE test.destination(key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3; - ''') + """ + ) num_tables = 5 for consumer_id in range(num_tables): print(("Setting up table {}".format(consumer_id))) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.direct_exchange_{0}; DROP TABLE IF EXISTS test.direct_exchange_{0}_mv; CREATE TABLE test.direct_exchange_{0} (key UInt64, value UInt64) @@ -949,19 +1085,24 @@ def test_rabbitmq_direct_exchange(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW test.direct_exchange_{0}_mv TO test.destination AS SELECT key, value FROM test.direct_exchange_{0}; - '''.format(consumer_id)) + """.format( + consumer_id + ) + ) i = [0] messages_num = 1000 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for _ in range(messages_num): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 key_num = 0 @@ -971,42 +1112,56 @@ def test_rabbitmq_direct_exchange(rabbitmq_cluster): for message in messages: mes_id = str(randrange(10)) channel.basic_publish( - exchange='direct_exchange_testing', routing_key=key, - properties=pika.BasicProperties(message_id=mes_id), body=message) + exchange="direct_exchange_testing", + routing_key=key, + properties=pika.BasicProperties(message_id=mes_id), + body=message, + ) connection.close() while True: - result = instance.query('SELECT count() FROM test.destination') + result = instance.query("SELECT count() FROM test.destination") time.sleep(1) if int(result) == messages_num * num_tables: break for consumer_id in range(num_tables): - instance.query(''' + instance.query( + """ DROP TABLE test.direct_exchange_{0}_mv; DROP TABLE test.direct_exchange_{0}; - '''.format(consumer_id)) + """.format( + consumer_id + ) + ) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.destination; - ''') + """ + ) - assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result) == messages_num * num_tables + ), "ClickHouse lost some messages: 
{}".format(result) def test_rabbitmq_fanout_exchange(rabbitmq_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.destination; CREATE TABLE test.destination(key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; - ''') + """ + ) num_tables = 5 for consumer_id in range(num_tables): print(("Setting up table {}".format(consumer_id))) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.fanout_exchange_{0}; DROP TABLE IF EXISTS test.fanout_exchange_{0}_mv; CREATE TABLE test.fanout_exchange_{0} (key UInt64, value UInt64) @@ -1021,58 +1176,78 @@ def test_rabbitmq_fanout_exchange(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW test.fanout_exchange_{0}_mv TO test.destination AS SELECT key, value FROM test.fanout_exchange_{0}; - '''.format(consumer_id)) + """.format( + consumer_id + ) + ) i = [0] messages_num = 1000 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for _ in range(messages_num): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 for msg_id in range(messages_num): - channel.basic_publish(exchange='fanout_exchange_testing', routing_key='', - properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id]) + channel.basic_publish( + exchange="fanout_exchange_testing", + routing_key="", + properties=pika.BasicProperties(message_id=str(msg_id)), + body=messages[msg_id], + ) connection.close() while True: - result = instance.query('SELECT count() FROM test.destination') + result = instance.query("SELECT count() FROM test.destination") time.sleep(1) if int(result) == messages_num * num_tables: break for consumer_id in range(num_tables): - instance.query(''' + instance.query( + """ DROP TABLE test.fanout_exchange_{0}_mv; DROP TABLE test.fanout_exchange_{0}; - '''.format(consumer_id)) + """.format( + consumer_id + ) + ) - instance.query(''' + instance.query( + """ DROP TABLE test.destination; - ''') + """ + ) - assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result) == messages_num * num_tables + ), "ClickHouse lost some messages: {}".format(result) def test_rabbitmq_topic_exchange(rabbitmq_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.destination; CREATE TABLE test.destination(key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; - ''') + """ + ) num_tables = 5 for consumer_id in range(num_tables): print(("Setting up table {}".format(consumer_id))) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.topic_exchange_{0}; DROP TABLE IF EXISTS test.topic_exchange_{0}_mv; CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64) @@ -1087,11 +1262,15 @@ def test_rabbitmq_topic_exchange(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS SELECT key, value FROM test.topic_exchange_{0}; - '''.format(consumer_id)) + """.format( + consumer_id + ) + ) for consumer_id in range(num_tables): print(("Setting up table 
{}".format(num_tables + consumer_id))) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.topic_exchange_{0}; DROP TABLE IF EXISTS test.topic_exchange_{0}_mv; CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64) @@ -1106,19 +1285,24 @@ def test_rabbitmq_topic_exchange(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS SELECT key, value FROM test.topic_exchange_{0}; - '''.format(num_tables + consumer_id)) + """.format( + num_tables + consumer_id + ) + ) i = [0] messages_num = 1000 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for _ in range(messages_num): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 key_num = 0 @@ -1126,50 +1310,65 @@ def test_rabbitmq_topic_exchange(rabbitmq_cluster): key = "topic." + str(key_num) key_num += 1 for message in messages: - channel.basic_publish(exchange='topic_exchange_testing', routing_key=key, body=message) + channel.basic_publish( + exchange="topic_exchange_testing", routing_key=key, body=message + ) key = "random.logs" current = 0 for msg_id in range(messages_num): - channel.basic_publish(exchange='topic_exchange_testing', routing_key=key, - properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id]) + channel.basic_publish( + exchange="topic_exchange_testing", + routing_key=key, + properties=pika.BasicProperties(message_id=str(msg_id)), + body=messages[msg_id], + ) connection.close() while True: - result = instance.query('SELECT count() FROM test.destination') + result = instance.query("SELECT count() FROM test.destination") time.sleep(1) if int(result) == messages_num * num_tables + messages_num * num_tables: break for consumer_id in range(num_tables * 2): - instance.query(''' + instance.query( + """ DROP TABLE test.topic_exchange_{0}_mv; DROP TABLE test.topic_exchange_{0}; - '''.format(consumer_id)) + """.format( + consumer_id + ) + ) - instance.query(''' + instance.query( + """ DROP TABLE test.destination; - ''') + """ + ) - assert int( - result) == messages_num * num_tables + messages_num * num_tables, 'ClickHouse lost some messages: {}'.format( - result) + assert ( + int(result) == messages_num * num_tables + messages_num * num_tables + ), "ClickHouse lost some messages: {}".format(result) def test_rabbitmq_hash_exchange(rabbitmq_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.destination; CREATE TABLE test.destination(key UInt64, value UInt64, channel_id String) ENGINE = MergeTree() ORDER BY key; - ''') + """ + ) num_tables = 4 for consumer_id in range(num_tables): - table_name = 'rabbitmq_consumer{}'.format(consumer_id) + table_name = "rabbitmq_consumer{}".format(consumer_id) print(("Setting up {}".format(table_name))) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.{0}; DROP TABLE IF EXISTS test.{0}_mv; CREATE TABLE test.{0} (key UInt64, value UInt64) @@ -1183,13 +1382,18 @@ def test_rabbitmq_hash_exchange(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED 
VIEW test.{0}_mv TO test.destination AS SELECT key, value, _channel_id AS channel_id FROM test.{0}; - '''.format(table_name)) + """.format( + table_name + ) + ) i = [0] messages_num = 500 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) def produce(): # init connection here because otherwise python rabbitmq client might fail @@ -1197,11 +1401,15 @@ def test_rabbitmq_hash_exchange(rabbitmq_cluster): channel = connection.channel() messages = [] for _ in range(messages_num): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 for msg_id in range(messages_num): - channel.basic_publish(exchange='hash_exchange_testing', routing_key=str(msg_id), - properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id]) + channel.basic_publish( + exchange="hash_exchange_testing", + routing_key=str(msg_id), + properties=pika.BasicProperties(message_id=str(msg_id)), + body=messages[msg_id], + ) connection.close() threads = [] @@ -1213,9 +1421,9 @@ def test_rabbitmq_hash_exchange(rabbitmq_cluster): time.sleep(random.uniform(0, 1)) thread.start() - result1 = '' + result1 = "" while True: - result1 = instance.query('SELECT count() FROM test.destination') + result1 = instance.query("SELECT count() FROM test.destination") time.sleep(1) if int(result1) == messages_num * threads_num: break @@ -1223,32 +1431,43 @@ def test_rabbitmq_hash_exchange(rabbitmq_cluster): result2 = instance.query("SELECT count(DISTINCT channel_id) FROM test.destination") for consumer_id in range(num_tables): - table_name = 'rabbitmq_consumer{}'.format(consumer_id) - instance.query(''' + table_name = "rabbitmq_consumer{}".format(consumer_id) + instance.query( + """ DROP TABLE test.{0}_mv; DROP TABLE test.{0}; - '''.format(table_name)) + """.format( + table_name + ) + ) - instance.query(''' + instance.query( + """ DROP TABLE test.destination; - ''') + """ + ) for thread in threads: thread.join() - assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result1) == messages_num * threads_num + ), "ClickHouse lost some messages: {}".format(result) assert int(result2) == 4 * num_tables def test_rabbitmq_multiple_bindings(rabbitmq_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.destination; CREATE TABLE test.destination(key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; - ''') + """ + ) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.bindings; DROP TABLE IF EXISTS test.bindings_mv; CREATE TABLE test.bindings (key UInt64, value UInt64) @@ -1261,13 +1480,16 @@ def test_rabbitmq_multiple_bindings(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW test.bindings_mv TO test.destination AS SELECT * FROM test.bindings; - ''') + """ + ) i = [0] messages_num = 500 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, 
rabbitmq_cluster.rabbitmq_port, "/", credentials + ) def produce(): # init connection here because otherwise python rabbitmq client might fail @@ -1276,14 +1498,16 @@ def test_rabbitmq_multiple_bindings(rabbitmq_cluster): messages = [] for _ in range(messages_num): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 - keys = ['key1', 'key2', 'key3', 'key4', 'key5'] + keys = ["key1", "key2", "key3", "key4", "key5"] for key in keys: for message in messages: - channel.basic_publish(exchange='multiple_bindings_testing', routing_key=key, body=message) + channel.basic_publish( + exchange="multiple_bindings_testing", routing_key=key, body=message + ) connection.close() @@ -1297,7 +1521,7 @@ def test_rabbitmq_multiple_bindings(rabbitmq_cluster): thread.start() while True: - result = instance.query('SELECT count() FROM test.destination') + result = instance.query("SELECT count() FROM test.destination") time.sleep(1) if int(result) == messages_num * threads_num * 5: break @@ -1305,27 +1529,34 @@ def test_rabbitmq_multiple_bindings(rabbitmq_cluster): for thread in threads: thread.join() - instance.query(''' + instance.query( + """ DROP TABLE test.bindings; DROP TABLE test.bindings_mv; DROP TABLE test.destination; - ''') + """ + ) - assert int(result) == messages_num * threads_num * 5, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result) == messages_num * threads_num * 5 + ), "ClickHouse lost some messages: {}".format(result) def test_rabbitmq_headers_exchange(rabbitmq_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.destination; CREATE TABLE test.destination(key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key; - ''') + """ + ) num_tables_to_receive = 2 for consumer_id in range(num_tables_to_receive): print(("Setting up table {}".format(consumer_id))) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.headers_exchange_{0}; DROP TABLE IF EXISTS test.headers_exchange_{0}_mv; CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64) @@ -1339,12 +1570,16 @@ def test_rabbitmq_headers_exchange(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS SELECT key, value FROM test.headers_exchange_{0}; - '''.format(consumer_id)) + """.format( + consumer_id + ) + ) num_tables_to_ignore = 2 for consumer_id in range(num_tables_to_ignore): print(("Setting up table {}".format(consumer_id + num_tables_to_receive))) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.headers_exchange_{0}; DROP TABLE IF EXISTS test.headers_exchange_{0}_mv; CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64) @@ -1357,54 +1592,71 @@ def test_rabbitmq_headers_exchange(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS SELECT key, value FROM test.headers_exchange_{0}; - '''.format(consumer_id + num_tables_to_receive)) + """.format( + consumer_id + num_tables_to_receive + ) + ) i = [0] messages_num = 1000 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = 
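`test_rabbitmq_headers_exchange` relies on headers-exchange routing: consumers are bound with an `x-match: all` argument set (the same pattern `test_rabbitmq_insert_headers_exchange` uses above), so a message is delivered only when its AMQP headers carry every bound key/value pair. A minimal pika sketch of that behaviour; host, port, and the exchange/queue names are placeholders, and in the tests the receiving side is declared by the RabbitMQ table engine rather than by hand:

```python
# Illustrative only: a headers exchange delivers to this queue only when all three
# bound header values match; the second publish is dropped for this binding.
import json

import pika

credentials = pika.PlainCredentials("root", "clickhouse")
connection = pika.BlockingConnection(
    pika.ConnectionParameters("127.0.0.1", 5672, "/", credentials)
)
channel = connection.channel()

channel.exchange_declare(exchange="headers_demo", exchange_type="headers")
queue = channel.queue_declare(queue="").method.queue
channel.queue_bind(
    exchange="headers_demo",
    queue=queue,
    routing_key="",
    arguments={"x-match": "all", "format": "logs", "type": "report", "year": "2020"},
)

channel.basic_publish(
    exchange="headers_demo",
    routing_key="",
    properties=pika.BasicProperties(
        headers={"format": "logs", "type": "report", "year": "2020"}
    ),
    body=json.dumps({"key": 0, "value": 0}),
)
channel.basic_publish(
    exchange="headers_demo",
    routing_key="",
    properties=pika.BasicProperties(
        headers={"format": "logs", "type": "report", "year": "2019"}
    ),
    body=json.dumps({"key": 1, "value": 1}),
)
connection.close()
```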
pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for _ in range(messages_num): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 fields = {} - fields['format'] = 'logs' - fields['type'] = 'report' - fields['year'] = '2020' + fields["format"] = "logs" + fields["type"] = "report" + fields["year"] = "2020" for msg_id in range(messages_num): - channel.basic_publish(exchange='headers_exchange_testing', routing_key='', - properties=pika.BasicProperties(headers=fields, message_id=str(msg_id)), - body=messages[msg_id]) + channel.basic_publish( + exchange="headers_exchange_testing", + routing_key="", + properties=pika.BasicProperties(headers=fields, message_id=str(msg_id)), + body=messages[msg_id], + ) connection.close() while True: - result = instance.query('SELECT count() FROM test.destination') + result = instance.query("SELECT count() FROM test.destination") time.sleep(1) if int(result) == messages_num * num_tables_to_receive: break for consumer_id in range(num_tables_to_receive + num_tables_to_ignore): - instance.query(''' + instance.query( + """ DROP TABLE test.headers_exchange_{0}_mv; DROP TABLE test.headers_exchange_{0}; - '''.format(consumer_id)) + """.format( + consumer_id + ) + ) - instance.query(''' + instance.query( + """ DROP TABLE test.destination; - ''') + """ + ) - assert int(result) == messages_num * num_tables_to_receive, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result) == messages_num * num_tables_to_receive + ), "ClickHouse lost some messages: {}".format(result) def test_rabbitmq_virtual_columns(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq_virtuals (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -1412,10 +1664,13 @@ def test_rabbitmq_virtual_columns(rabbitmq_cluster): rabbitmq_format = 'JSONEachRow'; CREATE MATERIALIZED VIEW test.view Engine=Log AS SELECT value, key, _exchange_name, _channel_id, _delivery_tag, _redelivered FROM test.rabbitmq_virtuals; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() @@ -1423,26 +1678,28 @@ def test_rabbitmq_virtual_columns(rabbitmq_cluster): i = 0 messages = [] for _ in range(message_num): - messages.append(json.dumps({'key': i, 'value': i})) + messages.append(json.dumps({"key": i, "value": i})) i += 1 for message in messages: - channel.basic_publish(exchange='virtuals', routing_key='', body=message) + channel.basic_publish(exchange="virtuals", routing_key="", body=message) while True: - result = instance.query('SELECT count() FROM test.view') + result = instance.query("SELECT count() FROM test.view") time.sleep(1) if int(result) == message_num: break connection.close() - result = instance.query(''' + result = instance.query( + """ SELECT key, value, _exchange_name, SUBSTRING(_channel_id, 1, 3), _delivery_tag, _redelivered FROM test.view ORDER BY key - ''') + """ + ) - expected = '''\ + expected = """\ 0 0 virtuals 1_0 1 0 1 1 virtuals 1_0 2 0 2 2 virtuals 1_0 3 0 @@ -1453,18 +1710,21 @@ def 
test_rabbitmq_virtual_columns(rabbitmq_cluster): 7 7 virtuals 1_0 8 0 8 8 virtuals 1_0 9 0 9 9 virtuals 1_0 10 0 -''' +""" - instance.query(''' + instance.query( + """ DROP TABLE test.rabbitmq_virtuals; DROP TABLE test.view; - ''') + """ + ) assert TSV(result) == TSV(expected) def test_rabbitmq_virtual_columns_with_materialized_view(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq_virtuals_mv (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -1476,10 +1736,13 @@ def test_rabbitmq_virtual_columns_with_materialized_view(rabbitmq_cluster): CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT *, _exchange_name as exchange_name, _channel_id as channel_id, _delivery_tag as delivery_tag, _redelivered as redelivered FROM test.rabbitmq_virtuals_mv; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() @@ -1487,14 +1750,14 @@ def test_rabbitmq_virtual_columns_with_materialized_view(rabbitmq_cluster): i = 0 messages = [] for _ in range(message_num): - messages.append(json.dumps({'key': i, 'value': i})) + messages.append(json.dumps({"key": i, "value": i})) i += 1 for message in messages: - channel.basic_publish(exchange='virtuals_mv', routing_key='', body=message) + channel.basic_publish(exchange="virtuals_mv", routing_key="", body=message) while True: - result = instance.query('SELECT count() FROM test.view') + result = instance.query("SELECT count() FROM test.view") time.sleep(1) if int(result) == message_num: break @@ -1502,8 +1765,9 @@ def test_rabbitmq_virtual_columns_with_materialized_view(rabbitmq_cluster): connection.close() result = instance.query( - "SELECT key, value, exchange_name, SUBSTRING(channel_id, 1, 3), delivery_tag, redelivered FROM test.view ORDER BY delivery_tag") - expected = '''\ + "SELECT key, value, exchange_name, SUBSTRING(channel_id, 1, 3), delivery_tag, redelivered FROM test.view ORDER BY delivery_tag" + ) + expected = """\ 0 0 virtuals_mv 1_0 1 0 1 1 virtuals_mv 1_0 2 0 2 2 virtuals_mv 1_0 3 0 @@ -1514,29 +1778,34 @@ def test_rabbitmq_virtual_columns_with_materialized_view(rabbitmq_cluster): 7 7 virtuals_mv 1_0 8 0 8 8 virtuals_mv 1_0 9 0 9 9 virtuals_mv 1_0 10 0 -''' +""" - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; DROP TABLE test.rabbitmq_virtuals_mv - ''') + """ + ) assert TSV(result) == TSV(expected) def test_rabbitmq_many_consumers_to_each_queue(rabbitmq_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.destination; CREATE TABLE test.destination(key UInt64, value UInt64, channel_id String) ENGINE = MergeTree() ORDER BY key; - ''') + """ + ) num_tables = 4 for table_id in range(num_tables): print(("Setting up table {}".format(table_id))) - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.many_consumers_{0}; DROP TABLE IF EXISTS test.many_consumers_{0}_mv; CREATE TABLE test.many_consumers_{0} (key UInt64, value UInt64) @@ -1550,13 +1819,18 @@ def test_rabbitmq_many_consumers_to_each_queue(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n'; CREATE MATERIALIZED VIEW 
test.many_consumers_{0}_mv TO test.destination AS SELECT key, value, _channel_id as channel_id FROM test.many_consumers_{0}; - '''.format(table_id)) + """.format( + table_id + ) + ) i = [0] messages_num = 1000 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) def produce(): connection = pika.BlockingConnection(parameters) @@ -1564,11 +1838,15 @@ def test_rabbitmq_many_consumers_to_each_queue(rabbitmq_cluster): messages = [] for _ in range(messages_num): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 for msg_id in range(messages_num): - channel.basic_publish(exchange='many_consumers', routing_key='', - properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id]) + channel.basic_publish( + exchange="many_consumers", + routing_key="", + properties=pika.BasicProperties(message_id=str(msg_id)), + body=messages[msg_id], + ) connection.close() threads = [] @@ -1580,9 +1858,9 @@ def test_rabbitmq_many_consumers_to_each_queue(rabbitmq_cluster): time.sleep(random.uniform(0, 1)) thread.start() - result1 = '' + result1 = "" while True: - result1 = instance.query('SELECT count() FROM test.destination') + result1 = instance.query("SELECT count() FROM test.destination") time.sleep(1) if int(result1) == messages_num * threads_num: break @@ -1593,22 +1871,31 @@ def test_rabbitmq_many_consumers_to_each_queue(rabbitmq_cluster): thread.join() for consumer_id in range(num_tables): - instance.query(''' + instance.query( + """ DROP TABLE test.many_consumers_{0}; DROP TABLE test.many_consumers_{0}_mv; - '''.format(consumer_id)) + """.format( + consumer_id + ) + ) - instance.query(''' + instance.query( + """ DROP TABLE test.destination; - ''') + """ + ) - assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result) + assert ( + int(result1) == messages_num * threads_num + ), "ClickHouse lost some messages: {}".format(result) # 4 tables, 2 consumers for each table => 8 consumer tags assert int(result2) == 8 def test_rabbitmq_restore_failed_connection_without_losses_1(rabbitmq_cluster): - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.consume; CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree @@ -1630,10 +1917,13 @@ def test_rabbitmq_restore_failed_connection_without_losses_1(rabbitmq_cluster): rabbitmq_persistent = '1', rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() @@ -1641,19 +1931,21 @@ def test_rabbitmq_restore_failed_connection_without_losses_1(rabbitmq_cluster): values = [] for i in range(messages_num): values.append("({i}, {i})".format(i=i)) - values = ','.join(values) + values = ",".join(values) while True: try: - instance.query("INSERT INTO 
test.producer_reconnect VALUES {}".format(values)) + instance.query( + "INSERT INTO test.producer_reconnect VALUES {}".format(values) + ) break except QueryRuntimeException as e: - if 'Local: Timed out.' in str(e): + if "Local: Timed out." in str(e): continue else: raise - while int(instance.query('SELECT count() FROM test.view')) == 0: + while int(instance.query("SELECT count() FROM test.view")) == 0: time.sleep(0.1) kill_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id) @@ -1661,21 +1953,26 @@ def test_rabbitmq_restore_failed_connection_without_losses_1(rabbitmq_cluster): revive_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id) while True: - result = instance.query('SELECT count(DISTINCT key) FROM test.view') + result = instance.query("SELECT count(DISTINCT key) FROM test.view") time.sleep(1) if int(result) == messages_num: break - instance.query(''' + instance.query( + """ DROP TABLE test.consume; DROP TABLE test.producer_reconnect; - ''') + """ + ) - assert int(result) == messages_num, 'ClickHouse lost some messages: {}'.format(result) + assert int(result) == messages_num, "ClickHouse lost some messages: {}".format( + result + ) def test_rabbitmq_restore_failed_connection_without_losses_2(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.consumer_reconnect (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -1684,33 +1981,42 @@ def test_rabbitmq_restore_failed_connection_without_losses_2(rabbitmq_cluster): rabbitmq_num_queues = 10, rabbitmq_format = 'JSONEachRow', rabbitmq_row_delimiter = '\\n'; - ''') + """ + ) i = 0 messages_num = 150000 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for _ in range(messages_num): - messages.append(json.dumps({'key': i, 'value': i})) + messages.append(json.dumps({"key": i, "value": i})) i += 1 for msg_id in range(messages_num): - channel.basic_publish(exchange='consumer_reconnect', routing_key='', body=messages[msg_id], - properties=pika.BasicProperties(delivery_mode=2, message_id=str(msg_id))) + channel.basic_publish( + exchange="consumer_reconnect", + routing_key="", + body=messages[msg_id], + properties=pika.BasicProperties(delivery_mode=2, message_id=str(msg_id)), + ) connection.close() - instance.query(''' + instance.query( + """ CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.consumer_reconnect; - ''') + """ + ) - while int(instance.query('SELECT count() FROM test.view')) == 0: + while int(instance.query("SELECT count() FROM test.view")) == 0: print(3) time.sleep(0.1) @@ -1726,21 +2032,26 @@ def test_rabbitmq_restore_failed_connection_without_losses_2(rabbitmq_cluster): # revive_rabbitmq() while True: - result = instance.query('SELECT count(DISTINCT key) FROM test.view') + result = instance.query("SELECT count(DISTINCT key) FROM test.view") time.sleep(1) if int(result) == messages_num: break - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.consumer_reconnect; - ''') + """ + ) - assert int(result) == messages_num, 'ClickHouse 
lost some messages: {}'.format(result) + assert int(result) == messages_num, "ClickHouse lost some messages: {}".format( + result + ) def test_rabbitmq_commit_on_block_write(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -1754,10 +2065,13 @@ def test_rabbitmq_commit_on_block_write(rabbitmq_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.rabbitmq; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() @@ -1769,46 +2083,56 @@ def test_rabbitmq_commit_on_block_write(rabbitmq_cluster): while not cancel.is_set(): messages = [] for _ in range(101): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 for message in messages: - channel.basic_publish(exchange='block', routing_key='', body=message) + channel.basic_publish(exchange="block", routing_key="", body=message) rabbitmq_thread = threading.Thread(target=produce) rabbitmq_thread.start() - while int(instance.query('SELECT count() FROM test.view')) == 0: + while int(instance.query("SELECT count() FROM test.view")) == 0: time.sleep(1) cancel.set() - instance.query('DETACH TABLE test.rabbitmq;') + instance.query("DETACH TABLE test.rabbitmq;") - while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='rabbitmq'")) == 1: + while ( + int( + instance.query( + "SELECT count() FROM system.tables WHERE database='test' AND name='rabbitmq'" + ) + ) + == 1 + ): time.sleep(1) - instance.query('ATTACH TABLE test.rabbitmq;') + instance.query("ATTACH TABLE test.rabbitmq;") - while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]: + while int(instance.query("SELECT uniqExact(key) FROM test.view")) < i[0]: time.sleep(1) - result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view')) + result = int(instance.query("SELECT count() == uniqExact(key) FROM test.view")) - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.view; - ''') + """ + ) rabbitmq_thread.join() connection.close() - assert result == 1, 'Messages from RabbitMQ get duplicated!' + assert result == 1, "Messages from RabbitMQ get duplicated!" 
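Editor's note: the RabbitMQ tests above wait for results with open-ended `while True: ... time.sleep(1)` loops (for example around the DETACH/ATTACH TABLE step and the count() checks). A minimal sketch of a bounded variant of that pattern is shown below; `wait_for_row_count` and its parameters are hypothetical names for illustration, not part of this test suite.

import time


def wait_for_row_count(query, sql, expected, timeout=180, interval=1):
    # Poll query(sql) -- e.g. instance.query -- until it returns `expected`
    # rows or the timeout expires.  Returns True on success, False on timeout.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if int(query(sql)) == expected:
            return True
        time.sleep(interval)
    return False


# Hypothetical usage in place of a bare polling loop:
# assert wait_for_row_count(instance.query, "SELECT count() FROM test.view", messages_num)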
def test_rabbitmq_no_connection_at_startup_1(rabbitmq_cluster): # no connection when table is initialized - rabbitmq_cluster.pause_container('rabbitmq1') - instance.query_and_get_error(''' + rabbitmq_cluster.pause_container("rabbitmq1") + instance.query_and_get_error( + """ CREATE TABLE test.cs (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -1816,12 +2140,14 @@ def test_rabbitmq_no_connection_at_startup_1(rabbitmq_cluster): rabbitmq_format = 'JSONEachRow', rabbitmq_num_consumers = '5', rabbitmq_row_delimiter = '\\n'; - ''') - rabbitmq_cluster.unpause_container('rabbitmq1') + """ + ) + rabbitmq_cluster.unpause_container("rabbitmq1") def test_rabbitmq_no_connection_at_startup_2(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.cs (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -1834,39 +2160,51 @@ def test_rabbitmq_no_connection_at_startup_2(rabbitmq_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.cs; - ''') + """ + ) instance.query("DETACH TABLE test.cs") - rabbitmq_cluster.pause_container('rabbitmq1') + rabbitmq_cluster.pause_container("rabbitmq1") instance.query("ATTACH TABLE test.cs") - rabbitmq_cluster.unpause_container('rabbitmq1') + rabbitmq_cluster.unpause_container("rabbitmq1") messages_num = 1000 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() for i in range(messages_num): - message = json.dumps({'key': i, 'value': i}) - channel.basic_publish(exchange='cs', routing_key='', body=message, - properties=pika.BasicProperties(delivery_mode=2, message_id=str(i))) + message = json.dumps({"key": i, "value": i}) + channel.basic_publish( + exchange="cs", + routing_key="", + body=message, + properties=pika.BasicProperties(delivery_mode=2, message_id=str(i)), + ) connection.close() while True: - result = instance.query('SELECT count() FROM test.view') + result = instance.query("SELECT count() FROM test.view") time.sleep(1) if int(result) == messages_num: break - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.cs; - ''') + """ + ) - assert int(result) == messages_num, 'ClickHouse lost some messages: {}'.format(result) + assert int(result) == messages_num, "ClickHouse lost some messages: {}".format( + result + ) def test_rabbitmq_format_factory_settings(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.format_settings ( id String, date DateTime ) ENGINE = RabbitMQ @@ -1874,106 +2212,136 @@ def test_rabbitmq_format_factory_settings(rabbitmq_cluster): rabbitmq_exchange_name = 'format_settings', rabbitmq_format = 'JSONEachRow', date_time_input_format = 'best_effort'; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = 
pika.BlockingConnection(parameters) channel = connection.channel() - message = json.dumps({"id":"format_settings_test","date":"2021-01-19T14:42:33.1829214Z"}) - expected = instance.query('''SELECT parseDateTimeBestEffort(CAST('2021-01-19T14:42:33.1829214Z', 'String'))''') + message = json.dumps( + {"id": "format_settings_test", "date": "2021-01-19T14:42:33.1829214Z"} + ) + expected = instance.query( + """SELECT parseDateTimeBestEffort(CAST('2021-01-19T14:42:33.1829214Z', 'String'))""" + ) - channel.basic_publish(exchange='format_settings', routing_key='', body=message) - result = '' + channel.basic_publish(exchange="format_settings", routing_key="", body=message) + result = "" while True: - result = instance.query('SELECT date FROM test.format_settings') + result = instance.query("SELECT date FROM test.format_settings") if result == expected: - break; + break - instance.query(''' + instance.query( + """ CREATE TABLE test.view ( id String, date DateTime ) ENGINE = MergeTree ORDER BY id; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.format_settings; - ''') + """ + ) - channel.basic_publish(exchange='format_settings', routing_key='', body=message) - result = '' + channel.basic_publish(exchange="format_settings", routing_key="", body=message) + result = "" while True: - result = instance.query('SELECT date FROM test.view') + result = instance.query("SELECT date FROM test.view") if result == expected: - break; + break connection.close() - instance.query(''' + instance.query( + """ DROP TABLE test.consumer; DROP TABLE test.format_settings; - ''') + """ + ) - assert(result == expected) + assert result == expected def test_rabbitmq_vhost(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq_vhost (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'vhost', rabbitmq_format = 'JSONEachRow', rabbitmq_vhost = '/' - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() - channel.basic_publish(exchange='vhost', routing_key='', body=json.dumps({'key': 1, 'value': 2})) + channel.basic_publish( + exchange="vhost", routing_key="", body=json.dumps({"key": 1, "value": 2}) + ) connection.close() while True: - result = instance.query('SELECT * FROM test.rabbitmq_vhost ORDER BY key', ignore_error=True) + result = instance.query( + "SELECT * FROM test.rabbitmq_vhost ORDER BY key", ignore_error=True + ) if result == "1\t2\n": break def test_rabbitmq_drop_table_properly(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq_drop (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'drop', rabbitmq_format = 'JSONEachRow', rabbitmq_queue_base = 'rabbit_queue_drop' - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + 
rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() - channel.basic_publish(exchange='drop', routing_key='', body=json.dumps({'key': 1, 'value': 2})) + channel.basic_publish( + exchange="drop", routing_key="", body=json.dumps({"key": 1, "value": 2}) + ) while True: - result = instance.query('SELECT * FROM test.rabbitmq_drop ORDER BY key', ignore_error=True) + result = instance.query( + "SELECT * FROM test.rabbitmq_drop ORDER BY key", ignore_error=True + ) if result == "1\t2\n": break - exists = channel.queue_declare(queue='rabbit_queue_drop', passive=True) - assert(exists) + exists = channel.queue_declare(queue="rabbit_queue_drop", passive=True) + assert exists instance.query("DROP TABLE test.rabbitmq_drop") time.sleep(30) try: - exists = channel.queue_declare(callback, queue='rabbit_queue_drop', passive=True) + exists = channel.queue_declare( + callback, queue="rabbit_queue_drop", passive=True + ) except Exception as e: exists = False - assert(not exists) + assert not exists def test_rabbitmq_queue_settings(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq_settings (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -1981,53 +2349,67 @@ def test_rabbitmq_queue_settings(rabbitmq_cluster): rabbitmq_format = 'JSONEachRow', rabbitmq_queue_base = 'rabbit_queue_settings', rabbitmq_queue_settings_list = 'x-max-length=10,x-overflow=reject-publish' - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() for i in range(50): - channel.basic_publish(exchange='rabbit_exchange', routing_key='', body=json.dumps({'key': 1, 'value': 2})) + channel.basic_publish( + exchange="rabbit_exchange", + routing_key="", + body=json.dumps({"key": 1, "value": 2}), + ) connection.close() - instance.query(''' + instance.query( + """ CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = MergeTree ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.rabbitmq_settings; - ''') + """ + ) time.sleep(5) - result = instance.query('SELECT count() FROM test.rabbitmq_settings', ignore_error=True) + result = instance.query( + "SELECT count() FROM test.rabbitmq_settings", ignore_error=True + ) while int(result) != 10: time.sleep(0.5) - result = instance.query('SELECT count() FROM test.view', ignore_error=True) + result = instance.query("SELECT count() FROM test.view", ignore_error=True) - instance.query('DROP TABLE test.rabbitmq_settings') + instance.query("DROP TABLE test.rabbitmq_settings") # queue size is 10, but 50 messages were sent, they will be dropped (setting x-overflow = reject-publish) and only 10 will remain. 
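# Editor's sketch (not from this test): the limits passed above through
# rabbitmq_queue_settings_list = 'x-max-length=10,x-overflow=reject-publish'
# correspond to the pika queue declaration below.  With reject-publish,
# RabbitMQ refuses messages beyond the tenth instead of evicting old ones,
# which is why only 10 of the 50 published rows are expected to survive.
# Connection details and the direct-to-queue publish are illustrative only.
import json

import pika

credentials = pika.PlainCredentials("root", "clickhouse")
parameters = pika.ConnectionParameters("localhost", 5672, "/", credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(
    queue="rabbit_queue_settings",
    durable=True,
    arguments={"x-max-length": 10, "x-overflow": "reject-publish"},
)
for i in range(50):
    channel.basic_publish(
        exchange="",
        routing_key="rabbit_queue_settings",
        body=json.dumps({"key": i, "value": i}),
    )
connection.close()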
- assert(int(result) == 10) + assert int(result) == 10 def test_rabbitmq_queue_consume(rabbitmq_cluster): - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() - channel.queue_declare(queue='rabbit_queue', durable=True) + channel.queue_declare(queue="rabbit_queue", durable=True) i = [0] messages_num = 1000 + def produce(): connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for _ in range(messages_num): - message = json.dumps({'key': i[0], 'value': i[0]}) - channel.basic_publish(exchange='', routing_key='rabbit_queue', body=message) + message = json.dumps({"key": i[0], "value": i[0]}) + channel.basic_publish(exchange="", routing_key="rabbit_queue", body=message) i[0] += 1 threads = [] @@ -2038,7 +2420,8 @@ def test_rabbitmq_queue_consume(rabbitmq_cluster): time.sleep(random.uniform(0, 1)) thread.start() - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq_queue (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -2049,11 +2432,12 @@ def test_rabbitmq_queue_consume(rabbitmq_cluster): ENGINE = MergeTree ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.rabbitmq_queue; - ''') + """ + ) - result = '' + result = "" while True: - result = instance.query('SELECT count() FROM test.view') + result = instance.query("SELECT count() FROM test.view") if int(result) == messages_num * threads_num: break time.sleep(1) @@ -2061,13 +2445,14 @@ def test_rabbitmq_queue_consume(rabbitmq_cluster): for thread in threads: thread.join() - instance.query('DROP TABLE test.rabbitmq_queue') + instance.query("DROP TABLE test.rabbitmq_queue") def test_rabbitmq_produce_consume_avro(rabbitmq_cluster): num_rows = 75 - instance.query(''' + instance.query( + """ DROP TABLE IF EXISTS test.view; DROP TABLE IF EXISTS test.rabbit; DROP TABLE IF EXISTS test.rabbit_writer; @@ -2090,38 +2475,51 @@ def test_rabbitmq_produce_consume_avro(rabbitmq_cluster): CREATE MATERIALIZED VIEW test.view Engine=Log AS SELECT key, value FROM test.rabbit; - ''') - - instance.query("INSERT INTO test.rabbit_writer select number*10 as key, number*100 as value from numbers({num_rows}) SETTINGS output_format_avro_rows_in_file = 7".format(num_rows=num_rows)) + """ + ) + instance.query( + "INSERT INTO test.rabbit_writer select number*10 as key, number*100 as value from numbers({num_rows}) SETTINGS output_format_avro_rows_in_file = 7".format( + num_rows=num_rows + ) + ) # Ideally we should wait for an event time.sleep(3) - expected_num_rows = instance.query("SELECT COUNT(1) FROM test.view", ignore_error=True) - assert (int(expected_num_rows) == num_rows) + expected_num_rows = instance.query( + "SELECT COUNT(1) FROM test.view", ignore_error=True + ) + assert int(expected_num_rows) == num_rows - expected_max_key = instance.query("SELECT max(key) FROM test.view", ignore_error=True) - assert (int(expected_max_key) == (num_rows - 1) * 10) + expected_max_key = instance.query( + "SELECT max(key) FROM test.view", ignore_error=True + ) + assert int(expected_max_key) == (num_rows - 1) * 10 def test_rabbitmq_bad_args(rabbitmq_cluster): - 
credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() - channel.exchange_declare(exchange='f', exchange_type='fanout') - instance.query_and_get_error(''' + channel.exchange_declare(exchange="f", exchange_type="fanout") + instance.query_and_get_error( + """ CREATE TABLE test.drop (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', rabbitmq_exchange_name = 'f', rabbitmq_format = 'JSONEachRow'; - ''') + """ + ) def test_rabbitmq_issue_30691(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq_drop (json String) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -2129,30 +2527,57 @@ def test_rabbitmq_issue_30691(rabbitmq_cluster): rabbitmq_row_delimiter = '\\n', -- Works only if adding this setting rabbitmq_format = 'LineAsString', rabbitmq_queue_base = '30691'; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() - channel.basic_publish(exchange='30691', routing_key='', body=json.dumps({"event_type": "purge", "as_src": 1234, "as_dst": 0, "as_path": "", - "local_pref": 100, "med": 0, "peer_as_dst": 0, - "ip_src": "", "ip_dst": "", - "port_src": 443, "port_dst": 41930, "ip_proto": "tcp", - "tos": 0, "stamp_inserted": "2021-10-26 15:20:00", - "stamp_updated": "2021-10-26 15:23:14", "packets": 2, "bytes": 1216, "writer_id": "default_amqp/449206"})) - result = '' + channel.basic_publish( + exchange="30691", + routing_key="", + body=json.dumps( + { + "event_type": "purge", + "as_src": 1234, + "as_dst": 0, + "as_path": "", + "local_pref": 100, + "med": 0, + "peer_as_dst": 0, + "ip_src": "", + "ip_dst": "", + "port_src": 443, + "port_dst": 41930, + "ip_proto": "tcp", + "tos": 0, + "stamp_inserted": "2021-10-26 15:20:00", + "stamp_updated": "2021-10-26 15:23:14", + "packets": 2, + "bytes": 1216, + "writer_id": "default_amqp/449206", + } + ), + ) + result = "" while True: - result = instance.query('SELECT * FROM test.rabbitmq_drop', ignore_error=True) + result = instance.query("SELECT * FROM test.rabbitmq_drop", ignore_error=True) print(result) if result != "": break - assert(result.strip() =="""{"event_type": "purge", "as_src": 1234, "as_dst": 0, "as_path": "", "local_pref": 100, "med": 0, "peer_as_dst": 0, "ip_src": "", "ip_dst": "", "port_src": 443, "port_dst": 41930, "ip_proto": "tcp", "tos": 0, "stamp_inserted": "2021-10-26 15:20:00", "stamp_updated": "2021-10-26 15:23:14", "packets": 2, "bytes": 1216, "writer_id": "default_amqp/449206"}""") + assert ( + result.strip() + == """{"event_type": "purge", "as_src": 1234, "as_dst": 0, "as_path": "", "local_pref": 100, "med": 0, "peer_as_dst": 0, "ip_src": "", "ip_dst": "", "port_src": 443, "port_dst": 41930, "ip_proto": "tcp", "tos": 0, "stamp_inserted": "2021-10-26 
15:20:00", "stamp_updated": "2021-10-26 15:23:14", "packets": 2, "bytes": 1216, "writer_id": "default_amqp/449206"}""" + ) def test_rabbitmq_drop_mv(rabbitmq_cluster): - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -2164,53 +2589,67 @@ def test_rabbitmq_drop_mv(rabbitmq_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.rabbitmq; - ''') + """ + ) - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() messages = [] for i in range(20): - channel.basic_publish(exchange='mv', routing_key='', body=json.dumps({'key': i, 'value': i})) + channel.basic_publish( + exchange="mv", routing_key="", body=json.dumps({"key": i, "value": i}) + ) - instance.query('DROP VIEW test.consumer') + instance.query("DROP VIEW test.consumer") for i in range(20, 40): - channel.basic_publish(exchange='mv', routing_key='', body=json.dumps({'key': i, 'value': i})) + channel.basic_publish( + exchange="mv", routing_key="", body=json.dumps({"key": i, "value": i}) + ) - instance.query(''' + instance.query( + """ CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.rabbitmq; - ''') + """ + ) for i in range(40, 50): - channel.basic_publish(exchange='mv', routing_key='', body=json.dumps({'key': i, 'value': i})) + channel.basic_publish( + exchange="mv", routing_key="", body=json.dumps({"key": i, "value": i}) + ) while True: - result = instance.query('SELECT * FROM test.view ORDER BY key') - if (rabbitmq_check_result(result)): + result = instance.query("SELECT * FROM test.view ORDER BY key") + if rabbitmq_check_result(result): break rabbitmq_check_result(result, True) - instance.query('DROP VIEW test.consumer') + instance.query("DROP VIEW test.consumer") for i in range(50, 60): - channel.basic_publish(exchange='mv', routing_key='', body=json.dumps({'key': i, 'value': i})) + channel.basic_publish( + exchange="mv", routing_key="", body=json.dumps({"key": i, "value": i}) + ) connection.close() count = 0 while True: - count = int(instance.query('SELECT count() FROM test.rabbitmq')) - if (count): + count = int(instance.query("SELECT count() FROM test.rabbitmq")) + if count: break - assert(count > 0) + assert count > 0 def test_rabbitmq_random_detach(rabbitmq_cluster): NUM_CONSUMERS = 2 NUM_QUEUES = 2 - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) ENGINE = RabbitMQ SETTINGS rabbitmq_host_port = 'rabbitmq1:5672', @@ -2224,13 +2663,16 @@ def test_rabbitmq_random_detach(rabbitmq_cluster): ORDER BY key; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT *, _channel_id AS channel_id FROM test.rabbitmq; - ''') + """ + ) i = [0] messages_num = 10000 - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) 
def produce(): connection = pika.BlockingConnection(parameters) @@ -2238,10 +2680,15 @@ def test_rabbitmq_random_detach(rabbitmq_cluster): messages = [] for i in range(messages_num): - messages.append(json.dumps({'key': i[0], 'value': i[0]})) + messages.append(json.dumps({"key": i[0], "value": i[0]})) i[0] += 1 mes_id = str(i) - channel.basic_publish(exchange='test_sharding', routing_key='', properties=pika.BasicProperties(message_id=mes_id), body=message) + channel.basic_publish( + exchange="test_sharding", + routing_key="", + properties=pika.BasicProperties(message_id=mes_id), + body=message, + ) connection.close() threads = [] @@ -2253,33 +2700,41 @@ def test_rabbitmq_random_detach(rabbitmq_cluster): time.sleep(random.uniform(0, 1)) thread.start() - #time.sleep(5) - #kill_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id) - #instance.query("detach table test.rabbitmq") - #revive_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id) + # time.sleep(5) + # kill_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id) + # instance.query("detach table test.rabbitmq") + # revive_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id) for thread in threads: thread.join() def test_rabbitmq_predefined_configuration(rabbitmq_cluster): - credentials = pika.PlainCredentials('root', 'clickhouse') - parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials) + credentials = pika.PlainCredentials("root", "clickhouse") + parameters = pika.ConnectionParameters( + rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, "/", credentials + ) connection = pika.BlockingConnection(parameters) channel = connection.channel() - instance.query(''' + instance.query( + """ CREATE TABLE test.rabbitmq (key UInt64, value UInt64) - ENGINE = RabbitMQ(rabbit1, rabbitmq_vhost = '/') ''') + ENGINE = RabbitMQ(rabbit1, rabbitmq_vhost = '/') """ + ) - channel.basic_publish(exchange='named', routing_key='', body=json.dumps({'key': 1, 'value': 2})) + channel.basic_publish( + exchange="named", routing_key="", body=json.dumps({"key": 1, "value": 2}) + ) while True: - result = instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True) + result = instance.query( + "SELECT * FROM test.rabbitmq ORDER BY key", ignore_error=True + ) if result == "1\t2\n": break -if __name__ == '__main__': +if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") cluster.shutdown() diff --git a/tests/integration/test_storage_s3/s3_mocks/echo.py b/tests/integration/test_storage_s3/s3_mocks/echo.py index ced84e54d62..5103d7ebc15 100644 --- a/tests/integration/test_storage_s3/s3_mocks/echo.py +++ b/tests/integration/test_storage_s3/s3_mocks/echo.py @@ -19,11 +19,10 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): self.send_header("Content-Type", "text/plain") self.end_headers() - def do_GET(self): self.do_HEAD() if self.path.startswith("/get-my-path/"): - self.wfile.write(b'/' + self.path.split('/', maxsplit=2)[2].encode()) + self.wfile.write(b"/" + self.path.split("/", maxsplit=2)[2].encode()) elif self.path == "/": self.wfile.write(b"OK") diff --git a/tests/integration/test_storage_s3/s3_mocks/mock_s3.py b/tests/integration/test_storage_s3/s3_mocks/mock_s3.py index 9009d345f49..870353ebaa8 100644 --- a/tests/integration/test_storage_s3/s3_mocks/mock_s3.py +++ b/tests/integration/test_storage_s3/s3_mocks/mock_s3.py @@ -3,26 +3,26 @@ import sys from bottle import abort, route, run, request, response -@route('/redirected/<_path:path>') 
+@route("/redirected/<_path:path>") def infinite_redirect(_path): response.set_header("Location", request.url) response.status = 307 - return 'Redirected' + return "Redirected" -@route('/<_bucket>/<_path:path>') +@route("/<_bucket>/<_path:path>") def server(_bucket, _path): for name in request.headers: - if name == 'Authorization' and request.headers[name] == 'Bearer TOKEN': - return '1, 2, 3' + if name == "Authorization" and request.headers[name] == "Bearer TOKEN": + return "1, 2, 3" response.status = 403 - response.content_type = 'text/xml' + response.content_type = "text/xml" return 'ForbiddenErrorForbidden Errortxfbd566d03042474888193-00608d7537' -@route('/') +@route("/") def ping(): - return 'OK' + return "OK" -run(host='0.0.0.0', port=int(sys.argv[1])) +run(host="0.0.0.0", port=int(sys.argv[1])) diff --git a/tests/integration/test_storage_s3/s3_mocks/unstable_server.py b/tests/integration/test_storage_s3/s3_mocks/unstable_server.py index ca2d6103cf6..103dd30340c 100644 --- a/tests/integration/test_storage_s3/s3_mocks/unstable_server.py +++ b/tests/integration/test_storage_s3/s3_mocks/unstable_server.py @@ -8,7 +8,7 @@ import sys def gen_n_digit_number(n): assert 0 < n < 19 - return random.randint(10**(n-1), 10**n-1) + return random.randint(10 ** (n - 1), 10**n - 1) sum_in_4_column = 0 @@ -19,6 +19,7 @@ def gen_line(): columns = 4 row = [] + def add_number(): digits = random.randint(1, 18) row.append(gen_n_digit_number(digits)) @@ -37,7 +38,10 @@ def gen_line(): random.seed("Unstable server/1.0") # Generating some "random" data and append a line which contains sum of numbers in column 4. -lines = b"".join((gen_line() for _ in range(500000))) + f"0,0,0,{-sum_in_4_column}\n".encode() +lines = ( + b"".join((gen_line() for _ in range(500000))) + + f"0,0,0,{-sum_in_4_column}\n".encode() +) class RequestHandler(http.server.BaseHTTPRequestHandler): @@ -47,7 +51,9 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): self.end_bytes = len(lines) self.size = self.end_bytes self.send_block_size = 256 - self.stop_at = random.randint(900000, 1300000) // self.send_block_size # Block size is 1024**2. + self.stop_at = ( + random.randint(900000, 1300000) // self.send_block_size + ) # Block size is 1024**2. 
if "Range" in self.headers: cr = self.headers["Range"] @@ -55,9 +61,12 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): assert parts[0] == "bytes" self.from_bytes = int(parts[1]) if parts[2]: - self.end_bytes = int(parts[2])+1 + self.end_bytes = int(parts[2]) + 1 self.send_response(206) - self.send_header("Content-Range", f"bytes {self.from_bytes}-{self.end_bytes-1}/{self.size}") + self.send_header( + "Content-Range", + f"bytes {self.from_bytes}-{self.end_bytes-1}/{self.size}", + ) else: self.send_response(200) @@ -76,17 +85,20 @@ class RequestHandler(http.server.BaseHTTPRequestHandler): self.send_header("Content-Type", "text/plain") self.end_headers() - def do_GET(self): self.do_HEAD() if self.path == "/root/test.csv": - for c, i in enumerate(range(self.from_bytes, self.end_bytes, self.send_block_size)): - self.wfile.write(lines[i:min(i+self.send_block_size, self.end_bytes)]) + for c, i in enumerate( + range(self.from_bytes, self.end_bytes, self.send_block_size) + ): + self.wfile.write( + lines[i : min(i + self.send_block_size, self.end_bytes)] + ) if (c + 1) % self.stop_at == 0: - #self.wfile._sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack("ii", 0, 0)) - #self.wfile._sock.shutdown(socket.SHUT_RDWR) - #self.wfile._sock.close() - print('Dropping connection') + # self.wfile._sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack("ii", 0, 0)) + # self.wfile._sock.shutdown(socket.SHUT_RDWR) + # self.wfile._sock.close() + print("Dropping connection") break elif self.path == "/": diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index fa183a365b1..dd29d0a5d6a 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -17,48 +17,56 @@ MINIO_INTERNAL_PORT = 9001 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/dummy/configs/config.d/defaultS3.xml'.format(get_instances_dir())) +CONFIG_PATH = os.path.join( + SCRIPT_DIR, "./{}/dummy/configs/config.d/defaultS3.xml".format(get_instances_dir()) +) # Creates S3 bucket for tests and allows anonymous read-write access to it. def prepare_s3_bucket(started_cluster): # Allows read-write access for bucket without authorization. 
- bucket_read_write_policy = {"Version": "2012-10-17", - "Statement": [ - { - "Sid": "", - "Effect": "Allow", - "Principal": {"AWS": "*"}, - "Action": "s3:GetBucketLocation", - "Resource": "arn:aws:s3:::root" - }, - { - "Sid": "", - "Effect": "Allow", - "Principal": {"AWS": "*"}, - "Action": "s3:ListBucket", - "Resource": "arn:aws:s3:::root" - }, - { - "Sid": "", - "Effect": "Allow", - "Principal": {"AWS": "*"}, - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::root/*" - }, - { - "Sid": "", - "Effect": "Allow", - "Principal": {"AWS": "*"}, - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::root/*" - } - ]} + bucket_read_write_policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": {"AWS": "*"}, + "Action": "s3:GetBucketLocation", + "Resource": "arn:aws:s3:::root", + }, + { + "Sid": "", + "Effect": "Allow", + "Principal": {"AWS": "*"}, + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::root", + }, + { + "Sid": "", + "Effect": "Allow", + "Principal": {"AWS": "*"}, + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::root/*", + }, + { + "Sid": "", + "Effect": "Allow", + "Principal": {"AWS": "*"}, + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::root/*", + }, + ], + } minio_client = started_cluster.minio_client - minio_client.set_bucket_policy(started_cluster.minio_bucket, json.dumps(bucket_read_write_policy)) + minio_client.set_bucket_policy( + started_cluster.minio_bucket, json.dumps(bucket_read_write_policy) + ) - started_cluster.minio_restricted_bucket = "{}-with-auth".format(started_cluster.minio_bucket) + started_cluster.minio_restricted_bucket = "{}-with-auth".format( + started_cluster.minio_bucket + ) if minio_client.bucket_exists(started_cluster.minio_restricted_bucket): minio_client.remove_bucket(started_cluster.minio_restricted_bucket) @@ -87,11 +95,22 @@ def get_s3_file_content(started_cluster, bucket, filename, decode=True): def started_cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"], - with_minio=True) - cluster.add_instance("dummy", with_minio=True, main_configs=["configs/defaultS3.xml", "configs/named_collections.xml"]) - cluster.add_instance("s3_max_redirects", with_minio=True, main_configs=["configs/defaultS3.xml"], - user_configs=["configs/s3_max_redirects.xml"]) + cluster.add_instance( + "restricted_dummy", + main_configs=["configs/config_for_test_remote_host_filter.xml"], + with_minio=True, + ) + cluster.add_instance( + "dummy", + with_minio=True, + main_configs=["configs/defaultS3.xml", "configs/named_collections.xml"], + ) + cluster.add_instance( + "s3_max_redirects", + with_minio=True, + main_configs=["configs/defaultS3.xml"], + user_configs=["configs/s3_max_redirects.xml"], + ) logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -116,20 +135,27 @@ def run_query(instance, query, stdin=None, settings=None): # Test simple put. Also checks that wrong credentials produce an error with every compression method. 
-@pytest.mark.parametrize("maybe_auth,positive,compression", [ - pytest.param("", True, 'auto', id="positive"), - pytest.param("'minio','minio123',", True, 'auto', id="auth_positive"), - pytest.param("'wrongid','wrongkey',", False, 'auto', id="auto"), - pytest.param("'wrongid','wrongkey',", False, 'gzip', id="gzip"), - pytest.param("'wrongid','wrongkey',", False, 'deflate', id="deflate"), - pytest.param("'wrongid','wrongkey',", False, 'brotli', id="brotli"), - pytest.param("'wrongid','wrongkey',", False, 'xz', id="xz"), - pytest.param("'wrongid','wrongkey',", False, 'zstd', id="zstd") -]) +@pytest.mark.parametrize( + "maybe_auth,positive,compression", + [ + pytest.param("", True, "auto", id="positive"), + pytest.param("'minio','minio123',", True, "auto", id="auth_positive"), + pytest.param("'wrongid','wrongkey',", False, "auto", id="auto"), + pytest.param("'wrongid','wrongkey',", False, "gzip", id="gzip"), + pytest.param("'wrongid','wrongkey',", False, "deflate", id="deflate"), + pytest.param("'wrongid','wrongkey',", False, "brotli", id="brotli"), + pytest.param("'wrongid','wrongkey',", False, "xz", id="xz"), + pytest.param("'wrongid','wrongkey',", False, "zstd", id="zstd"), + ], +) def test_put(started_cluster, maybe_auth, positive, compression): # type: (ClickHouseCluster) -> None - bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket + bucket = ( + started_cluster.minio_bucket + if not maybe_auth + else started_cluster.minio_restricted_bucket + ) instance = started_cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)" @@ -166,7 +192,9 @@ def test_partition_by(started_cluster): assert "78,43,45\n" == get_s3_file_content(started_cluster, bucket, "test_45.csv") filename = "test2_{_partition_id}.csv" - instance.query(f"create table p ({table_format}) engine=S3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV') partition by column3") + instance.query( + f"create table p ({table_format}) engine=S3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV') partition by column3" + ) instance.query(f"insert into p values {values}") assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, "test2_3.csv") assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, "test2_1.csv") @@ -186,7 +214,9 @@ def test_partition_by_string_column(started_cluster): run_query(instance, put_query) - assert '1,"foo/bar"\n' == get_s3_file_content(started_cluster, bucket, "test_foo/bar.csv") + assert '1,"foo/bar"\n' == get_s3_file_content( + started_cluster, bucket, "test_foo/bar.csv" + ) assert '3,"йцук"\n' == get_s3_file_content(started_cluster, bucket, "test_йцук.csv") assert '78,"你好"\n' == get_s3_file_content(started_cluster, bucket, "test_你好.csv") @@ -208,10 +238,7 @@ def test_partition_by_const_column(started_cluster): assert values_csv == get_s3_file_content(started_cluster, bucket, "test_88.csv") -@pytest.mark.parametrize("special", [ - "space", - "plus" -]) +@pytest.mark.parametrize("special", ["space", "plus"]) def test_get_file_with_special(started_cluster, special): symbol = {"space": " ", "plus": "+"}[special] urlsafe_symbol = {"space": "%20", "plus": "%2B"}[special] @@ -219,26 +246,41 @@ def test_get_file_with_special(started_cluster, special): bucket = started_cluster.minio_restricted_bucket instance = started_cluster.instances["dummy"] table_format = 
"column1 UInt32, column2 UInt32, column3 UInt32" - values = [[12549, 2463, 19893], [64021, 38652, 66703], [81611, 39650, 83516], [11079, 59507, 61546], [51764, 69952, 6876], [41165, 90293, 29095], [40167, 78432, 48309], [81629, 81327, 11855], [55852, 21643, 98507], [6738, 54643, 41155]] - values_csv = ('\n'.join((','.join(map(str, row)) for row in values)) + '\n').encode() + values = [ + [12549, 2463, 19893], + [64021, 38652, 66703], + [81611, 39650, 83516], + [11079, 59507, 61546], + [51764, 69952, 6876], + [41165, 90293, 29095], + [40167, 78432, 48309], + [81629, 81327, 11855], + [55852, 21643, 98507], + [6738, 54643, 41155], + ] + values_csv = ( + "\n".join((",".join(map(str, row)) for row in values)) + "\n" + ).encode() filename = f"get_file_with_{special}_{symbol}two.csv" put_s3_file_content(started_cluster, bucket, filename, values_csv) get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}two.csv', {auth}'CSV', '{table_format}') FORMAT TSV" - assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values + assert [ + list(map(int, l.split())) for l in run_query(instance, get_query).splitlines() + ] == values get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV" - assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values + assert [ + list(map(int, l.split())) for l in run_query(instance, get_query).splitlines() + ] == values get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV" - assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values + assert [ + list(map(int, l.split())) for l in run_query(instance, get_query).splitlines() + ] == values -@pytest.mark.parametrize("special", [ - "space", - "plus", - "plus2" -]) +@pytest.mark.parametrize("special", ["space", "plus", "plus2"]) def test_get_path_with_special(started_cluster, special): symbol = {"space": "%20", "plus": "%2B", "plus2": "%2B"}[special] safe_symbol = {"space": "%20", "plus": "+", "plus2": "%2B"}[special] @@ -250,9 +292,7 @@ def test_get_path_with_special(started_cluster, special): # Test put no data to S3. 
-@pytest.mark.parametrize("auth", [ - pytest.param("'minio','minio123',", id="minio") -]) +@pytest.mark.parametrize("auth", [pytest.param("'minio','minio123',", id="minio")]) def test_empty_put(started_cluster, auth): # type: (ClickHouseCluster, str) -> None @@ -265,20 +305,37 @@ def test_empty_put(started_cluster, auth): CREATE TABLE empty_table ( {} ) ENGINE = Null() - """.format(table_format) + """.format( + table_format + ) run_query(instance, drop_empty_table_query) run_query(instance, create_empty_table_query) filename = "empty_put_test.csv" put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') select * from empty_table".format( - started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format) + started_cluster.minio_ip, + MINIO_INTERNAL_PORT, + bucket, + filename, + auth, + table_format, + ) run_query(instance, put_query) try: - run_query(instance, "select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format( - started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format)) + run_query( + instance, + "select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format( + started_cluster.minio_ip, + MINIO_INTERNAL_PORT, + bucket, + filename, + auth, + table_format, + ), + ) assert False, "Query should be failed." except helpers.client.QueryRuntimeException as e: @@ -286,20 +343,33 @@ def test_empty_put(started_cluster, auth): # Test put values in CSV format. -@pytest.mark.parametrize("maybe_auth,positive", [ - pytest.param("", True, id="positive"), - pytest.param("'minio','minio123',", True, id="auth_positive"), - pytest.param("'wrongid','wrongkey',", False, id="negative"), -]) +@pytest.mark.parametrize( + "maybe_auth,positive", + [ + pytest.param("", True, id="positive"), + pytest.param("'minio','minio123',", True, id="auth_positive"), + pytest.param("'wrongid','wrongkey',", False, id="negative"), + ], +) def test_put_csv(started_cluster, maybe_auth, positive): # type: (ClickHouseCluster, bool, str) -> None - bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket + bucket = ( + started_cluster.minio_bucket + if not maybe_auth + else started_cluster.minio_restricted_bucket + ) instance = started_cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" filename = "test.csv" put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV settings s3_truncate_on_insert=1".format( - started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, maybe_auth, table_format) + started_cluster.minio_ip, + MINIO_INTERNAL_PORT, + bucket, + filename, + maybe_auth, + table_format, + ) csv_data = "8,9,16\n11,18,13\n22,14,2\n" try: @@ -323,13 +393,24 @@ def test_put_get_with_redirect(started_cluster): values_csv = "1,1,1\n1,1,1\n11,11,11\n" filename = "test.csv" query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format( - started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values) + started_cluster.minio_redirect_host, + started_cluster.minio_redirect_port, + bucket, + filename, + table_format, + values, + ) run_query(instance, query) assert values_csv == get_s3_file_content(started_cluster, bucket, filename) query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format( - started_cluster.minio_redirect_host, 
started_cluster.minio_redirect_port, bucket, filename, table_format) + started_cluster.minio_redirect_host, + started_cluster.minio_redirect_port, + bucket, + filename, + table_format, + ) stdout = run_query(instance, query) assert list(map(str.split, stdout.splitlines())) == [ @@ -351,12 +432,24 @@ def test_put_with_zero_redirect(started_cluster): # Should work without redirect query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format( - started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, table_format, values) + started_cluster.minio_ip, + MINIO_INTERNAL_PORT, + bucket, + filename, + table_format, + values, + ) run_query(instance, query) # Should not work with redirect query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format( - started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values) + started_cluster.minio_redirect_host, + started_cluster.minio_redirect_port, + bucket, + filename, + table_format, + values, + ) exception_raised = False try: run_query(instance, query) @@ -369,40 +462,69 @@ def test_put_with_zero_redirect(started_cluster): def test_put_get_with_globs(started_cluster): # type: (ClickHouseCluster) -> None - unique_prefix = random.randint(1,10000) + unique_prefix = random.randint(1, 10000) bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" max_path = "" for i in range(10): for j in range(10): - path = "{}/{}_{}/{}.csv".format(unique_prefix, i, random.choice(['a', 'b', 'c', 'd']), j) + path = "{}/{}_{}/{}.csv".format( + unique_prefix, i, random.choice(["a", "b", "c", "d"]), j + ) max_path = max(path, max_path) values = "({},{},{})".format(i, j, i + j) query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format( - started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values) + started_cluster.minio_ip, + MINIO_INTERNAL_PORT, + bucket, + path, + table_format, + values, + ) run_query(instance, query) query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format( - started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, unique_prefix, table_format) + started_cluster.minio_redirect_host, + started_cluster.minio_redirect_port, + bucket, + unique_prefix, + table_format, + ) assert run_query(instance, query).splitlines() == [ - "450\t450\t900\t0.csv\t{bucket}/{max_path}".format(bucket=bucket, max_path=max_path)] + "450\t450\t900\t0.csv\t{bucket}/{max_path}".format( + bucket=bucket, max_path=max_path + ) + ] minio = started_cluster.minio_client - for obj in list(minio.list_objects(started_cluster.minio_bucket, prefix='{}/'.format(unique_prefix), recursive=True)): + for obj in list( + minio.list_objects( + started_cluster.minio_bucket, + prefix="{}/".format(unique_prefix), + recursive=True, + ) + ): minio.remove_object(started_cluster.minio_bucket, obj.object_name) # Test multipart put. -@pytest.mark.parametrize("maybe_auth,positive", [ - pytest.param("", True, id="positive"), - pytest.param("'wrongid','wrongkey'", False, id="negative"), - # ("'minio','minio123',",True), Redirect with credentials not working with nginx. 
-]) +@pytest.mark.parametrize( + "maybe_auth,positive", + [ + pytest.param("", True, id="positive"), + pytest.param("'wrongid','wrongkey'", False, id="negative"), + # ("'minio','minio123',",True), Redirect with credentials not working with nginx. + ], +) def test_multipart_put(started_cluster, maybe_auth, positive): # type: (ClickHouseCluster) -> None - bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket + bucket = ( + started_cluster.minio_bucket + if not maybe_auth + else started_cluster.minio_restricted_bucket + ) instance = started_cluster.instances["dummy"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" @@ -421,11 +543,24 @@ def test_multipart_put(started_cluster, maybe_auth, positive): filename = "test_multipart.csv" put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format( - started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format) + started_cluster.minio_redirect_host, + started_cluster.minio_redirect_port, + bucket, + filename, + maybe_auth, + table_format, + ) try: - run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes, - 's3_max_single_part_upload_size': 0}) + run_query( + instance, + put_query, + stdin=csv_data, + settings={ + "s3_min_upload_part_size": min_part_size_bytes, + "s3_max_single_part_upload_size": 0, + }, + ) except helpers.client.QueryRuntimeException: if positive: raise @@ -444,12 +579,18 @@ def test_remote_host_filter(started_cluster): format = "column1 UInt32, column2 UInt32, column3 UInt32" query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format( - "invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format) + "invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format + ) assert "not allowed in configuration file" in instance.query_and_get_error(query) other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)" query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format( - "invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format, other_values) + "invalid_host", + MINIO_INTERNAL_PORT, + started_cluster.minio_bucket, + format, + other_values, + ) assert "not allowed in configuration file" in instance.query_and_get_error(query) @@ -476,25 +617,39 @@ def test_s3_glob_scheherazade(started_cluster): nights_per_job = 1001 // 30 jobs = [] for night in range(0, 1001, nights_per_job): + def add_tales(start, end): for i in range(start, end): path = "night_{}/tale.csv".format(i) query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format( - started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values) + started_cluster.minio_ip, + MINIO_INTERNAL_PORT, + bucket, + path, + table_format, + values, + ) run_query(instance, query) - jobs.append(threading.Thread(target=add_tales, args=(night, min(night + nights_per_job, 1001)))) + jobs.append( + threading.Thread( + target=add_tales, args=(night, min(night + nights_per_job, 1001)) + ) + ) jobs[-1].start() for job in jobs: job.join() query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format( - started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, table_format) + started_cluster.minio_redirect_host, + 
started_cluster.minio_redirect_port, + bucket, + table_format, + ) assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"] - def run_s3_mocks(started_cluster): logging.info("Starting s3 mocks") mocks = ( @@ -505,33 +660,46 @@ def run_s3_mocks(started_cluster): for mock_filename, container, port in mocks: container_id = started_cluster.get_container_id(container) current_dir = os.path.dirname(__file__) - started_cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_mocks", mock_filename), mock_filename) - started_cluster.exec_in_container(container_id, ["python", mock_filename, port], detach=True) + started_cluster.copy_file_to_container( + container_id, + os.path.join(current_dir, "s3_mocks", mock_filename), + mock_filename, + ) + started_cluster.exec_in_container( + container_id, ["python", mock_filename, port], detach=True + ) # Wait for S3 mocks to start for mock_filename, container, port in mocks: num_attempts = 100 for attempt in range(num_attempts): - ping_response = started_cluster.exec_in_container(started_cluster.get_container_id(container), - ["curl", "-s", f"http://localhost:{port}/"], nothrow=True) - if ping_response != 'OK': + ping_response = started_cluster.exec_in_container( + started_cluster.get_container_id(container), + ["curl", "-s", f"http://localhost:{port}/"], + nothrow=True, + ) + if ping_response != "OK": if attempt == num_attempts - 1: - assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response) + assert ping_response == "OK", 'Expected "OK", but got "{}"'.format( + ping_response + ) else: time.sleep(1) else: - logging.debug(f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}") + logging.debug( + f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}" + ) break logging.info("S3 mocks started") def replace_config(old, new): - config = open(CONFIG_PATH, 'r') + config = open(CONFIG_PATH, "r") config_lines = config.readlines() config.close() config_lines = [line.replace(old, new) for line in config_lines] - config = open(CONFIG_PATH, 'w') + config = open(CONFIG_PATH, "w") config.writelines(config_lines) config.close() @@ -542,28 +710,36 @@ def test_custom_auth_headers(started_cluster): get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format( bucket=started_cluster.minio_restricted_bucket, file=filename, - table_format=table_format) + table_format=table_format, + ) instance = started_cluster.instances["dummy"] # type: ClickHouseInstance result = run_query(instance, get_query) - assert result == '1\t2\t3\n' + assert result == "1\t2\t3\n" instance.query("DROP TABLE IF EXISTS test") instance.query( "CREATE TABLE test ({table_format}) ENGINE = S3('http://resolver:8080/{bucket}/{file}', 'CSV')".format( bucket=started_cluster.minio_restricted_bucket, file=filename, - table_format=table_format - )) - assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n' + table_format=table_format, + ) + ) + assert run_query(instance, "SELECT * FROM test") == "1\t2\t3\n" - replace_config("
Authorization: Bearer TOKEN", "Authorization: Bearer INVALID_TOKEN")
+    replace_config(
+        "Authorization: Bearer TOKEN",
+        "Authorization: Bearer INVALID_TOKEN",
+    )
     instance.query("SYSTEM RELOAD CONFIG")
     ret, err = instance.query_and_get_answer_with_error("SELECT * FROM test")
     assert ret == "" and err != ""
-    replace_config("Authorization: Bearer INVALID_TOKEN", "Authorization: Bearer TOKEN")
+    replace_config(
+        "Authorization: Bearer INVALID_TOKEN",
+        "
Authorization: Bearer TOKEN", + ) instance.query("SYSTEM RELOAD CONFIG") - assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n' + assert run_query(instance, "SELECT * FROM test") == "1\t2\t3\n" instance.query("DROP TABLE test") @@ -578,7 +754,7 @@ def test_custom_auth_headers_exclusion(started_cluster): print(result) assert ei.value.returncode == 243 - assert 'Forbidden Error' in ei.value.stderr + assert "Forbidden Error" in ei.value.stderr def test_infinite_redirect(started_cluster): @@ -595,10 +771,15 @@ def test_infinite_redirect(started_cluster): exception_raised = True finally: assert exception_raised -@pytest.mark.parametrize("extension,method", [ - pytest.param("bin", "gzip", id="bin"), - pytest.param("gz", "auto", id="gz"), -]) + + +@pytest.mark.parametrize( + "extension,method", + [ + pytest.param("bin", "gzip", id="bin"), + pytest.param("gz", "auto", id="gz"), + ], +) def test_storage_s3_get_gzip(started_cluster, extension, method): bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] @@ -620,7 +801,7 @@ def test_storage_s3_get_gzip(started_cluster, extension, method): "Jerry Gonzalez,15", "Angela James,10", "Norman Ortega,33", - "" + "", ] run_query(instance, f"DROP TABLE IF EXISTS {name}") @@ -630,10 +811,13 @@ def test_storage_s3_get_gzip(started_cluster, extension, method): compressed.close() put_s3_file_content(started_cluster, bucket, filename, buf.getvalue()) - run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3( + run_query( + instance, + f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3( 'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}', 'CSV', - '{method}')""") + '{method}')""", + ) run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["565"] run_query(instance, f"DROP TABLE {name}") @@ -670,21 +854,25 @@ def test_storage_s3_put_uncompressed(started_cluster): "'Kathie Dawson',100", "'Gregg Mcquistion',11", ] - run_query(instance, "CREATE TABLE {} (name String, id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format( - name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename)) + run_query( + instance, + "CREATE TABLE {} (name String, id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format( + name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename + ), + ) run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data))) run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"] uncompressed_content = get_s3_file_content(started_cluster, bucket, filename) - assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 753 + assert sum([int(i.split(",")[1]) for i in uncompressed_content.splitlines()]) == 753 -@pytest.mark.parametrize("extension,method", [ - pytest.param("bin", "gzip", id="bin"), - pytest.param("gz", "auto", id="gz") -]) +@pytest.mark.parametrize( + "extension,method", + [pytest.param("bin", "gzip", id="bin"), pytest.param("gz", "auto", id="gz")], +) def test_storage_s3_put_gzip(started_cluster, extension, method): bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] @@ -705,21 +893,26 @@ def test_storage_s3_put_gzip(started_cluster, extension, method): "'Myrtle Pelt',93", "'Sylvia Naffziger',18", "'Amanda Cave',83", - "'Yolanda Joseph',89" + "'Yolanda Joseph',89", ] - run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3( + run_query( + instance, + f"""CREATE TABLE {name} (name 
String, id UInt32) ENGINE = S3( 'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}', 'CSV', - '{method}')""") + '{method}')""", + ) run_query(instance, f"INSERT INTO {name} VALUES ({'),('.join(data)})") run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["708"] - buf = io.BytesIO(get_s3_file_content(started_cluster, bucket, filename, decode=False)) + buf = io.BytesIO( + get_s3_file_content(started_cluster, bucket, filename, decode=False) + ) f = gzip.GzipFile(fileobj=buf, mode="rb") uncompressed_content = f.read().decode() - assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 708 + assert sum([int(i.split(",")[1]) for i in uncompressed_content.splitlines()]) == 708 def test_truncate_table(started_cluster): @@ -727,8 +920,11 @@ def test_truncate_table(started_cluster): instance = started_cluster.instances["dummy"] # type: ClickHouseInstance name = "truncate" - instance.query("CREATE TABLE {} (id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format( - name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, name)) + instance.query( + "CREATE TABLE {} (id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format( + name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, name + ) + ) instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name)) result = instance.query("SELECT * FROM {}".format(name)) @@ -738,11 +934,14 @@ def test_truncate_table(started_cluster): minio = started_cluster.minio_client timeout = 30 while timeout > 0: - if len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0: + if ( + len(list(minio.list_objects(started_cluster.minio_bucket, "truncate/"))) + == 0 + ): return timeout -= 1 time.sleep(1) - assert(len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0) + assert len(list(minio.list_objects(started_cluster.minio_bucket, "truncate/"))) == 0 assert instance.query("SELECT * FROM {}".format(name)) == "" @@ -752,30 +951,47 @@ def test_predefined_connection_configuration(started_cluster): name = "test_table" instance.query("drop table if exists {}".format(name)) - instance.query("CREATE TABLE {} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')".format(name)) + instance.query( + "CREATE TABLE {} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')".format(name) + ) instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name)) result = instance.query("SELECT * FROM {}".format(name)) assert result == instance.query("SELECT number FROM numbers(10)") - result = instance.query("SELECT * FROM s3(s3_conf1, format='CSV', structure='id UInt32')") + result = instance.query( + "SELECT * FROM s3(s3_conf1, format='CSV', structure='id UInt32')" + ) assert result == instance.query("SELECT number FROM numbers(10)") result = "" + + def test_url_reconnect_in_the_middle(started_cluster): bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] table_format = "id String, data String" filename = "test_url_reconnect_{}.tsv".format(random.randint(0, 1000)) - instance.query(f"""insert into table function + instance.query( + f"""insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'TSV', '{table_format}') - select number, randomPrintableASCII(number % 1000) from numbers(1000000)""") + select number, randomPrintableASCII(number % 1000) from numbers(1000000)""" + ) with PartitionManager() as pm: - pm_rule_reject = {'probability': 0.02, 'destination': 
instance.ip_address, 'source_port': started_cluster.minio_port, 'action': 'REJECT --reject-with tcp-reset'} - pm_rule_drop_all = {'destination': instance.ip_address, 'source_port': started_cluster.minio_port, 'action': 'DROP'} + pm_rule_reject = { + "probability": 0.02, + "destination": instance.ip_address, + "source_port": started_cluster.minio_port, + "action": "REJECT --reject-with tcp-reset", + } + pm_rule_drop_all = { + "destination": instance.ip_address, + "source_port": started_cluster.minio_port, + "action": "DROP", + } pm._add_rule(pm_rule_reject) def select(): @@ -783,8 +999,9 @@ def test_url_reconnect_in_the_middle(started_cluster): result = instance.query( f"""select sum(cityHash64(x)) from (select toUInt64(id) + sleep(0.1) as x from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'TSV', '{table_format}') - settings http_max_tries = 10, http_retry_max_backoff_ms=2000, http_send_timeout=1, http_receive_timeout=1)""") - assert(int(result) == 3914219105369203805) + settings http_max_tries = 10, http_retry_max_backoff_ms=2000, http_send_timeout=1, http_receive_timeout=1)""" + ) + assert int(result) == 3914219105369203805 thread = threading.Thread(target=select) thread.start() @@ -797,7 +1014,7 @@ def test_url_reconnect_in_the_middle(started_cluster): thread.join() - assert(int(result) == 3914219105369203805) + assert int(result) == 3914219105369203805 def test_seekable_formats(started_cluster): @@ -805,22 +1022,29 @@ def test_seekable_formats(started_cluster): instance = started_cluster.instances["dummy"] # type: ClickHouseInstance table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')" - instance.query(f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) settings s3_truncate_on_insert=1") + instance.query( + f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) settings s3_truncate_on_insert=1" + ) result = instance.query(f"SELECT count() FROM {table_function}") - assert(int(result) == 5000000) + assert int(result) == 5000000 table_function = f"s3(s3_orc, structure='a Int32, b String', format='ORC')" - exec_query_with_retry(instance, f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) settings s3_truncate_on_insert=1") + exec_query_with_retry( + instance, + f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) settings s3_truncate_on_insert=1", + ) result = instance.query(f"SELECT count() FROM {table_function}") - assert(int(result) == 5000000) + assert int(result) == 5000000 instance.query("SYSTEM FLUSH LOGS") - result = instance.query(f"SELECT formatReadableSize(memory_usage) FROM system.query_log WHERE startsWith(query, 'SELECT count() FROM s3') AND memory_usage > 0 ORDER BY event_time desc") + result = instance.query( + f"SELECT formatReadableSize(memory_usage) FROM system.query_log WHERE startsWith(query, 'SELECT count() FROM s3') AND memory_usage > 0 ORDER BY event_time desc" + ) - result = result[:result.index('.')] - assert(int(result) < 200) + result = result[: result.index(".")] + assert int(result) < 200 def test_seekable_formats_url(started_cluster): @@ -828,24 +1052,31 @@ def test_seekable_formats_url(started_cluster): instance = started_cluster.instances["dummy"] table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')" - instance.query(f"insert into table function {table_function} 
select number, randomString(100) from numbers(5000000) settings s3_truncate_on_insert=1") + instance.query( + f"insert into table function {table_function} select number, randomString(100) from numbers(5000000) settings s3_truncate_on_insert=1" + ) table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_parquet', 'Parquet', 'a Int32, b String')" result = instance.query(f"SELECT count() FROM {table_function}") - assert(int(result) == 5000000) + assert int(result) == 5000000 table_function = f"s3(s3_orc, structure='a Int32, b String', format='ORC')" - exec_query_with_retry(instance, f"insert into table function {table_function} select number, randomString(100) from numbers(5000000) settings s3_truncate_on_insert=1") + exec_query_with_retry( + instance, + f"insert into table function {table_function} select number, randomString(100) from numbers(5000000) settings s3_truncate_on_insert=1", + ) table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_orc', 'ORC', 'a Int32, b String')" result = instance.query(f"SELECT count() FROM {table_function}") - assert(int(result) == 5000000) + assert int(result) == 5000000 instance.query("SYSTEM FLUSH LOGS") - result = instance.query(f"SELECT formatReadableSize(memory_usage) FROM system.query_log WHERE startsWith(query, 'SELECT count() FROM url') AND memory_usage > 0 ORDER BY event_time desc") + result = instance.query( + f"SELECT formatReadableSize(memory_usage) FROM system.query_log WHERE startsWith(query, 'SELECT count() FROM url') AND memory_usage > 0 ORDER BY event_time desc" + ) - result = result[:result.index('.')] - assert(int(result) < 200) + result = result[: result.index(".")] + assert int(result) < 200 def test_empty_file(started_cluster): @@ -853,62 +1084,69 @@ def test_empty_file(started_cluster): instance = started_cluster.instances["dummy"] name = "empty" - url = f'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{name}' + url = f"http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{name}" minio = started_cluster.minio_client minio.put_object(bucket, name, io.BytesIO(b""), 0) table_function = f"s3('{url}', 'CSV', 'id Int32')" result = instance.query(f"SELECT count() FROM {table_function}") - assert(int(result) == 0) + assert int(result) == 0 def test_insert_with_path_with_globs(started_cluster): instance = started_cluster.instances["dummy"] table_function_3 = f"s3('http://minio1:9001/root/test_parquet*', 'minio', 'minio123', 'Parquet', 'a Int32, b String')" - instance.query_and_get_error(f"insert into table function {table_function_3} SELECT number, randomString(100) FROM numbers(500)") + instance.query_and_get_error( + f"insert into table function {table_function_3} SELECT number, randomString(100) FROM numbers(500)" + ) def test_s3_schema_inference(started_cluster): bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] - instance.query(f"insert into table function s3(s3_native, structure='a Int32, b String', format='Native') select number, randomString(100) from numbers(5000000)") + instance.query( + f"insert into table function s3(s3_native, structure='a Int32, b String', format='Native') select number, randomString(100) from numbers(5000000)" + ) result = instance.query(f"desc s3(s3_native, format='Native')") assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n" result = instance.query(f"select count(*) from s3(s3_native, format='Native')") - assert(int(result) == 5000000) + 
assert int(result) == 5000000 - instance.query(f"create table schema_inference engine=S3(s3_native, format='Native')") + instance.query( + f"create table schema_inference engine=S3(s3_native, format='Native')" + ) result = instance.query(f"desc schema_inference") assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n" result = instance.query(f"select count(*) from schema_inference") - assert(int(result) == 5000000) - + assert int(result) == 5000000 table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')" result = instance.query(f"desc {table_function}") assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n" result = instance.query(f"select count(*) from {table_function}") - assert(int(result) == 5000000) + assert int(result) == 5000000 - instance.query(f"create table schema_inference_2 engine=URL('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')") + instance.query( + f"create table schema_inference_2 engine=URL('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')" + ) result = instance.query(f"desc schema_inference_2") assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n" result = instance.query(f"select count(*) from schema_inference_2") - assert(int(result) == 5000000) + assert int(result) == 5000000 table_function = f"s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')" result = instance.query(f"desc {table_function}") assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n" result = instance.query(f"select count(*) from {table_function}") - assert(int(result) == 5000000) + assert int(result) == 5000000 def test_empty_file(started_cluster): @@ -916,14 +1154,14 @@ def test_empty_file(started_cluster): instance = started_cluster.instances["dummy"] name = "empty" - url = f'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{name}' + url = f"http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{name}" minio = started_cluster.minio_client minio.put_object(bucket, name, io.BytesIO(b""), 0) table_function = f"s3('{url}', 'CSV', 'id Int32')" result = instance.query(f"SELECT count() FROM {table_function}") - assert(int(result) == 0) + assert int(result) == 0 def test_overwrite(started_cluster): @@ -933,12 +1171,18 @@ def test_overwrite(started_cluster): table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')" instance.query(f"create table test_overwrite as {table_function}") instance.query(f"truncate table test_overwrite") - instance.query(f"insert into test_overwrite select number, randomString(100) from numbers(50) settings s3_truncate_on_insert=1") - instance.query_and_get_error(f"insert into test_overwrite select number, randomString(100) from numbers(100)") - instance.query(f"insert into test_overwrite select number, randomString(100) from numbers(200) settings s3_truncate_on_insert=1") + instance.query( + f"insert into test_overwrite select number, randomString(100) from numbers(50) settings s3_truncate_on_insert=1" + ) + instance.query_and_get_error( + f"insert into test_overwrite select number, randomString(100) from numbers(100)" + ) + instance.query( + f"insert into test_overwrite select number, randomString(100) from numbers(200) settings s3_truncate_on_insert=1" + ) result = instance.query(f"select count() from test_overwrite") - assert(int(result) == 200) + assert int(result) == 200 def 
test_create_new_files_on_insert(started_cluster): @@ -948,24 +1192,38 @@ def test_create_new_files_on_insert(started_cluster): table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')" instance.query(f"create table test_multiple_inserts as {table_function}") instance.query(f"truncate table test_multiple_inserts") - instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(10) settings s3_truncate_on_insert=1") - instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(20) settings s3_create_new_file_on_insert=1") - instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings s3_create_new_file_on_insert=1") + instance.query( + f"insert into test_multiple_inserts select number, randomString(100) from numbers(10) settings s3_truncate_on_insert=1" + ) + instance.query( + f"insert into test_multiple_inserts select number, randomString(100) from numbers(20) settings s3_create_new_file_on_insert=1" + ) + instance.query( + f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings s3_create_new_file_on_insert=1" + ) result = instance.query(f"select count() from test_multiple_inserts") - assert(int(result) == 60) + assert int(result) == 60 instance.query(f"drop table test_multiple_inserts") - table_function = f"s3(s3_parquet_gz, structure='a Int32, b String', format='Parquet')" + table_function = ( + f"s3(s3_parquet_gz, structure='a Int32, b String', format='Parquet')" + ) instance.query(f"create table test_multiple_inserts as {table_function}") instance.query(f"truncate table test_multiple_inserts") - instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(10) settings s3_truncate_on_insert=1") - instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(20) settings s3_create_new_file_on_insert=1") - instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings s3_create_new_file_on_insert=1") + instance.query( + f"insert into test_multiple_inserts select number, randomString(100) from numbers(10) settings s3_truncate_on_insert=1" + ) + instance.query( + f"insert into test_multiple_inserts select number, randomString(100) from numbers(20) settings s3_create_new_file_on_insert=1" + ) + instance.query( + f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings s3_create_new_file_on_insert=1" + ) result = instance.query(f"select count() from test_multiple_inserts") - assert(int(result) == 60) + assert int(result) == 60 def test_format_detection(started_cluster): @@ -975,46 +1233,65 @@ def test_format_detection(started_cluster): instance.query(f"create table arrow_table_s3 (x UInt64) engine=S3(s3_arrow)") instance.query(f"insert into arrow_table_s3 select 1") result = instance.query(f"select * from s3(s3_arrow)") - assert(int(result) == 1) + assert int(result) == 1 - result = instance.query(f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')") - assert(int(result) == 1) - - result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')") - assert(int(result) == 1) + result = instance.query( + f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')" + ) + assert int(result) == 1 
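# The format-detection checks in this test repeat the same pattern: run a
# scalar query, cast the result to int, compare. A minimal helper along these
# lines could factor that out; it is an illustrative sketch (the name
# `assert_scalar` is an assumption; only `instance.query` comes from the test
# harness used throughout this file), not code from this patch.
def assert_scalar(instance, query, expected):
    """Run a query that returns a single value and compare it to `expected`."""
    result = instance.query(query).strip()
    assert int(result) == expected, f"{query!r} returned {result!r}, expected {expected}"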
+ result = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')" + ) + assert int(result) == 1 instance.query(f"create table parquet_table_s3 (x UInt64) engine=S3(s3_parquet2)") instance.query(f"insert into parquet_table_s3 select 1") result = instance.query(f"select * from s3(s3_parquet2)") - assert(int(result) == 1) + assert int(result) == 1 - result = instance.query(f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.parquet')") - assert(int(result) == 1) + result = instance.query( + f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.parquet')" + ) + assert int(result) == 1 - result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.parquet')") - assert(int(result) == 1) + result = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.parquet')" + ) + assert int(result) == 1 def test_schema_inference_from_globs(started_cluster): bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] - instance.query(f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL") - instance.query(f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0") + instance.query( + f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL" + ) + instance.query( + f"insert into table function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0" + ) url_filename = "test{1,2}.jsoncompacteachrow" - result = instance.query(f"desc url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}')") - assert(result.strip() == 'c1\tNullable(Float64)') + result = instance.query( + f"desc url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}')" + ) + assert result.strip() == "c1\tNullable(Float64)" - result = instance.query(f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}')") - assert(sorted(result.split()) == ['0', '\\N']) + result = instance.query( + f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{url_filename}')" + ) + assert sorted(result.split()) == ["0", "\\N"] - result = instance.query(f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test*.jsoncompacteachrow')") - assert(result.strip() == 'c1\tNullable(Float64)') + result = instance.query( + f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test*.jsoncompacteachrow')" + ) + assert result.strip() == "c1\tNullable(Float64)" - result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test*.jsoncompacteachrow')") - assert(sorted(result.split()) == ['0', '\\N']) + result = instance.query( + f"select * from 
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test*.jsoncompacteachrow')" + ) + assert sorted(result.split()) == ["0", "\\N"] def test_signatures(started_cluster): @@ -1025,20 +1302,30 @@ def test_signatures(started_cluster): instance.query(f"truncate table test_signatures") instance.query(f"insert into test_signatures select 1") - result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')") - assert(int(result) == 1) + result = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')" + ) + assert int(result) == 1 - result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'Arrow', 'x UInt64')") - assert(int(result) == 1) + result = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'Arrow', 'x UInt64')" + ) + assert int(result) == 1 - result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123')") - assert(int(result) == 1) + result = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123')" + ) + assert int(result) == 1 - result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'Arrow', 'x UInt64', 'auto')") - assert(int(result) == 1) + result = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'Arrow', 'x UInt64', 'auto')" + ) + assert int(result) == 1 - result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', 'Arrow')") - assert(int(result) == 1) + result = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow', 'minio', 'minio123', 'Arrow')" + ) + assert int(result) == 1 def test_select_columns(started_cluster): @@ -1048,29 +1335,43 @@ def test_select_columns(started_cluster): structure = "id UInt32, value1 Int32, value2 Int32" instance.query(f"drop table if exists {name}") - instance.query(f"CREATE TABLE {name} ({structure}) ENGINE = S3(s3_conf1, format='Parquet')") + instance.query( + f"CREATE TABLE {name} ({structure}) ENGINE = S3(s3_conf1, format='Parquet')" + ) limit = 10000000 - instance.query(f"INSERT INTO {name} SELECT * FROM generateRandom('{structure}') LIMIT {limit} SETTINGS s3_truncate_on_insert=1") + instance.query( + f"INSERT INTO {name} SELECT * FROM generateRandom('{structure}') LIMIT {limit} SETTINGS s3_truncate_on_insert=1" + ) instance.query(f"SELECT value2 FROM {name}") instance.query("SYSTEM FLUSH LOGS") - result1 = instance.query(f"SELECT read_bytes FROM system.query_log WHERE type='QueryFinish' and query LIKE 'SELECT value2 FROM {name}'") + result1 = instance.query( + f"SELECT read_bytes FROM system.query_log WHERE type='QueryFinish' and query LIKE 'SELECT value2 FROM {name}'" + ) instance.query(f"SELECT * FROM {name}") instance.query("SYSTEM FLUSH LOGS") - result2 = instance.query(f"SELECT read_bytes FROM system.query_log WHERE type='QueryFinish' and query LIKE 'SELECT * FROM {name}'") + result2 = instance.query( + f"SELECT read_bytes FROM system.query_log WHERE 
type='QueryFinish' and query LIKE 'SELECT * FROM {name}'" + ) - assert(int(result1) * 3 <= int(result2)) + assert int(result1) * 3 <= int(result2) def test_insert_select_schema_inference(started_cluster): bucket = started_cluster.minio_bucket instance = started_cluster.instances["dummy"] - instance.query(f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native') select toUInt64(1) as x") - result = instance.query(f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native')") - assert(result.strip() == 'x\tUInt64') + instance.query( + f"insert into function s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native') select toUInt64(1) as x" + ) + result = instance.query( + f"desc s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native')" + ) + assert result.strip() == "x\tUInt64" - result = instance.query(f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native')") - assert(int(result) == 1) + result = instance.query( + f"select * from s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_insert_select.native')" + ) + assert int(result) == 1 diff --git a/tests/integration/test_storage_url/test.py b/tests/integration/test_storage_url/test.py index 1ced71bc849..6ffb38bd8d7 100644 --- a/tests/integration/test_storage_url/test.py +++ b/tests/integration/test_storage_url/test.py @@ -4,11 +4,14 @@ from helpers.cluster import ClickHouseCluster uuids = [] + @pytest.fixture(scope="module") def cluster(): try: cluster = ClickHouseCluster(__file__) - cluster.add_instance("node1", main_configs=["configs/conf.xml"], with_nginx=True) + cluster.add_instance( + "node1", main_configs=["configs/conf.xml"], with_nginx=True + ) cluster.start() yield cluster @@ -20,10 +23,18 @@ def cluster(): def test_partition_by(cluster): node1 = cluster.instances["node1"] - node1.query(f"insert into table function url(url1) partition by column3 values (1, 2, 3), (3, 2, 1), (1, 3, 2)") - result = node1.query(f"select * from url('http://nginx:80/test_1', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32')") - assert(result.strip() == "3\t2\t1") - result = node1.query(f"select * from url('http://nginx:80/test_2', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32')") - assert(result.strip() == "1\t3\t2") - result = node1.query(f"select * from url('http://nginx:80/test_3', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32')") - assert(result.strip() == "1\t2\t3") + node1.query( + f"insert into table function url(url1) partition by column3 values (1, 2, 3), (3, 2, 1), (1, 3, 2)" + ) + result = node1.query( + f"select * from url('http://nginx:80/test_1', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32')" + ) + assert result.strip() == "3\t2\t1" + result = node1.query( + f"select * from url('http://nginx:80/test_2', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32')" + ) + assert result.strip() == "1\t3\t2" + result = node1.query( + f"select * from url('http://nginx:80/test_3', 'TSV', 'column1 UInt32, column2 UInt32, column3 UInt32')" + ) + assert result.strip() == "1\t2\t3" diff --git a/tests/integration/test_system_clusters_actual_information/test.py b/tests/integration/test_system_clusters_actual_information/test.py index 48f654dc30a..865c80db1c9 100644 --- 
a/tests/integration/test_system_clusters_actual_information/test.py +++ b/tests/integration/test_system_clusters_actual_information/test.py @@ -10,19 +10,26 @@ from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', with_zookeeper=True, main_configs=['configs/remote_servers.xml']) -node_1 = cluster.add_instance('node_1', with_zookeeper=True) +node = cluster.add_instance( + "node", with_zookeeper=True, main_configs=["configs/remote_servers.xml"] +) +node_1 = cluster.add_instance("node_1", with_zookeeper=True) + @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - node_1.query_with_retry('DROP TABLE IF EXISTS replicated') + node_1.query_with_retry("DROP TABLE IF EXISTS replicated") - node_1.query_with_retry('''CREATE TABLE replicated (id UInt32, date Date) ENGINE = - ReplicatedMergeTree('/clickhouse/tables/replicated', 'node_1') ORDER BY id PARTITION BY toYYYYMM(date)''') + node_1.query_with_retry( + """CREATE TABLE replicated (id UInt32, date Date) ENGINE = + ReplicatedMergeTree('/clickhouse/tables/replicated', 'node_1') ORDER BY id PARTITION BY toYYYYMM(date)""" + ) - node.query_with_retry("CREATE TABLE distributed (id UInt32, date Date) ENGINE = Distributed('test_cluster', 'default', 'replicated')") + node.query_with_retry( + "CREATE TABLE distributed (id UInt32, date Date) ENGINE = Distributed('test_cluster', 'default', 'replicated')" + ) yield cluster @@ -30,21 +37,26 @@ def started_cluster(): cluster.shutdown() - def test(started_cluster): cluster.pause_container("node_1") node.query("SYSTEM RELOAD CONFIG") - node.query_and_get_error("SELECT count() FROM distributed SETTINGS receive_timeout=1") + node.query_and_get_error( + "SELECT count() FROM distributed SETTINGS receive_timeout=1" + ) - result = node.query("SELECT errors_count, estimated_recovery_time FROM system.clusters WHERE cluster='test_cluster' and host_name='node_1'") + result = node.query( + "SELECT errors_count, estimated_recovery_time FROM system.clusters WHERE cluster='test_cluster' and host_name='node_1'" + ) errors_count, recovery_time = map(int, result.split()) assert errors_count == 3 - while True: + while True: time.sleep(1) - result = node.query("SELECT errors_count, estimated_recovery_time FROM system.clusters WHERE cluster='test_cluster' and host_name='node_1'") + result = node.query( + "SELECT errors_count, estimated_recovery_time FROM system.clusters WHERE cluster='test_cluster' and host_name='node_1'" + ) prev_time = recovery_time errors_count, recovery_time = map(int, result.split()) @@ -58,4 +70,3 @@ def test(started_cluster): assert errors_count == 0 cluster.unpause_container("node_1") - diff --git a/tests/integration/test_system_ddl_worker_queue/test.py b/tests/integration/test_system_ddl_worker_queue/test.py index c5037fc400e..4659e5b92e8 100644 --- a/tests/integration/test_system_ddl_worker_queue/test.py +++ b/tests/integration/test_system_ddl_worker_queue/test.py @@ -4,10 +4,18 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml'], 
with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node3 = cluster.add_instance( + "node3", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node4 = cluster.add_instance( + "node4", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) nodes = [node1, node2, node3, node4] @@ -20,13 +28,17 @@ def started_cluster(): for i, node in enumerate([node1, node2]): node.query("CREATE DATABASE testdb") node.query( - '''CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table1', '{}') ORDER BY id;'''.format( - i)) + """CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table1', '{}') ORDER BY id;""".format( + i + ) + ) for i, node in enumerate([node3, node4]): node.query("CREATE DATABASE testdb") node.query( - '''CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table2', '{}') ORDER BY id;'''.format( - i)) + """CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table2', '{}') ORDER BY id;""".format( + i + ) + ) yield cluster finally: @@ -34,15 +46,25 @@ def started_cluster(): def test_distributed_ddl_queue(started_cluster): - node1.query("INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)") - node3.query("INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)") + node1.query( + "INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)" + ) + node3.query( + "INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)" + ) node2.query("SYSTEM SYNC REPLICA testdb.test_table") node4.query("SYSTEM SYNC REPLICA testdb.test_table") - node1.query("ALTER TABLE testdb.test_table ON CLUSTER test_cluster ADD COLUMN somecolumn UInt8 AFTER val", - settings={"replication_alter_partitions_sync": "2"}) + node1.query( + "ALTER TABLE testdb.test_table ON CLUSTER test_cluster ADD COLUMN somecolumn UInt8 AFTER val", + settings={"replication_alter_partitions_sync": "2"}, + ) for node in nodes: node.query("SYSTEM SYNC REPLICA testdb.test_table") assert node.query("SELECT somecolumn FROM testdb.test_table LIMIT 1") == "0\n" - assert node.query( - "SELECT If((SELECT count(*) FROM system.distributed_ddl_queue WHERE cluster='test_cluster' AND entry='query-0000000000') > 0, 'ok', 'fail')") == "ok\n" + assert ( + node.query( + "SELECT If((SELECT count(*) FROM system.distributed_ddl_queue WHERE cluster='test_cluster' AND entry='query-0000000000') > 0, 'ok', 'fail')" + ) + == "ok\n" + ) diff --git a/tests/integration/test_system_flush_logs/test.py b/tests/integration/test_system_flush_logs/test.py index 407e66d56a7..d9ab76d2d61 100644 --- a/tests/integration/test_system_flush_logs/test.py +++ b/tests/integration/test_system_flush_logs/test.py @@ -6,22 +6,21 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node_default') +node = cluster.add_instance("node_default") system_logs = [ # disabled by default - ('system.text_log', 0), - + ("system.text_log", 0), # enabled by default - ('system.query_log', 1), - ('system.query_thread_log', 1), - ('system.part_log', 1), - ('system.trace_log', 1), - ('system.metric_log', 1), + 
("system.query_log", 1), + ("system.query_thread_log", 1), + ("system.part_log", 1), + ("system.trace_log", 1), + ("system.metric_log", 1), ] -@pytest.fixture(scope='module', autouse=True) +@pytest.fixture(scope="module", autouse=True) def start_cluster(): try: cluster.start() @@ -30,14 +29,14 @@ def start_cluster(): cluster.shutdown() -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def flush_logs(): - node.query('SYSTEM FLUSH LOGS') + node.query("SYSTEM FLUSH LOGS") -@pytest.mark.parametrize('table,exists', system_logs) +@pytest.mark.parametrize("table,exists", system_logs) def test_system_logs(flush_logs, table, exists): - q = 'SELECT * FROM {}'.format(table) + q = "SELECT * FROM {}".format(table) if exists: node.query(q) else: @@ -47,13 +46,16 @@ def test_system_logs(flush_logs, table, exists): # Logic is tricky, let's check that there is no hang in case of message queue # is not empty (this is another code path in the code). def test_system_logs_non_empty_queue(): - node.query('SELECT 1', settings={ - # right now defaults are the same, - # this set explicitly to avoid depends from defaults. - 'log_queries': 1, - 'log_queries_min_type': 'QUERY_START', - }) - node.query('SYSTEM FLUSH LOGS') + node.query( + "SELECT 1", + settings={ + # right now defaults are the same, + # this set explicitly to avoid depends from defaults. + "log_queries": 1, + "log_queries_min_type": "QUERY_START", + }, + ) + node.query("SYSTEM FLUSH LOGS") def test_system_suspend(): diff --git a/tests/integration/test_system_logs_comment/__init__.py b/tests/integration/test_system_logs_comment/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_system_logs_comment/test.py b/tests/integration/test_system_logs_comment/test.py new file mode 100644 index 00000000000..0659a2689a0 --- /dev/null +++ b/tests/integration/test_system_logs_comment/test.py @@ -0,0 +1,49 @@ +# pylint: disable=line-too-long +# pylint: disable=unused-argument +# pylint: disable=redefined-outer-name + +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance("node_default", stay_alive=True) + + +@pytest.fixture(scope="module", autouse=True) +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_system_logs_comment(): + node.exec_in_container( + [ + "bash", + "-c", + f"""echo " + + + ENGINE = MergeTree + PARTITION BY (event_date) + ORDER BY (event_time) + TTL event_date + INTERVAL 14 DAY DELETE + SETTINGS ttl_only_drop_parts=1 + COMMENT 'test_comment' + + + + + " > /etc/clickhouse-server/config.d/yyy-override-query_log.xml + """, + ] + ) + node.restart_clickhouse() + + node.query("select 1") + node.query("system flush logs") + + comment = node.query("SELECT comment FROM system.tables WHERE name = 'query_log'") + assert comment == "test_comment\n" diff --git a/tests/integration/test_system_logs_recreate/test.py b/tests/integration/test_system_logs_recreate/test.py index c0afa8cd555..387ad35dda2 100644 --- a/tests/integration/test_system_logs_recreate/test.py +++ b/tests/integration/test_system_logs_recreate/test.py @@ -6,9 +6,10 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node_default', stay_alive=True) +node = cluster.add_instance("node_default", stay_alive=True) -@pytest.fixture(scope='module', autouse=True) + +@pytest.fixture(scope="module", autouse=True) def 
start_cluster(): try: cluster.start() @@ -20,23 +21,34 @@ def start_cluster(): def test_system_logs_recreate(): system_logs = [ # enabled by default - 'query_log', - 'query_thread_log', - 'part_log', - 'trace_log', - 'metric_log', + "query_log", + "query_thread_log", + "part_log", + "trace_log", + "metric_log", ] - node.query('SYSTEM FLUSH LOGS') + node.query("SYSTEM FLUSH LOGS") for table in system_logs: - assert 'ENGINE = MergeTree' in node.query(f'SHOW CREATE TABLE system.{table}') - assert 'ENGINE = Null' not in node.query(f'SHOW CREATE TABLE system.{table}') - assert len(node.query(f"SHOW TABLES FROM system LIKE '{table}%'").strip().split('\n')) == 1 + assert "ENGINE = MergeTree" in node.query(f"SHOW CREATE TABLE system.{table}") + assert "ENGINE = Null" not in node.query(f"SHOW CREATE TABLE system.{table}") + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 1 + ) # NOTE: we use zzz- prefix to make it the last file, # so that it will be applied last. for table in system_logs: - node.exec_in_container(['bash', '-c', f"""echo " + node.exec_in_container( + [ + "bash", + "-c", + f"""echo " <{table}> ENGINE = Null @@ -44,41 +56,74 @@ def test_system_logs_recreate(): " > /etc/clickhouse-server/config.d/zzz-override-{table}.xml - """]) + """, + ] + ) node.restart_clickhouse() - node.query('SYSTEM FLUSH LOGS') + node.query("SYSTEM FLUSH LOGS") for table in system_logs: - assert 'ENGINE = MergeTree' not in node.query(f'SHOW CREATE TABLE system.{table}') - assert 'ENGINE = Null' in node.query(f'SHOW CREATE TABLE system.{table}') - assert len(node.query(f"SHOW TABLES FROM system LIKE '{table}%'").strip().split('\n')) == 2 + assert "ENGINE = MergeTree" not in node.query( + f"SHOW CREATE TABLE system.{table}" + ) + assert "ENGINE = Null" in node.query(f"SHOW CREATE TABLE system.{table}") + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 2 + ) for table in system_logs: - node.exec_in_container(['rm', f'/etc/clickhouse-server/config.d/zzz-override-{table}.xml']) + node.exec_in_container( + ["rm", f"/etc/clickhouse-server/config.d/zzz-override-{table}.xml"] + ) node.restart_clickhouse() - node.query('SYSTEM FLUSH LOGS') + node.query("SYSTEM FLUSH LOGS") for table in system_logs: - assert 'ENGINE = MergeTree' in node.query(f'SHOW CREATE TABLE system.{table}') - assert 'ENGINE = Null' not in node.query(f'SHOW CREATE TABLE system.{table}') - assert len(node.query(f"SHOW TABLES FROM system LIKE '{table}%'").strip().split('\n')) == 3 + assert "ENGINE = MergeTree" in node.query(f"SHOW CREATE TABLE system.{table}") + assert "ENGINE = Null" not in node.query(f"SHOW CREATE TABLE system.{table}") + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 3 + ) - node.query('SYSTEM FLUSH LOGS') + node.query("SYSTEM FLUSH LOGS") # Ensure that there was no superfluous RENAME's # IOW that the table created only when the structure is indeed different. 
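# The repeated length checks in test_system_logs_recreate count how many
# generations of a system log table exist (query_log, query_log_0, ...). A
# small helper could make that intent explicit; `count_table_versions` is an
# illustrative name, and only `node.query` is taken from the surrounding tests.
def count_table_versions(node, table):
    """Return how many system tables share the given prefix (original plus renamed copies)."""
    names = node.query(f"SHOW TABLES FROM system LIKE '{table}%'").strip()
    return len(names.split("\n")) if names else 0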
for table in system_logs: - assert len(node.query(f"SHOW TABLES FROM system LIKE '{table}%'").strip().split('\n')) == 3 + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 3 + ) def test_drop_system_log(): - node.exec_in_container(['bash', '-c', f"""echo " + node.exec_in_container( + [ + "bash", + "-c", + f"""echo " 1000000 " > /etc/clickhouse-server/config.d/yyy-override-query_log.xml - """]) + """, + ] + ) node.restart_clickhouse() node.query("select 1") node.query("system flush logs") @@ -89,5 +134,7 @@ def test_drop_system_log(): node.query("select 3") node.query("system flush logs") assert node.query("select count() > 0 from system.query_log") == "1\n" - node.exec_in_container(['rm', f'/etc/clickhouse-server/config.d/yyy-override-query_log.xml']) + node.exec_in_container( + ["rm", f"/etc/clickhouse-server/config.d/yyy-override-query_log.xml"] + ) node.restart_clickhouse() diff --git a/tests/integration/test_system_merges/test.py b/tests/integration/test_system_merges/test.py index 672b637f783..9239cb11065 100644 --- a/tests/integration/test_system_merges/test.py +++ b/tests/integration/test_system_merges/test.py @@ -6,23 +6,29 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', - main_configs=['configs/logs_config.xml'], - with_zookeeper=True, - macros={"shard": 0, "replica": 1}) +node1 = cluster.add_instance( + "node1", + main_configs=["configs/logs_config.xml"], + with_zookeeper=True, + macros={"shard": 0, "replica": 1}, +) -node2 = cluster.add_instance('node2', - main_configs=['configs/logs_config.xml'], - with_zookeeper=True, - macros={"shard": 0, "replica": 2}) +node2 = cluster.add_instance( + "node2", + main_configs=["configs/logs_config.xml"], + with_zookeeper=True, + macros={"shard": 0, "replica": 2}, +) @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() - node1.query('CREATE DATABASE test ENGINE=Ordinary') # Different paths with Atomic - node2.query('CREATE DATABASE test ENGINE=Ordinary') + node1.query( + "CREATE DATABASE test ENGINE=Ordinary" + ) # Different paths with Atomic + node2.query("CREATE DATABASE test ENGINE=Ordinary") yield cluster finally: @@ -33,10 +39,7 @@ def split_tsv(data): return [x.split("\t") for x in data.splitlines()] -@pytest.mark.parametrize("replicated", [ - "", - "replicated" -]) +@pytest.mark.parametrize("replicated", ["", "replicated"]) def test_merge_simple(started_cluster, replicated): try: clickhouse_path = "/var/lib/clickhouse" @@ -45,25 +48,36 @@ def test_merge_simple(started_cluster, replicated): name = db_name + "." 
+ table_name table_path = "data/" + db_name + "/" + table_name nodes = [node1, node2] if replicated else [node1] - engine = "ReplicatedMergeTree('/clickhouse/test_merge_simple', '{replica}')" if replicated else "MergeTree()" + engine = ( + "ReplicatedMergeTree('/clickhouse/test_merge_simple', '{replica}')" + if replicated + else "MergeTree()" + ) node_check = nodes[-1] starting_block = 0 if replicated else 1 for node in nodes: - node.query(""" + node.query( + """ CREATE TABLE {name} ( `a` Int64 ) ENGINE = {engine} ORDER BY sleep(2) - """.format(engine=engine, name=name)) + """.format( + engine=engine, name=name + ) + ) node1.query("INSERT INTO {name} VALUES (1)".format(name=name)) node1.query("INSERT INTO {name} VALUES (2)".format(name=name)) node1.query("INSERT INTO {name} VALUES (3)".format(name=name)) - parts = ["all_{}_{}_0".format(x, x) for x in range(starting_block, starting_block + 3)] + parts = [ + "all_{}_{}_0".format(x, x) + for x in range(starting_block, starting_block + 3) + ] result_part = "all_{}_{}_1".format(starting_block, starting_block + 2) def optimize(): @@ -75,38 +89,54 @@ def test_merge_simple(started_cluster, replicated): t.start() time.sleep(1) - assert split_tsv(node_check.query(""" + assert ( + split_tsv( + node_check.query( + """ SELECT database, table, num_parts, source_part_names, source_part_paths, result_part_name, result_part_path, partition_id, is_mutation FROM system.merges WHERE table = '{name}' - """.format(name=table_name))) == [ - [ - db_name, - table_name, - "3", - "['{}','{}','{}']".format(*parts), - "['{clickhouse}/{table_path}/{}/','{clickhouse}/{table_path}/{}/','{clickhouse}/{table_path}/{}/']".format( - *parts, clickhouse=clickhouse_path, table_path=table_path), - result_part, - "{clickhouse}/{table_path}/{}/".format(result_part, clickhouse=clickhouse_path, table_path=table_path), - "all", - "0" + """.format( + name=table_name + ) + ) + ) + == [ + [ + db_name, + table_name, + "3", + "['{}','{}','{}']".format(*parts), + "['{clickhouse}/{table_path}/{}/','{clickhouse}/{table_path}/{}/','{clickhouse}/{table_path}/{}/']".format( + *parts, clickhouse=clickhouse_path, table_path=table_path + ), + result_part, + "{clickhouse}/{table_path}/{}/".format( + result_part, clickhouse=clickhouse_path, table_path=table_path + ), + "all", + "0", + ] ] - ] + ) t.join() wait.join() - assert node_check.query("SELECT * FROM system.merges WHERE table = '{name}'".format(name=table_name)) == "" + assert ( + node_check.query( + "SELECT * FROM system.merges WHERE table = '{name}'".format( + name=table_name + ) + ) + == "" + ) finally: for node in nodes: node.query("DROP TABLE {name}".format(name=name)) -@pytest.mark.parametrize("replicated", [ - "", - "replicated" -]) +@pytest.mark.parametrize("replicated", ["", "replicated"]) def test_mutation_simple(started_cluster, replicated): try: clickhouse_path = "/var/lib/clickhouse" @@ -115,53 +145,88 @@ def test_mutation_simple(started_cluster, replicated): name = db_name + "." 
+ table_name table_path = "data/" + db_name + "/" + table_name nodes = [node1, node2] if replicated else [node1] - engine = "ReplicatedMergeTree('/clickhouse/test_mutation_simple', '{replica}')" if replicated else "MergeTree()" + engine = ( + "ReplicatedMergeTree('/clickhouse/test_mutation_simple', '{replica}')" + if replicated + else "MergeTree()" + ) node_check = nodes[-1] starting_block = 0 if replicated else 1 for node in nodes: - node.query(""" + node.query( + """ CREATE TABLE {name} ( `a` Int64 ) ENGINE = {engine} ORDER BY tuple() - """.format(engine=engine, name=name)) + """.format( + engine=engine, name=name + ) + ) node1.query("INSERT INTO {name} VALUES (1)".format(name=name)) part = "all_{}_{}_0".format(starting_block, starting_block) - result_part = "all_{}_{}_0_{}".format(starting_block, starting_block, starting_block + 1) + result_part = "all_{}_{}_0_{}".format( + starting_block, starting_block, starting_block + 1 + ) def alter(): - node1.query("ALTER TABLE {name} UPDATE a = 42 WHERE sleep(2) OR 1".format(name=name), settings={ - 'mutations_sync': 1, - }) + node1.query( + "ALTER TABLE {name} UPDATE a = 42 WHERE sleep(2) OR 1".format( + name=name + ), + settings={ + "mutations_sync": 1, + }, + ) t = threading.Thread(target=alter) t.start() time.sleep(1) - assert split_tsv(node_check.query(""" + assert ( + split_tsv( + node_check.query( + """ SELECT database, table, num_parts, source_part_names, source_part_paths, result_part_name, result_part_path, partition_id, is_mutation FROM system.merges WHERE table = '{name}' - """.format(name=table_name))) == [ - [ - db_name, - table_name, - "1", - "['{}']".format(part), - "['{clickhouse}/{table_path}/{}/']".format(part, clickhouse=clickhouse_path, table_path=table_path), - result_part, - "{clickhouse}/{table_path}/{}/".format(result_part, clickhouse=clickhouse_path, table_path=table_path), - "all", - "1" - ], - ] + """.format( + name=table_name + ) + ) + ) + == [ + [ + db_name, + table_name, + "1", + "['{}']".format(part), + "['{clickhouse}/{table_path}/{}/']".format( + part, clickhouse=clickhouse_path, table_path=table_path + ), + result_part, + "{clickhouse}/{table_path}/{}/".format( + result_part, clickhouse=clickhouse_path, table_path=table_path + ), + "all", + "1", + ], + ] + ) t.join() - assert node_check.query("SELECT * FROM system.merges WHERE table = '{name}'".format(name=table_name)) == "" + assert ( + node_check.query( + "SELECT * FROM system.merges WHERE table = '{name}'".format( + name=table_name + ) + ) + == "" + ) finally: for node in nodes: diff --git a/tests/integration/test_system_metrics/test.py b/tests/integration/test_system_metrics/test.py index efcc6f88a24..439e8b66db1 100644 --- a/tests/integration/test_system_metrics/test.py +++ b/tests/integration/test_system_metrics/test.py @@ -9,17 +9,24 @@ from helpers.network import PartitionManager def fill_nodes(nodes, shard): for node in nodes: node.query( - ''' + """ CREATE DATABASE test; CREATE TABLE test.test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; - '''.format(shard=shard, replica=node.name)) + """.format( + shard=shard, replica=node.name + ) + ) cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', 
main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -37,31 +44,62 @@ def start_cluster(): finally: cluster.shutdown() + def test_readonly_metrics(start_cluster): - assert node1.query("SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'") == "0\n" + assert ( + node1.query("SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'") + == "0\n" + ) with PartitionManager() as pm: ## make node1 readonly -> heal -> readonly -> heal -> detach table -> heal -> attach table pm.drop_instance_zk_connections(node1) - assert_eq_with_retry(node1, "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", "1\n", retry_count=300, sleep_time=1) + assert_eq_with_retry( + node1, + "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", + "1\n", + retry_count=300, + sleep_time=1, + ) pm.heal_all() - assert_eq_with_retry(node1, "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", "0\n", retry_count=300, sleep_time=1) + assert_eq_with_retry( + node1, + "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", + "0\n", + retry_count=300, + sleep_time=1, + ) pm.drop_instance_zk_connections(node1) - assert_eq_with_retry(node1, "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", "1\n", retry_count=300, sleep_time=1) - + assert_eq_with_retry( + node1, + "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", + "1\n", + retry_count=300, + sleep_time=1, + ) node1.query("DETACH TABLE test.test_table") - assert "0\n" == node1.query("SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'") + assert "0\n" == node1.query( + "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'" + ) pm.heal_all() node1.query("ATTACH TABLE test.test_table") - assert_eq_with_retry(node1, "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", "0\n", retry_count=300, sleep_time=1) + assert_eq_with_retry( + node1, + "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", + "0\n", + retry_count=300, + sleep_time=1, + ) -#For LowCardinality-columns, the bytes for N rows is not N*size of 1 row. + +# For LowCardinality-columns, the bytes for N rows is not N*size of 1 row. 
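# Editor's note (illustrative explanation, not part of the original change):
# LowCardinality(String) is dictionary-encoded, so the Buffer table keeps one
# dictionary entry per distinct value plus a small per-row index. That is
# presumably why the assertions below expect 24 bytes for a single 'hello' row
# but only 25 bytes for two identical rows: the size grows roughly as
# dictionary_size + n_rows * index_width, not as n_rows * row_size.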
def test_metrics_storage_buffer_size(start_cluster): - node1.query(''' + node1.query( + """ CREATE TABLE test.test_mem_table ( `str` LowCardinality(String) @@ -73,18 +111,49 @@ def test_metrics_storage_buffer_size(start_cluster): `str` LowCardinality(String) ) ENGINE = Buffer('test', 'test_mem_table', 1, 600, 600, 1000, 100000, 100000, 10000000); - ''') + """ + ) - #before flush + # before flush node1.query("INSERT INTO test.buffer_table VALUES('hello');") - assert node1.query("SELECT value FROM system.metrics WHERE metric = 'StorageBufferRows'") == "1\n" - assert node1.query("SELECT value FROM system.metrics WHERE metric = 'StorageBufferBytes'") == "24\n" + assert ( + node1.query( + "SELECT value FROM system.metrics WHERE metric = 'StorageBufferRows'" + ) + == "1\n" + ) + assert ( + node1.query( + "SELECT value FROM system.metrics WHERE metric = 'StorageBufferBytes'" + ) + == "24\n" + ) node1.query("INSERT INTO test.buffer_table VALUES('hello');") - assert node1.query("SELECT value FROM system.metrics WHERE metric = 'StorageBufferRows'") == "2\n" - assert node1.query("SELECT value FROM system.metrics WHERE metric = 'StorageBufferBytes'") == "25\n" + assert ( + node1.query( + "SELECT value FROM system.metrics WHERE metric = 'StorageBufferRows'" + ) + == "2\n" + ) + assert ( + node1.query( + "SELECT value FROM system.metrics WHERE metric = 'StorageBufferBytes'" + ) + == "25\n" + ) - #flush + # flush node1.query("OPTIMIZE TABLE test.buffer_table") - assert node1.query("SELECT value FROM system.metrics WHERE metric = 'StorageBufferRows'") == "0\n" - assert node1.query("SELECT value FROM system.metrics WHERE metric = 'StorageBufferBytes'") == "0\n" + assert ( + node1.query( + "SELECT value FROM system.metrics WHERE metric = 'StorageBufferRows'" + ) + == "0\n" + ) + assert ( + node1.query( + "SELECT value FROM system.metrics WHERE metric = 'StorageBufferBytes'" + ) + == "0\n" + ) diff --git a/tests/integration/test_system_queries/test.py b/tests/integration/test_system_queries/test.py index 50f4afd1abe..9138a934554 100644 --- a/tests/integration/test_system_queries/test.py +++ b/tests/integration/test_system_queries/test.py @@ -17,15 +17,24 @@ def started_cluster(): global instance try: cluster = ClickHouseCluster(__file__) - cluster.add_instance('ch1', - main_configs=["configs/config.d/clusters_config.xml", "configs/config.d/query_log.xml"], - dictionaries=["configs/dictionaries/dictionary_clickhouse_cache.xml", - "configs/dictionaries/dictionary_clickhouse_flat.xml"]) + cluster.add_instance( + "ch1", + main_configs=[ + "configs/config.d/clusters_config.xml", + "configs/config.d/query_log.xml", + ], + dictionaries=[ + "configs/dictionaries/dictionary_clickhouse_cache.xml", + "configs/dictionaries/dictionary_clickhouse_flat.xml", + ], + ) cluster.start() - instance = cluster.instances['ch1'] - instance.query('CREATE DATABASE dictionaries ENGINE = Dictionary') - instance.query('CREATE TABLE dictionary_source (id UInt64, value UInt8) ENGINE = Memory') + instance = cluster.instances["ch1"] + instance.query("CREATE DATABASE dictionaries ENGINE = Dictionary") + instance.query( + "CREATE TABLE dictionary_source (id UInt64, value UInt8) ENGINE = Memory" + ) yield cluster finally: @@ -34,104 +43,154 @@ def started_cluster(): def test_SYSTEM_RELOAD_DICTIONARY(started_cluster): - instance = cluster.instances['ch1'] + instance = cluster.instances["ch1"] instance.query("SYSTEM RELOAD DICTIONARIES") - assert TSV(instance.query( - "SELECT dictHas('clickhouse_flat', toUInt64(0)), dictHas('clickhouse_flat', 
toUInt64(1))")) == TSV("0\t0\n") + assert TSV( + instance.query( + "SELECT dictHas('clickhouse_flat', toUInt64(0)), dictHas('clickhouse_flat', toUInt64(1))" + ) + ) == TSV("0\t0\n") instance.query("INSERT INTO dictionary_source VALUES (0, 0)") - assert TSV(instance.query( - "SELECT dictGetUInt8('clickhouse_cache', 'value', toUInt64(0)), dictHas('clickhouse_cache', toUInt64(1))")) == TSV( - "0\t0\n") + assert TSV( + instance.query( + "SELECT dictGetUInt8('clickhouse_cache', 'value', toUInt64(0)), dictHas('clickhouse_cache', toUInt64(1))" + ) + ) == TSV("0\t0\n") instance.query("INSERT INTO dictionary_source VALUES (1, 1)") - assert TSV(instance.query( - "SELECT dictGetUInt8('clickhouse_cache', 'value', toUInt64(0)), dictHas('clickhouse_cache', toUInt64(1))")) == TSV( - "0\t0\n") + assert TSV( + instance.query( + "SELECT dictGetUInt8('clickhouse_cache', 'value', toUInt64(0)), dictHas('clickhouse_cache', toUInt64(1))" + ) + ) == TSV("0\t0\n") instance.query("SYSTEM RELOAD DICTIONARY clickhouse_cache") - assert TSV(instance.query( - "SELECT dictGetUInt8('clickhouse_cache', 'value', toUInt64(0)), dictGetUInt8('clickhouse_cache', 'value', toUInt64(1))")) == TSV( - "0\t1\n") - assert TSV(instance.query( - "SELECT dictHas('clickhouse_flat', toUInt64(0)), dictHas('clickhouse_flat', toUInt64(1))")) == TSV("0\t0\n") + assert TSV( + instance.query( + "SELECT dictGetUInt8('clickhouse_cache', 'value', toUInt64(0)), dictGetUInt8('clickhouse_cache', 'value', toUInt64(1))" + ) + ) == TSV("0\t1\n") + assert TSV( + instance.query( + "SELECT dictHas('clickhouse_flat', toUInt64(0)), dictHas('clickhouse_flat', toUInt64(1))" + ) + ) == TSV("0\t0\n") instance.query("SYSTEM RELOAD DICTIONARIES") - assert TSV(instance.query( - "SELECT dictGetUInt8('clickhouse_cache', 'value', toUInt64(0)), dictGetUInt8('clickhouse_cache', 'value', toUInt64(1))")) == TSV( - "0\t1\n") - assert TSV(instance.query( - "SELECT dictGetUInt8('clickhouse_flat', 'value', toUInt64(0)), dictGetUInt8('clickhouse_flat', 'value', toUInt64(1))")) == TSV( - "0\t1\n") + assert TSV( + instance.query( + "SELECT dictGetUInt8('clickhouse_cache', 'value', toUInt64(0)), dictGetUInt8('clickhouse_cache', 'value', toUInt64(1))" + ) + ) == TSV("0\t1\n") + assert TSV( + instance.query( + "SELECT dictGetUInt8('clickhouse_flat', 'value', toUInt64(0)), dictGetUInt8('clickhouse_flat', 'value', toUInt64(1))" + ) + ) == TSV("0\t1\n") def test_DROP_DNS_CACHE(started_cluster): - instance = cluster.instances['ch1'] + instance = cluster.instances["ch1"] - instance.exec_in_container(['bash', '-c', 'echo 127.0.0.1 localhost > /etc/hosts'], privileged=True, user='root') - instance.exec_in_container(['bash', '-c', 'echo ::1 localhost >> /etc/hosts'], privileged=True, user='root') + instance.exec_in_container( + ["bash", "-c", "echo 127.0.0.1 localhost > /etc/hosts"], + privileged=True, + user="root", + ) + instance.exec_in_container( + ["bash", "-c", "echo ::1 localhost >> /etc/hosts"], privileged=True, user="root" + ) - instance.exec_in_container(['bash', '-c', 'echo 127.255.255.255 lost_host >> /etc/hosts'], privileged=True, - user='root') + instance.exec_in_container( + ["bash", "-c", "echo 127.255.255.255 lost_host >> /etc/hosts"], + privileged=True, + user="root", + ) instance.query("SYSTEM DROP DNS CACHE") with pytest.raises(QueryRuntimeException): instance.query("SELECT * FROM remote('lost_host', 'system', 'one')") instance.query( - "CREATE TABLE distributed_lost_host (dummy UInt8) ENGINE = Distributed(lost_host_cluster, 'system', 'one')") + "CREATE TABLE 
distributed_lost_host (dummy UInt8) ENGINE = Distributed(lost_host_cluster, 'system', 'one')" + ) with pytest.raises(QueryRuntimeException): instance.query("SELECT * FROM distributed_lost_host") - instance.exec_in_container(['bash', '-c', 'echo 127.0.0.1 localhost > /etc/hosts'], privileged=True, user='root') - instance.exec_in_container(['bash', '-c', 'echo ::1 localhost >> /etc/hosts'], privileged=True, user='root') + instance.exec_in_container( + ["bash", "-c", "echo 127.0.0.1 localhost > /etc/hosts"], + privileged=True, + user="root", + ) + instance.exec_in_container( + ["bash", "-c", "echo ::1 localhost >> /etc/hosts"], privileged=True, user="root" + ) - instance.exec_in_container(['bash', '-c', 'echo 127.0.0.1 lost_host >> /etc/hosts'], privileged=True, user='root') + instance.exec_in_container( + ["bash", "-c", "echo 127.0.0.1 lost_host >> /etc/hosts"], + privileged=True, + user="root", + ) instance.query("SYSTEM DROP DNS CACHE") instance.query("SELECT * FROM remote('lost_host', 'system', 'one')") instance.query("SELECT * FROM distributed_lost_host") - assert TSV(instance.query( - "SELECT DISTINCT host_name, host_address FROM system.clusters WHERE cluster='lost_host_cluster'")) == TSV( - "lost_host\t127.0.0.1\n") + assert TSV( + instance.query( + "SELECT DISTINCT host_name, host_address FROM system.clusters WHERE cluster='lost_host_cluster'" + ) + ) == TSV("lost_host\t127.0.0.1\n") def test_RELOAD_CONFIG_AND_MACROS(started_cluster): macros = "ro" - create_macros = 'echo "{}" > /etc/clickhouse-server/config.d/macros.xml'.format(macros) + create_macros = 'echo "{}" > /etc/clickhouse-server/config.d/macros.xml'.format( + macros + ) - instance = cluster.instances['ch1'] + instance = cluster.instances["ch1"] - instance.exec_in_container(['bash', '-c', create_macros], privileged=True, user='root') + instance.exec_in_container( + ["bash", "-c", create_macros], privileged=True, user="root" + ) instance.query("SYSTEM RELOAD CONFIG") - assert TSV(instance.query("select * from system.macros")) == TSV("instance\tch1\nmac\tro\n") + assert TSV(instance.query("select * from system.macros")) == TSV( + "instance\tch1\nmac\tro\n" + ) def test_system_flush_logs(started_cluster): - instance = cluster.instances['ch1'] - instance.query(''' + instance = cluster.instances["ch1"] + instance.query( + """ SET log_queries = 0; SYSTEM FLUSH LOGS; TRUNCATE TABLE system.query_log; - ''') + """ + ) for i in range(4): # Sleep to execute flushing from background thread at first query # by expiration of flush_interval_millisecond and test probable race condition. 
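        # Editor's note (illustrative, not part of the original change): SYSTEM FLUSH LOGS
        # forces the in-memory buffers of the system log tables, including system.query_log,
        # to be written out immediately instead of waiting for the periodic background flush
        # (the "flush_interval_millisecond" mentioned above presumably refers to the
        # flush_interval_milliseconds setting of query_log). The sleep below gives the
        # background flusher a chance to run first, so the loop also exercises the race
        # between the two flush paths before checking the count in system.query_log.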
time.sleep(0.5) - result = instance.query(''' + result = instance.query( + """ SELECT 1 FORMAT Null; SET log_queries = 0; SYSTEM FLUSH LOGS; - SELECT count() FROM system.query_log;''') - instance.query(''' + SELECT count() FROM system.query_log;""" + ) + instance.query( + """ SET log_queries = 0; SYSTEM FLUSH LOGS; TRUNCATE TABLE system.query_log; - ''') - assert TSV(result) == TSV('4') + """ + ) + assert TSV(result) == TSV("4") -if __name__ == '__main__': +if __name__ == "__main__": with contextmanager(started_cluster)() as cluster: for name, instance in list(cluster.instances.items()): print(name, instance.ip_address) diff --git a/tests/integration/test_system_replicated_fetches/test.py b/tests/integration/test_system_replicated_fetches/test.py index fcbdd4addd9..2b516ebf69b 100644 --- a/tests/integration/test_system_replicated_fetches/test.py +++ b/tests/integration/test_system_replicated_fetches/test.py @@ -11,8 +11,9 @@ import string import json cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) +node2 = cluster.add_instance("node2", with_zookeeper=True) + @pytest.fixture(scope="module") def started_cluster(): @@ -24,21 +25,35 @@ def started_cluster(): finally: cluster.shutdown() + def get_random_string(length): - return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length)) + return "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(length) + ) + def test_system_replicated_fetches(started_cluster): - node1.query("CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '1') ORDER BY tuple()") - node2.query("CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '2') ORDER BY tuple()") + node1.query( + "CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '1') ORDER BY tuple()" + ) + node2.query( + "CREATE TABLE t (key UInt64, data String) ENGINE = ReplicatedMergeTree('/clickhouse/test/t', '2') ORDER BY tuple()" + ) with PartitionManager() as pm: node2.query("SYSTEM STOP FETCHES t") - node1.query("INSERT INTO t SELECT number, '{}' FROM numbers(10000)".format(get_random_string(104857))) + node1.query( + "INSERT INTO t SELECT number, '{}' FROM numbers(10000)".format( + get_random_string(104857) + ) + ) pm.add_network_delay(node1, 80) node2.query("SYSTEM START FETCHES t") fetches_result = [] for _ in range(1000): - result = json.loads(node2.query("SELECT * FROM system.replicated_fetches FORMAT JSON")) + result = json.loads( + node2.query("SELECT * FROM system.replicated_fetches FORMAT JSON") + ) if not result["data"]: if fetches_result: break @@ -52,45 +67,69 @@ def test_system_replicated_fetches(started_cluster): assert node2.query("SELECT COUNT() FROM t") == "10000\n" for elem in fetches_result: - elem['bytes_read_compressed'] = float(elem['bytes_read_compressed']) - elem['total_size_bytes_compressed'] = float(elem['total_size_bytes_compressed']) - elem['progress'] = float(elem['progress']) - elem['elapsed'] = float(elem['elapsed']) + elem["bytes_read_compressed"] = float(elem["bytes_read_compressed"]) + elem["total_size_bytes_compressed"] = float(elem["total_size_bytes_compressed"]) + elem["progress"] = float(elem["progress"]) + elem["elapsed"] = float(elem["elapsed"]) assert len(fetches_result) > 0 first_non_empty = fetches_result[0] - assert 
first_non_empty['database'] == "default" - assert first_non_empty['table'] == "t" - assert first_non_empty['source_replica_hostname'] == 'node1' - assert first_non_empty['source_replica_port'] == 9009 - assert first_non_empty['source_replica_path'] == '/clickhouse/test/t/replicas/1' - assert first_non_empty['interserver_scheme'] == 'http' - assert first_non_empty['result_part_name'] == 'all_0_0_0' - assert first_non_empty['result_part_path'].startswith('/var/lib/clickhouse/') - assert first_non_empty['result_part_path'].endswith('all_0_0_0/') - assert first_non_empty['partition_id'] == 'all' - assert first_non_empty['URI'].startswith('http://node1:9009/?endpoint=DataPartsExchange') + assert first_non_empty["database"] == "default" + assert first_non_empty["table"] == "t" + assert first_non_empty["source_replica_hostname"] == "node1" + assert first_non_empty["source_replica_port"] == 9009 + assert first_non_empty["source_replica_path"] == "/clickhouse/test/t/replicas/1" + assert first_non_empty["interserver_scheme"] == "http" + assert first_non_empty["result_part_name"] == "all_0_0_0" + assert first_non_empty["result_part_path"].startswith("/var/lib/clickhouse/") + assert first_non_empty["result_part_path"].endswith("all_0_0_0/") + assert first_non_empty["partition_id"] == "all" + assert first_non_empty["URI"].startswith( + "http://node1:9009/?endpoint=DataPartsExchange" + ) for elem in fetches_result: - assert elem['bytes_read_compressed'] <= elem['total_size_bytes_compressed'], "Bytes read ({}) more than total bytes ({}). It's a bug".format(elem['bytes_read_compressed'], elem['total_size_bytes_compressed']) - assert 0.0 <= elem['progress'] <= 1.0, "Progress shouldn't less than 0 and bigger than 1, got {}".format(elem['progress']) - assert 0.0 <= elem['elapsed'], "Elapsed time must be greater than 0, got {}".format(elem['elapsed']) + assert ( + elem["bytes_read_compressed"] <= elem["total_size_bytes_compressed"] + ), "Bytes read ({}) more than total bytes ({}). It's a bug".format( + elem["bytes_read_compressed"], elem["total_size_bytes_compressed"] + ) + assert ( + 0.0 <= elem["progress"] <= 1.0 + ), "Progress shouldn't less than 0 and bigger than 1, got {}".format( + elem["progress"] + ) + assert ( + 0.0 <= elem["elapsed"] + ), "Elapsed time must be greater than 0, got {}".format(elem["elapsed"]) - prev_progress = first_non_empty['progress'] + prev_progress = first_non_empty["progress"] for elem in fetches_result: - assert elem['progress'] >= prev_progress, "Progress decreasing prev{}, next {}? It's a bug".format(prev_progress, elem['progress']) - prev_progress = elem['progress'] + assert ( + elem["progress"] >= prev_progress + ), "Progress decreasing prev{}, next {}? It's a bug".format( + prev_progress, elem["progress"] + ) + prev_progress = elem["progress"] - prev_bytes = first_non_empty['bytes_read_compressed'] + prev_bytes = first_non_empty["bytes_read_compressed"] for elem in fetches_result: - assert elem['bytes_read_compressed'] >= prev_bytes, "Bytes read decreasing prev {}, next {}? It's a bug".format(prev_bytes, elem['bytes_read_compressed']) - prev_bytes = elem['bytes_read_compressed'] + assert ( + elem["bytes_read_compressed"] >= prev_bytes + ), "Bytes read decreasing prev {}, next {}? 
It's a bug".format( + prev_bytes, elem["bytes_read_compressed"] + ) + prev_bytes = elem["bytes_read_compressed"] - prev_elapsed = first_non_empty['elapsed'] + prev_elapsed = first_non_empty["elapsed"] for elem in fetches_result: - assert elem['elapsed'] >= prev_elapsed, "Elapsed time decreasing prev {}, next {}? It's a bug".format(prev_elapsed, elem['elapsed']) - prev_elapsed = elem['elapsed'] + assert ( + elem["elapsed"] >= prev_elapsed + ), "Elapsed time decreasing prev {}, next {}? It's a bug".format( + prev_elapsed, elem["elapsed"] + ) + prev_elapsed = elem["elapsed"] node1.query("DROP TABLE IF EXISTS t SYNC") node2.query("DROP TABLE IF EXISTS t SYNC") diff --git a/tests/integration/test_table_functions_access_rights/test.py b/tests/integration/test_table_functions_access_rights/test.py index 90106303315..705150c8bdd 100644 --- a/tests/integration/test_table_functions_access_rights/test.py +++ b/tests/integration/test_table_functions_access_rights/test.py @@ -3,7 +3,7 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance') +instance = cluster.add_instance("instance") @pytest.fixture(scope="module", autouse=True) @@ -11,8 +11,12 @@ def started_cluster(): try: cluster.start() - instance.query("CREATE TABLE table1(x UInt32) ENGINE = MergeTree ORDER BY tuple()") - instance.query("CREATE TABLE table2(x UInt32) ENGINE = MergeTree ORDER BY tuple()") + instance.query( + "CREATE TABLE table1(x UInt32) ENGINE = MergeTree ORDER BY tuple()" + ) + instance.query( + "CREATE TABLE table2(x UInt32) ENGINE = MergeTree ORDER BY tuple()" + ) instance.query("INSERT INTO table1 VALUES (1)") instance.query("INSERT INTO table2 VALUES (2)") @@ -35,21 +39,29 @@ def test_merge(): assert instance.query(select_query) == "1\n2\n" instance.query("CREATE USER A") - assert "it's necessary to have grant CREATE TEMPORARY TABLE ON *.*" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant CREATE TEMPORARY TABLE ON *.*" + in instance.query_and_get_error(select_query, user="A") + ) instance.query("GRANT CREATE TEMPORARY TABLE ON *.* TO A") - assert "no tables in database matches" in instance.query_and_get_error(select_query, user = 'A') + assert "no tables in database matches" in instance.query_and_get_error( + select_query, user="A" + ) instance.query("GRANT SELECT ON default.table1 TO A") - assert instance.query(select_query, user = 'A') == "1\n" + assert instance.query(select_query, user="A") == "1\n" instance.query("GRANT SELECT ON default.* TO A") - assert instance.query(select_query, user = 'A') == "1\n2\n" + assert instance.query(select_query, user="A") == "1\n2\n" instance.query("REVOKE SELECT ON default.table1 FROM A") - assert instance.query(select_query, user = 'A') == "2\n" + assert instance.query(select_query, user="A") == "2\n" instance.query("REVOKE ALL ON default.* FROM A") instance.query("GRANT SELECT ON default.table1 TO A") instance.query("GRANT INSERT ON default.table2 TO A") - assert "it's necessary to have grant SELECT ON default.table2" in instance.query_and_get_error(select_query, user = 'A') + assert ( + "it's necessary to have grant SELECT ON default.table2" + in instance.query_and_get_error(select_query, user="A") + ) diff --git a/tests/integration/test_tcp_handler_http_responses/test_case.py b/tests/integration/test_tcp_handler_http_responses/test_case.py index 38b5ba909a7..2fc53674ca4 100644 --- 
a/tests/integration/test_tcp_handler_http_responses/test_case.py +++ b/tests/integration/test_tcp_handler_http_responses/test_case.py @@ -7,16 +7,15 @@ import requests cluster = ClickHouseCluster(__file__) node_with_http = cluster.add_instance( - 'node_with_http', - main_configs=["configs/config.d/http-port-31337.xml"] + "node_with_http", main_configs=["configs/config.d/http-port-31337.xml"] ) HTTP_PORT = 31337 node_without_http = cluster.add_instance( - 'node_without_http', - main_configs=["configs/config.d/no-http-port.xml"] + "node_without_http", main_configs=["configs/config.d/no-http-port.xml"] ) + @pytest.fixture(scope="module") def start_cluster(): try: @@ -26,17 +25,15 @@ def start_cluster(): finally: cluster.shutdown() + def test_request_to_http_full_instance(start_cluster): - response = requests.get( - f'http://{node_with_http.ip_address}:9000' - ) + response = requests.get(f"http://{node_with_http.ip_address}:9000") assert response.status_code == 400 assert str(HTTP_PORT) in response.text + def test_request_to_http_less_instance(start_cluster): - response = requests.post( - f'http://{node_without_http.ip_address}:9000' - ) + response = requests.post(f"http://{node_without_http.ip_address}:9000") assert response.status_code == 400 assert str(HTTP_PORT) not in response.text - assert "8123" not in response.text + assert "8123" not in response.text diff --git a/tests/integration/test_text_log_level/test.py b/tests/integration/test_text_log_level/test.py index 44679481266..dc0ae6333d6 100644 --- a/tests/integration/test_text_log_level/test.py +++ b/tests/integration/test_text_log_level/test.py @@ -7,10 +7,10 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=["configs/config.d/text_log.xml"]) +node = cluster.add_instance("node", main_configs=["configs/config.d/text_log.xml"]) -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def start_cluster(): try: cluster.start() @@ -23,11 +23,27 @@ def start_cluster(): def test_basic(start_cluster): with pytest.raises(QueryRuntimeException): # generates log with "Error" level - node.query('SELECT * FROM no_such_table') + node.query("SELECT * FROM no_such_table") - node.query('SYSTEM FLUSH LOGS') + node.query("SYSTEM FLUSH LOGS") - assert int(node.query("SELECT count() FROM system.text_log WHERE level = 'Trace'")) == 0 - assert int(node.query("SELECT count() FROM system.text_log WHERE level = 'Debug'")) == 0 - assert int(node.query("SELECT count() FROM system.text_log WHERE level = 'Information'")) >= 1 - assert int(node.query("SELECT count() FROM system.text_log WHERE level = 'Error'")) >= 1 + assert ( + int(node.query("SELECT count() FROM system.text_log WHERE level = 'Trace'")) + == 0 + ) + assert ( + int(node.query("SELECT count() FROM system.text_log WHERE level = 'Debug'")) + == 0 + ) + assert ( + int( + node.query( + "SELECT count() FROM system.text_log WHERE level = 'Information'" + ) + ) + >= 1 + ) + assert ( + int(node.query("SELECT count() FROM system.text_log WHERE level = 'Error'")) + >= 1 + ) diff --git a/tests/integration/test_timezone_config/test.py b/tests/integration/test_timezone_config/test.py index af7e3548e6a..e4a9f75abab 100644 --- a/tests/integration/test_timezone_config/test.py +++ b/tests/integration/test_timezone_config/test.py @@ -3,7 +3,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', main_configs=['configs/config.xml']) +node = 
cluster.add_instance("node", main_configs=["configs/config.xml"]) @pytest.fixture(scope="module") @@ -18,24 +18,52 @@ def start_cluster(): def test_check_timezone_config(start_cluster): assert node.query("SELECT toDateTime(1111111111)") == "2005-03-17 17:58:31\n" + def test_overflow_toDate(start_cluster): assert node.query("SELECT toDate('2999-12-31','UTC')") == "2149-06-06\n" assert node.query("SELECT toDate('2021-12-21','UTC')") == "2021-12-21\n" assert node.query("SELECT toDate('1000-12-31','UTC')") == "1970-01-01\n" + def test_overflow_toDate32(start_cluster): assert node.query("SELECT toDate32('2999-12-31','UTC')") == "2283-11-11\n" assert node.query("SELECT toDate32('2021-12-21','UTC')") == "2021-12-21\n" assert node.query("SELECT toDate32('1000-12-31','UTC')") == "1925-01-01\n" + def test_overflow_toDateTime(start_cluster): - assert node.query("SELECT toDateTime('2999-12-31 00:00:00','UTC')") == "2106-02-07 06:28:15\n" - assert node.query("SELECT toDateTime('2106-02-07 06:28:15','UTC')") == "2106-02-07 06:28:15\n" - assert node.query("SELECT toDateTime('1970-01-01 00:00:00','UTC')") == "1970-01-01 00:00:00\n" - assert node.query("SELECT toDateTime('1000-01-01 00:00:00','UTC')") == "1970-01-01 00:00:00\n" + assert ( + node.query("SELECT toDateTime('2999-12-31 00:00:00','UTC')") + == "2106-02-07 06:28:15\n" + ) + assert ( + node.query("SELECT toDateTime('2106-02-07 06:28:15','UTC')") + == "2106-02-07 06:28:15\n" + ) + assert ( + node.query("SELECT toDateTime('1970-01-01 00:00:00','UTC')") + == "1970-01-01 00:00:00\n" + ) + assert ( + node.query("SELECT toDateTime('1000-01-01 00:00:00','UTC')") + == "1970-01-01 00:00:00\n" + ) + def test_overflow_parseDateTimeBestEffort(start_cluster): - assert node.query("SELECT parseDateTimeBestEffort('2999-12-31 00:00:00','UTC')") == "2106-02-07 06:28:15\n" - assert node.query("SELECT parseDateTimeBestEffort('2106-02-07 06:28:15','UTC')") == "2106-02-07 06:28:15\n" - assert node.query("SELECT parseDateTimeBestEffort('1970-01-01 00:00:00','UTC')") == "1970-01-01 00:00:00\n" - assert node.query("SELECT parseDateTimeBestEffort('1000-01-01 00:00:00','UTC')") == "1970-01-01 00:00:00\n" + assert ( + node.query("SELECT parseDateTimeBestEffort('2999-12-31 00:00:00','UTC')") + == "2106-02-07 06:28:15\n" + ) + assert ( + node.query("SELECT parseDateTimeBestEffort('2106-02-07 06:28:15','UTC')") + == "2106-02-07 06:28:15\n" + ) + assert ( + node.query("SELECT parseDateTimeBestEffort('1970-01-01 00:00:00','UTC')") + == "1970-01-01 00:00:00\n" + ) + assert ( + node.query("SELECT parseDateTimeBestEffort('1000-01-01 00:00:00','UTC')") + == "1970-01-01 00:00:00\n" + ) diff --git a/tests/integration/test_tmp_policy/test.py b/tests/integration/test_tmp_policy/test.py index f7174c3b695..c919d9a0c3d 100644 --- a/tests/integration/test_tmp_policy/test.py +++ b/tests/integration/test_tmp_policy/test.py @@ -7,12 +7,14 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', - main_configs=["configs/config.d/storage_configuration.xml"], - tmpfs=['/disk1:size=100M', '/disk2:size=100M']) +node = cluster.add_instance( + "node", + main_configs=["configs/config.d/storage_configuration.xml"], + tmpfs=["/disk1:size=100M", "/disk2:size=100M"], +) -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def start_cluster(): try: cluster.start() @@ -22,15 +24,19 @@ def start_cluster(): def test_different_versions(start_cluster): - query = 'SELECT count(ignore(*)) FROM (SELECT * FROM system.numbers LIMIT 1e7) 
GROUP BY number' + query = "SELECT count(ignore(*)) FROM (SELECT * FROM system.numbers LIMIT 1e7) GROUP BY number" settings = { - 'max_bytes_before_external_group_by': 1 << 20, - 'max_bytes_before_external_sort': 1 << 20, + "max_bytes_before_external_group_by": 1 << 20, + "max_bytes_before_external_sort": 1 << 20, } - assert node.contains_in_log('Setting up /disk1/ to store temporary data in it') - assert node.contains_in_log('Setting up /disk2/ to store temporary data in it') + assert node.contains_in_log("Setting up /disk1/ to store temporary data in it") + assert node.contains_in_log("Setting up /disk2/ to store temporary data in it") node.query(query, settings=settings) - assert node.contains_in_log('Writing part of aggregation data into temporary file /disk1/') - assert node.contains_in_log('Writing part of aggregation data into temporary file /disk2/') + assert node.contains_in_log( + "Writing part of aggregation data into temporary file /disk1/" + ) + assert node.contains_in_log( + "Writing part of aggregation data into temporary file /disk2/" + ) diff --git a/tests/integration/test_ttl_move/test.py b/tests/integration/test_ttl_move/test.py index d8373ccb48a..49d7ab4f2fc 100644 --- a/tests/integration/test_ttl_move/test.py +++ b/tests/integration/test_ttl_move/test.py @@ -15,21 +15,31 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', - main_configs=['configs/logs_config.xml', "configs/config.d/instant_moves.xml", - "configs/config.d/storage_configuration.xml", - "configs/config.d/cluster.xml", ], - with_zookeeper=True, - tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'], - macros={"shard": 0, "replica": 1}) +node1 = cluster.add_instance( + "node1", + main_configs=[ + "configs/logs_config.xml", + "configs/config.d/instant_moves.xml", + "configs/config.d/storage_configuration.xml", + "configs/config.d/cluster.xml", + ], + with_zookeeper=True, + tmpfs=["/jbod1:size=40M", "/jbod2:size=40M", "/external:size=200M"], + macros={"shard": 0, "replica": 1}, +) -node2 = cluster.add_instance('node2', - main_configs=['configs/logs_config.xml', "configs/config.d/instant_moves.xml", - "configs/config.d/storage_configuration.xml", - "configs/config.d/cluster.xml", ], - with_zookeeper=True, - tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'], - macros={"shard": 0, "replica": 2}) +node2 = cluster.add_instance( + "node2", + main_configs=[ + "configs/logs_config.xml", + "configs/config.d/instant_moves.xml", + "configs/config.d/storage_configuration.xml", + "configs/config.d/cluster.xml", + ], + with_zookeeper=True, + tmpfs=["/jbod1:size=40M", "/jbod2:size=40M", "/external:size=200M"], + macros={"shard": 0, "replica": 2}, +) @pytest.fixture(scope="module") @@ -47,12 +57,20 @@ def get_used_disks_for_table(node, table_name, partition=None): suffix = "" else: suffix = "and partition='{}'".format(partition) - return node.query(""" + return ( + node.query( + """ SELECT disk_name FROM system.parts WHERE table == '{name}' AND active=1 {suffix} ORDER BY modification_time - """.format(name=table_name, suffix=suffix)).strip().split('\n') + """.format( + name=table_name, suffix=suffix + ) + ) + .strip() + .split("\n") + ) def check_used_disks_with_retry(node, table_name, expected_disks, retries): @@ -63,33 +81,55 @@ def check_used_disks_with_retry(node, table_name, expected_disks, retries): time.sleep(0.5) return False + # Use unique table name for flaky checker, that run tests multiple times def 
unique_table_name(base_name): - return f'{base_name}_{int(time.time())}' + return f"{base_name}_{int(time.time())}" + def wait_parts_mover(node, table, *args, **kwargs): # wait for MergeTreePartsMover - assert_logs_contain_with_retry(node, f'default.{table}.*Removed part from old location', *args, **kwargs) + assert_logs_contain_with_retry( + node, f"default.{table}.*Removed part from old location", *args, **kwargs + ) -@pytest.mark.parametrize("name,engine,alter", [ - pytest.param("mt_test_rule_with_invalid_destination", "MergeTree()", 0, id="case0"), - pytest.param("replicated_mt_test_rule_with_invalid_destination", - "ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')", 0, id="case1"), - pytest.param("mt_test_rule_with_invalid_destination", "MergeTree()", 1, id="case2"), - pytest.param("replicated_mt_test_rule_with_invalid_destination", - "ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')", 1, id="case3"), -]) +@pytest.mark.parametrize( + "name,engine,alter", + [ + pytest.param( + "mt_test_rule_with_invalid_destination", "MergeTree()", 0, id="case0" + ), + pytest.param( + "replicated_mt_test_rule_with_invalid_destination", + "ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')", + 0, + id="case1", + ), + pytest.param( + "mt_test_rule_with_invalid_destination", "MergeTree()", 1, id="case2" + ), + pytest.param( + "replicated_mt_test_rule_with_invalid_destination", + "ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')", + 1, + id="case3", + ), + ], +) def test_rule_with_invalid_destination(started_cluster, name, engine, alter): name = unique_table_name(name) try: + def get_command(x, policy): x = x or "" if alter and x: return """ ALTER TABLE {name} MODIFY TTL {expression} - """.format(expression=x, name=name) + """.format( + expression=x, name=name + ) else: return """ CREATE TABLE {name} ( @@ -99,13 +139,17 @@ def test_rule_with_invalid_destination(started_cluster, name, engine, alter): ORDER BY tuple() {expression} SETTINGS storage_policy='{policy}' - """.format(expression=x, name=name, engine=engine, policy=policy) + """.format( + expression=x, name=name, engine=engine, policy=policy + ) if alter: node1.query(get_command(None, "small_jbod_with_external")) with pytest.raises(QueryRuntimeException): - node1.query(get_command("TTL d1 TO DISK 'unknown'", "small_jbod_with_external")) + node1.query( + get_command("TTL d1 TO DISK 'unknown'", "small_jbod_with_external") + ) node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) @@ -113,7 +157,9 @@ def test_rule_with_invalid_destination(started_cluster, name, engine, alter): node1.query(get_command(None, "small_jbod_with_external")) with pytest.raises(QueryRuntimeException): - node1.query(get_command("TTL d1 TO VOLUME 'unknown'", "small_jbod_with_external")) + node1.query( + get_command("TTL d1 TO VOLUME 'unknown'", "small_jbod_with_external") + ) node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) @@ -135,19 +181,41 @@ def test_rule_with_invalid_destination(started_cluster, name, engine, alter): node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) -@pytest.mark.parametrize("name,engine,positive", [ - pytest.param("mt_test_inserts_to_disk_do_not_work", "MergeTree()", 0, id="mt_test_inserts_to_disk_do_not_work"), - pytest.param("replicated_mt_test_inserts_to_disk_do_not_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_do_not_work', '1')", 0, 
id="replicated_mt_test_inserts_to_disk_do_not_work"), - pytest.param("mt_test_inserts_to_disk_work", "MergeTree()", 1, id="mt_test_inserts_to_disk_work_1"), - pytest.param("replicated_mt_test_inserts_to_disk_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_work', '1')", 1, id="replicated_mt_test_inserts_to_disk_work_1"), -]) +@pytest.mark.parametrize( + "name,engine,positive", + [ + pytest.param( + "mt_test_inserts_to_disk_do_not_work", + "MergeTree()", + 0, + id="mt_test_inserts_to_disk_do_not_work", + ), + pytest.param( + "replicated_mt_test_inserts_to_disk_do_not_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_do_not_work', '1')", + 0, + id="replicated_mt_test_inserts_to_disk_do_not_work", + ), + pytest.param( + "mt_test_inserts_to_disk_work", + "MergeTree()", + 1, + id="mt_test_inserts_to_disk_work_1", + ), + pytest.param( + "replicated_mt_test_inserts_to_disk_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_work', '1')", + 1, + id="replicated_mt_test_inserts_to_disk_work_1", + ), + ], +) def test_inserts_to_disk_work(started_cluster, name, engine, positive): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( s1 String, d1 DateTime @@ -155,18 +223,33 @@ def test_inserts_to_disk_work(started_cluster, name, engine, positive): ORDER BY tuple() TTL d1 TO DISK 'external' SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) data = [] # 10MB in total for i in range(10): - data.append(("randomPrintableASCII(1024*1024)", "toDateTime({})".format( - time.time() - 1 if i > 0 or positive else time.time() + 300))) + data.append( + ( + "randomPrintableASCII(1024*1024)", + "toDateTime({})".format( + time.time() - 1 if i > 0 or positive else time.time() + 300 + ), + ) + ) - node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + node1.query( + "INSERT INTO {} (s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"external" if positive else "jbod1"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + ) finally: try: @@ -175,30 +258,49 @@ def test_inserts_to_disk_work(started_cluster, name, engine, positive): pass -@pytest.mark.parametrize("name,engine", [ - pytest.param("mt_test_moves_work_after_storage_policy_change", "MergeTree()", id="mt_test_moves_work_after_storage_policy_change"), - pytest.param("replicated_mt_test_moves_work_after_storage_policy_change", - "ReplicatedMergeTree('/clickhouse/test_moves_work_after_storage_policy_change', '1')", id="replicated_mt_test_moves_work_after_storage_policy_change"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param( + "mt_test_moves_work_after_storage_policy_change", + "MergeTree()", + id="mt_test_moves_work_after_storage_policy_change", + ), + pytest.param( + "replicated_mt_test_moves_work_after_storage_policy_change", + "ReplicatedMergeTree('/clickhouse/test_moves_work_after_storage_policy_change', '1')", + id="replicated_mt_test_moves_work_after_storage_policy_change", + ), + ], +) def test_moves_work_after_storage_policy_change(started_cluster, name, engine): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ 
CREATE TABLE {name} ( s1 String, d1 DateTime ) ENGINE = {engine} ORDER BY tuple() - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) node1.query( """ALTER TABLE {name} MODIFY SETTING storage_policy='default_with_small_jbod_with_external'""".format( - name=name)) + name=name + ) + ) # Second expression is preferred because d1 > now()-3600. node1.query( - """ALTER TABLE {name} MODIFY TTL now()-3600 TO DISK 'jbod1', d1 TO DISK 'external'""".format(name=name)) + """ALTER TABLE {name} MODIFY TTL now()-3600 TO DISK 'jbod1', d1 TO DISK 'external'""".format( + name=name + ) + ) wait_expire_1 = 12 wait_expire_2 = 4 @@ -206,9 +308,15 @@ def test_moves_work_after_storage_policy_change(started_cluster, name, engine): data = [] # 10MB in total for i in range(10): - data.append(("randomPrintableASCII(1024*1024)", "toDateTime({})".format(time_1))) + data.append( + ("randomPrintableASCII(1024*1024)", "toDateTime({})".format(time_1)) + ) - node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + node1.query( + "INSERT INTO {} (s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod1"} @@ -217,25 +325,49 @@ def test_moves_work_after_storage_policy_change(started_cluster, name, engine): used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"external"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + ) finally: node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) -@pytest.mark.parametrize("name,engine,positive", [ - pytest.param("mt_test_moves_to_disk_do_not_work", "MergeTree()", 0, id="mt_test_moves_to_disk_do_not_work"), - pytest.param("replicated_mt_test_moves_to_disk_do_not_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_do_not_work', '1')", 0, id="replicated_mt_test_moves_to_disk_do_not_work"), - pytest.param("mt_test_moves_to_disk_work", "MergeTree()", 1, id="mt_test_moves_to_disk_work"), - pytest.param("replicated_mt_test_moves_to_disk_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_work', '1')", 1, id="replicated_mt_test_moves_to_disk_work"), -]) +@pytest.mark.parametrize( + "name,engine,positive", + [ + pytest.param( + "mt_test_moves_to_disk_do_not_work", + "MergeTree()", + 0, + id="mt_test_moves_to_disk_do_not_work", + ), + pytest.param( + "replicated_mt_test_moves_to_disk_do_not_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_do_not_work', '1')", + 0, + id="replicated_mt_test_moves_to_disk_do_not_work", + ), + pytest.param( + "mt_test_moves_to_disk_work", + "MergeTree()", + 1, + id="mt_test_moves_to_disk_work", + ), + pytest.param( + "replicated_mt_test_moves_to_disk_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_work', '1')", + 1, + id="replicated_mt_test_moves_to_disk_work", + ), + ], +) def test_moves_to_disk_work(started_cluster, name, engine, positive): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( s1 String, d1 DateTime @@ -243,22 +375,35 @@ def test_moves_to_disk_work(started_cluster, name, engine, positive): ORDER BY tuple() TTL d1 TO DISK 'external' SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, 
engine=engine + ) + ) wait_expire_1 = 12 wait_expire_2 = 20 time_1 = time.time() + wait_expire_1 time_2 = time.time() + wait_expire_1 + wait_expire_2 - wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,)) + wait_expire_1_thread = threading.Thread( + target=time.sleep, args=(wait_expire_1,) + ) wait_expire_1_thread.start() data = [] # 10MB in total for i in range(10): - data.append(("randomPrintableASCII(1024*1024)", - "toDateTime({})".format(time_1 if i > 0 or positive else time_2))) + data.append( + ( + "randomPrintableASCII(1024*1024)", + "toDateTime({})".format(time_1 if i > 0 or positive else time_2), + ) + ) - node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + node1.query( + "INSERT INTO {} (s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod1"} @@ -268,22 +413,35 @@ def test_moves_to_disk_work(started_cluster, name, engine, positive): used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"external" if positive else "jbod1"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + ) finally: node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) -@pytest.mark.parametrize("name,engine", [ - pytest.param("mt_test_moves_to_volume_work", "MergeTree()", id="mt_test_moves_to_volume_work"), - pytest.param("replicated_mt_test_moves_to_volume_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_volume_work', '1')", id="replicated_mt_test_moves_to_volume_work"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param( + "mt_test_moves_to_volume_work", + "MergeTree()", + id="mt_test_moves_to_volume_work", + ), + pytest.param( + "replicated_mt_test_moves_to_volume_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_volume_work', '1')", + id="replicated_mt_test_moves_to_volume_work", + ), + ], +) def test_moves_to_volume_work(started_cluster, name, engine): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( p1 Int64, s1 String, @@ -293,7 +451,10 @@ def test_moves_to_volume_work(started_cluster, name, engine): PARTITION BY p1 TTL d1 TO VOLUME 'external' SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) wait_expire_1 = 10 time_1 = time.time() + wait_expire_1 @@ -302,38 +463,70 @@ def test_moves_to_volume_work(started_cluster, name, engine): data = [] # 10MB in total for i in range(5): data.append( - (str(p), "randomPrintableASCII(1024*1024)", "toDateTime({})".format(time_1))) + ( + str(p), + "randomPrintableASCII(1024*1024)", + "toDateTime({})".format(time_1), + ) + ) node1.query( - "INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + "INSERT INTO {} (p1, s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) - assert set(used_disks) == {'jbod1', 'jbod2'} + assert set(used_disks) == {"jbod1", "jbod2"} wait_parts_mover(node1, name, retry_count=40) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"external"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + assert ( 
+ node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + ) finally: node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) -@pytest.mark.parametrize("name,engine,positive", [ - pytest.param("mt_test_inserts_to_volume_do_not_work", "MergeTree()", 0, id="mt_test_inserts_to_volume_do_not_work"), - pytest.param("replicated_mt_test_inserts_to_volume_do_not_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_do_not_work', '1')", 0, id="replicated_mt_test_inserts_to_volume_do_not_work"), - pytest.param("mt_test_inserts_to_volume_work", "MergeTree()", 1, id="mt_test_inserts_to_volume_work"), - pytest.param("replicated_mt_test_inserts_to_volume_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_work', '1')", 1, id="replicated_mt_test_inserts_to_volume_work"), -]) +@pytest.mark.parametrize( + "name,engine,positive", + [ + pytest.param( + "mt_test_inserts_to_volume_do_not_work", + "MergeTree()", + 0, + id="mt_test_inserts_to_volume_do_not_work", + ), + pytest.param( + "replicated_mt_test_inserts_to_volume_do_not_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_do_not_work', '1')", + 0, + id="replicated_mt_test_inserts_to_volume_do_not_work", + ), + pytest.param( + "mt_test_inserts_to_volume_work", + "MergeTree()", + 1, + id="mt_test_inserts_to_volume_work", + ), + pytest.param( + "replicated_mt_test_inserts_to_volume_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_work', '1')", + 1, + id="replicated_mt_test_inserts_to_volume_work", + ), + ], +) def test_inserts_to_volume_work(started_cluster, name, engine, positive): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( p1 Int64, s1 String, @@ -343,56 +536,90 @@ def test_inserts_to_volume_work(started_cluster, name, engine, positive): PARTITION BY p1 TTL d1 TO VOLUME 'external' SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) node1.query("SYSTEM STOP MOVES {name}".format(name=name)) for p in range(2): data = [] # 20MB in total for i in range(10): - data.append((str(p), "randomPrintableASCII(1024*1024)", "toDateTime({})".format( - time.time() - 1 if i > 0 or positive else time.time() + 300))) + data.append( + ( + str(p), + "randomPrintableASCII(1024*1024)", + "toDateTime({})".format( + time.time() - 1 if i > 0 or positive else time.time() + 300 + ), + ) + ) node1.query( - "INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + "INSERT INTO {} (p1, s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"external" if positive else "jbod1"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "20" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "20" + ) finally: node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) -@pytest.mark.parametrize("name,engine", [ - pytest.param("mt_test_moves_to_disk_eventually_work", "MergeTree()", id="mt_test_moves_to_disk_eventually_work"), - pytest.param("replicated_mt_test_moves_to_disk_eventually_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_eventually_work', '1')", id="replicated_mt_test_moves_to_disk_eventually_work"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param( + 
"mt_test_moves_to_disk_eventually_work", + "MergeTree()", + id="mt_test_moves_to_disk_eventually_work", + ), + pytest.param( + "replicated_mt_test_moves_to_disk_eventually_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_eventually_work', '1')", + id="replicated_mt_test_moves_to_disk_eventually_work", + ), + ], +) def test_moves_to_disk_eventually_work(started_cluster, name, engine): name = unique_table_name(name) try: name_temp = name + "_temp" - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( s1 String ) ENGINE = MergeTree() ORDER BY tuple() SETTINGS storage_policy='only_jbod2' - """.format(name=name_temp)) + """.format( + name=name_temp + ) + ) data = [] # 35MB in total for i in range(35): data.append("randomPrintableASCII(1024*1024)") - node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["(" + x + ")" for x in data]))) + node1.query( + "INSERT INTO {} VALUES {}".format( + name_temp, ",".join(["(" + x + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name_temp) assert set(used_disks) == {"jbod2"} - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( s1 String, d1 DateTime @@ -400,14 +627,25 @@ def test_moves_to_disk_eventually_work(started_cluster, name, engine): ORDER BY tuple() TTL d1 TO DISK 'jbod2' SETTINGS storage_policy='jbod1_with_jbod2' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) data = [] # 10MB in total for i in range(10): data.append( - ("randomPrintableASCII(1024*1024)", "toDateTime({})".format(time.time() - 1))) + ( + "randomPrintableASCII(1024*1024)", + "toDateTime({})".format(time.time() - 1), + ) + ) - node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + node1.query( + "INSERT INTO {} (s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod1"} @@ -418,7 +656,9 @@ def test_moves_to_disk_eventually_work(started_cluster, name, engine): used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod2"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + ) finally: node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name_temp)) @@ -430,7 +670,8 @@ def test_replicated_download_ttl_info(started_cluster): engine = "ReplicatedMergeTree('/clickhouse/test_replicated_download_ttl_info', '{replica}')" try: for i, node in enumerate((node1, node2), start=1): - node.query(""" + node.query( + """ CREATE TABLE {name} ( s1 String, d1 DateTime @@ -438,11 +679,18 @@ def test_replicated_download_ttl_info(started_cluster): ORDER BY tuple() TTL d1 TO DISK 'external' SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) node1.query("SYSTEM STOP MOVES {}".format(name)) - node2.query("INSERT INTO {} (s1, d1) VALUES (randomPrintableASCII(1024*1024), toDateTime({}))".format(name, time.time() - 100)) + node2.query( + "INSERT INTO {} (s1, d1) VALUES (randomPrintableASCII(1024*1024), toDateTime({}))".format( + name, time.time() - 100 + ) + ) assert set(get_used_disks_for_table(node2, name)) == {"external"} @@ -459,19 +707,41 @@ def test_replicated_download_ttl_info(started_cluster): continue -@pytest.mark.parametrize("name,engine,positive", [ - 
pytest.param("mt_test_merges_to_disk_do_not_work", "MergeTree()", 0, id="mt_test_merges_to_disk_do_not_work"), - pytest.param("replicated_mt_test_merges_to_disk_do_not_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_do_not_work', '1')", 0, id="mt_test_merges_to_disk_do_not_work"), - pytest.param("mt_test_merges_to_disk_work", "MergeTree()", 1, id="mt_test_merges_to_disk_work"), - pytest.param("replicated_mt_test_merges_to_disk_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_work', '1')", 1, id="replicated_mt_test_merges_to_disk_work"), -]) +@pytest.mark.parametrize( + "name,engine,positive", + [ + pytest.param( + "mt_test_merges_to_disk_do_not_work", + "MergeTree()", + 0, + id="mt_test_merges_to_disk_do_not_work", + ), + pytest.param( + "replicated_mt_test_merges_to_disk_do_not_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_do_not_work', '1')", + 0, + id="mt_test_merges_to_disk_do_not_work", + ), + pytest.param( + "mt_test_merges_to_disk_work", + "MergeTree()", + 1, + id="mt_test_merges_to_disk_work", + ), + pytest.param( + "replicated_mt_test_merges_to_disk_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_work', '1')", + 1, + id="replicated_mt_test_merges_to_disk_work", + ), + ], +) def test_merges_to_disk_work(started_cluster, name, engine, positive): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( s1 String, d1 DateTime @@ -479,7 +749,10 @@ def test_merges_to_disk_work(started_cluster, name, engine, positive): ORDER BY tuple() TTL d1 TO DISK 'external' SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) node1.query("SYSTEM STOP MERGES {}".format(name)) node1.query("SYSTEM STOP MOVES {}".format(name)) @@ -489,22 +762,39 @@ def test_merges_to_disk_work(started_cluster, name, engine, positive): time_1 = time.time() + wait_expire_1 time_2 = time.time() + wait_expire_1 + wait_expire_2 - wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,)) + wait_expire_1_thread = threading.Thread( + target=time.sleep, args=(wait_expire_1,) + ) wait_expire_1_thread.start() for _ in range(2): data = [] # 16MB in total for i in range(8): - data.append(("randomPrintableASCII(1024*1024)", - "toDateTime({})".format(time_1 if i > 0 or positive else time_2))) + data.append( + ( + "randomPrintableASCII(1024*1024)", + "toDateTime({})".format( + time_1 if i > 0 or positive else time_2 + ), + ) + ) node1.query( - "INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + "INSERT INTO {} (s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod1"} - assert "2" == node1.query( - "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip() + assert ( + "2" + == node1.query( + "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format( + name + ) + ).strip() + ) wait_expire_1_thread.join() time.sleep(wait_expire_2 / 2) @@ -514,43 +804,70 @@ def test_merges_to_disk_work(started_cluster, name, engine, positive): used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"external" if positive else "jbod1"} - assert "1" == node1.query( - "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip() + assert 
( + "1" + == node1.query( + "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format( + name + ) + ).strip() + ) - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "16" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "16" + ) finally: node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) -@pytest.mark.parametrize("name,engine", [ - pytest.param("mt_test_merges_with_full_disk_work", "MergeTree()", id="mt_test_merges_with_full_disk_work"), - pytest.param("replicated_mt_test_merges_with_full_disk_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_merges_with_full_disk_work', '1')", id="replicated_mt_test_merges_with_full_disk_work"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param( + "mt_test_merges_with_full_disk_work", + "MergeTree()", + id="mt_test_merges_with_full_disk_work", + ), + pytest.param( + "replicated_mt_test_merges_with_full_disk_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_merges_with_full_disk_work', '1')", + id="replicated_mt_test_merges_with_full_disk_work", + ), + ], +) def test_merges_with_full_disk_work(started_cluster, name, engine): name = unique_table_name(name) try: name_temp = name + "_temp" - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( s1 String ) ENGINE = MergeTree() ORDER BY tuple() SETTINGS storage_policy='only_jbod2' - """.format(name=name_temp)) + """.format( + name=name_temp + ) + ) data = [] # 35MB in total for i in range(35): data.append("randomPrintableASCII(1024*1024)") - node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["(" + x + ")" for x in data]))) + node1.query( + "INSERT INTO {} VALUES {}".format( + name_temp, ",".join(["(" + x + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name_temp) assert set(used_disks) == {"jbod2"} - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( s1 String, d1 DateTime @@ -558,25 +875,41 @@ def test_merges_with_full_disk_work(started_cluster, name, engine): ORDER BY tuple() TTL d1 TO DISK 'jbod2' SETTINGS storage_policy='jbod1_with_jbod2' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) wait_expire_1 = 10 time_1 = time.time() + wait_expire_1 - wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,)) + wait_expire_1_thread = threading.Thread( + target=time.sleep, args=(wait_expire_1,) + ) wait_expire_1_thread.start() for _ in range(2): data = [] # 12MB in total for i in range(6): - data.append(("randomPrintableASCII(1024*1024)", "toDateTime({})".format(time_1))) # 1MB row + data.append( + ("randomPrintableASCII(1024*1024)", "toDateTime({})".format(time_1)) + ) # 1MB row node1.query( - "INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + "INSERT INTO {} (s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod1"} - assert "2" == node1.query( - "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip() + assert ( + "2" + == node1.query( + "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format( + name + ) + ).strip() + ) wait_expire_1_thread.join() @@ -585,29 +918,59 @@ def test_merges_with_full_disk_work(started_cluster, name, engine): used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod1"} # Merged to the same disk 
against the rule. - assert "1" == node1.query( - "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip() + assert ( + "1" + == node1.query( + "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format( + name + ) + ).strip() + ) - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "12" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "12" + ) finally: node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name_temp)) node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) -@pytest.mark.parametrize("name,engine,positive", [ - pytest.param("mt_test_moves_after_merges_do_not_work", "MergeTree()", 0, id="mt_test_moves_after_merges_do_not_work"), - pytest.param("replicated_mt_test_moves_after_merges_do_not_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_do_not_work', '1')", 0, id="replicated_mt_test_moves_after_merges_do_not_work"), - pytest.param("mt_test_moves_after_merges_work", "MergeTree()", 1, id="mt_test_moves_after_merges_work"), - pytest.param("replicated_mt_test_moves_after_merges_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_work', '1')", 1, id="replicated_mt_test_moves_after_merges_work"), -]) +@pytest.mark.parametrize( + "name,engine,positive", + [ + pytest.param( + "mt_test_moves_after_merges_do_not_work", + "MergeTree()", + 0, + id="mt_test_moves_after_merges_do_not_work", + ), + pytest.param( + "replicated_mt_test_moves_after_merges_do_not_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_do_not_work', '1')", + 0, + id="replicated_mt_test_moves_after_merges_do_not_work", + ), + pytest.param( + "mt_test_moves_after_merges_work", + "MergeTree()", + 1, + id="mt_test_moves_after_merges_work", + ), + pytest.param( + "replicated_mt_test_moves_after_merges_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_work', '1')", + 1, + id="replicated_mt_test_moves_after_merges_work", + ), + ], +) def test_moves_after_merges_work(started_cluster, name, engine, positive): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( s1 String, d1 DateTime @@ -615,31 +978,51 @@ def test_moves_after_merges_work(started_cluster, name, engine, positive): ORDER BY tuple() TTL d1 TO DISK 'external' SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) wait_expire_1 = 16 wait_expire_2 = 20 time_1 = time.time() + wait_expire_1 time_2 = time.time() + wait_expire_1 + wait_expire_2 - wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,)) + wait_expire_1_thread = threading.Thread( + target=time.sleep, args=(wait_expire_1,) + ) wait_expire_1_thread.start() for _ in range(2): data = [] # 14MB in total for i in range(7): - data.append(("randomPrintableASCII(1024*1024)", - "toDateTime({})".format(time_1 if i > 0 or positive else time_2))) # 1MB row + data.append( + ( + "randomPrintableASCII(1024*1024)", + "toDateTime({})".format( + time_1 if i > 0 or positive else time_2 + ), + ) + ) # 1MB row node1.query( - "INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + "INSERT INTO {} (s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) node1.query("OPTIMIZE TABLE {}".format(name)) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) 
== {"jbod1"} - assert "1" == node1.query( - "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip() + assert ( + "1" + == node1.query( + "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format( + name + ) + ).strip() + ) wait_expire_1_thread.join() time.sleep(wait_expire_2 / 2) @@ -647,31 +1030,81 @@ def test_moves_after_merges_work(started_cluster, name, engine, positive): used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"external" if positive else "jbod1"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "14" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "14" + ) finally: node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) -@pytest.mark.parametrize("name,engine,positive,bar", [ - pytest.param("mt_test_moves_after_alter_do_not_work", "MergeTree()", 0, "DELETE", id="mt_negative"), - pytest.param("replicated_mt_test_moves_after_alter_do_not_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')", 0, "DELETE", id="repicated_negative"), - pytest.param("mt_test_moves_after_alter_work", "MergeTree()", 1, "DELETE", id="mt_positive"), - pytest.param("replicated_mt_test_moves_after_alter_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')", 1, "DELETE", id="repicated_positive"), - pytest.param("mt_test_moves_after_alter_do_not_work", "MergeTree()", 0, "TO DISK 'external'", id="mt_external_negative"), - pytest.param("replicated_mt_test_moves_after_alter_do_not_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')", 0, "TO DISK 'external'", id="replicated_external_negative"), - pytest.param("mt_test_moves_after_alter_work", "MergeTree()", 1, "TO DISK 'external'", id="mt_external_positive"), - pytest.param("replicated_mt_test_moves_after_alter_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')", 1, "TO DISK 'external'", id="replicated_external_positive"), -]) +@pytest.mark.parametrize( + "name,engine,positive,bar", + [ + pytest.param( + "mt_test_moves_after_alter_do_not_work", + "MergeTree()", + 0, + "DELETE", + id="mt_negative", + ), + pytest.param( + "replicated_mt_test_moves_after_alter_do_not_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')", + 0, + "DELETE", + id="repicated_negative", + ), + pytest.param( + "mt_test_moves_after_alter_work", + "MergeTree()", + 1, + "DELETE", + id="mt_positive", + ), + pytest.param( + "replicated_mt_test_moves_after_alter_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')", + 1, + "DELETE", + id="repicated_positive", + ), + pytest.param( + "mt_test_moves_after_alter_do_not_work", + "MergeTree()", + 0, + "TO DISK 'external'", + id="mt_external_negative", + ), + pytest.param( + "replicated_mt_test_moves_after_alter_do_not_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')", + 0, + "TO DISK 'external'", + id="replicated_external_negative", + ), + pytest.param( + "mt_test_moves_after_alter_work", + "MergeTree()", + 1, + "TO DISK 'external'", + id="mt_external_positive", + ), + pytest.param( + "replicated_mt_test_moves_after_alter_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')", + 1, + "TO DISK 'external'", + id="replicated_external_positive", + ), + ], +) def 
test_ttls_do_not_work_after_alter(started_cluster, name, engine, positive, bar): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( s1 String, d1 DateTime @@ -679,40 +1112,64 @@ def test_ttls_do_not_work_after_alter(started_cluster, name, engine, positive, b ORDER BY tuple() TTL d1 TO DISK 'external' SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) if positive: - node1.query(""" + node1.query( + """ ALTER TABLE {name} MODIFY TTL d1 + INTERVAL 15 MINUTE {bar} - """.format(name=name, bar=bar)) # That shall disable TTL. + """.format( + name=name, bar=bar + ) + ) # That shall disable TTL. data = [] # 10MB in total for i in range(10): data.append( - ("randomPrintableASCII(1024*1024)", "toDateTime({})".format(time.time() - 1))) # 1MB row - node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + ( + "randomPrintableASCII(1024*1024)", + "toDateTime({})".format(time.time() - 1), + ) + ) # 1MB row + node1.query( + "INSERT INTO {} (s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod1" if positive else "external"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + ) finally: node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) -@pytest.mark.parametrize("name,engine", [ - pytest.param("mt_test_materialize_ttl_in_partition", "MergeTree()", id="mt"), - pytest.param("replicated_mt_test_materialize_ttl_in_partition", - "ReplicatedMergeTree('/clickhouse/test_materialize_ttl_in_partition', '1')", id="replicated"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("mt_test_materialize_ttl_in_partition", "MergeTree()", id="mt"), + pytest.param( + "replicated_mt_test_materialize_ttl_in_partition", + "ReplicatedMergeTree('/clickhouse/test_materialize_ttl_in_partition', '1')", + id="replicated", + ), + ], +) def test_materialize_ttl_in_partition(started_cluster, name, engine): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( p1 Int8, s1 String, @@ -721,61 +1178,109 @@ def test_materialize_ttl_in_partition(started_cluster, name, engine): ORDER BY p1 PARTITION BY p1 SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) data = [] # 5MB in total for i in range(5): - data.append((str(i), "randomPrintableASCII(1024*1024)", - "toDateTime({})".format(time.time() - 1))) # 1MB row + data.append( + ( + str(i), + "randomPrintableASCII(1024*1024)", + "toDateTime({})".format(time.time() - 1), + ) + ) # 1MB row node1.query( - "INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + "INSERT INTO {} (p1, s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod1"} - node1.query(""" + node1.query( + """ ALTER TABLE {name} MODIFY TTL d1 TO DISK 'external' SETTINGS materialize_ttl_after_modify = 0 - """.format(name=name)) + """.format( + name=name + ) + ) time.sleep(3) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod1"} - node1.query(""" + 
node1.query( + """ ALTER TABLE {name} MATERIALIZE TTL IN PARTITION 2 - """.format(name=name)) + """.format( + name=name + ) + ) - node1.query(""" + node1.query( + """ ALTER TABLE {name} MATERIALIZE TTL IN PARTITION 4 - """.format(name=name)) + """.format( + name=name + ) + ) time.sleep(3) used_disks_sets = [] for i in range(len(data)): - used_disks_sets.append(set(get_used_disks_for_table(node1, name, partition=i))) + used_disks_sets.append( + set(get_used_disks_for_table(node1, name, partition=i)) + ) - assert used_disks_sets == [{"jbod1"}, {"jbod1"}, {"external"}, {"jbod1"}, {"external"}] + assert used_disks_sets == [ + {"jbod1"}, + {"jbod1"}, + {"external"}, + {"jbod1"}, + {"external"}, + ] - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == str(len(data)) + assert node1.query( + "SELECT count() FROM {name}".format(name=name) + ).strip() == str(len(data)) finally: node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name)) -@pytest.mark.parametrize("name,engine,positive", [ - pytest.param("mt_test_alter_multiple_ttls_positive", "MergeTree()", True, id="positive"), - pytest.param("mt_replicated_test_alter_multiple_ttls_positive", - "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_positive', '1')", True, id="replicated_positive"), - pytest.param("mt_test_alter_multiple_ttls_negative", "MergeTree()", False, id="negative"), - pytest.param("mt_replicated_test_alter_multiple_ttls_negative", - "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_negative', '1')", False, id="replicated_negative"), -]) +@pytest.mark.parametrize( + "name,engine,positive", + [ + pytest.param( + "mt_test_alter_multiple_ttls_positive", "MergeTree()", True, id="positive" + ), + pytest.param( + "mt_replicated_test_alter_multiple_ttls_positive", + "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_positive', '1')", + True, + id="replicated_positive", + ), + pytest.param( + "mt_test_alter_multiple_ttls_negative", "MergeTree()", False, id="negative" + ), + pytest.param( + "mt_replicated_test_alter_multiple_ttls_negative", + "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_negative', '1')", + False, + id="replicated_negative", + ), + ], +) def test_alter_multiple_ttls(started_cluster, name, engine, positive): name = unique_table_name(name) @@ -802,7 +1307,8 @@ limitations under the License.""" """ now = time.time() try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( p1 Int64, s1 String, @@ -813,14 +1319,21 @@ limitations under the License.""" TTL d1 + INTERVAL 34 SECOND TO DISK 'jbod2', d1 + INTERVAL 64 SECOND TO VOLUME 'external' SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0 - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) - node1.query(""" + node1.query( + """ ALTER TABLE {name} MODIFY TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2', d1 + INTERVAL 14 SECOND TO VOLUME 'external', d1 + INTERVAL 19 SECOND DELETE - """.format(name=name)) + """.format( + name=name + ) + ) for p in range(3): data = [] # 6MB in total @@ -828,13 +1341,23 @@ limitations under the License.""" for i in range(2): p1 = p d1 = now - 1 if i > 0 or positive else now + 300 - data.append("({}, randomPrintableASCII(1024*1024), toDateTime({}))".format(p1, d1)) - node1.query("INSERT INTO {name} (p1, s1, d1) VALUES {values}".format(name=name, values=",".join(data))) + data.append( + "({}, randomPrintableASCII(1024*1024), toDateTime({}))".format( + p1, d1 + ) + ) + node1.query( + 
"INSERT INTO {name} (p1, s1, d1) VALUES {values}".format( + name=name, values=",".join(data) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod2"} if positive else {"jbod1", "jbod2"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"] + assert node1.query( + "SELECT count() FROM {name}".format(name=name) + ).splitlines() == ["6"] if positive: expected_disks = {"external"} @@ -843,12 +1366,16 @@ limitations under the License.""" check_used_disks_with_retry(node1, name, expected_disks, 50) - assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"] + assert node1.query( + "SELECT count() FROM {name}".format(name=name) + ).splitlines() == ["6"] time.sleep(5) for i in range(50): - rows_count = int(node1.query("SELECT count() FROM {name}".format(name=name)).strip()) + rows_count = int( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() + ) if positive: if rows_count == 0: break @@ -867,16 +1394,23 @@ limitations under the License.""" node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name)) -@pytest.mark.parametrize("name,engine", [ - pytest.param("concurrently_altering_ttl_mt", "MergeTree()", id="mt"), - pytest.param("concurrently_altering_ttl_replicated_mt", - "ReplicatedMergeTree('/clickhouse/concurrently_altering_ttl_replicated_mt', '1')", id="replicated_mt"), -]) +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param("concurrently_altering_ttl_mt", "MergeTree()", id="mt"), + pytest.param( + "concurrently_altering_ttl_replicated_mt", + "ReplicatedMergeTree('/clickhouse/concurrently_altering_ttl_replicated_mt', '1')", + id="replicated_mt", + ), + ], +) def test_concurrent_alter_with_ttl_move(started_cluster, name, engine): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( EventDate Date, number UInt64 @@ -884,7 +1418,10 @@ def test_concurrent_alter_with_ttl_move(started_cluster, name, engine): ORDER BY tuple() PARTITION BY toYYYYMM(EventDate) SETTINGS storage_policy='jbods_with_external' - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) values = list({random.randint(1, 1000000) for _ in range(0, 1000)}) @@ -892,8 +1429,12 @@ def test_concurrent_alter_with_ttl_move(started_cluster, name, engine): for i in range(num): day = random.randint(11, 30) value = values.pop() - month = '0' + str(random.choice([3, 4])) - node1.query("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value)) + month = "0" + str(random.choice([3, 4])) + node1.query( + "INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format( + name, m=month, d=day, v=value + ) + ) def alter_move(num): def produce_alter_move(node, name): @@ -901,9 +1442,15 @@ def test_concurrent_alter_with_ttl_move(started_cluster, name, engine): if move_type == "PART": for _ in range(10): try: - parts = node1.query( - "SELECT name from system.parts where table = '{}' and active = 1".format( - name)).strip().split('\n') + parts = ( + node1.query( + "SELECT name from system.parts where table = '{}' and active = 1".format( + name + ) + ) + .strip() + .split("\n") + ) break except QueryRuntimeException: pass @@ -920,8 +1467,15 @@ def test_concurrent_alter_with_ttl_move(started_cluster, name, engine): else: move_volume = random.choice(["'main'", "'external'"]) try: - node1.query("ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format( - name, mt=move_type, mp=move_part, md=move_disk, 
mv=move_volume)) + node1.query( + "ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format( + name, + mt=move_type, + mp=move_part, + md=move_disk, + mv=move_volume, + ) + ) except QueryRuntimeException: pass @@ -931,7 +1485,9 @@ def test_concurrent_alter_with_ttl_move(started_cluster, name, engine): def alter_update(num): for i in range(num): try: - node1.query("ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name)) + node1.query( + "ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name) + ) except: pass @@ -940,19 +1496,30 @@ def test_concurrent_alter_with_ttl_move(started_cluster, name, engine): ttls = [] for j in range(random.randint(1, 10)): what = random.choice( - ["TO VOLUME 'main'", "TO VOLUME 'external'", "TO DISK 'jbod1'", "TO DISK 'jbod2'", - "TO DISK 'external'"]) + [ + "TO VOLUME 'main'", + "TO VOLUME 'external'", + "TO DISK 'jbod1'", + "TO DISK 'jbod2'", + "TO DISK 'external'", + ] + ) when = "now()+{}".format(random.randint(-1, 5)) ttls.append("{} {}".format(when, what)) try: - node1.query("ALTER TABLE {} MODIFY TTL {}".format(name, ", ".join(ttls))) + node1.query( + "ALTER TABLE {} MODIFY TTL {}".format(name, ", ".join(ttls)) + ) except QueryRuntimeException: pass def optimize_table(num): for i in range(num): try: # optimize may throw after concurrent alter - node1.query("OPTIMIZE TABLE {} FINAL".format(name), settings={'optimize_throw_if_noop': '1'}) + node1.query( + "OPTIMIZE TABLE {} FINAL".format(name), + settings={"optimize_throw_if_noop": "1"}, + ) break except: pass @@ -976,15 +1543,19 @@ def test_concurrent_alter_with_ttl_move(started_cluster, name, engine): @pytest.mark.skip(reason="Flacky test") -@pytest.mark.parametrize("name,positive", [ - pytest.param("test_double_move_while_select_negative", 0, id="negative"), - pytest.param("test_double_move_while_select_positive", 1, id="positive"), -]) +@pytest.mark.parametrize( + "name,positive", + [ + pytest.param("test_double_move_while_select_negative", 0, id="negative"), + pytest.param("test_double_move_while_select_positive", 1, id="positive"), + ], +) def test_double_move_while_select(started_cluster, name, positive): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( n Int64, s String @@ -992,59 +1563,104 @@ def test_double_move_while_select(started_cluster, name, positive): ORDER BY tuple() PARTITION BY n SETTINGS storage_policy='small_jbod_with_external' - """.format(name=name)) + """.format( + name=name + ) + ) node1.query( - "INSERT INTO {name} VALUES (1, randomPrintableASCII(10*1024*1024))".format(name=name)) + "INSERT INTO {name} VALUES (1, randomPrintableASCII(10*1024*1024))".format( + name=name + ) + ) parts = node1.query( - "SELECT name FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)).splitlines() + "SELECT name FROM system.parts WHERE table = '{name}' AND active = 1".format( + name=name + ) + ).splitlines() assert len(parts) == 1 - node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0])) + node1.query( + "ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format( + name=name, part=parts[0] + ) + ) def long_select(): if positive: - node1.query("SELECT sleep(3), sleep(2), sleep(1), n FROM {name}".format(name=name)) + node1.query( + "SELECT sleep(3), sleep(2), sleep(1), n FROM {name}".format( + name=name + ) + ) thread = threading.Thread(target=long_select) thread.start() time.sleep(1) - node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 
'jbod1'".format(name=name, part=parts[0])) + node1.query( + "ALTER TABLE {name} MOVE PART '{part}' TO DISK 'jbod1'".format( + name=name, part=parts[0] + ) + ) # Fill jbod1 to force ClickHouse to make move of partition 1 to external. node1.query( - "INSERT INTO {name} VALUES (2, randomPrintableASCII(9*1024*1024))".format(name=name)) + "INSERT INTO {name} VALUES (2, randomPrintableASCII(9*1024*1024))".format( + name=name + ) + ) node1.query( - "INSERT INTO {name} VALUES (3, randomPrintableASCII(9*1024*1024))".format(name=name)) + "INSERT INTO {name} VALUES (3, randomPrintableASCII(9*1024*1024))".format( + name=name + ) + ) node1.query( - "INSERT INTO {name} VALUES (4, randomPrintableASCII(9*1024*1024))".format(name=name)) + "INSERT INTO {name} VALUES (4, randomPrintableASCII(9*1024*1024))".format( + name=name + ) + ) wait_parts_mover(node1, name, retry_count=40) # If SELECT locked old part on external, move shall fail. assert node1.query( - "SELECT disk_name FROM system.parts WHERE table = '{name}' AND active = 1 AND name = '{part}'" - .format(name=name, part=parts[0])).splitlines() == ["jbod1" if positive else "external"] + "SELECT disk_name FROM system.parts WHERE table = '{name}' AND active = 1 AND name = '{part}'".format( + name=name, part=parts[0] + ) + ).splitlines() == ["jbod1" if positive else "external"] thread.join() - assert node1.query("SELECT n FROM {name} ORDER BY n".format(name=name)).splitlines() == ["1", "2", "3", "4"] + assert node1.query( + "SELECT n FROM {name} ORDER BY n".format(name=name) + ).splitlines() == ["1", "2", "3", "4"] finally: node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name)) -@pytest.mark.parametrize("name,engine,positive", [ - pytest.param("mt_test_alter_with_merge_do_not_work", "MergeTree()", 0, id="mt"), - pytest.param("replicated_mt_test_alter_with_merge_do_not_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_alter_with_merge_do_not_work', '1')", 0, id="replicated"), - pytest.param("mt_test_alter_with_merge_work", "MergeTree()", 1, id="mt_work"), - pytest.param("replicated_mt_test_alter_with_merge_work", - "ReplicatedMergeTree('/clickhouse/replicated_test_alter_with_merge_work', '1')", 1, id="replicated_work"), -]) +@pytest.mark.parametrize( + "name,engine,positive", + [ + pytest.param("mt_test_alter_with_merge_do_not_work", "MergeTree()", 0, id="mt"), + pytest.param( + "replicated_mt_test_alter_with_merge_do_not_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_alter_with_merge_do_not_work', '1')", + 0, + id="replicated", + ), + pytest.param("mt_test_alter_with_merge_work", "MergeTree()", 1, id="mt_work"), + pytest.param( + "replicated_mt_test_alter_with_merge_work", + "ReplicatedMergeTree('/clickhouse/replicated_test_alter_with_merge_work', '1')", + 1, + id="replicated_work", + ), + ], +) def test_alter_with_merge_work(started_cluster, name, engine, positive): name = unique_table_name(name) @@ -1063,7 +1679,8 @@ limitations under the License.""" and parts are merged. 
""" try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( s1 String, d1 DateTime @@ -1072,12 +1689,18 @@ limitations under the License.""" TTL d1 + INTERVAL 3000 SECOND TO DISK 'jbod2', d1 + INTERVAL 6000 SECOND TO VOLUME 'external' SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0 - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) def optimize_table(num): for i in range(num): try: # optimize may throw after concurrent alter - node1.query("OPTIMIZE TABLE {} FINAL".format(name), settings={'optimize_throw_if_noop': '1'}) + node1.query( + "OPTIMIZE TABLE {} FINAL".format(name), + settings={"optimize_throw_if_noop": "1"}, + ) break except: pass @@ -1087,26 +1710,44 @@ limitations under the License.""" now = time.time() for i in range(2): d1 = now - 1 if positive else now + 300 - data.append("(randomPrintableASCII(1024*1024), toDateTime({}))".format(d1)) + data.append( + "(randomPrintableASCII(1024*1024), toDateTime({}))".format(d1) + ) values = ",".join(data) - node1.query("INSERT INTO {name} (s1, d1) VALUES {values}".format(name=name, values=values)) + node1.query( + "INSERT INTO {name} (s1, d1) VALUES {values}".format( + name=name, values=values + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod1", "jbod2"} - node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"] + node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == [ + "6" + ] - node1.query(""" + node1.query( + """ ALTER TABLE {name} MODIFY TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2', d1 + INTERVAL 5 SECOND TO VOLUME 'external', d1 + INTERVAL 10 SECOND DELETE - """.format(name=name)) + """.format( + name=name + ) + ) optimize_table(20) - assert node1.query( - "SELECT count() FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)) == "1\n" + assert ( + node1.query( + "SELECT count() FROM system.parts WHERE table = '{name}' AND active = 1".format( + name=name + ) + ) + == "1\n" + ) time.sleep(5) @@ -1130,17 +1771,38 @@ limitations under the License.""" node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name)) -@pytest.mark.parametrize("name,dest_type,engine", [ - pytest.param("mt_test_disabled_ttl_move_on_insert_work", "DISK", "MergeTree()", id="disk"), - pytest.param("mt_test_disabled_ttl_move_on_insert_work", "VOLUME", "MergeTree()", id="volume"), - pytest.param("replicated_mt_test_disabled_ttl_move_on_insert_work", "DISK", "ReplicatedMergeTree('/clickhouse/replicated_test_disabled_ttl_move_on_insert_work', '1')", id="replicated_disk"), - pytest.param("replicated_mt_test_disabled_ttl_move_on_insert_work", "VOLUME", "ReplicatedMergeTree('/clickhouse/replicated_test_disabled_ttl_move_on_insert_work', '1')", id="replicated_volume"), -]) +@pytest.mark.parametrize( + "name,dest_type,engine", + [ + pytest.param( + "mt_test_disabled_ttl_move_on_insert_work", "DISK", "MergeTree()", id="disk" + ), + pytest.param( + "mt_test_disabled_ttl_move_on_insert_work", + "VOLUME", + "MergeTree()", + id="volume", + ), + pytest.param( + "replicated_mt_test_disabled_ttl_move_on_insert_work", + "DISK", + "ReplicatedMergeTree('/clickhouse/replicated_test_disabled_ttl_move_on_insert_work', '1')", + id="replicated_disk", + ), + pytest.param( + "replicated_mt_test_disabled_ttl_move_on_insert_work", + "VOLUME", + "ReplicatedMergeTree('/clickhouse/replicated_test_disabled_ttl_move_on_insert_work', '1')", + id="replicated_volume", + ), + ], +) def 
test_disabled_ttl_move_on_insert(started_cluster, name, dest_type, engine): name = unique_table_name(name) try: - node1.query(""" + node1.query( + """ CREATE TABLE {name} ( s1 String, d1 DateTime @@ -1148,26 +1810,42 @@ def test_disabled_ttl_move_on_insert(started_cluster, name, dest_type, engine): ORDER BY tuple() TTL d1 TO {dest_type} 'external' SETTINGS storage_policy='jbod_without_instant_ttl_move' - """.format(name=name, dest_type=dest_type, engine=engine)) + """.format( + name=name, dest_type=dest_type, engine=engine + ) + ) node1.query("SYSTEM STOP MOVES {}".format(name)) data = [] # 10MB in total for i in range(10): - data.append(("randomPrintableASCII(1024*1024)", "toDateTime({})".format(time.time() - 1))) + data.append( + ( + "randomPrintableASCII(1024*1024)", + "toDateTime({})".format(time.time() - 1), + ) + ) - node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data]))) + node1.query( + "INSERT INTO {} (s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"jbod1"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + ) node1.query("SYSTEM START MOVES {}".format(name)) time.sleep(3) used_disks = get_used_disks_for_table(node1, name) assert set(used_disks) == {"external"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + ) finally: try: @@ -1176,10 +1854,13 @@ def test_disabled_ttl_move_on_insert(started_cluster, name, dest_type, engine): pass -@pytest.mark.parametrize("name,dest_type", [ - pytest.param("replicated_mt_move_if_exists", "DISK", id="replicated_disk"), - pytest.param("replicated_mt_move_if_exists", "VOLUME", id="replicated_volume"), -]) +@pytest.mark.parametrize( + "name,dest_type", + [ + pytest.param("replicated_mt_move_if_exists", "DISK", id="replicated_disk"), + pytest.param("replicated_mt_move_if_exists", "VOLUME", id="replicated_volume"), + ], +) def test_ttl_move_if_exists(started_cluster, name, dest_type): name = unique_table_name(name) @@ -1195,20 +1876,43 @@ def test_ttl_move_if_exists(started_cluster, name, dest_type): """ with pytest.raises(QueryRuntimeException): - node1.query(query_template.format( \ - name=name, node_name=node1.name, dest_type=dest_type, \ - if_exists='', policy='only_jbod_1')) + node1.query( + query_template.format( + name=name, + node_name=node1.name, + dest_type=dest_type, + if_exists="", + policy="only_jbod_1", + ) + ) - for (node, policy) in zip([node1, node2], ['only_jbod_1', 'small_jbod_with_external']): - node.query(query_template.format( \ - name=name, node_name=node.name, dest_type=dest_type, \ - if_exists='IF EXISTS', policy=policy)) + for (node, policy) in zip( + [node1, node2], ["only_jbod_1", "small_jbod_with_external"] + ): + node.query( + query_template.format( + name=name, + node_name=node.name, + dest_type=dest_type, + if_exists="IF EXISTS", + policy=policy, + ) + ) data = [] # 10MB in total for i in range(10): - data.append(("randomPrintableASCII(1024*1024)", "toDateTime({})".format(time.time() - 1))) + data.append( + ( + "randomPrintableASCII(1024*1024)", + "toDateTime({})".format(time.time() - 1), + ) + ) - node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for 
x in data]))) + node1.query( + "INSERT INTO {} (s1, d1) VALUES {}".format( + name, ",".join(["(" + ",".join(x) + ")" for x in data]) + ) + ) node2.query("SYSTEM SYNC REPLICA {}".format(name)) time.sleep(5) @@ -1219,8 +1923,12 @@ def test_ttl_move_if_exists(started_cluster, name, dest_type): used_disks2 = get_used_disks_for_table(node2, name) assert set(used_disks2) == {"external"} - assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" - assert node2.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + assert ( + node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + ) + assert ( + node2.query("SELECT count() FROM {name}".format(name=name)).strip() == "10" + ) finally: try: diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index f37c28b2a80..bcdb2d25912 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -6,14 +6,36 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV, exec_query_with_retry cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True) -node2 = cluster.add_instance('node2', with_zookeeper=True) +node1 = cluster.add_instance("node1", with_zookeeper=True) +node2 = cluster.add_instance("node2", with_zookeeper=True) -node3 = cluster.add_instance('node3', with_zookeeper=True) -node4 = cluster.add_instance('node4', with_zookeeper=True, image='yandex/clickhouse-server', tag='20.12.4.5', stay_alive=True, with_installed_binary=True) +node3 = cluster.add_instance("node3", with_zookeeper=True) +node4 = cluster.add_instance( + "node4", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="20.12.4.5", + stay_alive=True, + with_installed_binary=True, +) + +node5 = cluster.add_instance( + "node5", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="20.12.4.5", + stay_alive=True, + with_installed_binary=True, +) +node6 = cluster.add_instance( + "node6", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="20.12.4.5", + stay_alive=True, + with_installed_binary=True, +) -node5 = cluster.add_instance('node5', with_zookeeper=True, image='yandex/clickhouse-server', tag='20.12.4.5', stay_alive=True, with_installed_binary=True) -node6 = cluster.add_instance('node6', with_zookeeper=True, image='yandex/clickhouse-server', tag='20.12.4.5', stay_alive=True, with_installed_binary=True) @pytest.fixture(scope="module") def started_cluster(): @@ -33,25 +55,37 @@ def drop_table(nodes, table_name): for node in nodes: node.query("DROP TABLE IF EXISTS {} NO DELAY".format(table_name)) + # Column TTL works only with wide parts, because it's very expensive to apply it for compact parts def test_ttl_columns(started_cluster): drop_table([node1, node2], "test_ttl") for node in [node1, node2]: node.query( - ''' + """ CREATE TABLE test_ttl(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_columns', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0; - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) - node1.query("INSERT INTO test_ttl VALUES (toDateTime('2000-10-10 00:00:00'), 1, 1, 3)") - node1.query("INSERT INTO test_ttl VALUES (toDateTime('2000-10-11 10:00:00'), 2, 2, 4)") + node1.query( + "INSERT INTO test_ttl VALUES 
(toDateTime('2000-10-10 00:00:00'), 1, 1, 3)" + ) + node1.query( + "INSERT INTO test_ttl VALUES (toDateTime('2000-10-11 10:00:00'), 2, 2, 4)" + ) time.sleep(1) # sleep to allow use ttl merge selector for second time node1.query("OPTIMIZE TABLE test_ttl FINAL") expected = "1\t0\t0\n2\t0\t0\n" - assert TSV(node1.query("SELECT id, a, b FROM test_ttl ORDER BY id")) == TSV(expected) - assert TSV(node2.query("SELECT id, a, b FROM test_ttl ORDER BY id")) == TSV(expected) + assert TSV(node1.query("SELECT id, a, b FROM test_ttl ORDER BY id")) == TSV( + expected + ) + assert TSV(node2.query("SELECT id, a, b FROM test_ttl ORDER BY id")) == TSV( + expected + ) def test_merge_with_ttl_timeout(started_cluster): @@ -59,22 +93,32 @@ def test_merge_with_ttl_timeout(started_cluster): drop_table([node1, node2], table) for node in [node1, node2]: node.query( - ''' + """ CREATE TABLE {table}(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS min_bytes_for_wide_part=0; - '''.format(replica=node.name, table=table)) + """.format( + replica=node.name, table=table + ) + ) node1.query("SYSTEM STOP TTL MERGES {table}".format(table=table)) node2.query("SYSTEM STOP TTL MERGES {table}".format(table=table)) for i in range(1, 4): node1.query( - "INSERT INTO {table} VALUES (toDateTime('2000-10-{day:02d} 10:00:00'), 1, 2, 3)".format(day=i, table=table)) + "INSERT INTO {table} VALUES (toDateTime('2000-10-{day:02d} 10:00:00'), 1, 2, 3)".format( + day=i, table=table + ) + ) - assert node1.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "0\n" - assert node2.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "0\n" + assert ( + node1.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "0\n" + ) + assert ( + node2.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "0\n" + ) node1.query("SYSTEM START TTL MERGES {table}".format(table=table)) node2.query("SYSTEM START TTL MERGES {table}".format(table=table)) @@ -83,19 +127,26 @@ def test_merge_with_ttl_timeout(started_cluster): for i in range(1, 4): node1.query( - "INSERT INTO {table} VALUES (toDateTime('2000-10-{day:02d} 10:00:00'), 1, 2, 3)".format(day=i, table=table)) + "INSERT INTO {table} VALUES (toDateTime('2000-10-{day:02d} 10:00:00'), 1, 2, 3)".format( + day=i, table=table + ) + ) time.sleep(15) # TTL merges shall not happen. 
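# Editor's sketch (not part of this patch): the TTL tests above wait with fixed
# sleeps (e.g. time.sleep(15)) before asserting on query results. A hypothetical
# polling helper such as the one below illustrates the same check without a
# hard-coded delay; "node" is any ClickHouseCluster instance used in these tests.
import time


def wait_for_query_result(node, query, expected, timeout=60, interval=0.5):
    """Poll `query` on `node` until it returns `expected` or `timeout` expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if node.query(query).strip() == expected:
            return
        time.sleep(interval)
    raise AssertionError(
        "query {!r} did not return {!r} within {}s".format(query, expected, timeout)
    )


# Example usage, mirroring the countIf(a = 0) check in test_merge_with_ttl_timeout:
# wait_for_query_result(node1, "SELECT countIf(a = 0) FROM {}".format(table), "3")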
- assert node1.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "3\n" - assert node2.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "3\n" + assert ( + node1.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "3\n" + ) + assert ( + node2.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "3\n" + ) def test_ttl_many_columns(started_cluster): drop_table([node1, node2], "test_ttl_2") for node in [node1, node2]: node.query( - ''' + """ CREATE TABLE test_ttl_2(date DateTime, id UInt32, a Int32 TTL date, _idx Int32 TTL date, @@ -103,13 +154,20 @@ def test_ttl_many_columns(started_cluster): _partition Int32 TTL date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_2', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0; - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) node1.query("SYSTEM STOP TTL MERGES test_ttl_2") node2.query("SYSTEM STOP TTL MERGES test_ttl_2") - node1.query("INSERT INTO test_ttl_2 VALUES (toDateTime('2000-10-10 00:00:00'), 1, 2, 3, 4, 5)") - node1.query("INSERT INTO test_ttl_2 VALUES (toDateTime('2100-10-10 10:00:00'), 6, 7, 8, 9, 10)") + node1.query( + "INSERT INTO test_ttl_2 VALUES (toDateTime('2000-10-10 00:00:00'), 1, 2, 3, 4, 5)" + ) + node1.query( + "INSERT INTO test_ttl_2 VALUES (toDateTime('2100-10-10 10:00:00'), 6, 7, 8, 9, 10)" + ) node2.query("SYSTEM SYNC REPLICA test_ttl_2", timeout=5) @@ -126,24 +184,38 @@ def test_ttl_many_columns(started_cluster): node2.query("SYSTEM SYNC REPLICA test_ttl_2", timeout=5) expected = "1\t0\t0\t0\t0\n6\t7\t8\t9\t10\n" - assert TSV(node1.query("SELECT id, a, _idx, _offset, _partition FROM test_ttl_2 ORDER BY id")) == TSV(expected) - assert TSV(node2.query("SELECT id, a, _idx, _offset, _partition FROM test_ttl_2 ORDER BY id")) == TSV(expected) + assert TSV( + node1.query( + "SELECT id, a, _idx, _offset, _partition FROM test_ttl_2 ORDER BY id" + ) + ) == TSV(expected) + assert TSV( + node2.query( + "SELECT id, a, _idx, _offset, _partition FROM test_ttl_2 ORDER BY id" + ) + ) == TSV(expected) -@pytest.mark.parametrize("delete_suffix", [ - "", - "DELETE", -]) +@pytest.mark.parametrize( + "delete_suffix", + [ + "", + "DELETE", + ], +) def test_ttl_table(started_cluster, delete_suffix): drop_table([node1, node2], "test_ttl") for node in [node1, node2]: node.query( - ''' + """ CREATE TABLE test_ttl(date DateTime, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 1 DAY {delete_suffix} SETTINGS merge_with_ttl_timeout=0; - '''.format(replica=node.name, delete_suffix=delete_suffix)) + """.format( + replica=node.name, delete_suffix=delete_suffix + ) + ) node1.query("INSERT INTO test_ttl VALUES (toDateTime('2000-10-10 00:00:00'), 1)") node1.query("INSERT INTO test_ttl VALUES (toDateTime('2000-10-11 10:00:00'), 2)") @@ -158,23 +230,33 @@ def test_modify_ttl(started_cluster): drop_table([node1, node2], "test_ttl") for node in [node1, node2]: node.query( - ''' + """ CREATE TABLE test_ttl(d DateTime, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_modify', '{replica}') ORDER BY id - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) node1.query( - "INSERT INTO test_ttl VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), (now() - INTERVAL 1 HOUR, 3)") + "INSERT INTO test_ttl VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), 
(now() - INTERVAL 1 HOUR, 3)" + ) node2.query("SYSTEM SYNC REPLICA test_ttl", timeout=20) - node1.query("ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 4 HOUR SETTINGS mutations_sync = 2") + node1.query( + "ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 4 HOUR SETTINGS mutations_sync = 2" + ) assert node2.query("SELECT id FROM test_ttl") == "2\n3\n" - node2.query("ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 2 HOUR SETTINGS mutations_sync = 2") + node2.query( + "ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 2 HOUR SETTINGS mutations_sync = 2" + ) assert node1.query("SELECT id FROM test_ttl") == "3\n" - node1.query("ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 30 MINUTE SETTINGS mutations_sync = 2") + node1.query( + "ALTER TABLE test_ttl MODIFY TTL d + INTERVAL 30 MINUTE SETTINGS mutations_sync = 2" + ) assert node2.query("SELECT id FROM test_ttl") == "" @@ -182,35 +264,49 @@ def test_modify_column_ttl(started_cluster): drop_table([node1, node2], "test_ttl") for node in [node1, node2]: node.query( - ''' + """ CREATE TABLE test_ttl(d DateTime, id UInt32 DEFAULT 42) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_column', '{replica}') ORDER BY d - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) node1.query( - "INSERT INTO test_ttl VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), (now() - INTERVAL 1 HOUR, 3)") + "INSERT INTO test_ttl VALUES (now() - INTERVAL 5 HOUR, 1), (now() - INTERVAL 3 HOUR, 2), (now() - INTERVAL 1 HOUR, 3)" + ) node2.query("SYSTEM SYNC REPLICA test_ttl", timeout=20) - node1.query("ALTER TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 4 HOUR SETTINGS mutations_sync = 2") + node1.query( + "ALTER TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 4 HOUR SETTINGS mutations_sync = 2" + ) assert node2.query("SELECT id FROM test_ttl") == "42\n2\n3\n" - node1.query("ALTER TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 2 HOUR SETTINGS mutations_sync = 2") + node1.query( + "ALTER TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 2 HOUR SETTINGS mutations_sync = 2" + ) assert node1.query("SELECT id FROM test_ttl") == "42\n42\n3\n" - node1.query("ALTER TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 30 MINUTE SETTINGS mutations_sync = 2") + node1.query( + "ALTER TABLE test_ttl MODIFY COLUMN id UInt32 TTL d + INTERVAL 30 MINUTE SETTINGS mutations_sync = 2" + ) assert node2.query("SELECT id FROM test_ttl") == "42\n42\n42\n" def test_ttl_double_delete_rule_returns_error(started_cluster): drop_table([node1, node2], "test_ttl") try: - node1.query(''' + node1.query( + """ CREATE TABLE test_ttl(date DateTime, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_double_delete', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0 - '''.format(replica=node1.name)) + """.format( + replica=node1.name + ) + ) assert False except client.QueryRuntimeException: pass @@ -221,26 +317,41 @@ def test_ttl_double_delete_rule_returns_error(started_cluster): def optimize_with_retry(node, table_name, retry=20): for i in range(retry): try: - node.query("OPTIMIZE TABLE {name} FINAL SETTINGS optimize_throw_if_noop = 1".format(name=table_name), settings={"optimize_throw_if_noop": "1"}) + node.query( + "OPTIMIZE TABLE {name} FINAL SETTINGS optimize_throw_if_noop = 1".format( + name=table_name + ), + settings={"optimize_throw_if_noop": "1"}, + ) break except e: time.sleep(0.5) -@pytest.mark.parametrize("name,engine", [ 
- pytest.param("test_ttl_alter_delete", "MergeTree()", id="test_ttl_alter_delete"), - pytest.param("test_replicated_ttl_alter_delete", "ReplicatedMergeTree('/clickhouse/test_replicated_ttl_alter_delete', '1')", id="test_ttl_alter_delete_replicated"), -]) + +@pytest.mark.parametrize( + "name,engine", + [ + pytest.param( + "test_ttl_alter_delete", "MergeTree()", id="test_ttl_alter_delete" + ), + pytest.param( + "test_replicated_ttl_alter_delete", + "ReplicatedMergeTree('/clickhouse/test_replicated_ttl_alter_delete', '1')", + id="test_ttl_alter_delete_replicated", + ), + ], +) def test_ttl_alter_delete(started_cluster, name, engine): """Copyright 2019, Altinity LTD -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.""" + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.""" """Check compatibility with old TTL delete expressions to make sure that: * alter modify of column's TTL delete expression works @@ -260,84 +371,143 @@ limitations under the License.""" ORDER BY tuple() TTL d1 + INTERVAL 1 DAY DELETE SETTINGS min_bytes_for_wide_part=0 - """.format(name=name, engine=engine)) + """.format( + name=name, engine=engine + ) + ) - node1.query("""ALTER TABLE {name} MODIFY COLUMN s1 String TTL d1 + INTERVAL 1 SECOND""".format(name=name)) + node1.query( + """ALTER TABLE {name} MODIFY COLUMN s1 String TTL d1 + INTERVAL 1 SECOND""".format( + name=name + ) + ) node1.query("""ALTER TABLE {name} ADD COLUMN b1 Int32""".format(name=name)) - node1.query("""INSERT INTO {name} (s1, b1, d1) VALUES ('hello1', 1, toDateTime({time}))""".format(name=name, - time=time.time())) - node1.query("""INSERT INTO {name} (s1, b1, d1) VALUES ('hello2', 2, toDateTime({time}))""".format(name=name, - time=time.time() + 360)) + node1.query( + """INSERT INTO {name} (s1, b1, d1) VALUES ('hello1', 1, toDateTime({time}))""".format( + name=name, time=time.time() + ) + ) + node1.query( + """INSERT INTO {name} (s1, b1, d1) VALUES ('hello2', 2, toDateTime({time}))""".format( + name=name, time=time.time() + 360 + ) + ) time.sleep(1) optimize_with_retry(node1, name) - r = node1.query("SELECT s1, b1 FROM {name} ORDER BY b1, s1".format(name=name)).splitlines() + r = node1.query( + "SELECT s1, b1 FROM {name} ORDER BY b1, s1".format(name=name) + ).splitlines() assert r == ["\t1", "hello2\t2"] - node1.query("""ALTER TABLE {name} MODIFY COLUMN b1 Int32 TTL d1""".format(name=name)) - node1.query("""INSERT INTO {name} (s1, b1, d1) VALUES ('hello3', 3, toDateTime({time}))""".format(name=name, - time=time.time())) + node1.query( + """ALTER TABLE {name} MODIFY COLUMN b1 Int32 TTL d1""".format(name=name) + ) + 
node1.query( + """INSERT INTO {name} (s1, b1, d1) VALUES ('hello3', 3, toDateTime({time}))""".format( + name=name, time=time.time() + ) + ) time.sleep(1) optimize_with_retry(node1, name) - r = node1.query("SELECT s1, b1 FROM {name} ORDER BY b1, s1".format(name=name)).splitlines() + r = node1.query( + "SELECT s1, b1 FROM {name} ORDER BY b1, s1".format(name=name) + ).splitlines() assert r == ["\t0", "\t0", "hello2\t2"] + def test_ttl_empty_parts(started_cluster): drop_table([node1, node2], "test_ttl_empty_parts") for node in [node1, node2]: node.query( - ''' + """ CREATE TABLE test_ttl_empty_parts(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_empty_parts', '{replica}') ORDER BY id SETTINGS max_bytes_to_merge_at_min_space_in_pool = 1, max_bytes_to_merge_at_max_space_in_pool = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0 - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) - for i in range (1, 7): - node1.query("INSERT INTO test_ttl_empty_parts SELECT '2{}00-01-0{}', number FROM numbers(1000)".format(i % 2, i)) + for i in range(1, 7): + node1.query( + "INSERT INTO test_ttl_empty_parts SELECT '2{}00-01-0{}', number FROM numbers(1000)".format( + i % 2, i + ) + ) assert node1.query("SELECT count() FROM test_ttl_empty_parts") == "6000\n" - assert node1.query("SELECT name FROM system.parts WHERE table = 'test_ttl_empty_parts' AND active ORDER BY name") == \ - "all_0_0_0\nall_1_1_0\nall_2_2_0\nall_3_3_0\nall_4_4_0\nall_5_5_0\n" + assert ( + node1.query( + "SELECT name FROM system.parts WHERE table = 'test_ttl_empty_parts' AND active ORDER BY name" + ) + == "all_0_0_0\nall_1_1_0\nall_2_2_0\nall_3_3_0\nall_4_4_0\nall_5_5_0\n" + ) node1.query("ALTER TABLE test_ttl_empty_parts MODIFY TTL date") assert node1.query("SELECT count() FROM test_ttl_empty_parts") == "3000\n" - time.sleep(3) # Wait for cleanup thread - assert node1.query("SELECT name FROM system.parts WHERE table = 'test_ttl_empty_parts' AND active ORDER BY name") == \ - "all_0_0_0_6\nall_2_2_0_6\nall_4_4_0_6\n" + time.sleep(3) # Wait for cleanup thread + assert ( + node1.query( + "SELECT name FROM system.parts WHERE table = 'test_ttl_empty_parts' AND active ORDER BY name" + ) + == "all_0_0_0_6\nall_2_2_0_6\nall_4_4_0_6\n" + ) for node in [node1, node2]: - node.query("ALTER TABLE test_ttl_empty_parts MODIFY SETTING max_bytes_to_merge_at_min_space_in_pool = 1000000000") - node.query("ALTER TABLE test_ttl_empty_parts MODIFY SETTING max_bytes_to_merge_at_max_space_in_pool = 1000000000") + node.query( + "ALTER TABLE test_ttl_empty_parts MODIFY SETTING max_bytes_to_merge_at_min_space_in_pool = 1000000000" + ) + node.query( + "ALTER TABLE test_ttl_empty_parts MODIFY SETTING max_bytes_to_merge_at_max_space_in_pool = 1000000000" + ) - optimize_with_retry(node1, 'test_ttl_empty_parts') - assert node1.query("SELECT name FROM system.parts WHERE table = 'test_ttl_empty_parts' AND active ORDER BY name") == "all_0_4_1_6\n" + optimize_with_retry(node1, "test_ttl_empty_parts") + assert ( + node1.query( + "SELECT name FROM system.parts WHERE table = 'test_ttl_empty_parts' AND active ORDER BY name" + ) + == "all_0_4_1_6\n" + ) # Check that after removing empty parts mutations and merges works - node1.query("INSERT INTO test_ttl_empty_parts SELECT '2100-01-20', number FROM numbers(1000)") - node1.query("ALTER TABLE test_ttl_empty_parts DELETE WHERE id % 2 = 0 SETTINGS mutations_sync = 2") + node1.query( + "INSERT INTO test_ttl_empty_parts SELECT '2100-01-20', number FROM 
numbers(1000)" + ) + node1.query( + "ALTER TABLE test_ttl_empty_parts DELETE WHERE id % 2 = 0 SETTINGS mutations_sync = 2" + ) assert node1.query("SELECT count() FROM test_ttl_empty_parts") == "2000\n" - optimize_with_retry(node1, 'test_ttl_empty_parts') - assert node1.query("SELECT name FROM system.parts WHERE table = 'test_ttl_empty_parts' AND active ORDER BY name") == "all_0_7_2_8\n" + optimize_with_retry(node1, "test_ttl_empty_parts") + assert ( + node1.query( + "SELECT name FROM system.parts WHERE table = 'test_ttl_empty_parts' AND active ORDER BY name" + ) + == "all_0_7_2_8\n" + ) - node2.query('SYSTEM SYNC REPLICA test_ttl_empty_parts', timeout=20) + node2.query("SYSTEM SYNC REPLICA test_ttl_empty_parts", timeout=20) - error_msg = ' default.test_ttl_empty_parts (ReplicatedMergeTreeCleanupThread)' + error_msg = ( + " default.test_ttl_empty_parts (ReplicatedMergeTreeCleanupThread)" + ) assert not node1.contains_in_log(error_msg) assert not node2.contains_in_log(error_msg) + @pytest.mark.parametrize( - ('node_left', 'node_right', 'num_run'), - [(node1, node2, 0), (node3, node4, 1), (node5, node6, 2)] + ("node_left", "node_right", "num_run"), + [(node1, node2, 0), (node3, node4, 1), (node5, node6, 2)], ) def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): drop_table([node_left, node_right], "test_ttl_delete") @@ -346,36 +516,49 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): for node in [node_left, node_right]: node.query( - ''' + """ CREATE TABLE test_ttl_delete(date DateTime, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_delete_{suff}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 3 SECOND SETTINGS max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 - '''.format(suff=num_run, replica=node.name)) + """.format( + suff=num_run, replica=node.name + ) + ) node.query( - ''' + """ CREATE TABLE test_ttl_group_by(date DateTime, id UInt32, val UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_group_by_{suff}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 3 SECOND GROUP BY id SET val = sum(val) SETTINGS max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 - '''.format(suff=num_run, replica=node.name)) + """.format( + suff=num_run, replica=node.name + ) + ) node.query( - ''' + """ CREATE TABLE test_ttl_where(date DateTime, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_where_{suff}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) TTL date + INTERVAL 3 SECOND DELETE WHERE id % 2 = 1 SETTINGS max_number_of_merges_with_ttl_in_pool=100, max_replicated_merges_with_ttl_in_queue=100 - '''.format(suff=num_run, replica=node.name)) + """.format( + suff=num_run, replica=node.name + ) + ) node_left.query("INSERT INTO test_ttl_delete VALUES (now(), 1)") - node_left.query("INSERT INTO test_ttl_delete VALUES (toDateTime('2100-10-11 10:00:00'), 2)") + node_left.query( + "INSERT INTO test_ttl_delete VALUES (toDateTime('2100-10-11 10:00:00'), 2)" + ) node_right.query("INSERT INTO test_ttl_delete VALUES (now(), 3)") - node_right.query("INSERT INTO test_ttl_delete VALUES (toDateTime('2100-10-11 10:00:00'), 4)") + node_right.query( + "INSERT INTO test_ttl_delete VALUES (toDateTime('2100-10-11 10:00:00'), 4)" + ) node_left.query("INSERT INTO test_ttl_group_by VALUES (now(), 0, 1)") node_left.query("INSERT INTO test_ttl_group_by VALUES (now(), 0, 
2)") @@ -392,8 +575,8 @@ def test_ttl_compatibility(started_cluster, node_left, node_right, num_run): if node_right.with_installed_binary: node_right.restart_with_latest_version() - - time.sleep(5) # Wait for TTL + + time.sleep(5) # Wait for TTL # after restart table can be in readonly mode exec_query_with_retry(node_right, "OPTIMIZE TABLE test_ttl_delete FINAL") diff --git a/tests/integration/test_union_header/test.py b/tests/integration/test_union_header/test.py index edbf4dddecf..f883057c1d8 100644 --- a/tests/integration/test_union_header/test.py +++ b/tests/integration/test_union_header/test.py @@ -4,8 +4,12 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) -node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node1 = cluster.add_instance( + "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) +node2 = cluster.add_instance( + "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True +) @pytest.fixture(scope="module") @@ -14,7 +18,8 @@ def started_cluster(): cluster.start() for node in (node1, node2): - node.query(''' + node.query( + """ CREATE TABLE default.t1_local ( event_date Date DEFAULT toDate(event_time), @@ -23,12 +28,15 @@ def started_cluster(): account_id String ) ENGINE = MergeTree(event_date, (event_time, account_id), 8192); - ''') + """ + ) - node.query(''' + node.query( + """ CREATE TABLE default.t1 AS default.t1_local ENGINE = Distributed('two_shards', 'default', 't1_local', rand()); - ''') + """ + ) yield cluster @@ -37,7 +45,12 @@ def started_cluster(): def test_read(started_cluster): - assert node1.query('''SELECT event_date, event_time, log_type + assert ( + node1.query( + """SELECT event_date, event_time, log_type FROM default.t1 WHERE (log_type = 30305) AND (account_id = '111111') - LIMIT 1''').strip() == '' + LIMIT 1""" + ).strip() + == "" + ) diff --git a/tests/integration/test_user_defined_object_persistence/test.py b/tests/integration/test_user_defined_object_persistence/test.py index 6993bc13615..8d775411b61 100644 --- a/tests/integration/test_user_defined_object_persistence/test.py +++ b/tests/integration/test_user_defined_object_persistence/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -instance = cluster.add_instance('instance', stay_alive=True) +instance = cluster.add_instance("instance", stay_alive=True) @pytest.fixture(scope="module", autouse=True) @@ -21,7 +21,7 @@ def test_persistence(): instance.query(create_function_query1) instance.query(create_function_query2) - + assert instance.query("SELECT MySum1(1,2)") == "3\n" assert instance.query("SELECT MySum2(1,2)") == "5\n" @@ -35,5 +35,9 @@ def test_persistence(): instance.restart_clickhouse() - assert "Unknown function MySum1" in instance.query_and_get_error("SELECT MySum1(1, 2)") - assert "Unknown function MySum2" in instance.query_and_get_error("SELECT MySum2(1, 2)") + assert "Unknown function MySum1" in instance.query_and_get_error( + "SELECT MySum1(1, 2)" + ) + assert "Unknown function MySum2" in instance.query_and_get_error( + "SELECT MySum2(1, 2)" + ) diff --git a/tests/integration/test_user_directories/test.py b/tests/integration/test_user_directories/test.py index 1ce4e377f2b..45afb86f464 100644 --- a/tests/integration/test_user_directories/test.py +++ 
b/tests/integration/test_user_directories/test.py @@ -6,7 +6,7 @@ from helpers.test_tools import TSV SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', stay_alive=True) +node = cluster.add_instance("node", stay_alive=True) @pytest.fixture(scope="module", autouse=True) @@ -15,7 +15,13 @@ def started_cluster(): cluster.start() for i in range(2, 8): - node.exec_in_container(["cp", "/etc/clickhouse-server/users.xml", "/etc/clickhouse-server/users{}.xml".format(i)]) + node.exec_in_container( + [ + "cp", + "/etc/clickhouse-server/users.xml", + "/etc/clickhouse-server/users{}.xml".format(i), + ] + ) yield cluster @@ -24,56 +30,146 @@ def started_cluster(): def test_old_style(): - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/old_style.xml"), - '/etc/clickhouse-server/config.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/old_style.xml"), + "/etc/clickhouse-server/config.d/z.xml", + ) node.restart_clickhouse() assert node.query("SELECT * FROM system.user_directories") == TSV( - [["users.xml", "users.xml", '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users2.xml"}', 1], - ["local directory", "local directory", '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access2\\\\/"}', 2]]) + [ + [ + "users.xml", + "users.xml", + '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users2.xml"}', + 1, + ], + [ + "local directory", + "local directory", + '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access2\\\\/"}', + 2, + ], + ] + ) def test_local_directories(): - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/local_directories.xml"), - '/etc/clickhouse-server/config.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/local_directories.xml"), + "/etc/clickhouse-server/config.d/z.xml", + ) node.restart_clickhouse() assert node.query("SELECT * FROM system.user_directories") == TSV( - [["users.xml", "users.xml", '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users3.xml"}', 1], - ["local directory", "local directory", '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access3\\\\/"}', 2], - ["local directory (ro)", "local directory", - '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access3-ro\\\\/","readonly":true}', 3]]) + [ + [ + "users.xml", + "users.xml", + '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users3.xml"}', + 1, + ], + [ + "local directory", + "local directory", + '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access3\\\\/"}', + 2, + ], + [ + "local directory (ro)", + "local directory", + '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access3-ro\\\\/","readonly":true}', + 3, + ], + ] + ) def test_relative_path(): - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/relative_path.xml"), - '/etc/clickhouse-server/config.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/relative_path.xml"), + "/etc/clickhouse-server/config.d/z.xml", + ) node.restart_clickhouse() assert node.query("SELECT * FROM system.user_directories") == TSV( - [["users.xml", "users.xml", '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users4.xml"}', 1]]) + [ + [ + "users.xml", + "users.xml", + '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users4.xml"}', + 1, + ] + ] + ) def test_memory(): - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/memory.xml"), '/etc/clickhouse-server/config.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/memory.xml"), + "/etc/clickhouse-server/config.d/z.xml", + ) node.restart_clickhouse() 
assert node.query("SELECT * FROM system.user_directories") == TSV( - [["users.xml", "users.xml", '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users5.xml"}', 1], - ["memory", "memory", '{}', 2]]) + [ + [ + "users.xml", + "users.xml", + '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users5.xml"}', + 1, + ], + ["memory", "memory", "{}", 2], + ] + ) def test_mixed_style(): - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/mixed_style.xml"), - '/etc/clickhouse-server/config.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/mixed_style.xml"), + "/etc/clickhouse-server/config.d/z.xml", + ) node.restart_clickhouse() assert node.query("SELECT * FROM system.user_directories") == TSV( - [["users.xml", "users.xml", '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users6.xml"}', 1], - ["local directory", "local directory", '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access6\\\\/"}', 2], - ["local directory", "local directory", '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access6a\\\\/"}', 3], - ["memory", "memory", '{}', 4]]) + [ + [ + "users.xml", + "users.xml", + '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users6.xml"}', + 1, + ], + [ + "local directory", + "local directory", + '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access6\\\\/"}', + 2, + ], + [ + "local directory", + "local directory", + '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access6a\\\\/"}', + 3, + ], + ["memory", "memory", "{}", 4], + ] + ) def test_duplicates(): - node.copy_file_to_container(os.path.join(SCRIPT_DIR, "configs/duplicates.xml"), - '/etc/clickhouse-server/config.d/z.xml') + node.copy_file_to_container( + os.path.join(SCRIPT_DIR, "configs/duplicates.xml"), + "/etc/clickhouse-server/config.d/z.xml", + ) node.restart_clickhouse() assert node.query("SELECT * FROM system.user_directories") == TSV( - [["users.xml", "users.xml", '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users7.xml"}', 1], - ["local directory", "local directory", '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access7\\\\/"}', 2]]) + [ + [ + "users.xml", + "users.xml", + '{"path":"\\\\/etc\\\\/clickhouse-server\\\\/users7.xml"}', + 1, + ], + [ + "local directory", + "local directory", + '{"path":"\\\\/var\\\\/lib\\\\/clickhouse\\\\/access7\\\\/"}', + 2, + ], + ] + ) diff --git a/tests/integration/test_user_ip_restrictions/test.py b/tests/integration/test_user_ip_restrictions/test.py index a7344fd1a45..e41febfa2f5 100644 --- a/tests/integration/test_user_ip_restrictions/test.py +++ b/tests/integration/test_user_ip_restrictions/test.py @@ -4,23 +4,52 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node_ipv4 = cluster.add_instance('node_ipv4', main_configs=[], user_configs=['configs/users_ipv4.xml'], - ipv4_address='10.5.172.77') -client_ipv4_ok = cluster.add_instance('client_ipv4_ok', main_configs=[], user_configs=[], ipv4_address='10.5.172.10') -client_ipv4_ok_direct = cluster.add_instance('client_ipv4_ok_direct', main_configs=[], user_configs=[], - ipv4_address='10.5.173.1') -client_ipv4_ok_full_mask = cluster.add_instance('client_ipv4_ok_full_mask', main_configs=[], user_configs=[], - ipv4_address='10.5.175.77') -client_ipv4_bad = cluster.add_instance('client_ipv4_bad', main_configs=[], user_configs=[], ipv4_address='10.5.173.10') +node_ipv4 = cluster.add_instance( + "node_ipv4", + main_configs=[], + user_configs=["configs/users_ipv4.xml"], + ipv4_address="10.5.172.77", +) +client_ipv4_ok = cluster.add_instance( + "client_ipv4_ok", main_configs=[], user_configs=[], 
ipv4_address="10.5.172.10" +) +client_ipv4_ok_direct = cluster.add_instance( + "client_ipv4_ok_direct", main_configs=[], user_configs=[], ipv4_address="10.5.173.1" +) +client_ipv4_ok_full_mask = cluster.add_instance( + "client_ipv4_ok_full_mask", + main_configs=[], + user_configs=[], + ipv4_address="10.5.175.77", +) +client_ipv4_bad = cluster.add_instance( + "client_ipv4_bad", main_configs=[], user_configs=[], ipv4_address="10.5.173.10" +) -node_ipv6 = cluster.add_instance('node_ipv6', main_configs=["configs/config_ipv6.xml"], - user_configs=['configs/users_ipv6.xml'], ipv6_address='2001:3984:3989::1:1000') -client_ipv6_ok = cluster.add_instance('client_ipv6_ok', main_configs=[], user_configs=[], - ipv6_address='2001:3984:3989::5555') -client_ipv6_ok_direct = cluster.add_instance('client_ipv6_ok_direct', main_configs=[], user_configs=[], - ipv6_address='2001:3984:3989::1:1111') -client_ipv6_bad = cluster.add_instance('client_ipv6_bad', main_configs=[], user_configs=[], - ipv6_address='2001:3984:3989::1:1112') +node_ipv6 = cluster.add_instance( + "node_ipv6", + main_configs=["configs/config_ipv6.xml"], + user_configs=["configs/users_ipv6.xml"], + ipv6_address="2001:3984:3989::1:1000", +) +client_ipv6_ok = cluster.add_instance( + "client_ipv6_ok", + main_configs=[], + user_configs=[], + ipv6_address="2001:3984:3989::5555", +) +client_ipv6_ok_direct = cluster.add_instance( + "client_ipv6_ok_direct", + main_configs=[], + user_configs=[], + ipv6_address="2001:3984:3989::1:1111", +) +client_ipv6_bad = cluster.add_instance( + "client_ipv6_bad", + main_configs=[], + user_configs=[], + ipv6_address="2001:3984:3989::1:1112", +) @pytest.fixture(scope="module") @@ -36,30 +65,62 @@ def setup_cluster(): def test_ipv4(setup_cluster): try: client_ipv4_ok.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --host 10.5.172.77 --query 'select 1'"], privileged=True, - user='root') + [ + "bash", + "-c", + "/usr/bin/clickhouse client --host 10.5.172.77 --query 'select 1'", + ], + privileged=True, + user="root", + ) except Exception as ex: - assert False, "allowed client with 10.5.172.10 cannot connect to server with allowed mask '10.5.172.0/24'" + assert ( + False + ), "allowed client with 10.5.172.10 cannot connect to server with allowed mask '10.5.172.0/24'" try: client_ipv4_ok_direct.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --host 10.5.172.77 --query 'select 1'"], privileged=True, - user='root') + [ + "bash", + "-c", + "/usr/bin/clickhouse client --host 10.5.172.77 --query 'select 1'", + ], + privileged=True, + user="root", + ) except Exception as ex: - assert False, "allowed client with 10.5.173.1 cannot connect to server with allowed ip '10.5.173.1'" + assert ( + False + ), "allowed client with 10.5.173.1 cannot connect to server with allowed ip '10.5.173.1'" try: client_ipv4_ok_full_mask.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --host 10.5.172.77 --query 'select 1'"], privileged=True, - user='root') + [ + "bash", + "-c", + "/usr/bin/clickhouse client --host 10.5.172.77 --query 'select 1'", + ], + privileged=True, + user="root", + ) except Exception as ex: - assert False, "allowed client with 10.5.175.77 cannot connect to server with allowed ip '10.5.175.0/255.255.255.0'" + assert ( + False + ), "allowed client with 10.5.175.77 cannot connect to server with allowed ip '10.5.175.0/255.255.255.0'" try: client_ipv4_bad.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --host 10.5.172.77 --query 'select 1'"], privileged=True, - 
user='root') - assert False, "restricted client with 10.5.173.10 can connect to server with allowed mask '10.5.172.0/24'" + [ + "bash", + "-c", + "/usr/bin/clickhouse client --host 10.5.172.77 --query 'select 1'", + ], + privileged=True, + user="root", + ) + assert ( + False + ), "restricted client with 10.5.173.10 can connect to server with allowed mask '10.5.172.0/24'" except AssertionError: raise except Exception as ex: @@ -69,24 +130,48 @@ def test_ipv4(setup_cluster): def test_ipv6(setup_cluster): try: client_ipv6_ok.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --host 2001:3984:3989::1:1000 --query 'select 1'"], - privileged=True, user='root') + [ + "bash", + "-c", + "/usr/bin/clickhouse client --host 2001:3984:3989::1:1000 --query 'select 1'", + ], + privileged=True, + user="root", + ) except Exception as ex: print(ex) - assert False, "allowed client with 2001:3984:3989:0:0:0:1:1111 cannot connect to server with allowed mask '2001:3984:3989:0:0:0:0:0/112'" + assert ( + False + ), "allowed client with 2001:3984:3989:0:0:0:1:1111 cannot connect to server with allowed mask '2001:3984:3989:0:0:0:0:0/112'" try: client_ipv6_ok_direct.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --host 2001:3984:3989:0:0:0:1:1000 --query 'select 1'"], - privileged=True, user='root') + [ + "bash", + "-c", + "/usr/bin/clickhouse client --host 2001:3984:3989:0:0:0:1:1000 --query 'select 1'", + ], + privileged=True, + user="root", + ) except Exception as ex: - assert False, "allowed client with 2001:3984:3989:0:0:0:1:1111 cannot connect to server with allowed ip '2001:3984:3989:0:0:0:1:1111'" + assert ( + False + ), "allowed client with 2001:3984:3989:0:0:0:1:1111 cannot connect to server with allowed ip '2001:3984:3989:0:0:0:1:1111'" try: client_ipv6_bad.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --host 2001:3984:3989:0:0:0:1:1000 --query 'select 1'"], - privileged=True, user='root') - assert False, "restricted client with 2001:3984:3989:0:0:0:1:1112 can connect to server with allowed mask '2001:3984:3989:0:0:0:0:0/112'" + [ + "bash", + "-c", + "/usr/bin/clickhouse client --host 2001:3984:3989:0:0:0:1:1000 --query 'select 1'", + ], + privileged=True, + user="root", + ) + assert ( + False + ), "restricted client with 2001:3984:3989:0:0:0:1:1112 can connect to server with allowed mask '2001:3984:3989:0:0:0:0:0/112'" except AssertionError: raise except Exception as ex: diff --git a/tests/integration/test_user_zero_database_access/test_user_zero_database_access.py b/tests/integration/test_user_zero_database_access/test_user_zero_database_access.py index d77e8383df7..747c022a3b0 100644 --- a/tests/integration/test_user_zero_database_access/test_user_zero_database_access.py +++ b/tests/integration/test_user_zero_database_access/test_user_zero_database_access.py @@ -3,7 +3,7 @@ import pytest from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) -node = cluster.add_instance('node', user_configs=["configs/users.xml"]) +node = cluster.add_instance("node", user_configs=["configs/users.xml"]) @pytest.fixture(scope="module") @@ -19,7 +19,13 @@ def start_cluster(): def test_user_zero_database_access(start_cluster): try: node.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --user 'no_access' --query 'DROP DATABASE test'"], user='root') + [ + "bash", + "-c", + "/usr/bin/clickhouse client --user 'no_access' --query 'DROP DATABASE test'", + ], + user="root", + ) assert False, "user with no access rights dropped database 
test" except AssertionError: raise @@ -28,21 +34,37 @@ def test_user_zero_database_access(start_cluster): try: node.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --user 'has_access' --query 'DROP DATABASE test'"], user='root') + [ + "bash", + "-c", + "/usr/bin/clickhouse client --user 'has_access' --query 'DROP DATABASE test'", + ], + user="root", + ) except Exception as ex: assert False, "user with access rights can't drop database test" try: node.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --user 'has_access' --query 'CREATE DATABASE test'"], - user='root') + [ + "bash", + "-c", + "/usr/bin/clickhouse client --user 'has_access' --query 'CREATE DATABASE test'", + ], + user="root", + ) except Exception as ex: assert False, "user with access rights can't create database test" try: node.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --user 'no_access' --query 'CREATE DATABASE test2'"], - user='root') + [ + "bash", + "-c", + "/usr/bin/clickhouse client --user 'no_access' --query 'CREATE DATABASE test2'", + ], + user="root", + ) assert False, "user with no access rights created database test2" except AssertionError: raise @@ -51,9 +73,16 @@ def test_user_zero_database_access(start_cluster): try: node.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --user 'has_access' --query 'CREATE DATABASE test2'"], - user='root') - assert False, "user with limited access rights created database test2 which is outside of his scope of rights" + [ + "bash", + "-c", + "/usr/bin/clickhouse client --user 'has_access' --query 'CREATE DATABASE test2'", + ], + user="root", + ) + assert ( + False + ), "user with limited access rights created database test2 which is outside of his scope of rights" except AssertionError: raise except Exception as ex: @@ -61,26 +90,52 @@ def test_user_zero_database_access(start_cluster): try: node.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --user 'default' --query 'CREATE DATABASE test2'"], user='root') + [ + "bash", + "-c", + "/usr/bin/clickhouse client --user 'default' --query 'CREATE DATABASE test2'", + ], + user="root", + ) except Exception as ex: assert False, "user with full access rights can't create database test2" try: node.exec_in_container( - ["bash", "-c", "/usr/bin/clickhouse client --user 'default' --query 'DROP DATABASE test2'"], user='root') + [ + "bash", + "-c", + "/usr/bin/clickhouse client --user 'default' --query 'DROP DATABASE test2'", + ], + user="root", + ) except Exception as ex: assert False, "user with full access rights can't drop database test2" - + try: name = node.exec_in_container( - ["bash", "-c", "export CLICKHOUSE_USER=env_user_not_with_password && /usr/bin/clickhouse client --query 'SELECT currentUser()'"], user='root') + [ + "bash", + "-c", + "export CLICKHOUSE_USER=env_user_not_with_password && /usr/bin/clickhouse client --query 'SELECT currentUser()'", + ], + user="root", + ) assert name.strip() == "env_user_not_with_password" except Exception as ex: assert False, "set env CLICKHOUSE_USER can not connect server" try: name = node.exec_in_container( - ["bash", "-c", "export CLICKHOUSE_USER=env_user_with_password && export CLICKHOUSE_PASSWORD=clickhouse && /usr/bin/clickhouse client --query 'SELECT currentUser()'"], user='root') + [ + "bash", + "-c", + "export CLICKHOUSE_USER=env_user_with_password && export CLICKHOUSE_PASSWORD=clickhouse && /usr/bin/clickhouse client --query 'SELECT currentUser()'", + ], + user="root", + ) assert name.strip() == 
"env_user_with_password" except Exception as ex: - assert False, "set env CLICKHOUSE_USER CLICKHOUSE_PASSWORD can not connect server" + assert ( + False + ), "set env CLICKHOUSE_USER CLICKHOUSE_PASSWORD can not connect server" diff --git a/tests/integration/test_version_update/test.py b/tests/integration/test_version_update/test.py index 4e5d925852c..3332fe69e86 100644 --- a/tests/integration/test_version_update/test.py +++ b/tests/integration/test_version_update/test.py @@ -2,49 +2,92 @@ import pytest from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry + cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', stay_alive=True) +node1 = cluster.add_instance("node1", stay_alive=True) -node2 = cluster.add_instance('node2', with_zookeeper=True, image='yandex/clickhouse-server', tag='21.2', with_installed_binary=True, stay_alive=True) +node2 = cluster.add_instance( + "node2", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="21.2", + with_installed_binary=True, + stay_alive=True, +) # Use differents nodes because if there is node.restart_from_latest_version(), then in later tests # it will be with latest version, but shouldn't, order of tests in CI is shuffled. -node3 = cluster.add_instance('node3', image='yandex/clickhouse-server', tag='21.5', with_installed_binary=True, stay_alive=True) -node4 = cluster.add_instance('node4', image='yandex/clickhouse-server', tag='21.5', with_installed_binary=True, stay_alive=True) -node5 = cluster.add_instance('node5', image='yandex/clickhouse-server', tag='21.5', with_installed_binary=True, stay_alive=True) -node6 = cluster.add_instance('node6', image='yandex/clickhouse-server', tag='21.5', with_installed_binary=True, stay_alive=True) +node3 = cluster.add_instance( + "node3", + image="yandex/clickhouse-server", + tag="21.5", + with_installed_binary=True, + stay_alive=True, +) +node4 = cluster.add_instance( + "node4", + image="yandex/clickhouse-server", + tag="21.5", + with_installed_binary=True, + stay_alive=True, +) +node5 = cluster.add_instance( + "node5", + image="yandex/clickhouse-server", + tag="21.5", + with_installed_binary=True, + stay_alive=True, +) +node6 = cluster.add_instance( + "node6", + image="yandex/clickhouse-server", + tag="21.5", + with_installed_binary=True, + stay_alive=True, +) - -def insert_data(node, table_name='test_table', n=1, col2=1): - node.query(""" INSERT INTO {} +def insert_data(node, table_name="test_table", n=1, col2=1): + node.query( + """ INSERT INTO {} SELECT toDateTime(NOW()), {}, sumMapState(arrayMap(i -> 1, range(300)), arrayMap(i -> 1, range(300))) - FROM numbers({});""".format(table_name, col2, n)) + FROM numbers({});""".format( + table_name, col2, n + ) + ) -def create_table(node, name='test_table', version=None): +def create_table(node, name="test_table", version=None): node.query("DROP TABLE IF EXISTS {};".format(name)) if version is None: - node.query(""" + node.query( + """ CREATE TABLE {} ( `col1` DateTime, `col2` Int64, `col3` AggregateFunction(sumMap, Array(UInt8), Array(UInt8)) ) - ENGINE = AggregatingMergeTree() ORDER BY (col1, col2) """.format(name)) + ENGINE = AggregatingMergeTree() ORDER BY (col1, col2) """.format( + name + ) + ) else: - node.query(""" + node.query( + """ CREATE TABLE {} ( `col1` DateTime, `col2` Int64, `col3` AggregateFunction({}, sumMap, Array(UInt8), Array(UInt8)) ) - ENGINE = AggregatingMergeTree() ORDER BY (col1, col2) """.format(name, version)) + ENGINE = AggregatingMergeTree() 
ORDER BY (col1, col2) """.format( + name, version + ) + ) @pytest.fixture(scope="module") @@ -57,35 +100,51 @@ def start_cluster(): def test_modulo_partition_key_issue_23508(start_cluster): - node2.query("CREATE TABLE test (id Int64, v UInt64, value String) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/table1', '1', v) PARTITION BY id % 20 ORDER BY (id, v)") - node2.query("INSERT INTO test SELECT number, number, toString(number) FROM numbers(10)") + node2.query( + "CREATE TABLE test (id Int64, v UInt64, value String) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/table1', '1', v) PARTITION BY id % 20 ORDER BY (id, v)" + ) + node2.query( + "INSERT INTO test SELECT number, number, toString(number) FROM numbers(10)" + ) expected = node2.query("SELECT number, number, toString(number) FROM numbers(10)") - partition_data = node2.query("SELECT partition, name FROM system.parts WHERE table='test' ORDER BY partition") - assert(expected == node2.query("SELECT * FROM test ORDER BY id")) + partition_data = node2.query( + "SELECT partition, name FROM system.parts WHERE table='test' ORDER BY partition" + ) + assert expected == node2.query("SELECT * FROM test ORDER BY id") node2.restart_with_latest_version() - assert(expected == node2.query("SELECT * FROM test ORDER BY id")) - assert(partition_data == node2.query("SELECT partition, name FROM system.parts WHERE table='test' ORDER BY partition")) + assert expected == node2.query("SELECT * FROM test ORDER BY id") + assert partition_data == node2.query( + "SELECT partition, name FROM system.parts WHERE table='test' ORDER BY partition" + ) # Test from issue 16587 def test_aggregate_function_versioning_issue_16587(start_cluster): for node in [node1, node3]: node.query("DROP TABLE IF EXISTS test_table;") - node.query(""" + node.query( + """ CREATE TABLE test_table (`col1` DateTime, `col2` Int64) - ENGINE = MergeTree() ORDER BY col1""") - node.query("insert into test_table select '2020-10-26 00:00:00', 1929292 from numbers(300)") + ENGINE = MergeTree() ORDER BY col1""" + ) + node.query( + "insert into test_table select '2020-10-26 00:00:00', 1929292 from numbers(300)" + ) expected = "([1],[600])" - result_on_old_version = node3.query("select sumMap(sm) from (select sumMap([1],[1]) as sm from remote('127.0.0.{1,2}', default.test_table) group by col1, col2);").strip() - assert(result_on_old_version != expected) + result_on_old_version = node3.query( + "select sumMap(sm) from (select sumMap([1],[1]) as sm from remote('127.0.0.{1,2}', default.test_table) group by col1, col2);" + ).strip() + assert result_on_old_version != expected - result_on_new_version = node1.query("select sumMap(sm) from (select sumMap([1],[1]) as sm from remote('127.0.0.{1,2}', default.test_table) group by col1, col2);").strip() - assert(result_on_new_version == expected) + result_on_new_version = node1.query( + "select sumMap(sm) from (select sumMap([1],[1]) as sm from remote('127.0.0.{1,2}', default.test_table) group by col1, col2);" + ).strip() + assert result_on_new_version == expected def test_aggregate_function_versioning_fetch_data_from_old_to_new_server(start_cluster): @@ -95,14 +154,20 @@ def test_aggregate_function_versioning_fetch_data_from_old_to_new_server(start_c expected = "([1],[300])" - new_server_data = node1.query("select finalizeAggregation(col3) from default.test_table;").strip() - assert(new_server_data == expected) + new_server_data = node1.query( + "select finalizeAggregation(col3) from default.test_table;" + ).strip() + assert new_server_data == 
expected - old_server_data = node4.query("select finalizeAggregation(col3) from default.test_table;").strip() - assert(old_server_data != expected) + old_server_data = node4.query( + "select finalizeAggregation(col3) from default.test_table;" + ).strip() + assert old_server_data != expected - data_from_old_to_new_server = node1.query("select finalizeAggregation(col3) from remote('node4', default.test_table);").strip() - assert(data_from_old_to_new_server == old_server_data) + data_from_old_to_new_server = node1.query( + "select finalizeAggregation(col3) from remote('node4', default.test_table);" + ).strip() + assert data_from_old_to_new_server == old_server_data def test_aggregate_function_versioning_server_upgrade(start_cluster): @@ -112,83 +177,117 @@ def test_aggregate_function_versioning_server_upgrade(start_cluster): insert_data(node5, col2=1) # Serialization with version 0, server does not support versioning of aggregate function states. - old_server_data = node5.query("select finalizeAggregation(col3) from default.test_table;").strip() - assert(old_server_data == "([1],[44])") + old_server_data = node5.query( + "select finalizeAggregation(col3) from default.test_table;" + ).strip() + assert old_server_data == "([1],[44])" create = node5.query("describe table default.test_table;").strip() - assert(create.strip().endswith("col3\tAggregateFunction(sumMap, Array(UInt8), Array(UInt8))")) - print('Ok 1') + assert create.strip().endswith( + "col3\tAggregateFunction(sumMap, Array(UInt8), Array(UInt8))" + ) + print("Ok 1") # Upgrade server. node5.restart_with_latest_version() # Deserialized with version 0, server supports versioning. - upgraded_server_data = node5.query("select finalizeAggregation(col3) from default.test_table;").strip() - assert(upgraded_server_data == "([1],[44])") + upgraded_server_data = node5.query( + "select finalizeAggregation(col3) from default.test_table;" + ).strip() + assert upgraded_server_data == "([1],[44])" create = node5.query("describe table default.test_table;").strip() - assert(create.strip().endswith("col3\tAggregateFunction(sumMap, Array(UInt8), Array(UInt8))")) - print('Ok 2') + assert create.strip().endswith( + "col3\tAggregateFunction(sumMap, Array(UInt8), Array(UInt8))" + ) + print("Ok 2") create = node1.query("describe table default.test_table;").strip() print(create) - assert(create.strip().endswith("col3\tAggregateFunction(1, sumMap, Array(UInt8), Array(UInt8))")) + assert create.strip().endswith( + "col3\tAggregateFunction(1, sumMap, Array(UInt8), Array(UInt8))" + ) # Data from upgraded server to new server. Deserialize with version 0. - data_from_upgraded_to_new_server = node1.query("select finalizeAggregation(col3) from remote('node5', default.test_table);").strip() - assert(data_from_upgraded_to_new_server == upgraded_server_data == "([1],[44])") - print('Ok 3') + data_from_upgraded_to_new_server = node1.query( + "select finalizeAggregation(col3) from remote('node5', default.test_table);" + ).strip() + assert data_from_upgraded_to_new_server == upgraded_server_data == "([1],[44])" + print("Ok 3") # Data is serialized according to version 0 (though one of the states is version 1, but result is version 0). 
- upgraded_server_data = node5.query("select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);").strip() - assert(upgraded_server_data == "([1],[44])\n([1],[44])") - print('Ok 4') + upgraded_server_data = node5.query( + "select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);" + ).strip() + assert upgraded_server_data == "([1],[44])\n([1],[44])" + print("Ok 4") # Check insertion after server upgarde. insert_data(node5, col2=2) # Check newly inserted data is still serialized with 0 version. - upgraded_server_data = node5.query("select finalizeAggregation(col3) from default.test_table order by col2;").strip() - assert(upgraded_server_data == "([1],[44])\n([1],[44])") - print('Ok 5') + upgraded_server_data = node5.query( + "select finalizeAggregation(col3) from default.test_table order by col2;" + ).strip() + assert upgraded_server_data == "([1],[44])\n([1],[44])" + print("Ok 5") # New table has latest version. - new_server_data = node1.query("select finalizeAggregation(col3) from default.test_table;").strip() - assert(new_server_data == "([1],[300])") - print('Ok 6') + new_server_data = node1.query( + "select finalizeAggregation(col3) from default.test_table;" + ).strip() + assert new_server_data == "([1],[300])" + print("Ok 6") # Insert from new server (with version 1) to upgraded server (where version will be 0), result version 0. - node1.query("insert into table function remote('node5', default.test_table) select * from default.test_table;").strip() - upgraded_server_data = node5.query("select finalizeAggregation(col3) from default.test_table order by col2;").strip() - assert(upgraded_server_data == "([1],[44])\n([1],[44])\n([1],[44])") - print('Ok 7') + node1.query( + "insert into table function remote('node5', default.test_table) select * from default.test_table;" + ).strip() + upgraded_server_data = node5.query( + "select finalizeAggregation(col3) from default.test_table order by col2;" + ).strip() + assert upgraded_server_data == "([1],[44])\n([1],[44])\n([1],[44])" + print("Ok 7") # But new table gets data with latest version. insert_data(node1) - new_server_data = node1.query("select finalizeAggregation(col3) from default.test_table;").strip() - assert(new_server_data == "([1],[300])\n([1],[300])") - print('Ok 8') + new_server_data = node1.query( + "select finalizeAggregation(col3) from default.test_table;" + ).strip() + assert new_server_data == "([1],[300])\n([1],[300])" + print("Ok 8") # Create table with column implicitly with older version (version 0). - create_table(node1, name='test_table_0', version=0) - insert_data(node1, table_name='test_table_0', col2=3) - data = node1.query("select finalizeAggregation(col3) from default.test_table_0;").strip() - assert(data == "([1],[44])") - print('Ok') + create_table(node1, name="test_table_0", version=0) + insert_data(node1, table_name="test_table_0", col2=3) + data = node1.query( + "select finalizeAggregation(col3) from default.test_table_0;" + ).strip() + assert data == "([1],[44])" + print("Ok") # Insert from new server to upgraded server to a new table but the version was set implicitly to 0, so data version 0. 
- node1.query("insert into table function remote('node5', default.test_table) select * from default.test_table_0;").strip() - upgraded_server_data = node5.query("select finalizeAggregation(col3) from default.test_table order by col2;").strip() - assert(upgraded_server_data == "([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])") - print('Ok') + node1.query( + "insert into table function remote('node5', default.test_table) select * from default.test_table_0;" + ).strip() + upgraded_server_data = node5.query( + "select finalizeAggregation(col3) from default.test_table order by col2;" + ).strip() + assert upgraded_server_data == "([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])" + print("Ok") def test_aggregate_function_versioning_persisting_metadata(start_cluster): for node in [node1, node6]: create_table(node) insert_data(node) - data = node1.query("select finalizeAggregation(col3) from default.test_table;").strip() - assert(data == "([1],[300])") - data = node6.query("select finalizeAggregation(col3) from default.test_table;").strip() - assert(data == "([1],[44])") + data = node1.query( + "select finalizeAggregation(col3) from default.test_table;" + ).strip() + assert data == "([1],[300])" + data = node6.query( + "select finalizeAggregation(col3) from default.test_table;" + ).strip() + assert data == "([1],[44])" node6.restart_with_latest_version() @@ -199,18 +298,32 @@ def test_aggregate_function_versioning_persisting_metadata(start_cluster): for node in [node1, node6]: insert_data(node) - new_server_data = node1.query("select finalizeAggregation(col3) from default.test_table;").strip() - assert(new_server_data == "([1],[300])\n([1],[300])") + new_server_data = node1.query( + "select finalizeAggregation(col3) from default.test_table;" + ).strip() + assert new_server_data == "([1],[300])\n([1],[300])" - upgraded_server_data = node6.query("select finalizeAggregation(col3) from default.test_table;").strip() - assert(upgraded_server_data == "([1],[44])\n([1],[44])") + upgraded_server_data = node6.query( + "select finalizeAggregation(col3) from default.test_table;" + ).strip() + assert upgraded_server_data == "([1],[44])\n([1],[44])" for node in [node1, node6]: node.restart_clickhouse() insert_data(node) - result = node1.query("select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);").strip() - assert(result == "([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])") + result = node1.query( + "select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);" + ).strip() + assert ( + result + == "([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])" + ) - result = node6.query("select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);").strip() - assert(result == "([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])") + result = node6.query( + "select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);" + ).strip() + assert ( + result + == "([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])" + ) diff --git a/tests/integration/test_version_update_after_mutation/test.py b/tests/integration/test_version_update_after_mutation/test.py index 3c22f2ed380..2971cbc9792 100644 --- a/tests/integration/test_version_update_after_mutation/test.py +++ b/tests/integration/test_version_update_after_mutation/test.py @@ -6,12 +6,30 @@ from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry cluster = 
ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='20.1.10.70', - with_installed_binary=True, stay_alive=True) -node2 = cluster.add_instance('node2', with_zookeeper=True, image='yandex/clickhouse-server', tag='20.1.10.70', - with_installed_binary=True, stay_alive=True) -node3 = cluster.add_instance('node3', with_zookeeper=True, image='yandex/clickhouse-server', tag='20.1.10.70', - with_installed_binary=True, stay_alive=True) +node1 = cluster.add_instance( + "node1", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="20.1.10.70", + with_installed_binary=True, + stay_alive=True, +) +node2 = cluster.add_instance( + "node2", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="20.1.10.70", + with_installed_binary=True, + stay_alive=True, +) +node3 = cluster.add_instance( + "node3", + with_zookeeper=True, + image="yandex/clickhouse-server", + tag="20.1.10.70", + with_installed_binary=True, + stay_alive=True, +) @pytest.fixture(scope="module") @@ -29,7 +47,9 @@ def test_mutate_and_upgrade(start_cluster): node.query("DROP TABLE IF EXISTS mt") node.query( "CREATE TABLE mt (EventDate Date, id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t', '{}') ORDER BY tuple()".format( - node.name)) + node.name + ) + ) node1.query("INSERT INTO mt VALUES ('2020-02-13', 1), ('2020-02-13', 2);") @@ -42,7 +62,9 @@ def test_mutate_and_upgrade(start_cluster): node2.restart_with_latest_version(signal=9) # After hard restart table can be in readonly mode - exec_query_with_retry(node2, "INSERT INTO mt VALUES ('2020-02-13', 3)", retry_count=60) + exec_query_with_retry( + node2, "INSERT INTO mt VALUES ('2020-02-13', 3)", retry_count=60 + ) exec_query_with_retry(node1, "SYSTEM SYNC REPLICA mt", retry_count=60) assert node1.query("SELECT COUNT() FROM mt") == "2\n" @@ -62,8 +84,10 @@ def test_mutate_and_upgrade(start_cluster): assert node1.query("SELECT COUNT() FROM mt") == "2\n" assert node2.query("SELECT COUNT() FROM mt") == "2\n" - node1.query("ALTER TABLE mt MODIFY COLUMN id Int32 DEFAULT 0", - settings={"replication_alter_partitions_sync": "2"}) + node1.query( + "ALTER TABLE mt MODIFY COLUMN id Int32 DEFAULT 0", + settings={"replication_alter_partitions_sync": "2"}, + ) node2.query("OPTIMIZE TABLE mt FINAL") @@ -78,7 +102,8 @@ def test_upgrade_while_mutation(start_cluster): node3.query("DROP TABLE IF EXISTS mt1") node3.query( - "CREATE TABLE mt1 (EventDate Date, id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t1', 'node3') ORDER BY tuple()") + "CREATE TABLE mt1 (EventDate Date, id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t1', 'node3') ORDER BY tuple()" + ) node3.query("INSERT INTO mt1 select '2020-02-13', number from numbers(100000)") @@ -91,7 +116,9 @@ def test_upgrade_while_mutation(start_cluster): # checks for readonly exec_query_with_retry(node3, "OPTIMIZE TABLE mt1", sleep_time=5, retry_count=60) - node3.query("ALTER TABLE mt1 DELETE WHERE id > 100000", settings={"mutations_sync": "2"}) + node3.query( + "ALTER TABLE mt1 DELETE WHERE id > 100000", settings={"mutations_sync": "2"} + ) # will delete nothing, but previous async mutation will finish with this query assert_eq_with_retry(node3, "SELECT COUNT() from mt1", "50000\n") diff --git a/tests/integration/test_zookeeper_config/test.py b/tests/integration/test_zookeeper_config/test.py index 95d9db27a7d..d3d90ca0d4f 100644 --- a/tests/integration/test_zookeeper_config/test.py +++ b/tests/integration/test_zookeeper_config/test.py @@ 
-3,20 +3,33 @@ import pytest import logging from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml') +cluster = ClickHouseCluster( + __file__, zookeeper_config_path="configs/zookeeper_config_root_a.xml" +) + +node1 = cluster.add_instance( + "node1", + with_zookeeper=True, + main_configs=["configs/remote_servers.xml", "configs/zookeeper_config_root_a.xml"], +) +node2 = cluster.add_instance( + "node2", + with_zookeeper=True, + main_configs=["configs/remote_servers.xml", "configs/zookeeper_config_root_a.xml"], +) +node3 = cluster.add_instance( + "node3", + with_zookeeper=True, + main_configs=["configs/remote_servers.xml", "configs/zookeeper_config_root_b.xml"], +) -node1 = cluster.add_instance('node1', with_zookeeper=True, - main_configs=["configs/remote_servers.xml", "configs/zookeeper_config_root_a.xml"]) -node2 = cluster.add_instance('node2', with_zookeeper=True, - main_configs=["configs/remote_servers.xml", "configs/zookeeper_config_root_a.xml"]) -node3 = cluster.add_instance('node3', with_zookeeper=True, - main_configs=["configs/remote_servers.xml", "configs/zookeeper_config_root_b.xml"]) def create_zk_roots(zk): - zk.ensure_path('/root_a') - zk.ensure_path('/root_b') + zk.ensure_path("/root_a") + zk.ensure_path("/root_b") logging.debug(f"Create ZK roots:{zk.get_children('/')}") + @pytest.fixture(scope="module", autouse=True) def started_cluster(): try: @@ -28,30 +41,40 @@ def started_cluster(): finally: cluster.shutdown() + def test_chroot_with_same_root(started_cluster): for i, node in enumerate([node1, node2]): - node.query('DROP TABLE IF EXISTS simple SYNC') - node.query(''' + node.query("DROP TABLE IF EXISTS simple SYNC") + node.query( + """ CREATE TABLE simple (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192); - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) for j in range(2): # Second insert to test deduplication node.query("INSERT INTO simple VALUES ({0}, {0})".format(i)) time.sleep(1) - assert node1.query('select count() from simple').strip() == '2' - assert node2.query('select count() from simple').strip() == '2' + assert node1.query("select count() from simple").strip() == "2" + assert node2.query("select count() from simple").strip() == "2" + def test_chroot_with_different_root(started_cluster): for i, node in [(1, node1), (3, node3)]: - node.query('DROP TABLE IF EXISTS simple_different SYNC') - node.query(''' + node.query("DROP TABLE IF EXISTS simple_different SYNC") + node.query( + """ CREATE TABLE simple_different (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple_different', '{replica}', date, id, 8192); - '''.format(replica=node.name)) + """.format( + replica=node.name + ) + ) for j in range(2): # Second insert to test deduplication node.query("INSERT INTO simple_different VALUES ({0}, {0})".format(i)) - assert node1.query('select count() from simple_different').strip() == '1' - assert node3.query('select count() from simple_different').strip() == '1' + assert node1.query("select count() from simple_different").strip() == "1" + assert node3.query("select count() from simple_different").strip() == "1" diff --git a/tests/integration/test_zookeeper_config/test_password.py b/tests/integration/test_zookeeper_config/test_password.py index 09c15cfd0cf..580b426db6f 100644 --- a/tests/integration/test_zookeeper_config/test_password.py +++ 
b/tests/integration/test_zookeeper_config/test_password.py @@ -1,5 +1,3 @@ - - import time import pytest from helpers.cluster import ClickHouseCluster @@ -7,10 +5,19 @@ from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__, name="password") # TODO ACL not implemented in Keeper. -node1 = cluster.add_instance('node1', with_zookeeper=True, - main_configs=["configs/remote_servers.xml", "configs/zookeeper_config_with_password.xml"]) - -node2 = cluster.add_instance('node2', with_zookeeper=True, main_configs=["configs/remote_servers.xml"]) +node1 = cluster.add_instance( + "node1", + with_zookeeper=True, + main_configs=[ + "configs/remote_servers.xml", + "configs/zookeeper_config_with_password.xml", + ], +) + +node2 = cluster.add_instance( + "node2", with_zookeeper=True, main_configs=["configs/remote_servers.xml"] +) + @pytest.fixture(scope="module", autouse=True) def started_cluster(): @@ -21,16 +28,23 @@ def started_cluster(): finally: cluster.shutdown() -def test_identity(started_cluster): - node1.query('DROP TABLE IF EXISTS simple SYNC') - node1.query(''' +def test_identity(started_cluster): + node1.query("DROP TABLE IF EXISTS simple SYNC") + + node1.query( + """ CREATE TABLE simple (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192); - '''.format(replica=node1.name)) + """.format( + replica=node1.name + ) + ) with pytest.raises(Exception): - node2.query(''' + node2.query( + """ CREATE TABLE simple (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '1', date, id, 8192); - ''') + """ + ) diff --git a/tests/integration/test_zookeeper_config/test_secure.py b/tests/integration/test_zookeeper_config/test_secure.py index c0b2216d677..f540a504024 100644 --- a/tests/integration/test_zookeeper_config/test_secure.py +++ b/tests/integration/test_zookeeper_config/test_secure.py @@ -1,5 +1,5 @@ import threading -import os +import os from tempfile import NamedTemporaryFile import pytest @@ -7,20 +7,36 @@ from helpers.cluster import ClickHouseCluster TEST_DIR = os.path.dirname(__file__) -cluster = ClickHouseCluster(__file__, name="secure", - zookeeper_certfile=os.path.join(TEST_DIR, "configs_secure", "client.crt"), - zookeeper_keyfile=os.path.join(TEST_DIR, "configs_secure", "client.key")) +cluster = ClickHouseCluster( + __file__, + name="secure", + zookeeper_certfile=os.path.join(TEST_DIR, "configs_secure", "client.crt"), + zookeeper_keyfile=os.path.join(TEST_DIR, "configs_secure", "client.key"), +) + +node1 = cluster.add_instance( + "node1", + main_configs=[ + "configs_secure/client.crt", + "configs_secure/client.key", + "configs_secure/conf.d/remote_servers.xml", + "configs_secure/conf.d/ssl_conf.xml", + "configs/zookeeper_config_with_ssl.xml", + ], + with_zookeeper_secure=True, +) +node2 = cluster.add_instance( + "node2", + main_configs=[ + "configs_secure/client.crt", + "configs_secure/client.key", + "configs_secure/conf.d/remote_servers.xml", + "configs_secure/conf.d/ssl_conf.xml", + "configs/zookeeper_config_with_ssl.xml", + ], + with_zookeeper_secure=True, +) -node1 = cluster.add_instance('node1', main_configs=["configs_secure/client.crt", "configs_secure/client.key", - "configs_secure/conf.d/remote_servers.xml", - "configs_secure/conf.d/ssl_conf.xml", - "configs/zookeeper_config_with_ssl.xml"], - with_zookeeper_secure=True) -node2 = cluster.add_instance('node2', main_configs=["configs_secure/client.crt", "configs_secure/client.key", - "configs_secure/conf.d/remote_servers.xml", - 
"configs_secure/conf.d/ssl_conf.xml", - "configs/zookeeper_config_with_ssl.xml"], - with_zookeeper_secure=True) @pytest.fixture(scope="module", autouse=True) def started_cluster(): @@ -31,6 +47,7 @@ def started_cluster(): finally: cluster.shutdown() + # NOTE this test have to be ported to Keeper def test_secure_connection(started_cluster): # no asserts, connection works @@ -43,8 +60,18 @@ def test_secure_connection(started_cluster): # just checking for race conditions for _ in range(threads_number): - threads.append(threading.Thread(target=(lambda: - [node1.query("SELECT count() FROM system.zookeeper WHERE path = '/'") for _ in range(iterations)]))) + threads.append( + threading.Thread( + target=( + lambda: [ + node1.query( + "SELECT count() FROM system.zookeeper WHERE path = '/'" + ) + for _ in range(iterations) + ] + ) + ) + ) for thread in threads: thread.start() diff --git a/tests/integration/test_zookeeper_config_load_balancing/__init__.py b/tests/integration/test_zookeeper_config_load_balancing/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_zookeeper_config_load_balancing/configs/zookeeper_load_balancing.xml b/tests/integration/test_zookeeper_config_load_balancing/configs/zookeeper_load_balancing.xml new file mode 100644 index 00000000000..5416e5e82de --- /dev/null +++ b/tests/integration/test_zookeeper_config_load_balancing/configs/zookeeper_load_balancing.xml @@ -0,0 +1,19 @@ + + + + random + + zoo1 + 2181 + + + zoo2 + 2181 + + + zoo3 + 2181 + + 3000 + + diff --git a/tests/integration/test_zookeeper_config_load_balancing/test.py b/tests/integration/test_zookeeper_config_load_balancing/test.py new file mode 100644 index 00000000000..56af7513389 --- /dev/null +++ b/tests/integration/test_zookeeper_config_load_balancing/test.py @@ -0,0 +1,427 @@ +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager + +cluster = ClickHouseCluster( + __file__, zookeeper_config_path="configs/zookeeper_load_balancing.xml" +) + +# use 3-letter hostnames, so getHostNameDifference("nod1", "zoo1") will work as expected +node1 = cluster.add_instance( + "nod1", with_zookeeper=True, main_configs=["configs/zookeeper_load_balancing.xml"] +) +node2 = cluster.add_instance( + "nod2", with_zookeeper=True, main_configs=["configs/zookeeper_load_balancing.xml"] +) +node3 = cluster.add_instance( + "nod3", with_zookeeper=True, main_configs=["configs/zookeeper_load_balancing.xml"] +) + + +def change_balancing(old, new, reload=True): + line = "{}<" + old_line = line.format(old) + new_line = line.format(new) + for node in [node1, node2, node3]: + node.replace_in_config( + "/etc/clickhouse-server/config.d/zookeeper_load_balancing.xml", + old_line, + new_line, + ) + if reload: + node.query("select '{}', '{}'".format(old, new)) + node.query("system reload config") + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_first_or_random(started_cluster): + try: + change_balancing("random", "first_or_random") + print( + str( + node1.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node1.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + 
user="root", + ) + ).strip() + ) + + print( + str( + node2.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node2.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() + ) + + print( + str( + node3.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node3.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() + ) + finally: + change_balancing("first_or_random", "random", reload=False) + + +def test_in_order(started_cluster): + try: + change_balancing("random", "in_order") + print( + str( + node1.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node1.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() + ) + + print( + str( + node2.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node2.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() + ) + + print( + str( + node3.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node3.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() + ) + finally: + change_balancing("in_order", "random", reload=False) + + +def test_nearest_hostname(started_cluster): + try: + change_balancing("random", "nearest_hostname") + print( + str( + node1.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node1.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() + ) + + print( + str( + node2.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node2.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 
'testzookeeperconfigloadbalancing_zoo2_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() + ) + + print( + str( + node3.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node3.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo3_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() + ) + finally: + change_balancing("nearest_hostname", "random", reload=False) + + +def test_round_robin(started_cluster): + pm = PartitionManager() + try: + pm._add_rule( + { + "source": node1.ip_address, + "destination": cluster.get_instance_ip("zoo1"), + "action": "REJECT --reject-with tcp-reset", + } + ) + pm._add_rule( + { + "source": node2.ip_address, + "destination": cluster.get_instance_ip("zoo1"), + "action": "REJECT --reject-with tcp-reset", + } + ) + pm._add_rule( + { + "source": node3.ip_address, + "destination": cluster.get_instance_ip("zoo1"), + "action": "REJECT --reject-with tcp-reset", + } + ) + change_balancing("random", "round_robin") + + print( + str( + node1.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node1.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo2_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() + ) + + print( + str( + node2.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node2.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo2_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() + ) + + print( + str( + node3.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep ':2181' | grep ESTABLISHED", + ], + privileged=True, + user="root", + ) + ) + ) + assert ( + "1" + == str( + node3.exec_in_container( + [ + "bash", + "-c", + "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo2_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l", + ], + privileged=True, + user="root", + ) + ).strip() + ) + + finally: + pm.heal_all() + change_balancing("round_robin", "random", reload=False) diff --git a/tests/performance/consistent_hashes.xml b/tests/performance/consistent_hashes.xml index 3610579f545..c65a1151536 100644 --- a/tests/performance/consistent_hashes.xml +++ b/tests/performance/consistent_hashes.xml @@ -3,7 +3,7 @@ hash_func - yandexConsistentHash + kostikConsistentHash jumpConsistentHash diff --git a/tests/queries/0_stateless/00153_transform.reference b/tests/queries/0_stateless/00153_transform.reference index eea4fa0e1a8..8a38f4f8172 100644 --- a/tests/queries/0_stateless/00153_transform.reference +++ b/tests/queries/0_stateless/00153_transform.reference @@ -99,6 +99,6 @@ abc 1 1 Остальные -Яндекс +Bigmir)net Google Остальные diff --git a/tests/queries/0_stateless/00153_transform.sql 
b/tests/queries/0_stateless/00153_transform.sql index a5e531d36a4..78ec3cd4d1c 100644 --- a/tests/queries/0_stateless/00153_transform.sql +++ b/tests/queries/0_stateless/00153_transform.sql @@ -8,10 +8,10 @@ SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], 0) FROM sys SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], -1) FROM system.numbers LIMIT 10; SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], -1.1) FROM system.numbers LIMIT 10; SELECT transform(toString(number), ['3', '5', '7'], [111, 222.2, 333], 1) FROM system.numbers LIMIT 10; -SELECT transform(1, [2, 3], ['Яндекс', 'Google'], 'Остальные') AS title; -SELECT transform(2, [2, 3], ['Яндекс', 'Google'], 'Остальные') AS title; -SELECT transform(3, [2, 3], ['Яндекс', 'Google'], 'Остальные') AS title; -SELECT transform(4, [2, 3], ['Яндекс', 'Google'], 'Остальные') AS title; +SELECT transform(1, [2, 3], ['Bigmir)net', 'Google'], 'Остальные') AS title; +SELECT transform(2, [2, 3], ['Bigmir)net', 'Google'], 'Остальные') AS title; +SELECT transform(3, [2, 3], ['Bigmir)net', 'Google'], 'Остальные') AS title; +SELECT transform(4, [2, 3], ['Bigmir)net', 'Google'], 'Остальные') AS title; SELECT transform('hello', 'wrong', 1); -- { serverError 43 } SELECT transform('hello', ['wrong'], 1); -- { serverError 43 } SELECT transform('hello', ['wrong'], [1]); -- { serverError 43 } diff --git a/tests/queries/0_stateless/00165_transform_non_const_default.reference b/tests/queries/0_stateless/00165_transform_non_const_default.reference index d66471d9741..01890b91309 100644 --- a/tests/queries/0_stateless/00165_transform_non_const_default.reference +++ b/tests/queries/0_stateless/00165_transform_non_const_default.reference @@ -79,6 +79,6 @@ abc 1 1 Остальные -Яндекс +Meta.ua Google Остальные diff --git a/tests/queries/0_stateless/00165_transform_non_const_default.sql b/tests/queries/0_stateless/00165_transform_non_const_default.sql index f68327f7700..ef3b7c1f1c5 100644 --- a/tests/queries/0_stateless/00165_transform_non_const_default.sql +++ b/tests/queries/0_stateless/00165_transform_non_const_default.sql @@ -6,7 +6,7 @@ SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], materialize SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], materialize(-1)) FROM system.numbers LIMIT 10; SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], materialize(-1.1)) FROM system.numbers LIMIT 10; SELECT transform(toString(number), ['3', '5', '7'], [111, 222.2, 333], materialize(1)) FROM system.numbers LIMIT 10; -SELECT transform(1, [2, 3], ['Яндекс', 'Google'], materialize('Остальные')) AS title; -SELECT transform(2, [2, 3], ['Яндекс', 'Google'], materialize('Остальные')) AS title; -SELECT transform(3, [2, 3], ['Яндекс', 'Google'], materialize('Остальные')) AS title; -SELECT transform(4, [2, 3], ['Яндекс', 'Google'], materialize('Остальные')) AS title; +SELECT transform(1, [2, 3], ['Meta.ua', 'Google'], materialize('Остальные')) AS title; +SELECT transform(2, [2, 3], ['Meta.ua', 'Google'], materialize('Остальные')) AS title; +SELECT transform(3, [2, 3], ['Meta.ua', 'Google'], materialize('Остальные')) AS title; +SELECT transform(4, [2, 3], ['Meta.ua', 'Google'], materialize('Остальные')) AS title; diff --git a/tests/queries/0_stateless/00255_array_concat_string.reference b/tests/queries/0_stateless/00255_array_concat_string.reference index 4ffac8e5de0..edd1101beb6 100644 --- a/tests/queries/0_stateless/00255_array_concat_string.reference +++ 
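The 00153/00165 edits above only swap the placeholder brand strings ('Яндекс' -> 'Bigmir)net' / 'Meta.ua'); the semantics of transform() are untouched. A minimal sketch of the four-argument form these tests exercise, with values taken from the updated queries rather than any new behaviour:

    -- transform(x, array_from, array_to, default) maps x through the paired arrays
    -- and falls back to default when x is not listed in array_from.
    SELECT transform(2, [2, 3], ['Bigmir)net', 'Google'], 'Остальные');  -- 'Bigmir)net'
    SELECT transform(4, [2, 3], ['Bigmir)net', 'Google'], 'Остальные');  -- 'Остальные' (default)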
b/tests/queries/0_stateless/00255_array_concat_string.reference @@ -34,25 +34,25 @@ Hello, World 0,1,2,3,4,5,6,7 0,1,2,3,4,5,6,7,8 -yandex -yandex google -yandex google test -yandex google test 123 -yandex google test 123 -yandex google test 123 hello -yandex google test 123 hello world -yandex google test 123 hello world goodbye -yandex google test 123 hello world goodbye xyz -yandex google test 123 hello world goodbye xyz yandex -yandex google test 123 hello world goodbye xyz yandex google -yandex google test 123 hello world goodbye xyz yandex google test -yandex google test 123 hello world goodbye xyz yandex google test 123 -yandex google test 123 hello world goodbye xyz yandex google test 123 -yandex google test 123 hello world goodbye xyz yandex google test 123 hello -yandex google test 123 hello world goodbye xyz yandex google test 123 hello world -yandex google test 123 hello world goodbye xyz yandex google test 123 hello world goodbye -yandex google test 123 hello world goodbye xyz yandex google test 123 hello world goodbye xyz -yandex google test 123 hello world goodbye xyz yandex google test 123 hello world goodbye xyz yandex +meta.ua +meta.ua google +meta.ua google test +meta.ua google test 123 +meta.ua google test 123 +meta.ua google test 123 hello +meta.ua google test 123 hello world +meta.ua google test 123 hello world goodbye +meta.ua google test 123 hello world goodbye xyz +meta.ua google test 123 hello world goodbye xyz meta.ua +meta.ua google test 123 hello world goodbye xyz meta.ua google +meta.ua google test 123 hello world goodbye xyz meta.ua google test +meta.ua google test 123 hello world goodbye xyz meta.ua google test 123 +meta.ua google test 123 hello world goodbye xyz meta.ua google test 123 +meta.ua google test 123 hello world goodbye xyz meta.ua google test 123 hello +meta.ua google test 123 hello world goodbye xyz meta.ua google test 123 hello world +meta.ua google test 123 hello world goodbye xyz meta.ua google test 123 hello world goodbye +meta.ua google test 123 hello world goodbye xyz meta.ua google test 123 hello world goodbye xyz +meta.ua google test 123 hello world goodbye xyz meta.ua google test 123 hello world goodbye xyz meta.ua 0 01 diff --git a/tests/queries/0_stateless/00255_array_concat_string.sql b/tests/queries/0_stateless/00255_array_concat_string.sql index f4f95956a16..a18d349bac8 100644 --- a/tests/queries/0_stateless/00255_array_concat_string.sql +++ b/tests/queries/0_stateless/00255_array_concat_string.sql @@ -6,7 +6,7 @@ SELECT arrayStringConcat(emptyArrayString()); SELECT arrayStringConcat(arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; SELECT arrayStringConcat(arrayMap(x -> toString(x), range(number)), '') FROM system.numbers LIMIT 10; SELECT arrayStringConcat(arrayMap(x -> toString(x), range(number)), ',') FROM system.numbers LIMIT 10; -SELECT arrayStringConcat(arrayMap(x -> transform(x, [0, 1, 2, 3, 4, 5, 6, 7, 8], ['yandex', 'google', 'test', '123', '', 'hello', 'world', 'goodbye', 'xyz'], ''), arrayMap(x -> x % 9, range(number))), ' ') FROM system.numbers LIMIT 20; +SELECT arrayStringConcat(arrayMap(x -> transform(x, [0, 1, 2, 3, 4, 5, 6, 7, 8], ['meta.ua', 'google', 'test', '123', '', 'hello', 'world', 'goodbye', 'xyz'], ''), arrayMap(x -> x % 9, range(number))), ' ') FROM system.numbers LIMIT 20; SELECT arrayStringConcat(arrayMap(x -> toString(x), range(number % 4))) FROM system.numbers LIMIT 10; SELECT arrayStringConcat([Null, 'hello', Null, 'world', Null, 'xyz', 'def', Null], ';'); SELECT 
arrayStringConcat([Null::Nullable(String), Null::Nullable(String)], ';'); diff --git a/tests/queries/0_stateless/00296_url_parameters.reference b/tests/queries/0_stateless/00296_url_parameters.reference index 91a7fe8d488..348651d3f7e 100644 --- a/tests/queries/0_stateless/00296_url_parameters.reference +++ b/tests/queries/0_stateless/00296_url_parameters.reference @@ -1,8 +1,8 @@ ['a=b','c=d'] ['a=b','c=d','e=f'] ['a','c=d','e=f'] ['a=b','c=d','e=f','g=h'] ['a=b','c=d'] ['a=b','c=d','e','g=h'] ['a=b','c=d','e=f','g=h'] ['a=b','c=d'] ['a=b','c=d','e=f'] ['a','c=d','e=f'] ['a=b','c=d','e=f','g=h'] ['a=b','c=d'] ['a=b','c=d','e','g=h'] ['a=b','c=d','e=f','g=h'] ['a','c'] ['a','c','e'] ['a','c','e'] ['a','c','e','g'] ['a','c'] ['a','c','e','g'] ['a','c','e','g'] ['a','c'] ['a','c','e'] ['a','c','e'] ['a','c','e','g'] ['a','c'] ['a','c','e','g'] ['a','c','e','g'] b d f d f h b d d h f h b d f d f h b d d h f h -http://yandex.ru/?c=d http://yandex.ru/?a=b http://yandex.ru/?a=b&c=d# http://yandex.ru/?a&c=d#e=f http://yandex.ru/?a#e=f http://yandex.ru/?a&c=d# http://yandex.ru/?a=b&c=d#e=f http://yandex.ru/?c=d#e http://yandex.ru/?a=b#e http://yandex.ru/?a=b&c=d#e http://yandex.ru/?a=b#e&g=h http://yandex.ru/?a=b&c=d#e&g=h http://yandex.ru/?a=b&c=d#e http://yandex.ru/?a=b&c=d#test?e=f&g=h http://yandex.ru/?a=b&c=d#test?g=h http://yandex.ru/?a=b&c=d#test?e=f //yandex.ru/?c=d //yandex.ru/?a=b //yandex.ru/?a=b&c=d# //yandex.ru/?a&c=d#e=f //yandex.ru/?a#e=f //yandex.ru/?a&c=d# //yandex.ru/?a=b&c=d#e=f //yandex.ru/?c=d#e //yandex.ru/?a=b#e //yandex.ru/?a=b&c=d#e //yandex.ru/?a=b#e&g=h //yandex.ru/?a=b&c=d#e&g=h //yandex.ru/?a=b&c=d#e //yandex.ru/?a=b&c=d#test?e=f&g=h //yandex.ru/?a=b&c=d#test?g=h //yandex.ru/?a=b&c=d#test?e=f +http://bigmir.net/?c=d http://bigmir.net/?a=b http://bigmir.net/?a=b&c=d# http://bigmir.net/?a&c=d#e=f http://bigmir.net/?a#e=f http://bigmir.net/?a&c=d# http://bigmir.net/?a=b&c=d#e=f http://bigmir.net/?c=d#e http://bigmir.net/?a=b#e http://bigmir.net/?a=b&c=d#e http://bigmir.net/?a=b#e&g=h http://bigmir.net/?a=b&c=d#e&g=h http://bigmir.net/?a=b&c=d#e http://bigmir.net/?a=b&c=d#test?e=f&g=h http://bigmir.net/?a=b&c=d#test?g=h http://bigmir.net/?a=b&c=d#test?e=f //bigmir.net/?c=d //bigmir.net/?a=b //bigmir.net/?a=b&c=d# //bigmir.net/?a&c=d#e=f //bigmir.net/?a#e=f //bigmir.net/?a&c=d# //bigmir.net/?a=b&c=d#e=f //bigmir.net/?c=d#e //bigmir.net/?a=b#e //bigmir.net/?a=b&c=d#e //bigmir.net/?a=b#e&g=h //bigmir.net/?a=b&c=d#e&g=h //bigmir.net/?a=b&c=d#e //bigmir.net/?a=b&c=d#test?e=f&g=h //bigmir.net/?a=b&c=d#test?g=h //bigmir.net/?a=b&c=d#test?e=f ['a=b','c=d'] ['a=b','c=d','e=f'] ['a','c=d','e=f'] ['a=b','c=d','e=f','g=h'] ['a=b','c=d'] ['a=b','c=d','e','g=h'] ['a=b','c=d','e=f','g=h'] ['a=b','c=d'] ['a=b','c=d','e=f'] ['a','c=d','e=f'] ['a=b','c=d','e=f','g=h'] ['a=b','c=d'] ['a=b','c=d','e','g=h'] ['a=b','c=d','e=f','g=h'] ['a','c'] ['a','c','e'] ['a','c','e'] ['a','c','e','g'] ['a','c'] ['a','c','e','g'] ['a','c','e','g'] ['a','c'] ['a','c','e'] ['a','c','e'] ['a','c','e','g'] ['a','c'] ['a','c','e','g'] ['a','c','e','g'] b d f d f h b d d h f h b d f d f h b d d h f h -http://yandex.ru/?c=d http://yandex.ru/?a=b http://yandex.ru/?a=b&c=d# http://yandex.ru/?a&c=d#e=f http://yandex.ru/?a#e=f http://yandex.ru/?a&c=d# http://yandex.ru/?a=b&c=d#e=f http://yandex.ru/?c=d#e http://yandex.ru/?a=b#e http://yandex.ru/?a=b&c=d#e http://yandex.ru/?a=b#e&g=h http://yandex.ru/?a=b&c=d#e&g=h http://yandex.ru/?a=b&c=d#e http://yandex.ru/?a=b&c=d#test?e=f&g=h http://yandex.ru/?a=b&c=d#test?g=h 
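The 00296_url_parameters diff (reference above, queries below) only substitutes the host in the test URLs (yandex.ru -> bigmir.net); the URL functions behave exactly as before. For orientation, a small sketch of what each function under test returns for one of these URLs; the commented values mirror the updated .reference lines:

    SELECT
        extractURLParameters('http://bigmir.net/?a=b&c=d'),      -- ['a=b','c=d']
        extractURLParameterNames('http://bigmir.net/?a=b&c=d'),  -- ['a','c']
        extractURLParameter('http://bigmir.net/?a=b&c=d', 'c'),  -- 'd'
        cutURLParameter('http://bigmir.net/?a=b&c=d', 'a');      -- 'http://bigmir.net/?c=d'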
http://yandex.ru/?a=b&c=d#test?e=f //yandex.ru/?c=d //yandex.ru/?a=b //yandex.ru/?a=b&c=d# //yandex.ru/?a&c=d#e=f //yandex.ru/?a#e=f //yandex.ru/?a&c=d# //yandex.ru/?a=b&c=d#e=f //yandex.ru/?c=d#e //yandex.ru/?a=b#e //yandex.ru/?a=b&c=d#e //yandex.ru/?a=b#e&g=h //yandex.ru/?a=b&c=d#e&g=h //yandex.ru/?a=b&c=d#e //yandex.ru/?a=b&c=d#test?e=f&g=h //yandex.ru/?a=b&c=d#test?g=h //yandex.ru/?a=b&c=d#test?e=f +http://bigmir.net/?c=d http://bigmir.net/?a=b http://bigmir.net/?a=b&c=d# http://bigmir.net/?a&c=d#e=f http://bigmir.net/?a#e=f http://bigmir.net/?a&c=d# http://bigmir.net/?a=b&c=d#e=f http://bigmir.net/?c=d#e http://bigmir.net/?a=b#e http://bigmir.net/?a=b&c=d#e http://bigmir.net/?a=b#e&g=h http://bigmir.net/?a=b&c=d#e&g=h http://bigmir.net/?a=b&c=d#e http://bigmir.net/?a=b&c=d#test?e=f&g=h http://bigmir.net/?a=b&c=d#test?g=h http://bigmir.net/?a=b&c=d#test?e=f //bigmir.net/?c=d //bigmir.net/?a=b //bigmir.net/?a=b&c=d# //bigmir.net/?a&c=d#e=f //bigmir.net/?a#e=f //bigmir.net/?a&c=d# //bigmir.net/?a=b&c=d#e=f //bigmir.net/?c=d#e //bigmir.net/?a=b#e //bigmir.net/?a=b&c=d#e //bigmir.net/?a=b#e&g=h //bigmir.net/?a=b&c=d#e&g=h //bigmir.net/?a=b&c=d#e //bigmir.net/?a=b&c=d#test?e=f&g=h //bigmir.net/?a=b&c=d#test?g=h //bigmir.net/?a=b&c=d#test?e=f diff --git a/tests/queries/0_stateless/00296_url_parameters.sql b/tests/queries/0_stateless/00296_url_parameters.sql index f6dad306319..8a96e3888fe 100644 --- a/tests/queries/0_stateless/00296_url_parameters.sql +++ b/tests/queries/0_stateless/00296_url_parameters.sql @@ -1,200 +1,200 @@ SELECT - extractURLParameters('http://yandex.ru/?a=b&c=d'), - extractURLParameters('http://yandex.ru/?a=b&c=d#e=f'), - extractURLParameters('http://yandex.ru/?a&c=d#e=f'), - extractURLParameters('http://yandex.ru/?a=b&c=d#e=f&g=h'), - extractURLParameters('http://yandex.ru/?a=b&c=d#e'), - extractURLParameters('http://yandex.ru/?a=b&c=d#e&g=h'), - extractURLParameters('http://yandex.ru/?a=b&c=d#test?e=f&g=h'), - extractURLParameters('//yandex.ru/?a=b&c=d'), - extractURLParameters('//yandex.ru/?a=b&c=d#e=f'), - extractURLParameters('//yandex.ru/?a&c=d#e=f'), - extractURLParameters('//yandex.ru/?a=b&c=d#e=f&g=h'), - extractURLParameters('//yandex.ru/?a=b&c=d#e'), - extractURLParameters('//yandex.ru/?a=b&c=d#e&g=h'), - extractURLParameters('//yandex.ru/?a=b&c=d#test?e=f&g=h'); + extractURLParameters('http://bigmir.net/?a=b&c=d'), + extractURLParameters('http://bigmir.net/?a=b&c=d#e=f'), + extractURLParameters('http://bigmir.net/?a&c=d#e=f'), + extractURLParameters('http://bigmir.net/?a=b&c=d#e=f&g=h'), + extractURLParameters('http://bigmir.net/?a=b&c=d#e'), + extractURLParameters('http://bigmir.net/?a=b&c=d#e&g=h'), + extractURLParameters('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), + extractURLParameters('//bigmir.net/?a=b&c=d'), + extractURLParameters('//bigmir.net/?a=b&c=d#e=f'), + extractURLParameters('//bigmir.net/?a&c=d#e=f'), + extractURLParameters('//bigmir.net/?a=b&c=d#e=f&g=h'), + extractURLParameters('//bigmir.net/?a=b&c=d#e'), + extractURLParameters('//bigmir.net/?a=b&c=d#e&g=h'), + extractURLParameters('//bigmir.net/?a=b&c=d#test?e=f&g=h'); SELECT - extractURLParameterNames('http://yandex.ru/?a=b&c=d'), - extractURLParameterNames('http://yandex.ru/?a=b&c=d#e=f'), - extractURLParameterNames('http://yandex.ru/?a&c=d#e=f'), - extractURLParameterNames('http://yandex.ru/?a=b&c=d#e=f&g=h'), - extractURLParameterNames('http://yandex.ru/?a=b&c=d#e'), - extractURLParameterNames('http://yandex.ru/?a=b&c=d#e&g=h'), - 
extractURLParameterNames('http://yandex.ru/?a=b&c=d#test?e=f&g=h'), - extractURLParameterNames('//yandex.ru/?a=b&c=d'), - extractURLParameterNames('//yandex.ru/?a=b&c=d#e=f'), - extractURLParameterNames('//yandex.ru/?a&c=d#e=f'), - extractURLParameterNames('//yandex.ru/?a=b&c=d#e=f&g=h'), - extractURLParameterNames('//yandex.ru/?a=b&c=d#e'), - extractURLParameterNames('//yandex.ru/?a=b&c=d#e&g=h'), - extractURLParameterNames('//yandex.ru/?a=b&c=d#test?e=f&g=h'); + extractURLParameterNames('http://bigmir.net/?a=b&c=d'), + extractURLParameterNames('http://bigmir.net/?a=b&c=d#e=f'), + extractURLParameterNames('http://bigmir.net/?a&c=d#e=f'), + extractURLParameterNames('http://bigmir.net/?a=b&c=d#e=f&g=h'), + extractURLParameterNames('http://bigmir.net/?a=b&c=d#e'), + extractURLParameterNames('http://bigmir.net/?a=b&c=d#e&g=h'), + extractURLParameterNames('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), + extractURLParameterNames('//bigmir.net/?a=b&c=d'), + extractURLParameterNames('//bigmir.net/?a=b&c=d#e=f'), + extractURLParameterNames('//bigmir.net/?a&c=d#e=f'), + extractURLParameterNames('//bigmir.net/?a=b&c=d#e=f&g=h'), + extractURLParameterNames('//bigmir.net/?a=b&c=d#e'), + extractURLParameterNames('//bigmir.net/?a=b&c=d#e&g=h'), + extractURLParameterNames('//bigmir.net/?a=b&c=d#test?e=f&g=h'); SELECT - extractURLParameter('http://yandex.ru/?a=b&c=d', 'a'), - extractURLParameter('http://yandex.ru/?a=b&c=d', 'c'), - extractURLParameter('http://yandex.ru/?a=b&c=d#e=f', 'e'), - extractURLParameter('http://yandex.ru/?a&c=d#e=f', 'a'), - extractURLParameter('http://yandex.ru/?a&c=d#e=f', 'c'), - extractURLParameter('http://yandex.ru/?a&c=d#e=f', 'e'), - extractURLParameter('http://yandex.ru/?a=b&c=d#e=f&g=h', 'g'), - extractURLParameter('http://yandex.ru/?a=b&c=d#e', 'a'), - extractURLParameter('http://yandex.ru/?a=b&c=d#e', 'c'), - extractURLParameter('http://yandex.ru/?a=b&c=d#e', 'e'), - extractURLParameter('http://yandex.ru/?a=b&c=d#e&g=h', 'c'), - extractURLParameter('http://yandex.ru/?a=b&c=d#e&g=h', 'e'), - extractURLParameter('http://yandex.ru/?a=b&c=d#e&g=h', 'g'), - extractURLParameter('http://yandex.ru/?a=b&c=d#test?e=f&g=h', 'test'), - extractURLParameter('http://yandex.ru/?a=b&c=d#test?e=f&g=h', 'e'), - extractURLParameter('http://yandex.ru/?a=b&c=d#test?e=f&g=h', 'g'), - extractURLParameter('//yandex.ru/?a=b&c=d', 'a'), - extractURLParameter('//yandex.ru/?a=b&c=d', 'c'), - extractURLParameter('//yandex.ru/?a=b&c=d#e=f', 'e'), - extractURLParameter('//yandex.ru/?a&c=d#e=f', 'a'), - extractURLParameter('//yandex.ru/?a&c=d#e=f', 'c'), - extractURLParameter('//yandex.ru/?a&c=d#e=f', 'e'), - extractURLParameter('//yandex.ru/?a=b&c=d#e=f&g=h', 'g'), - extractURLParameter('//yandex.ru/?a=b&c=d#e', 'a'), - extractURLParameter('//yandex.ru/?a=b&c=d#e', 'c'), - extractURLParameter('//yandex.ru/?a=b&c=d#e', 'e'), - extractURLParameter('//yandex.ru/?a=b&c=d#e&g=h', 'c'), - extractURLParameter('//yandex.ru/?a=b&c=d#e&g=h', 'e'), - extractURLParameter('//yandex.ru/?a=b&c=d#e&g=h', 'g'), - extractURLParameter('//yandex.ru/?a=b&c=d#test?e=f&g=h', 'test'), - extractURLParameter('//yandex.ru/?a=b&c=d#test?e=f&g=h', 'e'), - extractURLParameter('//yandex.ru/?a=b&c=d#test?e=f&g=h', 'g'); + extractURLParameter('http://bigmir.net/?a=b&c=d', 'a'), + extractURLParameter('http://bigmir.net/?a=b&c=d', 'c'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e=f', 'e'), + extractURLParameter('http://bigmir.net/?a&c=d#e=f', 'a'), + extractURLParameter('http://bigmir.net/?a&c=d#e=f', 'c'), + 
extractURLParameter('http://bigmir.net/?a&c=d#e=f', 'e'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e=f&g=h', 'g'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e', 'a'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e', 'c'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e', 'e'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'c'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'e'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'g'), + extractURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'test'), + extractURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'e'), + extractURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'g'), + extractURLParameter('//bigmir.net/?a=b&c=d', 'a'), + extractURLParameter('//bigmir.net/?a=b&c=d', 'c'), + extractURLParameter('//bigmir.net/?a=b&c=d#e=f', 'e'), + extractURLParameter('//bigmir.net/?a&c=d#e=f', 'a'), + extractURLParameter('//bigmir.net/?a&c=d#e=f', 'c'), + extractURLParameter('//bigmir.net/?a&c=d#e=f', 'e'), + extractURLParameter('//bigmir.net/?a=b&c=d#e=f&g=h', 'g'), + extractURLParameter('//bigmir.net/?a=b&c=d#e', 'a'), + extractURLParameter('//bigmir.net/?a=b&c=d#e', 'c'), + extractURLParameter('//bigmir.net/?a=b&c=d#e', 'e'), + extractURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'c'), + extractURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'e'), + extractURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'g'), + extractURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'test'), + extractURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'e'), + extractURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'g'); SELECT - cutURLParameter('http://yandex.ru/?a=b&c=d', 'a'), - cutURLParameter('http://yandex.ru/?a=b&c=d', 'c'), - cutURLParameter('http://yandex.ru/?a=b&c=d#e=f', 'e'), - cutURLParameter('http://yandex.ru/?a&c=d#e=f', 'a'), - cutURLParameter('http://yandex.ru/?a&c=d#e=f', 'c'), - cutURLParameter('http://yandex.ru/?a&c=d#e=f', 'e'), - cutURLParameter('http://yandex.ru/?a=b&c=d#e=f&g=h', 'g'), - cutURLParameter('http://yandex.ru/?a=b&c=d#e', 'a'), - cutURLParameter('http://yandex.ru/?a=b&c=d#e', 'c'), - cutURLParameter('http://yandex.ru/?a=b&c=d#e', 'e'), - cutURLParameter('http://yandex.ru/?a=b&c=d#e&g=h', 'c'), - cutURLParameter('http://yandex.ru/?a=b&c=d#e&g=h', 'e'), - cutURLParameter('http://yandex.ru/?a=b&c=d#e&g=h', 'g'), - cutURLParameter('http://yandex.ru/?a=b&c=d#test?e=f&g=h', 'test'), - cutURLParameter('http://yandex.ru/?a=b&c=d#test?e=f&g=h', 'e'), - cutURLParameter('http://yandex.ru/?a=b&c=d#test?e=f&g=h', 'g'), - cutURLParameter('//yandex.ru/?a=b&c=d', 'a'), - cutURLParameter('//yandex.ru/?a=b&c=d', 'c'), - cutURLParameter('//yandex.ru/?a=b&c=d#e=f', 'e'), - cutURLParameter('//yandex.ru/?a&c=d#e=f', 'a'), - cutURLParameter('//yandex.ru/?a&c=d#e=f', 'c'), - cutURLParameter('//yandex.ru/?a&c=d#e=f', 'e'), - cutURLParameter('//yandex.ru/?a=b&c=d#e=f&g=h', 'g'), - cutURLParameter('//yandex.ru/?a=b&c=d#e', 'a'), - cutURLParameter('//yandex.ru/?a=b&c=d#e', 'c'), - cutURLParameter('//yandex.ru/?a=b&c=d#e', 'e'), - cutURLParameter('//yandex.ru/?a=b&c=d#e&g=h', 'c'), - cutURLParameter('//yandex.ru/?a=b&c=d#e&g=h', 'e'), - cutURLParameter('//yandex.ru/?a=b&c=d#e&g=h', 'g'), - cutURLParameter('//yandex.ru/?a=b&c=d#test?e=f&g=h', 'test'), - cutURLParameter('//yandex.ru/?a=b&c=d#test?e=f&g=h', 'e'), - cutURLParameter('//yandex.ru/?a=b&c=d#test?e=f&g=h', 'g'); + cutURLParameter('http://bigmir.net/?a=b&c=d', 'a'), + cutURLParameter('http://bigmir.net/?a=b&c=d', 'c'), + 
cutURLParameter('http://bigmir.net/?a=b&c=d#e=f', 'e'), + cutURLParameter('http://bigmir.net/?a&c=d#e=f', 'a'), + cutURLParameter('http://bigmir.net/?a&c=d#e=f', 'c'), + cutURLParameter('http://bigmir.net/?a&c=d#e=f', 'e'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e=f&g=h', 'g'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e', 'a'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e', 'c'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e', 'e'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'c'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'e'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'g'), + cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'test'), + cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'e'), + cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'g'), + cutURLParameter('//bigmir.net/?a=b&c=d', 'a'), + cutURLParameter('//bigmir.net/?a=b&c=d', 'c'), + cutURLParameter('//bigmir.net/?a=b&c=d#e=f', 'e'), + cutURLParameter('//bigmir.net/?a&c=d#e=f', 'a'), + cutURLParameter('//bigmir.net/?a&c=d#e=f', 'c'), + cutURLParameter('//bigmir.net/?a&c=d#e=f', 'e'), + cutURLParameter('//bigmir.net/?a=b&c=d#e=f&g=h', 'g'), + cutURLParameter('//bigmir.net/?a=b&c=d#e', 'a'), + cutURLParameter('//bigmir.net/?a=b&c=d#e', 'c'), + cutURLParameter('//bigmir.net/?a=b&c=d#e', 'e'), + cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'c'), + cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'e'), + cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'g'), + cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'test'), + cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'e'), + cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'g'); SELECT - extractURLParameters(materialize('http://yandex.ru/?a=b&c=d')), - extractURLParameters(materialize('http://yandex.ru/?a=b&c=d#e=f')), - extractURLParameters(materialize('http://yandex.ru/?a&c=d#e=f')), - extractURLParameters(materialize('http://yandex.ru/?a=b&c=d#e=f&g=h')), - extractURLParameters(materialize('http://yandex.ru/?a=b&c=d#e')), - extractURLParameters(materialize('http://yandex.ru/?a=b&c=d#e&g=h')), - extractURLParameters(materialize('http://yandex.ru/?a=b&c=d#test?e=f&g=h')), - extractURLParameters(materialize('//yandex.ru/?a=b&c=d')), - extractURLParameters(materialize('//yandex.ru/?a=b&c=d#e=f')), - extractURLParameters(materialize('//yandex.ru/?a&c=d#e=f')), - extractURLParameters(materialize('//yandex.ru/?a=b&c=d#e=f&g=h')), - extractURLParameters(materialize('//yandex.ru/?a=b&c=d#e')), - extractURLParameters(materialize('//yandex.ru/?a=b&c=d#e&g=h')), - extractURLParameters(materialize('//yandex.ru/?a=b&c=d#test?e=f&g=h')); + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d')), + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d#e=f')), + extractURLParameters(materialize('http://bigmir.net/?a&c=d#e=f')), + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d#e=f&g=h')), + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d#e')), + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d#e&g=h')), + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h')), + extractURLParameters(materialize('//bigmir.net/?a=b&c=d')), + extractURLParameters(materialize('//bigmir.net/?a=b&c=d#e=f')), + extractURLParameters(materialize('//bigmir.net/?a&c=d#e=f')), + extractURLParameters(materialize('//bigmir.net/?a=b&c=d#e=f&g=h')), + extractURLParameters(materialize('//bigmir.net/?a=b&c=d#e')), + 
extractURLParameters(materialize('//bigmir.net/?a=b&c=d#e&g=h')), + extractURLParameters(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h')); SELECT - extractURLParameterNames(materialize('http://yandex.ru/?a=b&c=d')), - extractURLParameterNames(materialize('http://yandex.ru/?a=b&c=d#e=f')), - extractURLParameterNames(materialize('http://yandex.ru/?a&c=d#e=f')), - extractURLParameterNames(materialize('http://yandex.ru/?a=b&c=d#e=f&g=h')), - extractURLParameterNames(materialize('http://yandex.ru/?a=b&c=d#e')), - extractURLParameterNames(materialize('http://yandex.ru/?a=b&c=d#e&g=h')), - extractURLParameterNames(materialize('http://yandex.ru/?a=b&c=d#test?e=f&g=h')), - extractURLParameterNames(materialize('//yandex.ru/?a=b&c=d')), - extractURLParameterNames(materialize('//yandex.ru/?a=b&c=d#e=f')), - extractURLParameterNames(materialize('//yandex.ru/?a&c=d#e=f')), - extractURLParameterNames(materialize('//yandex.ru/?a=b&c=d#e=f&g=h')), - extractURLParameterNames(materialize('//yandex.ru/?a=b&c=d#e')), - extractURLParameterNames(materialize('//yandex.ru/?a=b&c=d#e&g=h')), - extractURLParameterNames(materialize('//yandex.ru/?a=b&c=d#test?e=f&g=h')); + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d')), + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d#e=f')), + extractURLParameterNames(materialize('http://bigmir.net/?a&c=d#e=f')), + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d#e=f&g=h')), + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d#e')), + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d#e&g=h')), + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d#e=f')), + extractURLParameterNames(materialize('//bigmir.net/?a&c=d#e=f')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d#e=f&g=h')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d#e')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d#e&g=h')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h')); SELECT - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d'), 'a'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d'), 'c'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d#e=f'), 'e'), - extractURLParameter(materialize('http://yandex.ru/?a&c=d#e=f'), 'a'), - extractURLParameter(materialize('http://yandex.ru/?a&c=d#e=f'), 'c'), - extractURLParameter(materialize('http://yandex.ru/?a&c=d#e=f'), 'e'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d#e=f&g=h'), 'g'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d#e'), 'a'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d#e'), 'c'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d#e'), 'e'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d#e&g=h'), 'c'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d#e&g=h'), 'e'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d#e&g=h'), 'g'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d#test?e=f&g=h'), 'test'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d#test?e=f&g=h'), 'e'), - extractURLParameter(materialize('http://yandex.ru/?a=b&c=d#test?e=f&g=h'), 'g'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d'), 'a'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d'), 'c'), - 
extractURLParameter(materialize('//yandex.ru/?a=b&c=d#e=f'), 'e'), - extractURLParameter(materialize('//yandex.ru/?a&c=d#e=f'), 'a'), - extractURLParameter(materialize('//yandex.ru/?a&c=d#e=f'), 'c'), - extractURLParameter(materialize('//yandex.ru/?a&c=d#e=f'), 'e'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d#e=f&g=h'), 'g'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d#e'), 'a'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d#e'), 'c'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d#e'), 'e'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d#e&g=h'), 'c'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d#e&g=h'), 'e'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d#e&g=h'), 'g'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d#test?e=f&g=h'), 'test'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d#test?e=f&g=h'), 'e'), - extractURLParameter(materialize('//yandex.ru/?a=b&c=d#test?e=f&g=h'), 'g'); + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d'), 'a'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d'), 'c'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f'), 'e'), + extractURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'a'), + extractURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'c'), + extractURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'e'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f&g=h'), 'g'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'a'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'c'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'e'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'c'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'e'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'g'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'test'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'e'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'g'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d'), 'a'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d'), 'c'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f'), 'e'), + extractURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'a'), + extractURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'c'), + extractURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'e'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f&g=h'), 'g'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'a'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'c'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'e'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'c'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'e'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'g'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'test'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'e'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'g'); SELECT - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d'), 'a'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d'), 'c'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d#e=f'), 
'e'), - cutURLParameter(materialize('http://yandex.ru/?a&c=d#e=f'), 'a'), - cutURLParameter(materialize('http://yandex.ru/?a&c=d#e=f'), 'c'), - cutURLParameter(materialize('http://yandex.ru/?a&c=d#e=f'), 'e'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d#e=f&g=h'), 'g'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d#e'), 'a'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d#e'), 'c'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d#e'), 'e'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d#e&g=h'), 'c'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d#e&g=h'), 'e'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d#e&g=h'), 'g'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d#test?e=f&g=h'), 'test'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d#test?e=f&g=h'), 'e'), - cutURLParameter(materialize('http://yandex.ru/?a=b&c=d#test?e=f&g=h'), 'g'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d'), 'a'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d'), 'c'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d#e=f'), 'e'), - cutURLParameter(materialize('//yandex.ru/?a&c=d#e=f'), 'a'), - cutURLParameter(materialize('//yandex.ru/?a&c=d#e=f'), 'c'), - cutURLParameter(materialize('//yandex.ru/?a&c=d#e=f'), 'e'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d#e=f&g=h'), 'g'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d#e'), 'a'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d#e'), 'c'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d#e'), 'e'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d#e&g=h'), 'c'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d#e&g=h'), 'e'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d#e&g=h'), 'g'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d#test?e=f&g=h'), 'test'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d#test?e=f&g=h'), 'e'), - cutURLParameter(materialize('//yandex.ru/?a=b&c=d#test?e=f&g=h'), 'g'); + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), 'a'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), 'c'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f'), 'e'), + cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'a'), + cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'c'), + cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'e'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f&g=h'), 'g'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'a'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'c'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'e'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'c'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'e'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'g'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'test'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'e'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'g'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), 'a'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), 'c'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f'), 'e'), + cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'a'), + cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'c'), + cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'e'), + 
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f&g=h'), 'g'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'a'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'c'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'e'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'c'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'e'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'g'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'test'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'e'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'g'); diff --git a/tests/queries/0_stateless/00381_first_significant_subdomain.reference b/tests/queries/0_stateless/00381_first_significant_subdomain.reference index 1f1230a2104..086f3b0f9ce 100644 --- a/tests/queries/0_stateless/00381_first_significant_subdomain.reference +++ b/tests/queries/0_stateless/00381_first_significant_subdomain.reference @@ -1,3 +1,3 @@ canada congo net-domena -yandex yandex yandex яндекс yandex +meta bigmir yahoo гугл meta canada hello hello canada diff --git a/tests/queries/0_stateless/00381_first_significant_subdomain.sql b/tests/queries/0_stateless/00381_first_significant_subdomain.sql index 5badd14f200..5d8c53afc9c 100644 --- a/tests/queries/0_stateless/00381_first_significant_subdomain.sql +++ b/tests/queries/0_stateless/00381_first_significant_subdomain.sql @@ -4,12 +4,12 @@ SELECT firstSignificantSubdomain('http://pochemu.net-domena.ru') AS why; SELECT - firstSignificantSubdomain('ftp://www.yandex.com.tr/news.html'), - firstSignificantSubdomain('https://www.yandex.ua/news.html'), - firstSignificantSubdomain('magnet:yandex.abc'), - firstSignificantSubdomain('ftp://www.yandex.co.uk/news.html'), - firstSignificantSubdomain('https://api.www3.static.dev.ввв.яндекс.рф'), - firstSignificantSubdomain('//www.yandex.com.tr/news.html'); + firstSignificantSubdomain('ftp://www.meta.com.ua/news.html'), + firstSignificantSubdomain('https://www.bigmir.net/news.html'), + firstSignificantSubdomain('magnet:ukr.abc'), + firstSignificantSubdomain('ftp://www.yahoo.co.jp/news.html'), + firstSignificantSubdomain('https://api.www3.static.dev.ввв.гугл.ком'), + firstSignificantSubdomain('//www.meta.com.ua/news.html'); SELECT firstSignificantSubdomain('http://hello.canada.c'), diff --git a/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh index d77955a51bc..a78cdd445cb 100755 --- a/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh +++ b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh @@ -19,11 +19,11 @@ function pack_unpack_compare() ${CLICKHOUSE_CLIENT} --query "CREATE TABLE buf_00385 ENGINE = Memory AS $1" local res_orig - res_orig=$(${CLICKHOUSE_CLIENT} --max_threads=1 --query "SELECT $TABLE_HASH FROM buf_00385") + res_orig=$(${CLICKHOUSE_CLIENT} --max_block_size=65505 --max_threads=1 --query "SELECT $TABLE_HASH FROM buf_00385") ${CLICKHOUSE_CLIENT} --max_threads=1 --query "CREATE TABLE buf_file ENGINE = File($3) AS SELECT * FROM buf_00385" local res_db_file - res_db_file=$(${CLICKHOUSE_CLIENT} --max_threads=1 --query "SELECT $TABLE_HASH FROM buf_file") + res_db_file=$(${CLICKHOUSE_CLIENT} --max_block_size=65505 --max_threads=1 --query "SELECT $TABLE_HASH FROM buf_file") ${CLICKHOUSE_CLIENT} --max_threads=1 --query "SELECT * FROM buf_00385 
FORMAT $3" > "$buf_file" local res_ch_local1 diff --git a/tests/queries/0_stateless/00505_secure.sh b/tests/queries/0_stateless/00505_secure.sh index e69515253ed..b7c12911b90 100755 --- a/tests/queries/0_stateless/00505_secure.sh +++ b/tests/queries/0_stateless/00505_secure.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-random-settings # set -x diff --git a/tests/queries/0_stateless/00580_consistent_hashing_functions.sql b/tests/queries/0_stateless/00580_consistent_hashing_functions.sql index 08e785929c7..f470642d391 100644 --- a/tests/queries/0_stateless/00580_consistent_hashing_functions.sql +++ b/tests/queries/0_stateless/00580_consistent_hashing_functions.sql @@ -1,6 +1,6 @@ -- Tags: no-fasttest SELECT jumpConsistentHash(1, 1), jumpConsistentHash(42, 57), jumpConsistentHash(256, 1024), jumpConsistentHash(3735883980, 1), jumpConsistentHash(3735883980, 666), jumpConsistentHash(16045690984833335023, 255); -SELECT yandexConsistentHash(16045690984833335023, 1), yandexConsistentHash(16045690984833335023, 2), yandexConsistentHash(16045690984833335023, 3), yandexConsistentHash(16045690984833335023, 4), yandexConsistentHash(16045690984833335023, 173), yandexConsistentHash(16045690984833335023, 255); +SELECT kostikConsistentHash(16045690984833335023, 1), kostikConsistentHash(16045690984833335023, 2), kostikConsistentHash(16045690984833335023, 3), kostikConsistentHash(16045690984833335023, 4), kostikConsistentHash(16045690984833335023, 173), kostikConsistentHash(16045690984833335023, 255); SELECT jumpConsistentHash(intHash64(number), 787) FROM system.numbers LIMIT 1000000, 2; -SELECT yandexConsistentHash(16045690984833335023+number-number, 120) FROM system.numbers LIMIT 1000000, 2; +SELECT kostikConsistentHash(16045690984833335023+number-number, 120) FROM system.numbers LIMIT 1000000, 2; diff --git a/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference b/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference index a0265bdb7ed..2b1089c6840 100644 --- a/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference +++ b/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.reference @@ -1,23 +1,23 @@ {"total":"1","domain":"baidu.com"} {"total":"2","domain":"facebook.com"} {"total":"1","domain":"google.com"} -{"total":"2","domain":"yandex.ru"} +{"total":"2","domain":"meta.ua"} {"total":"1","domain":"baidu.com"} {"total":"2","domain":"facebook.com"} {"total":"1","domain":"google.com"} -{"total":"2","domain":"yandex.ru"} +{"total":"2","domain":"meta.ua"} 1 baidu.com 2 facebook.com 1 google.com -2 yandex.ru +2 meta.ua 1 baidu.com 2 facebook.com 1 google.com -2 yandex.ru +2 meta.ua 1 baidu.com 1 google.com 2 facebook.com -2 yandex.ru +2 meta.ua 1 1 2 @@ -25,4 +25,4 @@ baidu.com google.com facebook.com -yandex.ru +meta.ua diff --git a/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql b/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql index 49975daaa7e..07d39e8d54a 100644 --- a/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql +++ b/tests/queries/0_stateless/00585_union_all_subquery_aggregation_column_removal.sql @@ -4,8 +4,8 @@ DROP TABLE IF EXISTS transactions; CREATE TABLE clicks (domain String) ENGINE = Memory; CREATE TABLE transactions (domain String) ENGINE = Memory; -INSERT INTO clicks VALUES ('facebook.com'), ('yandex.ru'), ('google.com'); -INSERT 
INTO transactions VALUES ('facebook.com'), ('yandex.ru'), ('baidu.com'); +INSERT INTO clicks VALUES ('facebook.com'), ('meta.ua'), ('google.com'); +INSERT INTO transactions VALUES ('facebook.com'), ('meta.ua'), ('baidu.com'); SELECT diff --git a/tests/queries/0_stateless/00646_url_engine.python b/tests/queries/0_stateless/00646_url_engine.python index 4f47e819328..d1836817867 100644 --- a/tests/queries/0_stateless/00646_url_engine.python +++ b/tests/queries/0_stateless/00646_url_engine.python @@ -120,18 +120,14 @@ class CSVHTTPServer(BaseHTTPRequestHandler): class HTTPServerV6(HTTPServer): address_family = socket.AF_INET6 -def start_server(requests_amount): +def start_server(): if IS_IPV6: httpd = HTTPServerV6(HTTP_SERVER_ADDRESS, CSVHTTPServer) else: httpd = HTTPServer(HTTP_SERVER_ADDRESS, CSVHTTPServer) - def real_func(): - for i in range(requests_amount): - httpd.handle_request() - - t = threading.Thread(target=real_func) - return t + t = threading.Thread(target=httpd.serve_forever) + return t, httpd # test section @@ -201,7 +197,7 @@ def main(): 'select double, count(*) from {tbl} group by double': "7.7\t2\n9.9\t10" } - t = start_server(len(select_only_requests) * 2 + (len(insert_requests) + len(select_requests)) * 2) + t, httpd = start_server() t.start() # test table with url engine test_select(table_name="test_table_select", requests=list(select_only_requests.keys()), answers=list(select_only_requests.values()), test_data=test_data) @@ -211,6 +207,8 @@ def main(): test_insert(table_name="test_table_insert", requests_insert=insert_requests, requests_select=list(select_requests.keys()), answers=list(select_requests.values())) #test insert into table function url test_insert(requests_insert=insert_requests, requests_select=list(select_requests.keys()), answers=list(select_requests.values())) + + httpd.shutdown() t.join() print("PASSED") diff --git a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh index f49aeb93184..00a7e3c5232 100755 --- a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh +++ b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh @@ -25,7 +25,7 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE string_test_table (val String) ENGINE ${CLICKHOUSE_CLIENT} --query="CREATE TABLE fixed_string_test_table (val FixedString(1)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE signed_integer_test_table (val Int32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE unsigned_integer_test_table (val UInt32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE enum_test_table (val Enum16('hello' = 1, 'world' = 2, 'yandex' = 256, 'clickhouse' = 257)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE enum_test_table (val Enum16('hello' = 1, 'world' = 2, 'youtube' = 256, 'clickhouse' = 257)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE date_test_table (val Date) ENGINE = MergeTree ORDER BY val 
SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;" ${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES string_test_table;" @@ -40,7 +40,7 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO fixed_string_test_table VALUES ('0'), # 131072 -> 17 bit is 1 ${CLICKHOUSE_CLIENT} --query="INSERT INTO signed_integer_test_table VALUES (-2), (0), (2), (2), (131072), (131073), (131073);" ${CLICKHOUSE_CLIENT} --query="INSERT INTO unsigned_integer_test_table VALUES (0), (2), (2), (131072), (131073), (131073);" -${CLICKHOUSE_CLIENT} --query="INSERT INTO enum_test_table VALUES ('hello'), ('world'), ('world'), ('yandex'), ('clickhouse'), ('clickhouse');" +${CLICKHOUSE_CLIENT} --query="INSERT INTO enum_test_table VALUES ('hello'), ('world'), ('world'), ('youtube'), ('clickhouse'), ('clickhouse');" ${CLICKHOUSE_CLIENT} --query="INSERT INTO date_test_table VALUES (1), (2), (2), (256), (257), (257);" CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=debug/g') diff --git a/tests/queries/0_stateless/00900_long_parquet_load.reference b/tests/queries/0_stateless/00900_long_parquet_load.reference index 6ecff505b2e..b295a226853 100644 --- a/tests/queries/0_stateless/00900_long_parquet_load.reference +++ b/tests/queries/0_stateless/00900_long_parquet_load.reference @@ -88,6 +88,9 @@ idx10 ['This','is','a','test'] 22 23 24 +=== Try load data from case_insensitive_column_matching.parquet +123 1 +456 2 === Try load data from datapage_v2.snappy.parquet Code: 33. DB::ParsingEx---tion: Error while reading Parquet data: IOError: Unknown encoding type.: While executing ParquetBlockInputFormat: data for INSERT was parsed from stdin: (in query: INSERT INTO parquet_load FORMAT Parquet). (CANNOT_READ_ALL_DATA) @@ -339,9 +342,6 @@ Code: 33. DB::ParsingEx---tion: Error while reading Parquet data: IOError: Unkno (NULL) === Try load data from single_nan.parquet \N -=== Try load data from test_setting_input_format_use_lowercase_column_name.parquet -123 1 -456 2 === Try load data from userdata1.parquet 1454486129 1 Amanda Jordan ajordan0@com.com Female 1.197.201.2 6759521864920116 Indonesia 3/8/1971 49756.53 Internal Auditor 1E+02 1454519043 2 Albert Freeman afreeman1@is.gd Male 218.111.175.34 Canada 1/16/1968 150280.17 Accountant IV diff --git a/tests/queries/0_stateless/00909_ngram_distance.reference b/tests/queries/0_stateless/00909_ngram_distance.reference index 290e24faac5..4323fa86151 100644 --- a/tests/queries/0_stateless/00909_ngram_distance.reference +++ b/tests/queries/0_stateless/00909_ngram_distance.reference @@ -113,112 +113,112 @@ 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrica.yandex.com/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 0 привет как дела?... 
Херсон 600 -пап привет как дела - Яндекс.Видео 684 -привет как дела клип - Яндекс.Видео 692 -привет братан как дела - Яндекс.Видео 707 +пап привет как дела - TUT.BY 625 +привет как дела клип - TUT.BY 636 +привет братан как дела - TUT.BY 657 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 +http://top.bigmir.net/ 1000 привет 1000 0 http://metric.ru/ 765 http://metris.ru/ 765 http://metrika.ru/ 778 http://autometric.ru/ 810 -http://metrica.yandex.com/ 846 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 привет как дела?... Херсон 297 -пап привет как дела - Яндекс.Видео 422 -привет как дела клип - Яндекс.Видео 435 -привет братан как дела - Яндекс.Видео 500 +пап привет как дела - TUT.BY 333 +привет как дела клип - TUT.BY 350 +привет братан как дела - TUT.BY 429 привет 529 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 436 привет как дела?... Херсон 459 -пап привет как дела - Яндекс.Видео 511 +привет как дела клип - TUT.BY 500 +привет братан как дела - TUT.BY 524 привет 529 -привет как дела клип - Яндекс.Видео 565 -привет братан как дела - Яндекс.Видео 583 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 +http://top.bigmir.net/ 1000 http://metrika.ru/ 524 http://metric.ru/ 700 http://metris.ru/ 700 http://autometric.ru/ 750 -http://metrica.yandex.com/ 793 +http://top.bigmir.net/ 920 1000 -пап привет как дела - Яндекс.Видео 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metric.ru/ 600 -http://metrica.yandex.com/ 655 http://autometric.ru/ 667 http://metris.ru/ 700 http://metrika.ru/ 714 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metrika.ru/ 619 http://metric.ru/ 700 http://metris.ru/ 700 http://autometric.ru/ 750 -http://metrica.yandex.com/ 793 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metric.ru/ 600 http://autometric.ru/ 667 http://metris.ru/ 700 http://metrika.ru/ 714 -http://metrica.yandex.com/ 724 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... 
Херсон 1000 -http://metrica.yandex.com/ 714 +http://top.bigmir.net/ 667 +http://metrika.ru/ 900 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrika.ru/ 1000 http://metris.ru/ 1000 -пап привет как дела - Яндекс.Видео 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 0 0 @@ -335,135 +335,135 @@ http://metris.ru/ 1000 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrica.yandex.com/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 0 привет как дела?... Херсон 600 -пап привет как дела - Яндекс.Видео 684 -привет как дела клип - Яндекс.Видео 692 -привет братан как дела - Яндекс.Видео 707 +пап привет как дела - TUT.BY 625 +привет как дела клип - TUT.BY 636 +привет братан как дела - TUT.BY 657 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 +http://top.bigmir.net/ 1000 привет 1000 0 http://metric.ru/ 765 http://metris.ru/ 765 http://metrika.ru/ 778 http://autometric.ru/ 810 -http://metrica.yandex.com/ 846 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 привет как дела?... Херсон 297 -пап привет как дела - Яндекс.Видео 422 -привет как дела клип - Яндекс.Видео 435 -привет братан как дела - Яндекс.Видео 500 +пап привет как дела - TUT.BY 333 +привет как дела клип - TUT.BY 350 +привет братан как дела - TUT.BY 429 привет 529 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 436 привет как дела?... Херсон 459 -пап привет как дела - Яндекс.Видео 511 +привет как дела клип - TUT.BY 500 +привет братан как дела - TUT.BY 524 привет 529 -привет как дела клип - Яндекс.Видео 565 -привет братан как дела - Яндекс.Видео 583 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 +http://top.bigmir.net/ 1000 http://metrika.ru/ 524 http://metric.ru/ 700 http://metris.ru/ 700 http://autometric.ru/ 750 -http://metrica.yandex.com/ 793 +http://top.bigmir.net/ 920 1000 -пап привет как дела - Яндекс.Видео 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metrika.ru/ 524 http://metric.ru/ 700 http://metris.ru/ 700 http://autometric.ru/ 750 -http://metrica.yandex.com/ 793 +http://top.bigmir.net/ 920 1000 -пап привет как дела - Яндекс.Видео 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... 
Херсон 1000 http://metric.ru/ 600 -http://metrica.yandex.com/ 655 http://autometric.ru/ 667 http://metris.ru/ 700 http://metrika.ru/ 714 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metrika.ru/ 619 http://metric.ru/ 700 http://metris.ru/ 700 http://autometric.ru/ 750 -http://metrica.yandex.com/ 793 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metric.ru/ 600 http://autometric.ru/ 667 http://metris.ru/ 700 http://metrika.ru/ 714 -http://metrica.yandex.com/ 724 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 -http://metrica.yandex.com/ 714 +http://top.bigmir.net/ 667 +http://metrika.ru/ 900 + 1000 +http://autometric.ru/ 1000 +http://metric.ru/ 1000 +http://metris.ru/ 1000 +пап привет как дела - TUT.BY 1000 +привет 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 +привет как дела?... Херсон 1000 +привет как дела клип - TUT.BY 0 +пап привет как дела - TUT.BY 208 +привет братан как дела - TUT.BY 286 +привет как дела?... Херсон 490 +привет 742 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 -пап привет как дела - Яндекс.Видео 1000 -привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 -привет как дела?... Херсон 1000 -привет как дела клип - Яндекс.Видео 0 -пап привет как дела - Яндекс.Видео 169 -привет братан как дела - Яндекс.Видео 235 -привет как дела?... Херсон 544 -привет 784 - 1000 -http://autometric.ru/ 1000 -http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 -http://metrika.ru/ 1000 -http://metris.ru/ 1000 +http://top.bigmir.net/ 1000 0 0 0 @@ -576,82 +576,82 @@ http://metris.ru/ 1000 111 429 1000 +пап привет как дела - TUT.BY 242 привет как дела?... Херсон 254 -пап привет как дела - Яндекс.Видео 398 -привет как дела клип - Яндекс.Видео 412 -привет братан как дела - Яндекс.Видео 461 +привет как дела клип - TUT.BY 265 +привет братан как дела - TUT.BY 333 привет 471 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 303 привет как дела?... 
Херсон 343 -пап привет как дела - Яндекс.Видео 446 +привет как дела клип - TUT.BY 353 +привет братан как дела - TUT.BY 389 привет 471 -привет как дела клип - Яндекс.Видео 482 -привет братан как дела - Яндекс.Видео 506 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 +http://top.bigmir.net/ 1000 http://metrika.ru/ 579 http://metric.ru/ 778 http://metris.ru/ 778 http://autometric.ru/ 818 -http://metrica.yandex.com/ 852 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metric.ru/ 667 -http://metrica.yandex.com/ 704 http://autometric.ru/ 727 http://metris.ru/ 778 http://metrika.ru/ 789 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metrika.ru/ 684 http://metric.ru/ 778 http://metris.ru/ 778 http://autometric.ru/ 818 -http://metrica.yandex.com/ 852 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metric.ru/ 667 http://autometric.ru/ 727 -http://metrica.yandex.com/ 778 http://metris.ru/ 778 http://metrika.ru/ 789 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 -http://metrica.yandex.com/ 769 +http://top.bigmir.net/ 727 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 -пап привет как дела - Яндекс.Видео 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 0 0 @@ -765,91 +765,91 @@ http://metris.ru/ 1000 111 600 1000 +пап привет как дела - TUT.BY 909 привет как дела?... Херсон 910 -пап привет как дела - Яндекс.Видео 928 -привет как дела клип - Яндекс.Видео 929 -привет братан как дела - Яндекс.Видео 955 +привет как дела клип - TUT.BY 912 +привет братан как дела - TUT.BY 944 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 +http://top.bigmir.net/ 1000 привет 1000 +пап привет как дела - TUT.BY 667 привет как дела?... 
Херсон 672 -пап привет как дела - Яндекс.Видео 735 -привет как дела клип - Яндекс.Видео 741 -привет братан как дела - Яндекс.Видео 753 +привет как дела клип - TUT.BY 676 +привет братан как дела - TUT.BY 694 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 +http://top.bigmir.net/ 1000 привет 1000 http://metrika.ru/ 579 http://metric.ru/ 778 http://metris.ru/ 778 http://autometric.ru/ 818 -http://metrica.yandex.com/ 852 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metrika.ru/ 579 http://metric.ru/ 778 http://metris.ru/ 778 http://autometric.ru/ 818 -http://metrica.yandex.com/ 852 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metric.ru/ 667 -http://metrica.yandex.com/ 704 http://autometric.ru/ 727 http://metris.ru/ 778 http://metrika.ru/ 789 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metrika.ru/ 684 http://metric.ru/ 778 http://metris.ru/ 778 http://autometric.ru/ 818 -http://metrica.yandex.com/ 852 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 http://metric.ru/ 667 http://autometric.ru/ 727 -http://metrica.yandex.com/ 778 http://metris.ru/ 778 http://metrika.ru/ 789 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 -http://metrica.yandex.com/ 769 +http://top.bigmir.net/ 727 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 -пап привет как дела - Яндекс.Видео 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 diff --git a/tests/queries/0_stateless/00909_ngram_distance.sql b/tests/queries/0_stateless/00909_ngram_distance.sql index b2f403c415a..28aff50d22e 100644 --- a/tests/queries/0_stateless/00909_ngram_distance.sql +++ b/tests/queries/0_stateless/00909_ngram_distance.sql @@ -32,7 +32,7 @@ select round(1000 * ngramDistanceUTF8('абвгдеёжз', 'ёёёёёёёё')) drop table if exists test_distance; create table test_distance (Title String) engine = Memory; -insert into test_distance values ('привет как дела?... 
Херсон'), ('привет как дела клип - Яндекс.Видео'), ('привет'), ('пап привет как дела - Яндекс.Видео'), ('привет братан как дела - Яндекс.Видео'), ('http://metric.ru/'), ('http://autometric.ru/'), ('http://metrica.yandex.com/'), ('http://metris.ru/'), ('http://metrika.ru/'), (''); +insert into test_distance values ('привет как дела?... Херсон'), ('привет как дела клип - TUT.BY'), ('привет'), ('пап привет как дела - TUT.BY'), ('привет братан как дела - TUT.BY'), ('http://metric.ru/'), ('http://autometric.ru/'), ('http://top.bigmir.net/'), ('http://metris.ru/'), ('http://metrika.ru/'), (''); SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, Title) as distance, Title; SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, extract(Title, 'как дела')) as distance, Title; @@ -44,7 +44,7 @@ SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUT SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'metrica') as distance, Title; SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'metriks') as distance, Title; SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'metrics') as distance, Title; -SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'yandex') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'bigmir') as distance, Title; select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize(''), '')) from system.numbers limit 5; @@ -91,8 +91,8 @@ SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCa SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'mEtrica') as distance, Title; SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'metriKS') as distance, Title; SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'metrics') as distance, Title; -SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'YanDEX') as distance, Title; -SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'приВЕТ КАк ДеЛа КлИп - яндеКс.видео') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'BigMIR') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'приВЕТ КАк ДеЛа КлИп - TuT.by') as distance, Title; select round(1000 * ngramDistance(materialize(''), '')) from system.numbers limit 5; @@ -134,7 +134,7 @@ SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(T SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'metrica') as distance, Title; SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'metriks') as distance, Title; SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'metrics') as distance, Title; -SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'yandex') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'bigmir') as distance, Title; select round(1000 * 
ngramDistanceCaseInsensitive(materialize(''), '')) from system.numbers limit 5; select round(1000 * ngramDistanceCaseInsensitive(materialize('abc'), '')) from system.numbers limit 5; @@ -175,6 +175,6 @@ SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCa SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'mEtrica') as distance, Title; SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'metriKS') as distance, Title; SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'metrics') as distance, Title; -SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'YanDEX') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'BigMIR') as distance, Title; drop table if exists test_distance; diff --git a/tests/queries/0_stateless/00926_multimatch.reference b/tests/queries/0_stateless/00926_multimatch.reference index 4a2320de57b..7ff32ecd73b 100644 --- a/tests/queries/0_stateless/00926_multimatch.reference +++ b/tests/queries/0_stateless/00926_multimatch.reference @@ -600,16 +600,16 @@ 1 1 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 1 1 1 diff --git a/tests/queries/0_stateless/00926_multimatch.sql b/tests/queries/0_stateless/00926_multimatch.sql index 90cc289b5a5..b9843a1b4ba 100644 --- a/tests/queries/0_stateless/00926_multimatch.sql +++ b/tests/queries/0_stateless/00926_multimatch.sql @@ -75,18 +75,18 @@ select 1 = multiMatchAny(materialize('abcdef'), ['a......', 'a.....']) from syst select 0 = multiMatchAny(materialize('aaaa'), ['.*aa.*aaa.*', 'aaaaaa{2}', '\(aa\){3}']) from system.numbers limit 10; select 1 = multiMatchAny(materialize('abc'), ['a\0d']) from system.numbers limit 10; -select 1 = multiMatchAnyIndex(materialize('gogleuedeyandexgoogle'), ['google', 'yandex1']) from system.numbers limit 10; -select 2 = multiMatchAnyIndex(materialize('gogleuedeyandexgoogle'), ['google1', 'yandex']) from system.numbers limit 10; -select 0 != multiMatchAnyIndex(materialize('gogleuedeyandexgoogle'), ['.*goo.*', '.*yan.*']) from system.numbers limit 10; +select 1 = multiMatchAnyIndex(materialize('gogleuedeuniangoogle'), ['google', 'unian1']) from system.numbers limit 10; +select 2 = multiMatchAnyIndex(materialize('gogleuedeuniangoogle'), ['google1', 'unian']) from system.numbers limit 10; +select 0 != multiMatchAnyIndex(materialize('gogleuedeuniangoogle'), ['.*goo.*', '.*yan.*']) from system.numbers limit 10; select 5 = multiMatchAnyIndex(materialize('vladizlvav dabe don\'t heart me no more'), ['what', 'is', 'love', 'baby', 'no mo??', 'dont', 'h.rt me']) from system.numbers limit 10; SELECT multiMatchAny(materialize('/odezhda-dlya-bega/'), ['/odezhda-dlya-bega/', 'kurtki-i-vetrovki-dlya-bega', 'futbolki-i-mayki-dlya-bega']); SELECT 1 = multiMatchAny('фабрикант', ['f[ae]b[ei]rl', 'ф[иаэе]б[еэи][рпл]', 'афиукд', 'a[ft],th', '^ф[аиеэ]?б?[еэи]?$', 'берлик', 'fab', 'фа[беьв]+е?[рлко]']); -- All indices tests -SELECT [1, 2] = arraySort(multiMatchAllIndices(materialize('gogleuedeyandexgoogle'), ['.*goo.*', '.*yan.*'])) from system.numbers limit 5; -SELECT [1, 3] = arraySort(multiMatchAllIndices(materialize('gogleuedeyandexgoogle'), ['.*goo.*', 'neverexisted', '.*yan.*'])) from system.numbers limit 5; -SELECT [] = multiMatchAllIndices(materialize('gogleuedeyandexgoogle'), ['neverexisted', 'anotherone', 
'andanotherone']) from system.numbers limit 5; +SELECT [1, 2] = arraySort(multiMatchAllIndices(materialize('gogleuedeuniangoogle'), ['.*goo.*', '.*yan.*'])) from system.numbers limit 5; +SELECT [1, 3] = arraySort(multiMatchAllIndices(materialize('gogleuedeuniangoogle'), ['.*goo.*', 'neverexisted', '.*yan.*'])) from system.numbers limit 5; +SELECT [] = multiMatchAllIndices(materialize('gogleuedeuniangoogle'), ['neverexisted', 'anotherone', 'andanotherone']) from system.numbers limit 5; SELECT [1, 2, 3, 11] = arraySort(multiMatchAllIndices('фабрикант', ['', 'рикан', 'а', 'f[ae]b[ei]rl', 'ф[иаэе]б[еэи][рпл]', 'афиукд', 'a[ft],th', '^ф[аиеэ]?б?[еэи]?$', 'берлик', 'fab', 'фа[беьв]+е?[рлко]'])); SELECT [1] = multiMatchAllIndices(materialize('/odezhda-dlya-bega/'), ['/odezhda-dlya-bega/', 'kurtki-i-vetrovki-dlya-bega', 'futbolki-i-mayki-dlya-bega']); SELECT [] = multiMatchAllIndices(materialize('aaaa'), ['.*aa.*aaa.*', 'aaaaaa{2}', '\(aa\){3}']); diff --git a/tests/queries/0_stateless/00951_ngram_search.reference b/tests/queries/0_stateless/00951_ngram_search.reference index a98f63a198a..ece03fc649e 100644 --- a/tests/queries/0_stateless/00951_ngram_search.reference +++ b/tests/queries/0_stateless/00951_ngram_search.reference @@ -113,113 +113,113 @@ 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrica.yandex.com/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 +http://top.bigmir.net/ 0 привет 308 -привет братан как дела - Яндекс.Видео 923 -пап привет как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 923 +пап привет как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrica.yandex.com/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 +http://top.bigmir.net/ 0 привет 308 -привет братан как дела - Яндекс.Видео 769 -привет как дела клип - Яндекс.Видео 769 +привет братан как дела - TUT.BY 769 +привет как дела клип - TUT.BY 769 привет как дела?... 
Херсон 769 -пап привет как дела - Яндекс.Видео 846 +пап привет как дела - TUT.BY 846 0 -пап привет как дела - Яндекс.Видео 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 +http://top.bigmir.net/ 200 http://autometric.ru/ 600 http://metric.ru/ 600 -http://metrica.yandex.com/ 600 http://metris.ru/ 600 http://metrika.ru/ 1000 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://metrika.ru/ 600 http://metris.ru/ 600 http://autometric.ru/ 800 http://metric.ru/ 800 -http://metrica.yandex.com/ 1000 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://autometric.ru/ 600 http://metric.ru/ 600 -http://metrica.yandex.com/ 600 http://metris.ru/ 600 http://metrika.ru/ 800 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://metrika.ru/ 600 http://metris.ru/ 600 http://autometric.ru/ 800 http://metric.ru/ 800 -http://metrica.yandex.com/ 800 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrika.ru/ 0 http://metris.ru/ 0 -пап привет как дела - Яндекс.Видео 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 -http://metrica.yandex.com/ 1000 +http://metrika.ru/ 250 +http://top.bigmir.net/ 1000 1000 1000 1000 @@ -335,135 +335,135 @@ http://metrica.yandex.com/ 1000 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 1000 http://autometric.ru/ 1000 http://metric.ru/ 1000 -http://metrica.yandex.com/ 1000 http://metrika.ru/ 1000 http://metris.ru/ 1000 -пап привет как дела - Яндекс.Видео 1000 +http://top.bigmir.net/ 1000 +пап привет как дела - TUT.BY 1000 привет 1000 -привет братан как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... 
Херсон 1000 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrica.yandex.com/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 +http://top.bigmir.net/ 0 привет 308 -привет братан как дела - Яндекс.Видео 923 -пап привет как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 923 +пап привет как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrica.yandex.com/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 +http://top.bigmir.net/ 0 привет 308 -привет братан как дела - Яндекс.Видео 769 -привет как дела клип - Яндекс.Видео 769 +привет братан как дела - TUT.BY 769 +привет как дела клип - TUT.BY 769 привет как дела?... Херсон 769 -пап привет как дела - Яндекс.Видео 846 +пап привет как дела - TUT.BY 846 0 -пап привет как дела - Яндекс.Видео 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 +http://top.bigmir.net/ 200 http://autometric.ru/ 600 http://metric.ru/ 600 -http://metrica.yandex.com/ 600 http://metris.ru/ 600 http://metrika.ru/ 1000 0 -пап привет как дела - Яндекс.Видео 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 +http://top.bigmir.net/ 200 http://autometric.ru/ 600 http://metric.ru/ 600 -http://metrica.yandex.com/ 600 http://metris.ru/ 600 http://metrika.ru/ 1000 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://metrika.ru/ 600 http://metris.ru/ 600 http://autometric.ru/ 800 http://metric.ru/ 800 -http://metrica.yandex.com/ 1000 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://autometric.ru/ 600 http://metric.ru/ 600 -http://metrica.yandex.com/ 600 http://metris.ru/ 600 http://metrika.ru/ 800 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://metrika.ru/ 600 http://metris.ru/ 600 http://autometric.ru/ 800 http://metric.ru/ 800 -http://metrica.yandex.com/ 800 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrika.ru/ 0 http://metris.ru/ 0 -пап привет как дела - Яндекс.Видео 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 -http://metrica.yandex.com/ 1000 +http://metrika.ru/ 250 +http://top.bigmir.net/ 1000 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrica.yandex.com/ 0 -http://metrika.ru/ 0 http://metris.ru/ 0 -привет 121 -привет как дела?... 
Херсон 394 -привет братан как дела - Яндекс.Видео 788 -пап привет как дела - Яндекс.Видео 818 -привет как дела клип - Яндекс.Видео 1000 +http://metrika.ru/ 32 +привет 129 +http://top.bigmir.net/ 258 +привет как дела?... Херсон 419 +привет братан как дела - TUT.BY 452 +пап привет как дела - TUT.BY 484 +привет как дела клип - TUT.BY 677 1000 1000 1000 @@ -579,80 +579,80 @@ http://metris.ru/ 0 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrica.yandex.com/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 +http://top.bigmir.net/ 0 привет 360 -привет братан как дела - Яндекс.Видео 960 -пап привет как дела - Яндекс.Видео 1000 -привет как дела клип - Яндекс.Видео 1000 +привет братан как дела - TUT.BY 960 +пап привет как дела - TUT.BY 1000 +привет как дела клип - TUT.BY 1000 привет как дела?... Херсон 1000 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrica.yandex.com/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 +http://top.bigmir.net/ 0 привет 360 -привет братан как дела - Яндекс.Видео 880 -привет как дела клип - Яндекс.Видео 880 +привет братан как дела - TUT.BY 880 +привет как дела клип - TUT.BY 880 привет как дела?... Херсон 880 -пап привет как дела - Яндекс.Видео 920 +пап привет как дела - TUT.BY 920 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://autometric.ru/ 500 http://metric.ru/ 500 -http://metrica.yandex.com/ 500 http://metris.ru/ 500 http://metrika.ru/ 1000 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://metrika.ru/ 500 http://metris.ru/ 500 http://autometric.ru/ 750 http://metric.ru/ 750 -http://metrica.yandex.com/ 1000 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://autometric.ru/ 500 http://metric.ru/ 500 -http://metrica.yandex.com/ 500 http://metris.ru/ 500 http://metrika.ru/ 750 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://metrika.ru/ 500 http://metris.ru/ 500 http://autometric.ru/ 750 http://metric.ru/ 750 -http://metrica.yandex.com/ 750 0 http://autometric.ru/ 0 http://metric.ru/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 -пап привет как дела - Яндекс.Видео 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... 
Херсон 0 -http://metrica.yandex.com/ 1000 +http://top.bigmir.net/ 1000 1000 1000 1000 @@ -768,88 +768,88 @@ http://metrica.yandex.com/ 1000 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrica.yandex.com/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 +http://top.bigmir.net/ 0 привет 0 -привет братан как дела - Яндекс.Видео 80 -пап привет как дела - Яндекс.Видео 120 -привет как дела клип - Яндекс.Видео 120 +привет братан как дела - TUT.BY 80 +пап привет как дела - TUT.BY 120 +привет как дела клип - TUT.BY 120 привет как дела?... Херсон 120 0 http://autometric.ru/ 0 http://metric.ru/ 0 -http://metrica.yandex.com/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 +http://top.bigmir.net/ 0 привет 0 -пап привет как дела - Яндекс.Видео 440 -привет братан как дела - Яндекс.Видео 440 -привет как дела клип - Яндекс.Видео 440 +пап привет как дела - TUT.BY 440 +привет братан как дела - TUT.BY 440 +привет как дела клип - TUT.BY 440 привет как дела?... Херсон 440 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://autometric.ru/ 500 http://metric.ru/ 500 -http://metrica.yandex.com/ 500 http://metris.ru/ 500 http://metrika.ru/ 1000 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://autometric.ru/ 500 http://metric.ru/ 500 -http://metrica.yandex.com/ 500 http://metris.ru/ 500 http://metrika.ru/ 1000 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://metrika.ru/ 500 http://metris.ru/ 500 http://autometric.ru/ 750 http://metric.ru/ 750 -http://metrica.yandex.com/ 1000 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://autometric.ru/ 500 http://metric.ru/ 500 -http://metrica.yandex.com/ 500 http://metris.ru/ 500 http://metrika.ru/ 750 0 -пап привет как дела - Яндекс.Видео 0 +http://top.bigmir.net/ 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... Херсон 0 http://metrika.ru/ 500 http://metris.ru/ 500 http://autometric.ru/ 750 http://metric.ru/ 750 -http://metrica.yandex.com/ 750 0 http://autometric.ru/ 0 http://metric.ru/ 0 http://metrika.ru/ 0 http://metris.ru/ 0 -пап привет как дела - Яндекс.Видео 0 +пап привет как дела - TUT.BY 0 привет 0 -привет братан как дела - Яндекс.Видео 0 -привет как дела клип - Яндекс.Видео 0 +привет братан как дела - TUT.BY 0 +привет как дела клип - TUT.BY 0 привет как дела?... 
Херсон 0 -http://metrica.yandex.com/ 1000 +http://top.bigmir.net/ 1000 diff --git a/tests/queries/0_stateless/00951_ngram_search.sql b/tests/queries/0_stateless/00951_ngram_search.sql index f1a37605ebc..77525d86013 100644 --- a/tests/queries/0_stateless/00951_ngram_search.sql +++ b/tests/queries/0_stateless/00951_ngram_search.sql @@ -32,7 +32,7 @@ select round(1000 * ngramSearchUTF8('абвгдеёжз', 'ёёёёёёёё')); drop table if exists test_entry_distance; create table test_entry_distance (Title String) engine = Memory; -insert into test_entry_distance values ('привет как дела?... Херсон'), ('привет как дела клип - Яндекс.Видео'), ('привет'), ('пап привет как дела - Яндекс.Видео'), ('привет братан как дела - Яндекс.Видео'), ('http://metric.ru/'), ('http://autometric.ru/'), ('http://metrica.yandex.com/'), ('http://metris.ru/'), ('http://metrika.ru/'), (''); +insert into test_entry_distance values ('привет как дела?... Херсон'), ('привет как дела клип - TUT.BY'), ('привет'), ('пап привет как дела - TUT.BY'), ('привет братан как дела - TUT.BY'), ('http://metric.ru/'), ('http://autometric.ru/'), ('http://top.bigmir.net/'), ('http://metris.ru/'), ('http://metrika.ru/'), (''); SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, Title) as distance, Title; SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, extract(Title, 'как дела')) as distance, Title; @@ -44,7 +44,7 @@ SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSear SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'metrica') as distance, Title; SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'metriks') as distance, Title; SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'metrics') as distance, Title; -SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'yandex') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'bigmir') as distance, Title; select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize(''), '')) from system.numbers limit 5; @@ -91,8 +91,8 @@ SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSear SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'mEtrica') as distance, Title; SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'metriKS') as distance, Title; SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'metrics') as distance, Title; -SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'YanDEX') as distance, Title; -SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'приВЕТ КАк ДеЛа КлИп - яндеКс.видео') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'BigMIR') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'приВЕТ КАк ДеЛа КлИп - bigMir.Net') as distance, Title; select round(1000 * ngramSearch(materialize(''), '')) from system.numbers limit 5; @@ -134,7 +134,7 @@ SELECT Title, round(1000 * distance) FROM 
test_entry_distance ORDER BY ngramSear SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'metrica') as distance, Title; SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'metriks') as distance, Title; SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'metrics') as distance, Title; -SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'yandex') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'bigmir') as distance, Title; select round(1000 * ngramSearchCaseInsensitive(materialize(''), '')) from system.numbers limit 5; select round(1000 * ngramSearchCaseInsensitive(materialize('abc'), '')) from system.numbers limit 5; @@ -175,6 +175,6 @@ SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSear SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'mEtrica') as distance, Title; SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'metriKS') as distance, Title; SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'metrics') as distance, Title; -SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'YanDEX') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'BigMIR') as distance, Title; drop table if exists test_entry_distance; diff --git a/tests/queries/0_stateless/00960_live_view_watch_events_live.py b/tests/queries/0_stateless/00960_live_view_watch_events_live.py index 9327bc59f80..46c561516ba 100755 --- a/tests/queries/0_stateless/00960_live_view_watch_events_live.py +++ b/tests/queries/0_stateless/00960_live_view_watch_events_live.py @@ -6,45 +6,47 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') + client2.send("SET allow_experimental_live_view = 1") client2.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send(' DROP TABLE IF EXISTS test.mt') + client1.send(" DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.send("CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()") client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.send("CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt") client1.expect(prompt) - client1.send('WATCH test.lv EVENTS') - client1.expect('version') - client1.expect('1.*' + end_of_block) - client2.send('INSERT 
INTO test.mt VALUES (1),(2),(3)') - client1.expect('2.*' + end_of_block) - client2.send('INSERT INTO test.mt VALUES (4),(5),(6)') - client1.expect('3.*' + end_of_block) + client1.send("WATCH test.lv EVENTS") + client1.expect("version") + client1.expect("1.*" + end_of_block) + client2.send("INSERT INTO test.mt VALUES (1),(2),(3)") + client1.expect("2.*" + end_of_block) + client2.send("INSERT INTO test.mt VALUES (4),(5),(6)") + client1.expect("3.*" + end_of_block) # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE test.lv') + client1.send("DROP TABLE test.lv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00962_live_view_periodic_refresh.py b/tests/queries/0_stateless/00962_live_view_periodic_refresh.py index 5dd357a314a..ac399d3c4c8 100755 --- a/tests/queries/0_stateless/00962_live_view_periodic_refresh.py +++ b/tests/queries/0_stateless/00962_live_view_periodic_refresh.py @@ -6,38 +6,41 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') + client2.send("SET allow_experimental_live_view = 1") client2.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send("CREATE LIVE VIEW test.lv WITH REFRESH 1" - " AS SELECT value FROM system.events WHERE event = 'OSCPUVirtualTimeMicroseconds'") + client1.send( + "CREATE LIVE VIEW test.lv WITH REFRESH 1" + " AS SELECT value FROM system.events WHERE event = 'OSCPUVirtualTimeMicroseconds'" + ) client1.expect(prompt) - client1.send('WATCH test.lv FORMAT JSONEachRow') + client1.send("WATCH test.lv FORMAT JSONEachRow") client1.expect(r'"_version":' + end_of_block) client1.expect(r'"_version":' + end_of_block) client1.expect(r'"_version":' + end_of_block) # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE test.lv') + client1.send("DROP TABLE test.lv") client1.expect(prompt) - diff --git a/tests/queries/0_stateless/00962_live_view_periodic_refresh_and_timeout.py b/tests/queries/0_stateless/00962_live_view_periodic_refresh_and_timeout.py index 95b5530436d..3bc649e92dc 100755 --- a/tests/queries/0_stateless/00962_live_view_periodic_refresh_and_timeout.py +++ b/tests/queries/0_stateless/00962_live_view_periodic_refresh_and_timeout.py @@ -7,48 +7,52 @@ import time import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) 
-sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') + client2.send("SET allow_experimental_live_view = 1") client2.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send("CREATE LIVE VIEW test.lv WITH TIMEOUT 60 AND REFRESH 1" - " AS SELECT value FROM system.events WHERE event = 'OSCPUVirtualTimeMicroseconds'") + client1.send( + "CREATE LIVE VIEW test.lv WITH TIMEOUT 60 AND REFRESH 1" + " AS SELECT value FROM system.events WHERE event = 'OSCPUVirtualTimeMicroseconds'" + ) client1.expect(prompt) - client1.send('WATCH test.lv FORMAT JSONEachRow') + client1.send("WATCH test.lv FORMAT JSONEachRow") client1.expect(r'"_version":' + end_of_block) client1.expect(r'"_version":' + end_of_block) client1.expect(r'"_version":' + end_of_block) # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) # poll until live view table is dropped start_time = time.time() while True: - client1.send('SELECT * FROM test.lv FORMAT JSONEachRow') + client1.send("SELECT * FROM test.lv FORMAT JSONEachRow") client1.expect(prompt) - if 'Table test.lv doesn\'t exist' in client1.before: + if "Table test.lv doesn't exist" in client1.before: break if time.time() - start_time > 90: break # check table is dropped - client1.send('DROP TABLE test.lv') - client1.expect('Table test.lv doesn\'t exist') + client1.send("DROP TABLE test.lv") + client1.expect("Table test.lv doesn't exist") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00962_live_view_periodic_refresh_dictionary.py b/tests/queries/0_stateless/00962_live_view_periodic_refresh_dictionary.py index 57cb57d03c5..9d2a26c83c0 100755 --- a/tests/queries/0_stateless/00962_live_view_periodic_refresh_dictionary.py +++ b/tests/queries/0_stateless/00962_live_view_periodic_refresh_dictionary.py @@ -6,65 +6,68 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') + client2.send("SET allow_experimental_live_view = 1") client2.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + 
client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.mt') + client1.send("DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('DROP DICTIONARY IF EXITS test.dict') + client1.send("DROP DICTIONARY IF EXITS test.dict") client1.expect(prompt) - - client1.send("CREATE TABLE test.mt (a Int32, b Int32) Engine=MergeTree order by tuple()") + + client1.send( + "CREATE TABLE test.mt (a Int32, b Int32) Engine=MergeTree order by tuple()" + ) + client1.expect(prompt) + client1.send( + "CREATE DICTIONARY test.dict(a Int32, b Int32) PRIMARY KEY a LAYOUT(FLAT()) " + + "SOURCE(CLICKHOUSE(db 'test' table 'mt')) LIFETIME(1)" + ) client1.expect(prompt) - client1.send("CREATE DICTIONARY test.dict(a Int32, b Int32) PRIMARY KEY a LAYOUT(FLAT()) " + \ - "SOURCE(CLICKHOUSE(db 'test' table 'mt')) LIFETIME(1)") - client1.expect(prompt) client1.send("CREATE LIVE VIEW test.lv WITH REFRESH 1 AS SELECT * FROM test.dict") client1.expect(prompt) client2.send("INSERT INTO test.mt VALUES (1,2)") - client2.expect(prompt) + client2.expect(prompt) - client1.send('WATCH test.lv FORMAT JSONEachRow') + client1.send("WATCH test.lv FORMAT JSONEachRow") client1.expect(r'"_version":"1"') - + client2.send("INSERT INTO test.mt VALUES (2,2)") - client2.expect(prompt) + client2.expect(prompt) client1.expect(r'"_version":"2"') - + client2.send("INSERT INTO test.mt VALUES (3,2)") - client2.expect(prompt) + client2.expect(prompt) client1.expect(r'"_version":"3"') - + # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send('DROP DICTIONARY IF EXISTS test.dict') + client1.send("DROP DICTIONARY IF EXISTS test.dict") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.mt') + client1.send("DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - - - diff --git a/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py b/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py index 98d65b47d39..0358c28bf91 100755 --- a/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py +++ b/tests/queries/0_stateless/00962_temporary_live_view_watch_live.py @@ -6,45 +6,47 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') + client2.send("SET allow_experimental_live_view = 1") client2.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.mt') + client1.send("DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - 
client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.send("CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()") client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv WITH TIMEOUT AS SELECT sum(a) FROM test.mt') + client1.send("CREATE LIVE VIEW test.lv WITH TIMEOUT AS SELECT sum(a) FROM test.mt") client1.expect(prompt) - client1.send('WATCH test.lv') - client1.expect('_version') - client1.expect(r'0.*1' + end_of_block) - client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') - client1.expect(r'6.*2' + end_of_block) - client2.send('INSERT INTO test.mt VALUES (4),(5),(6)') - client1.expect(r'21.*3' + end_of_block) + client1.send("WATCH test.lv") + client1.expect("_version") + client1.expect(r"0.*1" + end_of_block) + client2.send("INSERT INTO test.mt VALUES (1),(2),(3)") + client1.expect(r"6.*2" + end_of_block) + client2.send("INSERT INTO test.mt VALUES (4),(5),(6)") + client1.expect(r"21.*3" + end_of_block) # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE test.lv') + client1.send("DROP TABLE test.lv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py b/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py index c352004078b..bafb283e487 100755 --- a/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py +++ b/tests/queries/0_stateless/00964_live_view_watch_events_heartbeat.py @@ -6,49 +6,51 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') + client2.send("SET allow_experimental_live_view = 1") client2.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.mt') + client1.send("DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('SET live_view_heartbeat_interval=1') + client1.send("SET live_view_heartbeat_interval=1") client1.expect(prompt) - client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.send("CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()") client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv WITH TIMEOUT AS SELECT sum(a) FROM test.mt') + client1.send("CREATE LIVE VIEW test.lv WITH TIMEOUT AS SELECT sum(a) FROM test.mt") client1.expect(prompt) - client1.send('WATCH test.lv EVENTS FORMAT CSV') - client1.expect('Progress: 1.00 rows.*\)') - client2.send('INSERT INTO test.mt VALUES (1)') + client1.send("WATCH test.lv EVENTS FORMAT CSV") + 
client1.expect("Progress: 1.00 rows.*\)") + client2.send("INSERT INTO test.mt VALUES (1)") client2.expect(prompt) - client1.expect('Progress: 2.00 rows.*\)') - client2.send('INSERT INTO test.mt VALUES (2),(3)') + client1.expect("Progress: 2.00 rows.*\)") + client2.send("INSERT INTO test.mt VALUES (2),(3)") client2.expect(prompt) # wait for heartbeat - client1.expect('Progress: 3.00 rows.*\)') + client1.expect("Progress: 3.00 rows.*\)") # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE test.lv') + client1.send("DROP TABLE test.lv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py b/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py index c1c14e8615b..3cb1220bb49 100755 --- a/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py +++ b/tests/queries/0_stateless/00965_live_view_watch_heartbeat.py @@ -6,47 +6,49 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') + client2.send("SET allow_experimental_live_view = 1") client2.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send(' DROP TABLE IF EXISTS test.mt') + client1.send(" DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('SET live_view_heartbeat_interval=1') + client1.send("SET live_view_heartbeat_interval=1") client1.expect(prompt) - client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.send("CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()") client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv WITH TIMEOUT AS SELECT sum(a) FROM test.mt') + client1.send("CREATE LIVE VIEW test.lv WITH TIMEOUT AS SELECT sum(a) FROM test.mt") client1.expect(prompt) - client1.send('WATCH test.lv') - client1.expect('_version') - client1.expect(r'0.*1' + end_of_block) - client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') - client1.expect(r'6.*2' + end_of_block) + client1.send("WATCH test.lv") + client1.expect("_version") + client1.expect(r"0.*1" + end_of_block) + client2.send("INSERT INTO test.mt VALUES (1),(2),(3)") + client1.expect(r"6.*2" + end_of_block) # wait for heartbeat - client1.expect('Progress: 2.00 rows.*\)') + client1.expect("Progress: 2.00 rows.*\)") # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - 
client1.send('DROP TABLE test.lv') + client1.send("DROP TABLE test.lv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00966_live_view_watch_events_http.py b/tests/queries/0_stateless/00966_live_view_watch_events_http.py index dcbae5498bf..1f2ddae23d6 100755 --- a/tests/queries/0_stateless/00966_live_view_watch_events_http.py +++ b/tests/queries/0_stateless/00966_live_view_watch_events_http.py @@ -5,39 +5,45 @@ import os import sys CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block from httpclient import client as http_client log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1: +with client(name="client1>", log=log) as client1: client1.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send(' DROP TABLE IF EXISTS test.mt') + client1.send(" DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.send("CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()") client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.send("CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt") client1.expect(prompt) - try: - with http_client({'method':'GET', 'url': '/?allow_experimental_live_view=1&query=WATCH%20test.lv%20EVENTS'}, name='client2>', log=log) as client2: - client2.expect('.*1\n') - client1.send('INSERT INTO test.mt VALUES (1),(2),(3)') + with http_client( + { + "method": "GET", + "url": "/?allow_experimental_live_view=1&query=WATCH%20test.lv%20EVENTS", + }, + name="client2>", + log=log, + ) as client2: + client2.expect(".*1\n") + client1.send("INSERT INTO test.mt VALUES (1),(2),(3)") client1.expect(prompt) - client2.expect('.*2\n') + client2.expect(".*2\n") finally: - client1.send('DROP TABLE test.lv') + client1.send("DROP TABLE test.lv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00967_live_view_watch_http.py b/tests/queries/0_stateless/00967_live_view_watch_http.py index 05d7905e8ed..92e192cc7f2 100755 --- a/tests/queries/0_stateless/00967_live_view_watch_http.py +++ b/tests/queries/0_stateless/00967_live_view_watch_http.py @@ -5,38 +5,45 @@ import os import sys CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block from httpclient import client as http_client log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1: +with client(name="client1>", log=log) as client1: client1.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - 
client1.send(' DROP TABLE IF EXISTS test.mt') + client1.send(" DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.send("CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()") client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.send("CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt") client1.expect(prompt) try: - with http_client({'method':'GET', 'url':'/?allow_experimental_live_view=1&query=WATCH%20test.lv'}, name='client2>', log=log) as client2: - client2.expect('.*0\t1\n') - client1.send('INSERT INTO test.mt VALUES (1),(2),(3)') + with http_client( + { + "method": "GET", + "url": "/?allow_experimental_live_view=1&query=WATCH%20test.lv", + }, + name="client2>", + log=log, + ) as client2: + client2.expect(".*0\t1\n") + client1.send("INSERT INTO test.mt VALUES (1),(2),(3)") client1.expect(prompt) - client2.expect('.*6\t2\n') + client2.expect(".*6\t2\n") finally: - client1.send('DROP TABLE test.lv') + client1.send("DROP TABLE test.lv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py b/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py index fbd7660bafe..8c5126bbaf3 100755 --- a/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py +++ b/tests/queries/0_stateless/00970_live_view_watch_events_http_heartbeat.py @@ -5,43 +5,59 @@ import os import sys CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block from httpclient import client as http_client log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1: +with client(name="client1>", log=log) as client1: client1.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send(' DROP TABLE IF EXISTS test.mt') + client1.send(" DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.send("CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()") client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.send("CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt") client1.expect(prompt) - with http_client({'method':'GET', 'url': '/?allow_experimental_live_view=1&live_view_heartbeat_interval=1&query=WATCH%20test.lv%20EVENTS%20FORMAT%20JSONEachRowWithProgress'}, name='client2>', log=log) as client2: - client2.expect('{"progress":{"read_rows":"1","read_bytes":"8","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}}\n', escape=True) + with http_client( + { + "method": "GET", + "url": "/?allow_experimental_live_view=1&live_view_heartbeat_interval=1&query=WATCH%20test.lv%20EVENTS%20FORMAT%20JSONEachRowWithProgress", + }, + name="client2>", + log=log, + ) as client2: + client2.expect( + 
'{"progress":{"read_rows":"1","read_bytes":"8","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}}\n', + escape=True, + ) client2.expect('{"row":{"version":"1"}', escape=True) - client2.expect('{"progress":{"read_rows":"1","read_bytes":"8","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}}', escape=True) + client2.expect( + '{"progress":{"read_rows":"1","read_bytes":"8","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}}', + escape=True, + ) # heartbeat is provided by progress message - client2.expect('{"progress":{"read_rows":"1","read_bytes":"8","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}}', escape=True) + client2.expect( + '{"progress":{"read_rows":"1","read_bytes":"8","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}}', + escape=True, + ) - client1.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client1.send("INSERT INTO test.mt VALUES (1),(2),(3)") client1.expect(prompt) client2.expect('{"row":{"version":"2"}}\n', escape=True) - client1.send('DROP TABLE test.lv') + client1.send("DROP TABLE test.lv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py b/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py index db5e1698a10..117f7b7c786 100755 --- a/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py +++ b/tests/queries/0_stateless/00971_live_view_watch_http_heartbeat.py @@ -5,44 +5,53 @@ import os import sys CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block from httpclient import client as http_client log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1: +with client(name="client1>", log=log) as client1: client1.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send(' DROP TABLE IF EXISTS test.mt') + client1.send(" DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.send("CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()") client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.send("CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt") client1.expect(prompt) - with http_client({'method':'GET', 'url':'/?allow_experimental_live_view=1&live_view_heartbeat_interval=1&query=WATCH%20test.lv%20FORMAT%20JSONEachRowWithProgress'}, name='client2>', log=log) as client2: - client2.expect('"progress".*',) + with http_client( + { + "method": "GET", + "url": "/?allow_experimental_live_view=1&live_view_heartbeat_interval=1&query=WATCH%20test.lv%20FORMAT%20JSONEachRowWithProgress", + }, + name="client2>", + log=log, + ) as client2: + client2.expect( + '"progress".*', + ) client2.expect('{"row":{"sum(a)":"0","_version":"1"}}\n', escape=True) client2.expect('"progress".*\n') # heartbeat is provided by progress message client2.expect('"progress".*\n') - client1.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client1.send("INSERT INTO 
test.mt VALUES (1),(2),(3)") client1.expect(prompt) client2.expect('"progress".*"read_rows":"2".*\n') client2.expect('{"row":{"sum(a)":"6","_version":"2"}}\n', escape=True) - client1.send('DROP TABLE test.lv') + client1.send("DROP TABLE test.lv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00979_live_view_watch_continuous_aggregates.py b/tests/queries/0_stateless/00979_live_view_watch_continuous_aggregates.py index 81e2764d64f..ef144d044c2 100755 --- a/tests/queries/0_stateless/00979_live_view_watch_continuous_aggregates.py +++ b/tests/queries/0_stateless/00979_live_view_watch_continuous_aggregates.py @@ -6,63 +6,79 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') + client2.send("SET allow_experimental_live_view = 1") client2.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.mt') + client1.send("DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('CREATE TABLE test.mt (time DateTime, location String, temperature UInt32) Engine=MergeTree order by tuple()') + client1.send( + "CREATE TABLE test.mt (time DateTime, location String, temperature UInt32) Engine=MergeTree order by tuple()" + ) client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv AS SELECT toStartOfDay(time) AS day, location, avg(temperature) FROM test.mt GROUP BY day, location ORDER BY day, location') + client1.send( + "CREATE LIVE VIEW test.lv AS SELECT toStartOfDay(time) AS day, location, avg(temperature) FROM test.mt GROUP BY day, location ORDER BY day, location" + ) client1.expect(prompt) - client1.send('WATCH test.lv FORMAT CSVWithNames') - client2.send("INSERT INTO test.mt VALUES ('2019-01-01 00:00:00','New York',60),('2019-01-01 00:10:00','New York',70)") + client1.send("WATCH test.lv FORMAT CSVWithNames") + client2.send( + "INSERT INTO test.mt VALUES ('2019-01-01 00:00:00','New York',60),('2019-01-01 00:10:00','New York',70)" + ) client2.expect(prompt) client1.expect(r'"2019-01-01 00:00:00","New York",65') - client2.send("INSERT INTO test.mt VALUES ('2019-01-01 00:00:00','Moscow',30),('2019-01-01 00:10:00', 'Moscow', 40)") + client2.send( + "INSERT INTO test.mt VALUES ('2019-01-01 00:00:00','Moscow',30),('2019-01-01 00:10:00', 'Moscow', 40)" + ) client2.expect(prompt) client1.expect(r'"2019-01-01 00:00:00","Moscow",35') client1.expect(r'"2019-01-01 00:00:00","New York",65') - client2.send("INSERT INTO test.mt VALUES ('2019-01-02 00:00:00','New York',50),('2019-01-02 00:10:00','New York',60)") + client2.send( + "INSERT INTO test.mt VALUES ('2019-01-02 00:00:00','New York',50),('2019-01-02 00:10:00','New York',60)" + ) client2.expect(prompt) client1.expect(r'"2019-01-01 
00:00:00","Moscow",35') client1.expect(r'"2019-01-01 00:00:00","New York",65') client1.expect(r'"2019-01-02 00:00:00","New York",55') - client2.send("INSERT INTO test.mt VALUES ('2019-01-02 00:00:00','Moscow',20),('2019-01-02 00:10:00', 'Moscow', 30)") + client2.send( + "INSERT INTO test.mt VALUES ('2019-01-02 00:00:00','Moscow',20),('2019-01-02 00:10:00', 'Moscow', 30)" + ) client2.expect(prompt) client1.expect(r'"2019-01-01 00:00:00","Moscow",35') client1.expect(r'"2019-01-01 00:00:00","New York",65') client1.expect(r'"2019-01-02 00:00:00","Moscow",25') client1.expect(r'"2019-01-02 00:00:00","New York",55') - client2.send("INSERT INTO test.mt VALUES ('2019-01-02 00:03:00','New York',40),('2019-01-02 00:06:00','New York',30)") + client2.send( + "INSERT INTO test.mt VALUES ('2019-01-02 00:03:00','New York',40),('2019-01-02 00:06:00','New York',30)" + ) client2.expect(prompt) client1.expect(r'"2019-01-01 00:00:00","Moscow",35') client1.expect(r'"2019-01-01 00:00:00","New York",65') client1.expect(r'"2019-01-02 00:00:00","Moscow",25') client1.expect(r'"2019-01-02 00:00:00","New York",45') # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE test.lv') + client1.send("DROP TABLE test.lv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00979_live_view_watch_live.py b/tests/queries/0_stateless/00979_live_view_watch_live.py index 7bbae932da7..b099b56ae48 100755 --- a/tests/queries/0_stateless/00979_live_view_watch_live.py +++ b/tests/queries/0_stateless/00979_live_view_watch_live.py @@ -6,51 +6,53 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') + client2.send("SET allow_experimental_live_view = 1") client2.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.mt') + client1.send("DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.send("CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()") client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt') + client1.send("CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt") client1.expect(prompt) - client1.send('WATCH test.lv') - client1.expect('_version') - client1.expect(r'0.*1' + end_of_block) - client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') - client1.expect(r'6.*2' + end_of_block) + client1.send("WATCH test.lv") + client1.expect("_version") + 
client1.expect(r"0.*1" + end_of_block) + client2.send("INSERT INTO test.mt VALUES (1),(2),(3)") + client1.expect(r"6.*2" + end_of_block) client2.expect(prompt) - client2.send('INSERT INTO test.mt VALUES (4),(5),(6)') - client1.expect(r'21.*3' + end_of_block) + client2.send("INSERT INTO test.mt VALUES (4),(5),(6)") + client1.expect(r"21.*3" + end_of_block) client2.expect(prompt) for i in range(1, 129): - client2.send('INSERT INTO test.mt VALUES (1)') - client1.expect(r'%d.*%d' % (21 + i, 3 + i) + end_of_block) - client2.expect(prompt) + client2.send("INSERT INTO test.mt VALUES (1)") + client1.expect(r"%d.*%d" % (21 + i, 3 + i) + end_of_block) + client2.expect(prompt) # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE test.lv') + client1.send("DROP TABLE test.lv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py b/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py index ed2fe61f4b7..a7c1adac214 100755 --- a/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py +++ b/tests/queries/0_stateless/00979_live_view_watch_live_with_subquery.py @@ -6,51 +6,55 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client2.send('SET allow_experimental_live_view = 1') + client2.send("SET allow_experimental_live_view = 1") client2.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send(' DROP TABLE IF EXISTS test.mt') + client1.send(" DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.send("CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()") client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv AS SELECT * FROM ( SELECT sum(A.a) FROM (SELECT * FROM test.mt) AS A )') + client1.send( + "CREATE LIVE VIEW test.lv AS SELECT * FROM ( SELECT sum(A.a) FROM (SELECT * FROM test.mt) AS A )" + ) client1.expect(prompt) - client1.send('WATCH test.lv') - client1.expect('_version') - client1.expect(r'0.*1' + end_of_block) - client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') - client1.expect(r'6.*2' + end_of_block) + client1.send("WATCH test.lv") + client1.expect("_version") + client1.expect(r"0.*1" + end_of_block) + client2.send("INSERT INTO test.mt VALUES (1),(2),(3)") + client1.expect(r"6.*2" + end_of_block) client2.expect(prompt) - client2.send('INSERT INTO test.mt VALUES (4),(5),(6)') - client1.expect(r'21.*3' + end_of_block) + client2.send("INSERT INTO test.mt 
VALUES (4),(5),(6)") + client1.expect(r"21.*3" + end_of_block) client2.expect(prompt) - for i in range(1,129): - client2.send('INSERT INTO test.mt VALUES (1)') - client1.expect(r'%d.*%d' % (21+i, 3+i) + end_of_block) - client2.expect(prompt) + for i in range(1, 129): + client2.send("INSERT INTO test.mt VALUES (1)") + client1.expect(r"%d.*%d" % (21 + i, 3 + i) + end_of_block) + client2.expect(prompt) # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) - client1.expect(prompt) - client1.send('DROP TABLE test.lv') + client1.expect(prompt) + client1.send("DROP TABLE test.lv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql b/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql index 79fabeae7ef..3da52f2cb96 100644 --- a/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql +++ b/tests/queries/0_stateless/00979_yandex_consistent_hash_fpe.sql @@ -1 +1 @@ -SELECT yandexConsistentHash(-1, 40000); -- { serverError 36 } +SELECT kostikConsistentHash(-1, 40000); -- { serverError 36 } diff --git a/tests/queries/0_stateless/00998_constraints_all_tables.reference b/tests/queries/0_stateless/00998_constraints_all_tables.reference index 0ec8b004ae4..90a2380df1e 100644 --- a/tests/queries/0_stateless/00998_constraints_all_tables.reference +++ b/tests/queries/0_stateless/00998_constraints_all_tables.reference @@ -10,5 +10,5 @@ 0 0 3 -CREATE TABLE default.constrained\n(\n `URL` String,\n CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = \'yandex.ru\',\n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log -CREATE TABLE default.constrained2\n(\n `URL` String,\n CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = \'yandex.ru\',\n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log +CREATE TABLE default.constrained\n(\n `URL` String,\n CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = \'censor.net\',\n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log +CREATE TABLE default.constrained2\n(\n `URL` String,\n CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = \'censor.net\',\n CONSTRAINT is_utf8 CHECK isValidUTF8(URL)\n)\nENGINE = Log diff --git a/tests/queries/0_stateless/00998_constraints_all_tables.sql b/tests/queries/0_stateless/00998_constraints_all_tables.sql index e47b7eaf83c..bb0d6933a01 100644 --- a/tests/queries/0_stateless/00998_constraints_all_tables.sql +++ b/tests/queries/0_stateless/00998_constraints_all_tables.sql @@ -1,53 +1,53 @@ DROP TABLE IF EXISTS constrained; -CREATE TABLE constrained (URL String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Null; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('ftp://yandex.ru/Hello'), ('https://yandex.ru/te\xFFst'); -- { serverError 469 } -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('ftp://yandex.ru/Hello'), (toValidUTF8('https://yandex.ru/te\xFFst')); +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Null; +INSERT INTO constrained VALUES 
('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), ('https://censor.net/te\xFFst'); -- { serverError 469 } +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), (toValidUTF8('https://censor.net/te\xFFst')); DROP TABLE constrained; -CREATE TABLE constrained (URL String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Memory; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Memory; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } SELECT count() FROM constrained; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('ftp://yandex.ru/Hello'), ('https://yandex.ru/te\xFFst'); -- { serverError 469 } +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), ('https://censor.net/te\xFFst'); -- { serverError 469 } SELECT count() FROM constrained; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('ftp://yandex.ru/Hello'), (toValidUTF8('https://yandex.ru/te\xFFst')); +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), (toValidUTF8('https://censor.net/te\xFFst')); SELECT count() FROM constrained; DROP TABLE constrained; -CREATE TABLE constrained (URL String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = StripeLog; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = StripeLog; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } SELECT count() FROM constrained; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('ftp://yandex.ru/Hello'), ('https://yandex.ru/te\xFFst'); -- { serverError 469 } +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), ('https://censor.net/te\xFFst'); -- { serverError 469 } SELECT count() FROM constrained; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('ftp://yandex.ru/Hello'), (toValidUTF8('https://yandex.ru/te\xFFst')); +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), (toValidUTF8('https://censor.net/te\xFFst')); SELECT count() FROM constrained; DROP TABLE constrained; -CREATE TABLE constrained (URL String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = TinyLog; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = TinyLog; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } SELECT count() FROM 
constrained; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('ftp://yandex.ru/Hello'), ('https://yandex.ru/te\xFFst'); -- { serverError 469 } +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), ('https://censor.net/te\xFFst'); -- { serverError 469 } SELECT count() FROM constrained; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('ftp://yandex.ru/Hello'), (toValidUTF8('https://yandex.ru/te\xFFst')); +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), (toValidUTF8('https://censor.net/te\xFFst')); SELECT count() FROM constrained; DROP TABLE constrained; -CREATE TABLE constrained (URL String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } SELECT count() FROM constrained; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('ftp://yandex.ru/Hello'), ('https://yandex.ru/te\xFFst'); -- { serverError 469 } +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), ('https://censor.net/te\xFFst'); -- { serverError 469 } SELECT count() FROM constrained; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('ftp://yandex.ru/Hello'), (toValidUTF8('https://yandex.ru/te\xFFst')); +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), (toValidUTF8('https://censor.net/te\xFFst')); SELECT count() FROM constrained; DROP TABLE constrained; DROP TABLE IF EXISTS constrained2; -CREATE TABLE constrained (URL String, CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log; +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log; CREATE TABLE constrained2 AS constrained; SHOW CREATE TABLE constrained; SHOW CREATE TABLE constrained2; -INSERT INTO constrained VALUES ('https://www.yandex.ru/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } -INSERT INTO constrained2 VALUES ('https://www.yandex.ru/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } +INSERT INTO constrained2 VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError 469 } DROP TABLE constrained; DROP TABLE constrained2; diff --git a/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql b/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql index 6d1c7fd5ef6..86c84427297 100644 --- a/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql +++ b/tests/queries/0_stateless/01034_prewhere_max_parallel_replicas_distributed.sql @@ -5,7 +5,7 @@ set allow_experimental_parallel_reading_from_replicas=0; drop table if exists test_max_parallel_replicas_lr; -- If you wonder why the table is named with "_lr" suffix in this test. --- No reason. 
Actually it is the name of the table in Yandex.Market and they provided this test case for us. +-- No reason. Actually it is the name of the table at one of our customers, who provided this test case for us. CREATE TABLE test_max_parallel_replicas_lr (timestamp UInt64) ENGINE = MergeTree ORDER BY (intHash32(timestamp)) SAMPLE BY intHash32(timestamp); INSERT INTO test_max_parallel_replicas_lr select number as timestamp from system.numbers limit 100; diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh index fae263b076f..c2a35a3ef63 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh @@ -34,7 +34,7 @@ CREATE TABLE test_01037.polygons_array ENGINE = Memory; " -$CLICKHOUSE_CLIENT --query="INSERT INTO test_01037.polygons_array FORMAT JSONEachRow" --max_insert_block_size=100000 < "${CURDIR}/01037_polygon_data" +$CLICKHOUSE_CLIENT --query="INSERT INTO test_01037.polygons_array FORMAT JSONEachRow" --min_chunk_bytes_for_parallel_parsing=10485760 --max_insert_block_size=100000 < "${CURDIR}/01037_polygon_data" rm "${CURDIR}"/01037_polygon_data diff --git a/tests/queries/0_stateless/01056_window_view_proc_hop_watch.py b/tests/queries/0_stateless/01056_window_view_proc_hop_watch.py index 02e97ee7a17..be139c153aa 100755 --- a/tests/queries/0_stateless/01056_window_view_proc_hop_watch.py +++ b/tests/queries/0_stateless/01056_window_view_proc_hop_watch.py @@ -4,7 +4,7 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block @@ -12,46 +12,54 @@ log = None # uncomment the line below for debugging # log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_window_view = 1') + client1.send("SET allow_experimental_window_view = 1") client1.expect(prompt) - client1.send('SET window_view_heartbeat_interval = 1') + client1.send("SET window_view_heartbeat_interval = 1") client1.expect(prompt) - client2.send('SET allow_experimental_window_view = 1') + client2.send("SET allow_experimental_window_view = 1") client2.expect(prompt) - client1.send('CREATE DATABASE 01056_window_view_proc_hop_watch') + client1.send("CREATE DATABASE 01056_window_view_proc_hop_watch") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS 01056_window_view_proc_hop_watch.mt') + client1.send("DROP TABLE IF EXISTS 01056_window_view_proc_hop_watch.mt") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS 01056_window_view_proc_hop_watch.wv') + client1.send("DROP TABLE IF EXISTS 01056_window_view_proc_hop_watch.wv") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS `.inner.wv`') + client1.send("DROP TABLE IF EXISTS `.inner.wv`") client1.expect(prompt) - client1.send('CREATE TABLE 01056_window_view_proc_hop_watch.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()') + client1.send( + "CREATE TABLE 01056_window_view_proc_hop_watch.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()" + ) client1.expect(prompt) - client1.send("CREATE WINDOW VIEW 01056_window_view_proc_hop_watch.wv AS SELECT count(a) AS count 
FROM 01056_window_view_proc_hop_watch.mt GROUP BY hop(timestamp, INTERVAL '1' SECOND, INTERVAL '1' SECOND, 'US/Samoa') AS wid;") + client1.send( + "CREATE WINDOW VIEW 01056_window_view_proc_hop_watch.wv AS SELECT count(a) AS count FROM 01056_window_view_proc_hop_watch.mt GROUP BY hop(timestamp, INTERVAL '1' SECOND, INTERVAL '1' SECOND, 'US/Samoa') AS wid;" + ) client1.expect(prompt) - - client1.send('WATCH 01056_window_view_proc_hop_watch.wv') - client1.expect('Query id' + end_of_block) - client2.send("INSERT INTO 01056_window_view_proc_hop_watch.mt VALUES (1, now('US/Samoa') + 1)") - client1.expect('1' + end_of_block) - client1.expect('Progress: 1.00 rows.*\)') + + client1.send("WATCH 01056_window_view_proc_hop_watch.wv") + client1.expect("Query id" + end_of_block) + client2.send( + "INSERT INTO 01056_window_view_proc_hop_watch.mt VALUES (1, now('US/Samoa') + 1)" + ) + client1.expect("1" + end_of_block) + client1.expect("Progress: 1.00 rows.*\)") # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE 01056_window_view_proc_hop_watch.wv') + client1.send("DROP TABLE 01056_window_view_proc_hop_watch.wv") client1.expect(prompt) - client1.send('DROP TABLE 01056_window_view_proc_hop_watch.mt') + client1.send("DROP TABLE 01056_window_view_proc_hop_watch.mt") client1.expect(prompt) - client1.send('DROP DATABASE IF EXISTS 01056_window_view_proc_hop_watch') + client1.send("DROP DATABASE IF EXISTS 01056_window_view_proc_hop_watch") client1.expect(prompt) diff --git a/tests/queries/0_stateless/01059_storage_file_compression.sh b/tests/queries/0_stateless/01059_storage_file_compression.sh index fbee5070d8d..9d928986638 100755 --- a/tests/queries/0_stateless/01059_storage_file_compression.sh +++ b/tests/queries/0_stateless/01059_storage_file_compression.sh @@ -16,9 +16,10 @@ do ${CLICKHOUSE_CLIENT} --query "DROP TABLE file" done -${CLICKHOUSE_CLIENT} --query "SELECT count(), max(x) FROM file('${CLICKHOUSE_DATABASE}/{gz,br,xz,zst,lz4,bz2}.tsv.{gz,br,xz,zst,lz4,bz2}', TSV, 'x UInt64')" +${CLICKHOUSE_CLIENT} --max_read_buffer_size=1048576 --query "SELECT count(), max(x) FROM file('${CLICKHOUSE_DATABASE}/{gz,br,xz,zst,lz4,bz2}.tsv.{gz,br,xz,zst,lz4,bz2}', TSV, 'x UInt64')" for m in gz br xz zst lz4 bz2 do - ${CLICKHOUSE_CLIENT} --query "SELECT count() < 4000000, max(x) FROM file('${CLICKHOUSE_DATABASE}/${m}.tsv.${m}', RowBinary, 'x UInt8', 'none')" + ${CLICKHOUSE_CLIENT} --max_read_buffer_size=1048576 --query "SELECT count() < 4000000, max(x) FROM file('${CLICKHOUSE_DATABASE}/${m}.tsv.${m}', RowBinary, 'x UInt8', 'none')" done + diff --git a/tests/queries/0_stateless/01059_window_view_event_hop_watch_strict_asc.py b/tests/queries/0_stateless/01059_window_view_event_hop_watch_strict_asc.py index 638182ac216..f5024cb11ab 100755 --- a/tests/queries/0_stateless/01059_window_view_event_hop_watch_strict_asc.py +++ b/tests/queries/0_stateless/01059_window_view_event_hop_watch_strict_asc.py @@ -4,59 +4,71 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, 
client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_window_view = 1') + client1.send("SET allow_experimental_window_view = 1") client1.expect(prompt) - client1.send('SET window_view_heartbeat_interval = 1') + client1.send("SET window_view_heartbeat_interval = 1") client1.expect(prompt) - client1.send('CREATE DATABASE db_01059_event_hop_watch_strict_asc') + client1.send("CREATE DATABASE db_01059_event_hop_watch_strict_asc") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS db_01059_event_hop_watch_strict_asc.mt') + client1.send("DROP TABLE IF EXISTS db_01059_event_hop_watch_strict_asc.mt") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS db_01059_event_hop_watch_strict_asc.wv NO DELAY') + client1.send("DROP TABLE IF EXISTS db_01059_event_hop_watch_strict_asc.wv NO DELAY") client1.expect(prompt) - client1.send("CREATE TABLE db_01059_event_hop_watch_strict_asc.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()") + client1.send( + "CREATE TABLE db_01059_event_hop_watch_strict_asc.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()" + ) client1.expect(prompt) - client1.send("CREATE WINDOW VIEW db_01059_event_hop_watch_strict_asc.wv WATERMARK=STRICTLY_ASCENDING AS SELECT count(a) AS count, hopEnd(wid) as w_end FROM db_01059_event_hop_watch_strict_asc.mt GROUP BY hop(timestamp, INTERVAL '2' SECOND, INTERVAL '3' SECOND, 'US/Samoa') AS wid;") + client1.send( + "CREATE WINDOW VIEW db_01059_event_hop_watch_strict_asc.wv WATERMARK=STRICTLY_ASCENDING AS SELECT count(a) AS count, hopEnd(wid) as w_end FROM db_01059_event_hop_watch_strict_asc.mt GROUP BY hop(timestamp, INTERVAL '2' SECOND, INTERVAL '3' SECOND, 'US/Samoa') AS wid;" + ) client1.expect(prompt) - client1.send('WATCH db_01059_event_hop_watch_strict_asc.wv') - client1.expect('Query id' + end_of_block) - client2.send("INSERT INTO db_01059_event_hop_watch_strict_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));") + client1.send("WATCH db_01059_event_hop_watch_strict_asc.wv") + client1.expect("Query id" + end_of_block) + client2.send( + "INSERT INTO db_01059_event_hop_watch_strict_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:00', 'US/Samoa'));" + ) client2.expect("Ok.") - client2.send("INSERT INTO db_01059_event_hop_watch_strict_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:05', 'US/Samoa'));") + client2.send( + "INSERT INTO db_01059_event_hop_watch_strict_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:05', 'US/Samoa'));" + ) client2.expect("Ok.") - client1.expect('1*1990-01-01 12:00:02' + end_of_block) - client1.expect('Progress: 1.00 rows.*\)') + client1.expect("1*1990-01-01 12:00:02" + end_of_block) + client1.expect("Progress: 1.00 rows.*\)") - client2.send("INSERT INTO db_01059_event_hop_watch_strict_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:10', 'US/Samoa'));") + client2.send( + "INSERT INTO db_01059_event_hop_watch_strict_asc.mt VALUES (1, toDateTime('1990/01/01 12:00:10', 'US/Samoa'));" + ) client2.expect("Ok.") - client1.expect('1*1990-01-01 12:00:06' + end_of_block) - client1.expect('1*1990-01-01 12:00:08' + end_of_block) - client1.expect('Progress: 3.00 rows.*\)') + client1.expect("1*1990-01-01 12:00:06" + end_of_block) + client1.expect("1*1990-01-01 12:00:08" + end_of_block) + client1.expect("Progress: 3.00 rows.*\)") # send Ctrl-C - client1.send('\x03', eol='') - match = 
client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE db_01059_event_hop_watch_strict_asc.wv NO DELAY') + client1.send("DROP TABLE db_01059_event_hop_watch_strict_asc.wv NO DELAY") client1.expect(prompt) - client1.send('DROP TABLE db_01059_event_hop_watch_strict_asc.mt') + client1.send("DROP TABLE db_01059_event_hop_watch_strict_asc.mt") client1.expect(prompt) - client1.send('DROP DATABASE IF EXISTS db_01059_event_hop_watch_strict_asc') + client1.send("DROP DATABASE IF EXISTS db_01059_event_hop_watch_strict_asc") client1.expect(prompt) diff --git a/tests/queries/0_stateless/01062_window_view_event_hop_watch_asc.py b/tests/queries/0_stateless/01062_window_view_event_hop_watch_asc.py index 6be3e08665c..7f23e983ba2 100755 --- a/tests/queries/0_stateless/01062_window_view_event_hop_watch_asc.py +++ b/tests/queries/0_stateless/01062_window_view_event_hop_watch_asc.py @@ -4,61 +4,77 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_window_view = 1') + client1.send("SET allow_experimental_window_view = 1") client1.expect(prompt) - client1.send('SET window_view_heartbeat_interval = 1') + client1.send("SET window_view_heartbeat_interval = 1") client1.expect(prompt) - client2.send('SET allow_experimental_window_view = 1') + client2.send("SET allow_experimental_window_view = 1") client2.expect(prompt) - client1.send('CREATE DATABASE 01062_window_view_event_hop_watch_asc') + client1.send("CREATE DATABASE 01062_window_view_event_hop_watch_asc") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS 01062_window_view_event_hop_watch_asc.mt') + client1.send("DROP TABLE IF EXISTS 01062_window_view_event_hop_watch_asc.mt") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS 01062_window_view_event_hop_watch_asc.wv NO DELAY') + client1.send( + "DROP TABLE IF EXISTS 01062_window_view_event_hop_watch_asc.wv NO DELAY" + ) client1.expect(prompt) - client1.send('CREATE TABLE 01062_window_view_event_hop_watch_asc.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()') + client1.send( + "CREATE TABLE 01062_window_view_event_hop_watch_asc.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()" + ) client1.expect(prompt) - client1.send("CREATE WINDOW VIEW 01062_window_view_event_hop_watch_asc.wv WATERMARK=ASCENDING AS SELECT count(a) AS count, hopEnd(wid) AS w_end FROM 01062_window_view_event_hop_watch_asc.mt GROUP BY hop(timestamp, INTERVAL '2' SECOND, INTERVAL '3' SECOND, 'US/Samoa') AS wid") + client1.send( + "CREATE WINDOW VIEW 01062_window_view_event_hop_watch_asc.wv WATERMARK=ASCENDING AS SELECT count(a) AS count, hopEnd(wid) AS w_end FROM 01062_window_view_event_hop_watch_asc.mt GROUP BY hop(timestamp, INTERVAL '2' SECOND, INTERVAL '3' SECOND, 'US/Samoa') AS wid" + ) client1.expect(prompt) - - client1.send('WATCH 01062_window_view_event_hop_watch_asc.wv') - 
client1.expect('Query id' + end_of_block) - client2.send("INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, '1990/01/01 12:00:00');") + + client1.send("WATCH 01062_window_view_event_hop_watch_asc.wv") + client1.expect("Query id" + end_of_block) + client2.send( + "INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, '1990/01/01 12:00:00');" + ) client2.expect(prompt) - client2.send("INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, '1990/01/01 12:00:05');") + client2.send( + "INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, '1990/01/01 12:00:05');" + ) client2.expect(prompt) - client1.expect('1*' + end_of_block) - client2.send("INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, '1990/01/01 12:00:06');") + client1.expect("1*" + end_of_block) + client2.send( + "INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, '1990/01/01 12:00:06');" + ) client2.expect(prompt) - client2.send("INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, '1990/01/01 12:00:10');") + client2.send( + "INSERT INTO 01062_window_view_event_hop_watch_asc.mt VALUES (1, '1990/01/01 12:00:10');" + ) client2.expect(prompt) - client1.expect('1' + end_of_block) - client1.expect('2' + end_of_block) - client1.expect('Progress: 3.00 rows.*\)') + client1.expect("1" + end_of_block) + client1.expect("2" + end_of_block) + client1.expect("Progress: 3.00 rows.*\)") # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE 01062_window_view_event_hop_watch_asc.wv NO DELAY') + client1.send("DROP TABLE 01062_window_view_event_hop_watch_asc.wv NO DELAY") client1.expect(prompt) - client1.send('DROP TABLE 01062_window_view_event_hop_watch_asc.mt') + client1.send("DROP TABLE 01062_window_view_event_hop_watch_asc.mt") client1.expect(prompt) - client1.send('DROP DATABASE IF EXISTS 01062_window_view_event_hop_watch_asc') + client1.send("DROP DATABASE IF EXISTS 01062_window_view_event_hop_watch_asc") client1.expect(prompt) diff --git a/tests/queries/0_stateless/01065_window_view_event_hop_watch_bounded.py b/tests/queries/0_stateless/01065_window_view_event_hop_watch_bounded.py index b828c5116da..92d2b56ed34 100755 --- a/tests/queries/0_stateless/01065_window_view_event_hop_watch_bounded.py +++ b/tests/queries/0_stateless/01065_window_view_event_hop_watch_bounded.py @@ -4,7 +4,7 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block @@ -12,48 +12,54 @@ log = None # uncomment the line below for debugging # log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_window_view = 1') + client1.send("SET allow_experimental_window_view = 1") client1.expect(prompt) - client1.send('SET window_view_heartbeat_interval = 1') + client1.send("SET window_view_heartbeat_interval = 1") client1.expect(prompt) - client2.send('SET allow_experimental_window_view = 1') + client2.send("SET allow_experimental_window_view = 1") client2.expect(prompt) - 
client1.send('DROP TABLE IF EXISTS test.mt') + client1.send("DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.wv') + client1.send("DROP TABLE IF EXISTS test.wv") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS `.inner.wv`') + client1.send("DROP TABLE IF EXISTS `.inner.wv`") client1.expect(prompt) - client1.send('CREATE TABLE test.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()') + client1.send( + "CREATE TABLE test.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()" + ) client1.expect(prompt) - client1.send("CREATE WINDOW VIEW test.wv WATERMARK=INTERVAL '2' SECOND AS SELECT count(a) AS count, hopEnd(wid) AS w_end FROM test.mt GROUP BY hop(timestamp, INTERVAL '2' SECOND, INTERVAL '3' SECOND, 'US/Samoa') AS wid") + client1.send( + "CREATE WINDOW VIEW test.wv WATERMARK=INTERVAL '2' SECOND AS SELECT count(a) AS count, hopEnd(wid) AS w_end FROM test.mt GROUP BY hop(timestamp, INTERVAL '2' SECOND, INTERVAL '3' SECOND, 'US/Samoa') AS wid" + ) client1.expect(prompt) - - client1.send('WATCH test.wv') + + client1.send("WATCH test.wv") client2.send("INSERT INTO test.mt VALUES (1, '1990/01/01 12:00:00');") client2.expect(prompt) client2.send("INSERT INTO test.mt VALUES (1, '1990/01/01 12:00:05');") client2.expect(prompt) - client1.expect('1*' + end_of_block) + client1.expect("1*" + end_of_block) client2.send("INSERT INTO test.mt VALUES (1, '1990/01/01 12:00:06');") client2.send("INSERT INTO test.mt VALUES (1, '1990/01/01 12:00:10');") client2.expect(prompt) - client1.expect('1*' + end_of_block) - client1.expect('2*' + end_of_block) + client1.expect("1*" + end_of_block) + client1.expect("2*" + end_of_block) # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE test.wv') + client1.send("DROP TABLE test.wv") client1.expect(prompt) - client1.send('DROP TABLE test.mt') + client1.send("DROP TABLE test.mt") client1.expect(prompt) diff --git a/tests/queries/0_stateless/01069_window_view_proc_tumble_watch.py b/tests/queries/0_stateless/01069_window_view_proc_tumble_watch.py index eb2b7835483..4c675fcabfb 100755 --- a/tests/queries/0_stateless/01069_window_view_proc_tumble_watch.py +++ b/tests/queries/0_stateless/01069_window_view_proc_tumble_watch.py @@ -1,62 +1,72 @@ #!/usr/bin/env python3 -#Tags: no-parallel +# Tags: no-parallel import os import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_window_view = 1') + client1.send("SET allow_experimental_window_view = 1") client1.expect(prompt) - client1.send('SET window_view_heartbeat_interval = 1') + client1.send("SET window_view_heartbeat_interval = 1") client1.expect(prompt) - client2.send('SET allow_experimental_window_view = 1') + client2.send("SET allow_experimental_window_view = 1") client2.expect(prompt) - 
client1.send('CREATE DATABASE 01069_window_view_proc_tumble_watch') + client1.send("CREATE DATABASE 01069_window_view_proc_tumble_watch") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS 01069_window_view_proc_tumble_watch.mt') + client1.send("DROP TABLE IF EXISTS 01069_window_view_proc_tumble_watch.mt") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS 01069_window_view_proc_tumble_watch.wv NO DELAY') + client1.send("DROP TABLE IF EXISTS 01069_window_view_proc_tumble_watch.wv NO DELAY") client1.expect(prompt) - client1.send('CREATE TABLE 01069_window_view_proc_tumble_watch.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()') + client1.send( + "CREATE TABLE 01069_window_view_proc_tumble_watch.mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple()" + ) client1.expect(prompt) - client1.send("CREATE WINDOW VIEW 01069_window_view_proc_tumble_watch.wv AS SELECT count(a) AS count FROM 01069_window_view_proc_tumble_watch.mt GROUP BY tumble(timestamp, INTERVAL '1' SECOND, 'US/Samoa') AS wid;") + client1.send( + "CREATE WINDOW VIEW 01069_window_view_proc_tumble_watch.wv AS SELECT count(a) AS count FROM 01069_window_view_proc_tumble_watch.mt GROUP BY tumble(timestamp, INTERVAL '1' SECOND, 'US/Samoa') AS wid;" + ) client1.expect(prompt) - client1.send('WATCH 01069_window_view_proc_tumble_watch.wv') - client1.expect('Query id' + end_of_block) - client2.send("INSERT INTO 01069_window_view_proc_tumble_watch.mt VALUES (1, now('US/Samoa') + 3)") + client1.send("WATCH 01069_window_view_proc_tumble_watch.wv") + client1.expect("Query id" + end_of_block) + client2.send( + "INSERT INTO 01069_window_view_proc_tumble_watch.mt VALUES (1, now('US/Samoa') + 3)" + ) client2.expect("Ok.") - client1.expect('1' + end_of_block) - client1.expect('Progress: 1.00 rows.*\)') - client2.send("INSERT INTO 01069_window_view_proc_tumble_watch.mt VALUES (1, now('US/Samoa') + 3)") + client1.expect("1" + end_of_block) + client1.expect("Progress: 1.00 rows.*\)") + client2.send( + "INSERT INTO 01069_window_view_proc_tumble_watch.mt VALUES (1, now('US/Samoa') + 3)" + ) client2.expect("Ok.") - client1.expect('1' + end_of_block) - client1.expect('Progress: 2.00 rows.*\)') + client1.expect("1" + end_of_block) + client1.expect("Progress: 2.00 rows.*\)") # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE 01069_window_view_proc_tumble_watch.wv NO DELAY') + client1.send("DROP TABLE 01069_window_view_proc_tumble_watch.wv NO DELAY") client1.expect(prompt) - client1.send('DROP TABLE 01069_window_view_proc_tumble_watch.mt') + client1.send("DROP TABLE 01069_window_view_proc_tumble_watch.mt") client1.expect(prompt) - client1.send('DROP DATABASE IF EXISTS 01069_window_view_proc_tumble_watch') + client1.send("DROP DATABASE IF EXISTS 01069_window_view_proc_tumble_watch") client1.expect(prompt) diff --git a/tests/queries/0_stateless/01070_window_view_watch_events.py b/tests/queries/0_stateless/01070_window_view_watch_events.py index 51330ce1c01..2bf732d68e5 100755 --- a/tests/queries/0_stateless/01070_window_view_watch_events.py +++ b/tests/queries/0_stateless/01070_window_view_watch_events.py @@ -6,7 +6,7 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, 
"helpers")) from client import client, prompt, end_of_block @@ -14,47 +14,57 @@ log = None # uncomment the line below for debugging # log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2: client1.expect(prompt) client2.expect(prompt) - client1.send('SET allow_experimental_window_view = 1') + client1.send("SET allow_experimental_window_view = 1") client1.expect(prompt) - client1.send('SET window_view_heartbeat_interval = 1') + client1.send("SET window_view_heartbeat_interval = 1") client1.expect(prompt) - client2.send('SET allow_experimental_window_view = 1') + client2.send("SET allow_experimental_window_view = 1") client2.expect(prompt) - client1.send('CREATE DATABASE IF NOT EXISTS 01070_window_view_watch_events') + client1.send("CREATE DATABASE IF NOT EXISTS 01070_window_view_watch_events") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS 01070_window_view_watch_events.mt NO DELAY') + client1.send("DROP TABLE IF EXISTS 01070_window_view_watch_events.mt NO DELAY") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS 01070_window_view_watch_events.wv NO DELAY') + client1.send("DROP TABLE IF EXISTS 01070_window_view_watch_events.wv NO DELAY") client1.expect(prompt) - client1.send("CREATE TABLE 01070_window_view_watch_events.mt(a Int32, timestamp DateTime('US/Samoa')) ENGINE=MergeTree ORDER BY tuple()") + client1.send( + "CREATE TABLE 01070_window_view_watch_events.mt(a Int32, timestamp DateTime('US/Samoa')) ENGINE=MergeTree ORDER BY tuple()" + ) client1.expect(prompt) - client1.send("CREATE WINDOW VIEW 01070_window_view_watch_events.wv WATERMARK=ASCENDING AS SELECT count(a) AS count, tumbleEnd(wid) AS w_end FROM 01070_window_view_watch_events.mt GROUP BY tumble(timestamp, INTERVAL '5' SECOND, 'US/Samoa') AS wid") + client1.send( + "CREATE WINDOW VIEW 01070_window_view_watch_events.wv WATERMARK=ASCENDING AS SELECT count(a) AS count, tumbleEnd(wid) AS w_end FROM 01070_window_view_watch_events.mt GROUP BY tumble(timestamp, INTERVAL '5' SECOND, 'US/Samoa') AS wid" + ) client1.expect(prompt) - client1.send('WATCH 01070_window_view_watch_events.wv EVENTS') - client1.expect('Query id' + end_of_block) - client2.send("INSERT INTO 01070_window_view_watch_events.mt VALUES (1, '1990/01/01 12:00:00');") + client1.send("WATCH 01070_window_view_watch_events.wv EVENTS") + client1.expect("Query id" + end_of_block) + client2.send( + "INSERT INTO 01070_window_view_watch_events.mt VALUES (1, '1990/01/01 12:00:00');" + ) client2.expect("Ok.") - client2.send("INSERT INTO 01070_window_view_watch_events.mt VALUES (1, '1990/01/01 12:00:06');") + client2.send( + "INSERT INTO 01070_window_view_watch_events.mt VALUES (1, '1990/01/01 12:00:06');" + ) client2.expect("Ok.") - client1.expect('1990-01-01 12:00:05' + end_of_block) - client1.expect('Progress: 1.00 rows.*\)') + client1.expect("1990-01-01 12:00:05" + end_of_block) + client1.expect("Progress: 1.00 rows.*\)") # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client1.send('DROP TABLE 01070_window_view_watch_events.wv NO DELAY;') + client1.send("DROP TABLE 01070_window_view_watch_events.wv NO DELAY;") client1.expect(prompt) - client1.send('DROP TABLE 01070_window_view_watch_events.mt;') + 
client1.send("DROP TABLE 01070_window_view_watch_events.mt;") client1.expect(prompt) - client1.send('DROP DATABASE IF EXISTS 01070_window_view_watch_events;') + client1.send("DROP DATABASE IF EXISTS 01070_window_view_watch_events;") client1.expect(prompt) diff --git a/tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql index dd39277ee31..b162fdb21fd 100644 --- a/tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql +++ b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.sql @@ -1,5 +1,7 @@ -- Tags: no-parallel, no-fasttest +SET prefer_localhost_replica=1; + DROP TABLE IF EXISTS file; DROP TABLE IF EXISTS url; DROP TABLE IF EXISTS view; diff --git a/tests/queries/0_stateless/01091_num_threads.sql b/tests/queries/0_stateless/01091_num_threads.sql index e32d663880f..faeceb0e6d6 100644 --- a/tests/queries/0_stateless/01091_num_threads.sql +++ b/tests/queries/0_stateless/01091_num_threads.sql @@ -1,5 +1,6 @@ set log_queries=1; set log_query_threads=1; +set max_threads=0; WITH 01091 AS id SELECT 1; SYSTEM FLUSH LOGS; diff --git a/tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql b/tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql index de93166d891..aa924218360 100644 --- a/tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql +++ b/tests/queries/0_stateless/01099_parallel_distributed_insert_select.sql @@ -3,6 +3,7 @@ -- set insert_distributed_sync = 1; -- see https://github.com/ClickHouse/ClickHouse/issues/18971 SET allow_experimental_parallel_reading_from_replicas = 0; -- see https://github.com/ClickHouse/ClickHouse/issues/34525 +SET prefer_localhost_replica = 1; DROP TABLE IF EXISTS local_01099_a; DROP TABLE IF EXISTS local_01099_b; diff --git a/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh b/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh index 300d32f5a0c..60650cb9cc3 100755 --- a/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh +++ b/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh @@ -13,7 +13,7 @@ $CLICKHOUSE_CLIENT -q "INSERT INTO test_01107.mt SELECT number + sleepEachRow(3) sleep 1 $CLICKHOUSE_CLIENT -q "DETACH TABLE test_01107.mt" --database_atomic_wait_for_drop_and_detach_synchronously=0 -$CLICKHOUSE_CLIENT -q "ATTACH TABLE test_01107.mt" 2>&1 | grep -F "Code: 57" > /dev/null && echo "OK" +$CLICKHOUSE_CLIENT -q "ATTACH TABLE test_01107.mt" --database_atomic_wait_for_drop_and_detach_synchronously=0 2>&1 | grep -F "Code: 57" > /dev/null && echo "OK" $CLICKHOUSE_CLIENT -q "DETACH DATABASE test_01107" --database_atomic_wait_for_drop_and_detach_synchronously=0 2>&1 | grep -F "Code: 219" > /dev/null && echo "OK" wait diff --git a/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh b/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh index abd5c0d6a4f..a51e786b058 100755 --- a/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh +++ b/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh @@ -38,27 +38,17 @@ function restart_replicas_loop() done sleep 0.$RANDOM } -function restart_thread_1() -{ - restart_replicas_loop -} - -function restart_thread_2() -{ - restart_replicas_loop -} export -f rename_thread_1 export -f rename_thread_2 -export -f restart_thread_1 -export -f restart_thread_2 +export -f restart_replicas_loop TIMEOUT=10 clickhouse_client_loop_timeout $TIMEOUT 
rename_thread_1 2> /dev/null & clickhouse_client_loop_timeout $TIMEOUT rename_thread_2 2> /dev/null & -clickhouse_client_loop_timeout $TIMEOUT restart_thread_1 2> /dev/null & -clickhouse_client_loop_timeout $TIMEOUT restart_thread_2 2> /dev/null & +clickhouse_client_loop_timeout $TIMEOUT restart_replicas_loop 2> /dev/null & +clickhouse_client_loop_timeout $TIMEOUT restart_replicas_loop 2> /dev/null & wait diff --git a/tests/queries/0_stateless/01236_distributed_over_live_view_over_distributed.sql b/tests/queries/0_stateless/01236_distributed_over_live_view_over_distributed.sql index 4aacecc9734..3822f22d353 100644 --- a/tests/queries/0_stateless/01236_distributed_over_live_view_over_distributed.sql +++ b/tests/queries/0_stateless/01236_distributed_over_live_view_over_distributed.sql @@ -1,4 +1,4 @@ --- Tags: distributed, no-replicated-database, no-parallel, no-fasttest +-- Tags: distributed, no-replicated-database, no-parallel, no-fasttest, no-random-settings SET allow_experimental_live_view = 1; diff --git a/tests/queries/0_stateless/01246_insert_into_watch_live_view.py b/tests/queries/0_stateless/01246_insert_into_watch_live_view.py index addff72ce66..67c79778736 100755 --- a/tests/queries/0_stateless/01246_insert_into_watch_live_view.py +++ b/tests/queries/0_stateless/01246_insert_into_watch_live_view.py @@ -7,75 +7,81 @@ import time import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2, client(name='client3>', log=log) as client3: +with client(name="client1>", log=log) as client1, client( + name="client2>", log=log +) as client2, client(name="client3>", log=log) as client3: client1.expect(prompt) client2.expect(prompt) client3.expect(prompt) - client1.send('SET allow_experimental_live_view = 1') + client1.send("SET allow_experimental_live_view = 1") client1.expect(prompt) - client3.send('SET allow_experimental_live_view = 1') + client3.send("SET allow_experimental_live_view = 1") client3.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv') + client1.send("DROP TABLE IF EXISTS test.lv") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.lv_sums') + client1.send("DROP TABLE IF EXISTS test.lv_sums") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.mt') + client1.send("DROP TABLE IF EXISTS test.mt") client1.expect(prompt) - client1.send('DROP TABLE IF EXISTS test.sums') + client1.send("DROP TABLE IF EXISTS test.sums") client1.expect(prompt) - client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()') + client1.send("CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()") client1.expect(prompt) - client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) AS s FROM test.mt') + client1.send("CREATE LIVE VIEW test.lv AS SELECT sum(a) AS s FROM test.mt") client1.expect(prompt) - client1.send('CREATE TABLE test.sums (s Int32, version Int32) Engine=MergeTree ORDER BY tuple()') + client1.send( + "CREATE TABLE test.sums (s Int32, version Int32) Engine=MergeTree ORDER BY tuple()" + ) client1.expect(prompt) - client3.send('CREATE LIVE VIEW test.lv_sums AS SELECT * FROM test.sums ORDER BY version') + client3.send( + "CREATE LIVE VIEW test.lv_sums AS SELECT * FROM test.sums ORDER BY version" 
+ ) client3.expect(prompt) client3.send("WATCH test.lv_sums FORMAT CSVWithNames") - client1.send('INSERT INTO test.sums WATCH test.lv') - client1.expect(r'INSERT INTO') + client1.send("INSERT INTO test.sums WATCH test.lv") + client1.expect(r"INSERT INTO") - client3.expect('0,1.*\r\n') + client3.expect("0,1.*\r\n") - client2.send('INSERT INTO test.mt VALUES (1),(2),(3)') + client2.send("INSERT INTO test.mt VALUES (1),(2),(3)") client2.expect(prompt) - client3.expect('6,2.*\r\n') + client3.expect("6,2.*\r\n") - client2.send('INSERT INTO test.mt VALUES (4),(5),(6)') + client2.send("INSERT INTO test.mt VALUES (4),(5),(6)") client2.expect(prompt) - client3.expect('21,3.*\r\n') + client3.expect("21,3.*\r\n") # send Ctrl-C - client3.send('\x03', eol='') - match = client3.expect('(%s)|([#\$] )' % prompt) + client3.send("\x03", eol="") + match = client3.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client3.send(client3.command) client3.expect(prompt) # send Ctrl-C - client1.send('\x03', eol='') - match = client1.expect('(%s)|([#\$] )' % prompt) + client1.send("\x03", eol="") + match = client1.expect("(%s)|([#\$] )" % prompt) if match.groups()[1]: client1.send(client1.command) client1.expect(prompt) - client2.send('DROP TABLE test.lv') + client2.send("DROP TABLE test.lv") client2.expect(prompt) - client2.send('DROP TABLE test.lv_sums') + client2.send("DROP TABLE test.lv_sums") client2.expect(prompt) - client2.send('DROP TABLE test.sums') + client2.send("DROP TABLE test.sums") client2.expect(prompt) - client2.send('DROP TABLE test.mt') + client2.send("DROP TABLE test.mt") client2.expect(prompt) diff --git a/tests/queries/0_stateless/01268_procfs_metrics.sh b/tests/queries/0_stateless/01268_procfs_metrics.sh index d5bd99724ca..c1697edd632 100755 --- a/tests/queries/0_stateless/01268_procfs_metrics.sh +++ b/tests/queries/0_stateless/01268_procfs_metrics.sh @@ -41,4 +41,13 @@ timeout $TIMEOUT bash -c show_processes_func & wait +# otherwise it can be alive after test +query_alive=$($CLICKHOUSE_CLIENT --query "SELECT count() FROM system.processes WHERE query ILIKE 'SELECT * FROM numbers(600000000)%'") +while [[ $query_alive != 0 ]] +do + $CLICKHOUSE_CLIENT -q "KILL QUERY WHERE query ilike '%SELECT * FROM numbers(600000000)%'" 2> /dev/null 1> /dev/null + sleep 0.5 + query_alive=$($CLICKHOUSE_CLIENT --query "SELECT count() FROM system.processes WHERE query ILIKE 'SELECT * FROM numbers(600000000)%'") +done + echo "Test OK" diff --git a/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh b/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh index c9c01455e31..1807da6948a 100755 --- a/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh +++ b/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-replicated-database, no-parallel, no-fasttest, no-tsan, no-asan +# Tags: no-replicated-database, no-parallel, no-fasttest, no-tsan, no-asan, no-random-settings # Tag no-fasttest: max_memory_usage_for_user can interfere another queries running concurrently # Regression for MemoryTracker that had been incorrectly accounted diff --git a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh index 2a41d90cd3a..db986e74b6b 100755 --- a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh +++ b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh @@ -20,7 +20,8 @@ function thread() REPLICA=$1 ITERATIONS=$2 - $CLICKHOUSE_CLIENT 
--max_block_size 1 --min_insert_block_size_rows 0 --min_insert_block_size_bytes 0 --query "INSERT INTO r$REPLICA SELECT number * $NUM_REPLICAS + $REPLICA FROM numbers($ITERATIONS)" + # It's legal to fetch something before insert finished + $CLICKHOUSE_CLIENT --max_block_size 1 --min_insert_block_size_rows 0 --min_insert_block_size_bytes 0 --query "INSERT INTO r$REPLICA SELECT number * $NUM_REPLICAS + $REPLICA FROM numbers($ITERATIONS)" 2>&1 | grep -v -F "Tried to commit obsolete part" } for REPLICA in $SEQ; do diff --git a/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh b/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh index 9a4df37d206..34fa822b6ea 100755 --- a/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh +++ b/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, zookeeper, no-parallel +# Tags: long, zookeeper, no-parallel, no-backward-compatibility-check CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01324_if_transform_strings_to_enum.reference b/tests/queries/0_stateless/01324_if_transform_strings_to_enum.reference index 994e3f24aaf..7cf545176e3 100644 --- a/tests/queries/0_stateless/01324_if_transform_strings_to_enum.reference +++ b/tests/queries/0_stateless/01324_if_transform_strings_to_enum.reference @@ -2,13 +2,13 @@ other other google other -yandex +censor.net other yahoo other other other -SELECT transform(number, [2, 4, 6], _CAST([\'google\', \'yandex\', \'yahoo\'], \'Array(Enum8(\\\'google\\\' = 1, \\\'other\\\' = 2, \\\'yahoo\\\' = 3, \\\'yandex\\\' = 4))\'), _CAST(\'other\', \'Enum8(\\\'google\\\' = 1, \\\'other\\\' = 2, \\\'yahoo\\\' = 3, \\\'yandex\\\' = 4)\')) +SELECT transform(number, [2, 4, 6], _CAST([\'google\', \'censor.net\', \'yahoo\'], \'Array(Enum8(\\\'censor.net\\\' = 1, \\\'google\\\' = 2, \\\'other\\\' = 3, \\\'yahoo\\\' = 4))\'), _CAST(\'other\', \'Enum8(\\\'censor.net\\\' = 1, \\\'google\\\' = 2, \\\'other\\\' = 3, \\\'yahoo\\\' = 4)\')) FROM system.numbers LIMIT 10 google @@ -17,24 +17,24 @@ google google google google -yandex -yandex -yandex -yandex -SELECT if(number > 5, \'yandex\', \'google\') +censor.net +censor.net +censor.net +censor.net +SELECT if(number > 5, \'censor.net\', \'google\') FROM system.numbers LIMIT 10 other other google other -yandex +censor.net other yahoo other other other -SELECT transform(number, [2, 4, 6], [\'google\', \'yandex\', \'yahoo\'], \'other\') +SELECT transform(number, [2, 4, 6], [\'google\', \'censor.net\', \'yahoo\'], \'other\') FROM system.numbers LIMIT 10 google @@ -43,10 +43,10 @@ google google google google -yandex -yandex -yandex -yandex -SELECT if(number > 5, \'yandex\', \'google\') +censor.net +censor.net +censor.net +censor.net +SELECT if(number > 5, \'censor.net\', \'google\') FROM system.numbers LIMIT 10 diff --git a/tests/queries/0_stateless/01324_if_transform_strings_to_enum.sql b/tests/queries/0_stateless/01324_if_transform_strings_to_enum.sql index dcb082c650a..ee2f48a53da 100644 --- a/tests/queries/0_stateless/01324_if_transform_strings_to_enum.sql +++ b/tests/queries/0_stateless/01324_if_transform_strings_to_enum.sql @@ -1,13 +1,13 @@ set optimize_if_transform_strings_to_enum = 1; -SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') FROM 
system.numbers LIMIT 10; -SELECT number > 5 ? 'yandex' : 'google' FROM system.numbers LIMIT 10; -EXPLAIN SYNTAX SELECT number > 5 ? 'yandex' : 'google' FROM system.numbers LIMIT 10; +SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; set optimize_if_transform_strings_to_enum = 0; -SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -SELECT number > 5 ? 'yandex' : 'google' FROM system.numbers LIMIT 10; -EXPLAIN SYNTAX SELECT number > 5 ? 'yandex' : 'google' FROM system.numbers LIMIT 10; +SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; diff --git a/tests/queries/0_stateless/01358_constexpr_constraint.sql b/tests/queries/0_stateless/01358_constexpr_constraint.sql index 799f6f32259..4560ac47c42 100644 --- a/tests/queries/0_stateless/01358_constexpr_constraint.sql +++ b/tests/queries/0_stateless/01358_constexpr_constraint.sql @@ -1,7 +1,7 @@ CREATE TEMPORARY TABLE constrained ( `URL` String, - CONSTRAINT is_yandex CHECK domainWithoutWWW(URL) = domainWithoutWWW(URL), + CONSTRAINT identity CHECK domainWithoutWWW(URL) = domainWithoutWWW(URL), CONSTRAINT is_utf8 CHECK isValidUTF8(URL) ); diff --git a/tests/queries/0_stateless/01407_lambda_arrayJoin.sql b/tests/queries/0_stateless/01407_lambda_arrayJoin.sql index 4f34bb59527..363b1d92dbb 100644 --- a/tests/queries/0_stateless/01407_lambda_arrayJoin.sql +++ b/tests/queries/0_stateless/01407_lambda_arrayJoin.sql @@ -1,6 +1,5 @@ SELECT arrayFilter((a) -> ((a, arrayJoin([])) IN (Null, [Null])), []); SELECT arrayFilter((a) -> ((a, arrayJoin([[]])) IN (Null, [Null])), []); --- simplified from the https://clickhouse-test-reports.s3.yandex.net/10373/6c4748a63e7acde2cc3283d96ffec590aae1e724/fuzzer/fuzzer.log#fail1 SELECT * FROM system.one ARRAY JOIN arrayFilter((a) -> ((a, arrayJoin([])) IN (NULL)), []) AS arr_x; -- { serverError 43; } SELECT * FROM numbers(1) LEFT ARRAY JOIN arrayFilter((x_0, x_1) -> (arrayJoin([]) IN (NULL)), [], []) AS arr_x; diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.reference b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference index 119642df395..e70506599ec 100644 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.reference +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.reference @@ -11,9 +11,11 @@ ===native=== {"query":"select * from url('http:\/\/127.0.0.2:8123\/?query=select%201%20format%20Null', CSV, 'a int')","status":"QueryFinish","tracestate":"another custom state","sorted_by_start_time":1} {"query":"select 1 format Null\n","status":"QueryFinish","tracestate":"another custom state","sorted_by_start_time":1} +{"query":"select 1 format Null\n","status":"QueryFinish","tracestate":"another custom 
state","sorted_by_start_time":1} +{"query":"select 1 format Null\n","query_status":"QueryFinish","tracestate":"another custom state","sorted_by_finish_time":1} {"query":"select 1 format Null\n","query_status":"QueryFinish","tracestate":"another custom state","sorted_by_finish_time":1} {"query":"select * from url('http:\/\/127.0.0.2:8123\/?query=select%201%20format%20Null', CSV, 'a int')","query_status":"QueryFinish","tracestate":"another custom state","sorted_by_finish_time":1} -{"total spans":"2","unique spans":"2","unique non-zero parent spans":"2"} +{"total spans":"3","unique spans":"3","unique non-zero parent spans":"2"} {"initial query spans with proper parent":"1"} {"unique non-empty tracestate values":"1"} ===sampled=== diff --git a/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql b/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql index ee144264193..1e1d87a5ad5 100644 --- a/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql +++ b/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql @@ -8,8 +8,8 @@ SET optimize_if_transform_strings_to_enum = 1; SELECT any(number + 1) FROM numbers(1); SELECT uniq(bitNot(number)) FROM numbers(1); SELECT sum(number + 1) FROM numbers(1); -SELECT transform(number, [1, 2], ['google', 'yandex'], 'other') FROM numbers(1); -SELECT number > 0 ? 'yandex' : 'google' FROM numbers(1); +SELECT transform(number, [1, 2], ['google', 'censor.net'], 'other') FROM numbers(1); +SELECT number > 0 ? 'censor.net' : 'google' FROM numbers(1); DROP TABLE IF EXISTS local_table; @@ -23,8 +23,8 @@ INSERT INTO local_table SELECT number FROM numbers(1); SELECT any(number + 1) FROM dist; SELECT uniq(bitNot(number)) FROM dist; SELECT sum(number + 1) FROM dist; -SELECT transform(number, [1, 2], ['google', 'yandex'], 'other') FROM dist; -SELECT number > 0 ? 'yandex' : 'google' FROM dist; +SELECT transform(number, [1, 2], ['google', 'censor.net'], 'other') FROM dist; +SELECT number > 0 ? 
'censor.net' : 'google' FROM dist; DROP TABLE local_table; DROP TABLE dist; diff --git a/tests/queries/0_stateless/01506_buffer_table_alter_block_structure_2.sql b/tests/queries/0_stateless/01506_buffer_table_alter_block_structure_2.sql index f9c227942ac..0595e67f2b0 100644 --- a/tests/queries/0_stateless/01506_buffer_table_alter_block_structure_2.sql +++ b/tests/queries/0_stateless/01506_buffer_table_alter_block_structure_2.sql @@ -1,3 +1,5 @@ +-- Tags: no-random-settings + DROP TABLE IF EXISTS buf_dest; DROP TABLE IF EXISTS buf; diff --git a/tests/queries/0_stateless/01514_parallel_formatting.sql b/tests/queries/0_stateless/01514_parallel_formatting.sql index 95a9e19aa1f..a2d50a4d7bb 100644 --- a/tests/queries/0_stateless/01514_parallel_formatting.sql +++ b/tests/queries/0_stateless/01514_parallel_formatting.sql @@ -1,5 +1,8 @@ drop table if exists tsv; set output_format_parallel_formatting=1; +set max_read_buffer_size=1048576; +set max_block_size=65505; + create table tsv(a int, b int default 7) engine File(TSV); insert into tsv(a) select number from numbers(10000000); diff --git a/tests/queries/0_stateless/01524_do_not_merge_across_partitions_select_final.sql b/tests/queries/0_stateless/01524_do_not_merge_across_partitions_select_final.sql index ca9f296b6bf..90975b0d9c4 100644 --- a/tests/queries/0_stateless/01524_do_not_merge_across_partitions_select_final.sql +++ b/tests/queries/0_stateless/01524_do_not_merge_across_partitions_select_final.sql @@ -1,6 +1,7 @@ DROP TABLE IF EXISTS select_final; SET do_not_merge_across_partitions_select_final = 1; +SET max_threads = 0; CREATE TABLE select_final (t DateTime, x Int32, string String) ENGINE = ReplacingMergeTree() PARTITION BY toYYYYMM(t) ORDER BY (x, t); diff --git a/tests/queries/0_stateless/01529_bad_memory_tracking.sh b/tests/queries/0_stateless/01529_bad_memory_tracking.sh index f5d096cc799..d12623d04b9 100755 --- a/tests/queries/0_stateless/01529_bad_memory_tracking.sh +++ b/tests/queries/0_stateless/01529_bad_memory_tracking.sh @@ -8,6 +8,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh for _ in {1..10}; do - ${CLICKHOUSE_CLIENT} --max_memory_usage '10G' --query "SELECT i FROM generateRandom('i Array(Int8)', 1, 1, 1048577) LIMIT 65536" |& grep -v -e 'Received exception from server' -e 'Code: 241' -e '(query: ' + ${CLICKHOUSE_CLIENT} --max_block_size=65505 --max_memory_usage '10G' --query "SELECT i FROM generateRandom('i Array(Int8)', 1, 1, 1048577) LIMIT 65536" |& grep -v -e 'Received exception from server' -e 'Code: 241' -e '(query: ' done exit 0 diff --git a/tests/queries/0_stateless/01532_primary_key_without_order_by_zookeeper.reference b/tests/queries/0_stateless/01532_primary_key_without_order_by_zookeeper.reference index 66aaf09f4d9..0c1d3ae33ac 100644 --- a/tests/queries/0_stateless/01532_primary_key_without_order_by_zookeeper.reference +++ b/tests/queries/0_stateless/01532_primary_key_without_order_by_zookeeper.reference @@ -9,8 +9,8 @@ CREATE TABLE default.merge_tree_pk_sql\n(\n `key` UInt64,\n `value` String 1 c 2 b 1 c 0 -2 e 555 2 b 0 +2 e 555 CREATE TABLE default.merge_tree_pk_sql\n(\n `key` UInt64,\n `value` String,\n `key2` UInt64\n)\nENGINE = ReplacingMergeTree\nPRIMARY KEY key\nORDER BY (key, key2)\nSETTINGS index_granularity = 8192 CREATE TABLE default.replicated_merge_tree_pk_sql\n(\n `key` UInt64,\n `value` String\n)\nENGINE = ReplicatedReplacingMergeTree(\'/clickhouse/test/01532_primary_key_without\', \'r1\')\nPRIMARY KEY key\nORDER BY key\nSETTINGS index_granularity = 8192 1 a @@ -18,6 +18,6 @@ CREATE TABLE default.replicated_merge_tree_pk_sql\n(\n `key` UInt64,\n `va 1 c 2 b 1 c 0 -2 e 555 2 b 0 +2 e 555 CREATE TABLE default.replicated_merge_tree_pk_sql\n(\n `key` UInt64,\n `value` String,\n `key2` UInt64\n)\nENGINE = ReplicatedReplacingMergeTree(\'/clickhouse/test/01532_primary_key_without\', \'r1\')\nPRIMARY KEY key\nORDER BY (key, key2)\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01532_primary_key_without_order_by_zookeeper.sql b/tests/queries/0_stateless/01532_primary_key_without_order_by_zookeeper.sql index 8d413cf6c35..109c808b344 100644 --- a/tests/queries/0_stateless/01532_primary_key_without_order_by_zookeeper.sql +++ b/tests/queries/0_stateless/01532_primary_key_without_order_by_zookeeper.sql @@ -1,4 +1,4 @@ --- Tags: zookeeper +-- Tags: zookeeper, no-parallel DROP TABLE IF EXISTS merge_tree_pk; @@ -15,14 +15,14 @@ SHOW CREATE TABLE merge_tree_pk; INSERT INTO merge_tree_pk VALUES (1, 'a'); INSERT INTO merge_tree_pk VALUES (2, 'b'); -SELECT * FROM merge_tree_pk ORDER BY key; +SELECT * FROM merge_tree_pk ORDER BY key, value; INSERT INTO merge_tree_pk VALUES (1, 'c'); DETACH TABLE merge_tree_pk; ATTACH TABLE merge_tree_pk; -SELECT * FROM merge_tree_pk FINAL ORDER BY key; +SELECT * FROM merge_tree_pk FINAL ORDER BY key, value; DROP TABLE IF EXISTS merge_tree_pk; @@ -41,14 +41,14 @@ SHOW CREATE TABLE merge_tree_pk_sql; INSERT INTO merge_tree_pk_sql VALUES (1, 'a'); INSERT INTO merge_tree_pk_sql VALUES (2, 'b'); -SELECT * FROM merge_tree_pk_sql ORDER BY key; +SELECT * FROM merge_tree_pk_sql ORDER BY key, value; INSERT INTO merge_tree_pk_sql VALUES (1, 'c'); DETACH TABLE merge_tree_pk_sql; ATTACH TABLE merge_tree_pk_sql; -SELECT * FROM merge_tree_pk_sql FINAL ORDER BY key; +SELECT * FROM merge_tree_pk_sql FINAL ORDER BY key, value; ALTER TABLE merge_tree_pk_sql ADD COLUMN key2 UInt64, MODIFY ORDER BY (key, key2); @@ -56,7 +56,7 @@ INSERT INTO merge_tree_pk_sql VALUES (2, 'd', 555); INSERT INTO merge_tree_pk_sql VALUES (2, 'e', 555); -SELECT * FROM merge_tree_pk_sql FINAL ORDER BY key; 
+SELECT * FROM merge_tree_pk_sql FINAL ORDER BY key, value; SHOW CREATE TABLE merge_tree_pk_sql; @@ -77,14 +77,14 @@ SHOW CREATE TABLE replicated_merge_tree_pk_sql; INSERT INTO replicated_merge_tree_pk_sql VALUES (1, 'a'); INSERT INTO replicated_merge_tree_pk_sql VALUES (2, 'b'); -SELECT * FROM replicated_merge_tree_pk_sql ORDER BY key; +SELECT * FROM replicated_merge_tree_pk_sql ORDER BY key, value; INSERT INTO replicated_merge_tree_pk_sql VALUES (1, 'c'); DETACH TABLE replicated_merge_tree_pk_sql; ATTACH TABLE replicated_merge_tree_pk_sql; -SELECT * FROM replicated_merge_tree_pk_sql FINAL ORDER BY key; +SELECT * FROM replicated_merge_tree_pk_sql FINAL ORDER BY key, value; ALTER TABLE replicated_merge_tree_pk_sql ADD COLUMN key2 UInt64, MODIFY ORDER BY (key, key2); @@ -92,7 +92,7 @@ INSERT INTO replicated_merge_tree_pk_sql VALUES (2, 'd', 555); INSERT INTO replicated_merge_tree_pk_sql VALUES (2, 'e', 555); -SELECT * FROM replicated_merge_tree_pk_sql FINAL ORDER BY key; +SELECT * FROM replicated_merge_tree_pk_sql FINAL ORDER BY key, value; DETACH TABLE replicated_merge_tree_pk_sql; ATTACH TABLE replicated_merge_tree_pk_sql; diff --git a/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference b/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference index d8c9b88d8e8..46aaa6e07d6 100644 --- a/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference +++ b/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference @@ -9,7 +9,7 @@ Expression (Projection) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromMergeTree + ReadFromMergeTree (default.test_order_by) SELECT timestamp, key @@ -21,7 +21,7 @@ Expression (Projection) Sorting Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromMergeTree + ReadFromMergeTree (default.test_order_by) SELECT timestamp, key @@ -35,7 +35,7 @@ Expression (Projection) Sorting Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromMergeTree + ReadFromMergeTree (default.test_order_by) SELECT timestamp, key diff --git a/tests/queries/0_stateless/01576_alias_column_rewrite.reference b/tests/queries/0_stateless/01576_alias_column_rewrite.reference index 07d361cfa46..11cc146dd62 100644 --- a/tests/queries/0_stateless/01576_alias_column_rewrite.reference +++ b/tests/queries/0_stateless/01576_alias_column_rewrite.reference @@ -26,35 +26,35 @@ Expression (Projection) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromMergeTree + ReadFromMergeTree (default.test_table) Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromMergeTree + ReadFromMergeTree (default.test_table) Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromMergeTree + ReadFromMergeTree (default.test_table) optimize_aggregation_in_order Expression ((Projection + Before ORDER BY)) Aggregating Expression (Before GROUP BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromMergeTree + ReadFromMergeTree (default.test_table) 
Expression ((Projection + Before ORDER BY)) Aggregating Expression (Before GROUP BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromMergeTree + ReadFromMergeTree (default.test_table) Expression ((Projection + Before ORDER BY)) Aggregating Expression (Before GROUP BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) - ReadFromMergeTree + ReadFromMergeTree (default.test_table) second-index 1 1 diff --git a/tests/queries/0_stateless/01598_memory_limit_zeros.sql b/tests/queries/0_stateless/01598_memory_limit_zeros.sql index 5b321687e43..cc2a75e023e 100644 --- a/tests/queries/0_stateless/01598_memory_limit_zeros.sql +++ b/tests/queries/0_stateless/01598_memory_limit_zeros.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel, no-fasttest +-- Tags: no-parallel, no-fasttest, no-random-settings SET max_memory_usage = 1, max_untracked_memory = 1000000, max_threads=40; select 'test', count(*) from zeros_mt(1000000) where not ignore(zero); -- { serverError 241 } diff --git a/tests/queries/0_stateless/01622_constraints_simple_optimization.sql b/tests/queries/0_stateless/01622_constraints_simple_optimization.sql index e1922975a2a..7ec9e1a3158 100644 --- a/tests/queries/0_stateless/01622_constraints_simple_optimization.sql +++ b/tests/queries/0_stateless/01622_constraints_simple_optimization.sql @@ -8,23 +8,23 @@ SET optimize_move_to_prewhere = 1; SET optimize_substitute_columns = 1; SET optimize_append_index = 1; -CREATE TABLE constraint_test_assumption (URL String, a Int32, CONSTRAINT c1 ASSUME domainWithoutWWW(URL) = 'yandex.ru', CONSTRAINT c2 ASSUME URL > 'zzz' AND startsWith(URL, 'test') = True) ENGINE = TinyLog; +CREATE TABLE constraint_test_assumption (URL String, a Int32, CONSTRAINT c1 ASSUME domainWithoutWWW(URL) = 'bigmir.net', CONSTRAINT c2 ASSUME URL > 'zzz' AND startsWith(URL, 'test') = True) ENGINE = TinyLog; --- Add wrong rows in order to check optimization INSERT INTO constraint_test_assumption (URL, a) VALUES ('1', 1); INSERT INTO constraint_test_assumption (URL, a) VALUES ('2', 2); -INSERT INTO constraint_test_assumption (URL, a) VALUES ('yandex.ru', 3); +INSERT INTO constraint_test_assumption (URL, a) VALUES ('bigmir.net', 3); INSERT INTO constraint_test_assumption (URL, a) VALUES ('3', 4); -SELECT count() FROM constraint_test_assumption WHERE domainWithoutWWW(URL) = 'yandex.ru'; --- assumption -> 4 -SELECT count() FROM constraint_test_assumption WHERE NOT (domainWithoutWWW(URL) = 'yandex.ru'); --- assumption -> 0 -SELECT count() FROM constraint_test_assumption WHERE domainWithoutWWW(URL) != 'yandex.ru'; --- assumption -> 0 +SELECT count() FROM constraint_test_assumption WHERE domainWithoutWWW(URL) = 'bigmir.net'; --- assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE NOT (domainWithoutWWW(URL) = 'bigmir.net'); --- assumption -> 0 +SELECT count() FROM constraint_test_assumption WHERE domainWithoutWWW(URL) != 'bigmir.net'; --- assumption -> 0 SELECT count() FROM constraint_test_assumption WHERE domainWithoutWWW(URL) = 'nothing'; --- not optimized -> 0 -SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'yandex.ru' AND URL > 'zzz'); ---> assumption -> 4 -SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'yandex.ru' AND NOT URL <= 'zzz'); ---> assumption -> 4 -SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'yandex.ru' AND URL > 'zzz') OR (a = 10 AND a + 5 < 100); ---> assumption -> 4 -SELECT count() FROM constraint_test_assumption WHERE 
(domainWithoutWWW(URL) = 'yandex.ru' AND URL = '111'); ---> assumption & no assumption -> 0 +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'bigmir.net' AND URL > 'zzz'); ---> assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'bigmir.net' AND NOT URL <= 'zzz'); ---> assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'bigmir.net' AND URL > 'zzz') OR (a = 10 AND a + 5 < 100); ---> assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'bigmir.net' AND URL = '111'); ---> assumption & no assumption -> 0 SELECT count() FROM constraint_test_assumption WHERE (startsWith(URL, 'test') = True); ---> assumption -> 4 DROP TABLE constraint_test_assumption; diff --git a/tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path_long.sql b/tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path_long.sql index ce48ad6a02a..4357aa199dc 100644 --- a/tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path_long.sql +++ b/tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path_long.sql @@ -1,4 +1,4 @@ --- Tags: long +-- Tags: long, no-backward-compatibility-check DROP TABLE IF EXISTS test_01640; DROP TABLE IF EXISTS restore_01640; diff --git a/tests/queries/0_stateless/01661_referer.reference b/tests/queries/0_stateless/01661_referer.reference index 49d29c80f17..d247c53413e 100644 --- a/tests/queries/0_stateless/01661_referer.reference +++ b/tests/queries/0_stateless/01661_referer.reference @@ -1,2 +1,2 @@ 1 -https://yandex.ru/ +https://meta.ua/ diff --git a/tests/queries/0_stateless/01661_referer.sh b/tests/queries/0_stateless/01661_referer.sh index 8123c925454..0299ee063ea 100755 --- a/tests/queries/0_stateless/01661_referer.sh +++ b/tests/queries/0_stateless/01661_referer.sh @@ -4,6 +4,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT 1' --referer 'https://yandex.ru/' +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT 1' --referer 'https://meta.ua/' ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" -${CLICKHOUSE_CLIENT} --query "SELECT http_referer FROM system.query_log WHERE current_database = currentDatabase() AND http_referer LIKE '%yandex%' LIMIT 1" +${CLICKHOUSE_CLIENT} --query "SELECT http_referer FROM system.query_log WHERE current_database = currentDatabase() AND http_referer LIKE '%meta%' LIMIT 1" diff --git a/tests/queries/0_stateless/01675_data_type_coroutine.reference b/tests/queries/0_stateless/01675_data_type_coroutine.reference index 7326d960397..541dab48def 100644 --- a/tests/queries/0_stateless/01675_data_type_coroutine.reference +++ b/tests/queries/0_stateless/01675_data_type_coroutine.reference @@ -1 +1,2 @@ Ok +Ok diff --git a/tests/queries/0_stateless/01675_data_type_coroutine.sh b/tests/queries/0_stateless/01675_data_type_coroutine.sh index 8e80d722a4c..9f7d5401bd2 100755 --- a/tests/queries/0_stateless/01675_data_type_coroutine.sh +++ b/tests/queries/0_stateless/01675_data_type_coroutine.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: long CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -6,6 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) counter=0 retries=60 + I=0 while [[ $counter -lt $retries ]]; do I=$((I + 1)) @@ -14,5 +16,16 @@ while [[ $counter -lt $retries ]]; do ((++counter)) done +echo 'Ok' + +counter=0 +I=0 +while [[ $counter -lt $retries ]]; do + I=$((I + 1)) + TYPE=$(perl -e "print 'Array(' x $I; print 'UInt8'; print ')' x $I") + ${CLICKHOUSE_CLIENT} --prefer_localhost_replica=0 --max_parser_depth 1000000 --query "SELECT * FROM remote('127.0.0.{1,2}', generateRandom('x $TYPE', 1, 1, 1)) LIMIT 1 FORMAT Null" 2>&1 | grep -q -F 'Maximum parse depth' && break; + ((++counter)) +done + #echo "I = ${I}" echo 'Ok' diff --git a/tests/queries/0_stateless/01701_parallel_parsing_infinite_segmentation.sh b/tests/queries/0_stateless/01701_parallel_parsing_infinite_segmentation.sh index edc4f6916ff..0fe04fb95fd 100755 --- a/tests/queries/0_stateless/01701_parallel_parsing_infinite_segmentation.sh +++ b/tests/queries/0_stateless/01701_parallel_parsing_infinite_segmentation.sh @@ -6,6 +6,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} -q "create table insert_big_json(a String, b String) engine=MergeTree() order by tuple()"; -python3 -c "[print('{{\"a\":\"{}\", \"b\":\"{}\"'.format('clickhouse'* 1000000, 'dbms' * 1000000)) for i in range(10)]; [print('{{\"a\":\"{}\", \"b\":\"{}\"}}'.format('clickhouse'* 100000, 'dbms' * 100000)) for i in range(10)]" 2>/dev/null | ${CLICKHOUSE_CLIENT} --input_format_parallel_parsing=1 --max_memory_usage=0 -q "insert into insert_big_json FORMAT JSONEachRow" 2>&1 | grep -q "min_chunk_bytes_for_parallel_parsing" && echo "Ok." || echo "FAIL" ||: +python3 -c "[print('{{\"a\":\"{}\", \"b\":\"{}\"'.format('clickhouse'* 1000000, 'dbms' * 1000000)) for i in range(10)]; [print('{{\"a\":\"{}\", \"b\":\"{}\"}}'.format('clickhouse'* 100000, 'dbms' * 100000)) for i in range(10)]" 2>/dev/null | ${CLICKHOUSE_CLIENT} --min_chunk_bytes_for_parallel_parsing=10485760 --max_threads=0 --input_format_parallel_parsing=1 --max_memory_usage=0 -q "insert into insert_big_json FORMAT JSONEachRow" 2>&1 | grep -q "min_chunk_bytes_for_parallel_parsing" && echo "Ok." 
|| echo "FAIL" ||: ${CLICKHOUSE_CLIENT} -q "drop table insert_big_json" diff --git a/tests/queries/0_stateless/01722_long_brotli_http_compression_json_format.sh b/tests/queries/0_stateless/01722_long_brotli_http_compression_json_format.sh index 7295537a2d2..03f7893eb04 100755 --- a/tests/queries/0_stateless/01722_long_brotli_http_compression_json_format.sh +++ b/tests/queries/0_stateless/01722_long_brotli_http_compression_json_format.sh @@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CURL} -sS -H 'Accept-Encoding: br' "${CLICKHOUSE_URL}&enable_http_compression=1" -d "SELECT toDate('2020-12-12') as datetime, 'test-pipeline' as pipeline, 'clickhouse-test-host-001.clickhouse.com' as host, 'clickhouse' as home, 'clickhouse' as detail, number as row_number FROM numbers(1000000) FORMAT JSON" | brotli -d | tail -n30 | head -n23 +${CLICKHOUSE_CURL} -sS -H 'Accept-Encoding: br' "${CLICKHOUSE_URL}&enable_http_compression=1" -d "SELECT toDate('2020-12-12') as datetime, 'test-pipeline' as pipeline, 'clickhouse-test-host-001.clickhouse.com' as host, 'clickhouse' as home, 'clickhouse' as detail, number as row_number FROM numbers(1000000) FORMAT JSON SETTINGS max_block_size=65505" | brotli -d | tail -n30 | head -n23 diff --git a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql index 7f36bcb6c8a..62b578c21d6 100644 --- a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql +++ b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql @@ -1,4 +1,4 @@ --- Tags: long, distributed +-- Tags: long, distributed, no-random-settings drop table if exists data_01730; diff --git a/tests/queries/0_stateless/01733_transform_ubsan.sql b/tests/queries/0_stateless/01733_transform_ubsan.sql index 256603e9087..7c3d8ef653a 100644 --- a/tests/queries/0_stateless/01733_transform_ubsan.sql +++ b/tests/queries/0_stateless/01733_transform_ubsan.sql @@ -1,4 +1,4 @@ -SELECT arrayStringConcat(arrayMap(x -> transform(x, [1025, -9223372036854775808, 65537, 257, 1048576, 10, 7, 1048575, 65536], ['yandex', 'googlegooglegooglegoogle', 'test', '', '', 'hello', 'world', '', 'xyz'], ''), arrayMap(x -> (x % -inf), range(number))), '') +SELECT arrayStringConcat(arrayMap(x -> transform(x, [1025, -9223372036854775808, 65537, 257, 1048576, 10, 7, 1048575, 65536], ['censor.net', 'googlegooglegooglegoogle', 'test', '', '', 'hello', 'world', '', 'xyz'], ''), arrayMap(x -> (x % -inf), range(number))), '') FROM system.numbers LIMIT 1025 FORMAT Null; diff --git a/tests/queries/0_stateless/01746_long_zlib_http_compression_json_format.sh b/tests/queries/0_stateless/01746_long_zlib_http_compression_json_format.sh index feaf4bcf623..71b003d2533 100755 --- a/tests/queries/0_stateless/01746_long_zlib_http_compression_json_format.sh +++ b/tests/queries/0_stateless/01746_long_zlib_http_compression_json_format.sh @@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CURL} -sS -H 'Accept-Encoding: gzip' "${CLICKHOUSE_URL}&enable_http_compression=1&http_zlib_compression_level=1" -d "SELECT toDate('2020-12-12') as datetime, 'test-pipeline' as pipeline, 'clickhouse-test-host-001.clickhouse.com' as host, 'clickhouse' as home, 'clickhouse' as detail, number as row_number FROM numbers(100000) FORMAT JSON" | gzip -d | tail -n30 | head -n23 +${CLICKHOUSE_CURL} -sS -H 'Accept-Encoding: gzip' "${CLICKHOUSE_URL}&enable_http_compression=1&http_zlib_compression_level=1" -d "SELECT toDate('2020-12-12') as datetime, 'test-pipeline' as pipeline, 'clickhouse-test-host-001.clickhouse.com' as host, 'clickhouse' as home, 'clickhouse' as detail, number as row_number FROM numbers(100000) FORMAT JSON SETTINGS max_block_size=65505" | gzip -d | tail -n30 | head -n23 diff --git a/tests/queries/0_stateless/01780_column_sparse_tuple.reference b/tests/queries/0_stateless/01780_column_sparse_tuple.reference index 22337838cff..743ded75a81 100644 --- a/tests/queries/0_stateless/01780_column_sparse_tuple.reference +++ b/tests/queries/0_stateless/01780_column_sparse_tuple.reference @@ -41,7 +41,7 @@ a a a id [] [] [] -t ['a','b','b.u','b.s'] ['UInt64','Tuple(u UInt32, s String)','UInt32','String'] ['Sparse','Default','Sparse','Default'] +t ['a','b.u','b.s'] ['UInt64','UInt32','String'] ['Sparse','Sparse','Default'] 0 0 0 @@ -58,7 +58,7 @@ aaaaaa a aaaaaa id [] [] [] -t ['a','b','b.u','b.s'] ['UInt64','Tuple(u UInt32, s String)','UInt32','String'] ['Sparse','Default','Sparse','Default'] +t ['a','b.u','b.s'] ['UInt64','UInt32','String'] ['Sparse','Sparse','Default'] aaaaaa a aaaaaa diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index 9b2df9773ea..25c7c37beca 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -1,4 +1,4 @@ - ReadFromMergeTree + ReadFromMergeTree (default.test_index) Indexes: MinMax Keys: @@ -32,6 +32,7 @@ Granules: 1/2 ----------------- "Node Type": "ReadFromMergeTree", + "Description": "default.test_index", "Indexes": [ { "Type": "MinMax", @@ -89,16 +90,16 @@ } ] ----------------- - ReadFromMergeTree + ReadFromMergeTree (default.test_index) ReadType: InOrder Parts: 1 Granules: 3 ----------------- - ReadFromMergeTree + ReadFromMergeTree (default.test_index) ReadType: InReverseOrder Parts: 1 Granules: 3 - ReadFromMergeTree + ReadFromMergeTree (default.idx) Indexes: PrimaryKey Keys: diff --git a/tests/queries/0_stateless/01825_type_json_btc.sh b/tests/queries/0_stateless/01825_type_json_btc.sh index bfa209b72d1..f11b952ae3b 100755 --- a/tests/queries/0_stateless/01825_type_json_btc.sh +++ b/tests/queries/0_stateless/01825_type_json_btc.sh @@ -5,11 +5,16 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh +user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +mkdir -p ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ +rm -rf ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME:?}/* +cp $CUR_DIR/data_json/btc_transactions.json ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ + ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS btc" ${CLICKHOUSE_CLIENT} -q "CREATE TABLE btc (data JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_object_type 1 -cat $CUR_DIR/data_json/btc_transactions.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO btc FORMAT JSONAsObject" +${CLICKHOUSE_CLIENT} -q "INSERT INTO btc SELECT * FROM file('${CLICKHOUSE_TEST_UNIQUE_NAME}/btc_transactions.json', 'JSONAsObject')" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM btc WHERE NOT ignore(*)" ${CLICKHOUSE_CLIENT} -q "DESC btc SETTINGS describe_extend_object_types = 1" @@ -21,3 +26,5 @@ ${CLICKHOUSE_CLIENT} -q "SELECT avg(length(data.inputs.prev_out.spending_outpoin ${CLICKHOUSE_CLIENT} -q "SELECT data.out.spending_outpoints AS outpoints FROM btc WHERE arrayExists(x -> notEmpty(x), outpoints)" ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS btc" + +rm ${user_files_path}/${CLICKHOUSE_TEST_UNIQUE_NAME}/btc_transactions.json diff --git a/tests/queries/0_stateless/01825_type_json_ghdata.reference b/tests/queries/0_stateless/01825_type_json_ghdata.reference index c11e9c2dfd9..3418121ad43 100644 --- a/tests/queries/0_stateless/01825_type_json_ghdata.reference +++ b/tests/queries/0_stateless/01825_type_json_ghdata.reference @@ -1,13 +1,11 @@ 5000 -900 +839 String 562 Array 134 UInt64 63 -Tuple 52 Int32 47 Int8 17 Int16 15 -Nested 9 Int64 1 leonardomso/33-js-concepts 3 ytdl-org/youtube-dl 3 diff --git a/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh b/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh index aa162911399..4e970f17d3a 100755 --- a/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh +++ b/tests/queries/0_stateless/01889_clickhouse_client_config_format.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-random-settings CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01889_tokenize.reference b/tests/queries/0_stateless/01889_tokenize.reference index 4dd6f323929..2861a183c33 100644 --- a/tests/queries/0_stateless/01889_tokenize.reference +++ b/tests/queries/0_stateless/01889_tokenize.reference @@ -1,8 +1,8 @@ ['It','is','quite','a','wonderful','day','isn','t','it'] ['There','is','so','much','to','learn'] -['22','00','email','yandex','ru'] +['22','00','email','tut','by'] ['Токенизация','каких','либо','других','языков'] ['It','is','quite','a','wonderful','day,','isn\'t','it?'] ['There','is....','so','much','to','learn!'] -['22:00','email@yandex.ru'] +['22:00','email@tut.by'] ['Токенизация','каких-либо','других','языков?'] diff --git a/tests/queries/0_stateless/01889_tokenize.sql b/tests/queries/0_stateless/01889_tokenize.sql index c9d29a8632b..287e439d2ce 100644 --- a/tests/queries/0_stateless/01889_tokenize.sql +++ b/tests/queries/0_stateless/01889_tokenize.sql @@ -2,10 +2,10 @@ SET allow_experimental_nlp_functions = 1; SELECT splitByNonAlpha('It is quite a wonderful day, isn\'t it?'); SELECT splitByNonAlpha('There is.... 
so much to learn!'); -SELECT splitByNonAlpha('22:00 email@yandex.ru'); +SELECT splitByNonAlpha('22:00 email@tut.by'); SELECT splitByNonAlpha('Токенизация каких-либо других языков?'); SELECT splitByWhitespace('It is quite a wonderful day, isn\'t it?'); SELECT splitByWhitespace('There is.... so much to learn!'); -SELECT splitByWhitespace('22:00 email@yandex.ru'); +SELECT splitByWhitespace('22:00 email@tut.by'); SELECT splitByWhitespace('Токенизация каких-либо других языков?'); diff --git a/tests/queries/0_stateless/01921_test_progress_bar.py b/tests/queries/0_stateless/01921_test_progress_bar.py index a95d5994607..3b0b429d396 100755 --- a/tests/queries/0_stateless/01921_test_progress_bar.py +++ b/tests/queries/0_stateless/01921_test_progress_bar.py @@ -4,16 +4,16 @@ import sys import signal CURDIR = os.path.dirname(os.path.realpath(__file__)) -sys.path.insert(0, os.path.join(CURDIR, 'helpers')) +sys.path.insert(0, os.path.join(CURDIR, "helpers")) from client import client, prompt, end_of_block log = None # uncomment the line below for debugging -#log=sys.stdout +# log=sys.stdout -with client(name='client1>', log=log) as client1: +with client(name="client1>", log=log) as client1: client1.expect(prompt) - client1.send('SELECT number FROM numbers(100) FORMAT Null') - client1.expect('Progress: 100\.00 rows, 800\.00 B.*' + end_of_block) - client1.expect('0 rows in set. Elapsed: [\\w]{1}\.[\\w]{3} sec.' + end_of_block) + client1.send("SELECT number FROM numbers(100) FORMAT Null") + client1.expect("Progress: 100\.00 rows, 800\.00 B.*" + end_of_block) + client1.expect("0 rows in set. Elapsed: [\\w]{1}\.[\\w]{3} sec." + end_of_block) diff --git a/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh b/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh index b6721d4a798..47cf6e06b48 100755 --- a/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh +++ b/tests/queries/0_stateless/01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer.sh @@ -12,6 +12,7 @@ mkdir -p ${user_files_path}/ cp $CUR_DIR/data_zstd/test_01946.zstd ${user_files_path}/ ${CLICKHOUSE_CLIENT} --multiline --multiquery --query " +set min_chunk_bytes_for_parallel_parsing=10485760; set max_read_buffer_size = 65536; set input_format_parallel_parsing = 0; select * from file('test_01946.zstd', 'JSONEachRow', 'foo String') limit 30 format Null; diff --git a/tests/queries/0_stateless/01955_clickhouse_benchmark_connection_hang.sh b/tests/queries/0_stateless/01955_clickhouse_benchmark_connection_hang.sh index e7871454192..7740ffcce7b 100755 --- a/tests/queries/0_stateless/01955_clickhouse_benchmark_connection_hang.sh +++ b/tests/queries/0_stateless/01955_clickhouse_benchmark_connection_hang.sh @@ -66,8 +66,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # frame #11: 0x000000000fffdfc4 clickhouse`main(argc_=, argv_=) + 356 at main.cpp:366 # frame #12: 0x00007ffff7de6d0a libc.so.6`__libc_start_main(main=(clickhouse`main at main.cpp:339), argc=7, argv=0x00007fffffffe1e8, init=, fini=, rtld_fini=, stack_end=0x00007fffffffe1d8) + 234 at libc-start.c:308 # frame #13: 0x000000000ffdc0aa clickhouse`_start + 42 -# -# [1]: https://clickhouse-test-reports.s3.yandex.net/26656/f17ca450ac991603e6400c7caef49c493ac69739/functional_stateless_tests_(ubsan).html#fail1 # Limit number of files to 50, and we will get EMFILE for some of socket() prlimit --nofile=50 $CLICKHOUSE_BENCHMARK --iterations 
1 --concurrency 50 --query 'select 1' 2>&1 diff --git a/tests/queries/0_stateless/02006_test_positional_arguments.reference b/tests/queries/0_stateless/02006_test_positional_arguments.reference index c5c5f115b0a..f86a1ab6c47 100644 --- a/tests/queries/0_stateless/02006_test_positional_arguments.reference +++ b/tests/queries/0_stateless/02006_test_positional_arguments.reference @@ -111,3 +111,9 @@ select substr('aaaaaaaaaaaaaa', 8) as a group by a; aaaaaaa select substr('aaaaaaaaaaaaaa', 8) as a group by substr('aaaaaaaaaaaaaa', 8); aaaaaaa +select b from (select 5 as a, 'Hello' as b order by a); +Hello +select b from (select 5 as a, 'Hello' as b group by a); +Hello +select b from (select 5 as a, 'Hello' as b order by 1); +Hello diff --git a/tests/queries/0_stateless/02006_test_positional_arguments.sql b/tests/queries/0_stateless/02006_test_positional_arguments.sql index 7442ca6bbf6..2a02cd03c93 100644 --- a/tests/queries/0_stateless/02006_test_positional_arguments.sql +++ b/tests/queries/0_stateless/02006_test_positional_arguments.sql @@ -45,3 +45,6 @@ explain syntax select plus(1, 1) as a group by a; select substr('aaaaaaaaaaaaaa', 8) as a group by a; select substr('aaaaaaaaaaaaaa', 8) as a group by substr('aaaaaaaaaaaaaa', 8); +select b from (select 5 as a, 'Hello' as b order by a); +select b from (select 5 as a, 'Hello' as b group by a); +select b from (select 5 as a, 'Hello' as b order by 1); diff --git a/tests/queries/0_stateless/02015_async_inserts_2.sh b/tests/queries/0_stateless/02015_async_inserts_2.sh index 90f5584d84e..fd20f846897 100755 --- a/tests/queries/0_stateless/02015_async_inserts_2.sh +++ b/tests/queries/0_stateless/02015_async_inserts_2.sh @@ -1,13 +1,14 @@ #!/usr/bin/env bash +# Tags: no-random-settings CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -url="${CLICKHOUSE_URL}&async_insert=1&wait_for_async_insert=1" +url="${CLICKHOUSE_URL}&async_insert=1&wait_for_async_insert=1&max_insert_threads=0&group_by_two_level_threshold=100000&group_by_two_level_threshold_bytes=50000000&distributed_aggregation_memory_efficient=1&fsync_metadata=1&priority=1&output_format_parallel_formatting=0&input_format_parallel_parsing=0&min_chunk_bytes_for_parallel_parsing=4031398&max_read_buffer_size=554729&prefer_localhost_replica=0&max_block_size=51672&max_threads=20" -${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS async_inserts" -${CLICKHOUSE_CLIENT} -q "CREATE TABLE async_inserts (id UInt32, s String) ENGINE = MergeTree ORDER BY id" +${CLICKHOUSE_CLIENT} --max_insert_threads=0 --group_by_two_level_threshold=100000 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=1 --fsync_metadata=1 --priority=1 --output_format_parallel_formatting=0 --input_format_parallel_parsing=0 --min_chunk_bytes_for_parallel_parsing=4031398 --max_read_buffer_size=554729 --prefer_localhost_replica=0 --max_block_size=51672 --max_threads=20 -q "DROP TABLE IF EXISTS async_inserts" +${CLICKHOUSE_CLIENT} --max_insert_threads=0 --group_by_two_level_threshold=100000 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=1 --fsync_metadata=1 --priority=1 --output_format_parallel_formatting=0 --input_format_parallel_parsing=0 --min_chunk_bytes_for_parallel_parsing=4031398 --max_read_buffer_size=554729 --prefer_localhost_replica=0 --max_block_size=51672 --max_threads=20 -q "CREATE TABLE async_inserts (id UInt32, s String) ENGINE = MergeTree ORDER BY id" ${CLICKHOUSE_CURL} -sS "$url" -d 'INSERT INTO async_inserts FORMAT CSV 1,"a" @@ -22,7 +23,7 @@ ${CLICKHOUSE_CURL} -sS "$url" -d 'INSERT INTO async_inserts FORMAT CSV wait -${CLICKHOUSE_CLIENT} -q "SELECT * FROM async_inserts ORDER BY id" -${CLICKHOUSE_CLIENT} -q "SELECT name, rows, level FROM system.parts WHERE table = 'async_inserts' AND database = '$CLICKHOUSE_DATABASE' ORDER BY name" +${CLICKHOUSE_CLIENT} --max_insert_threads=0 --group_by_two_level_threshold=100000 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=1 --fsync_metadata=1 --priority=1 --output_format_parallel_formatting=0 --input_format_parallel_parsing=0 --min_chunk_bytes_for_parallel_parsing=4031398 --max_read_buffer_size=554729 --prefer_localhost_replica=0 --max_block_size=51672 --max_threads=20 -q "SELECT * FROM async_inserts ORDER BY id" +${CLICKHOUSE_CLIENT} --max_insert_threads=0 --group_by_two_level_threshold=100000 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=1 --fsync_metadata=1 --priority=1 --output_format_parallel_formatting=0 --input_format_parallel_parsing=0 --min_chunk_bytes_for_parallel_parsing=4031398 --max_read_buffer_size=554729 --prefer_localhost_replica=0 --max_block_size=51672 --max_threads=20 -q "SELECT name, rows, level FROM system.parts WHERE table = 'async_inserts' AND database = '$CLICKHOUSE_DATABASE' ORDER BY name" -${CLICKHOUSE_CLIENT} -q "DROP TABLE async_inserts" +${CLICKHOUSE_CLIENT} --max_insert_threads=0 --group_by_two_level_threshold=100000 --group_by_two_level_threshold_bytes=50000000 --distributed_aggregation_memory_efficient=1 --fsync_metadata=1 --priority=1 --output_format_parallel_formatting=0 --input_format_parallel_parsing=0 --min_chunk_bytes_for_parallel_parsing=4031398 --max_read_buffer_size=554729 --prefer_localhost_replica=0 --max_block_size=51672 
--max_threads=20 -q "DROP TABLE async_inserts" diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index cecdd0498b1..a6a184b3d22 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -34,7 +34,7 @@ CREATE TABLE system.numbers_mt\n(\n `number` UInt64\n)\nENGINE = SystemNumber CREATE TABLE system.one\n(\n `dummy` UInt8\n)\nENGINE = SystemOne()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.part_moves_between_shards\n(\n `database` String,\n `table` String,\n `task_name` String,\n `task_uuid` UUID,\n `create_time` DateTime,\n `part_name` String,\n `part_uuid` UUID,\n `to_shard` String,\n `dst_part_name` String,\n `update_time` DateTime,\n `state` String,\n `rollback` UInt8,\n `num_tries` UInt32,\n `last_exception` String\n)\nENGINE = SystemShardMoves()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.parts\n(\n `partition` String,\n `name` String,\n `uuid` UUID,\n `part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `secondary_indices_compressed_bytes` UInt64,\n `secondary_indices_uncompressed_bytes` UInt64,\n `secondary_indices_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `is_frozen` UInt8,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `hash_of_all_files` String,\n `hash_of_uncompressed_files` String,\n `uncompressed_hash_of_compressed_files` String,\n `delete_ttl_info_min` DateTime,\n `delete_ttl_info_max` DateTime,\n `move_ttl_info.expression` Array(String),\n `move_ttl_info.min` Array(DateTime),\n `move_ttl_info.max` Array(DateTime),\n `default_compression_codec` String,\n `recompression_ttl_info.expression` Array(String),\n `recompression_ttl_info.min` Array(DateTime),\n `recompression_ttl_info.max` Array(DateTime),\n `group_by_ttl_info.expression` Array(String),\n `group_by_ttl_info.min` Array(DateTime),\n `group_by_ttl_info.max` Array(DateTime),\n `rows_where_ttl_info.expression` Array(String),\n `rows_where_ttl_info.min` Array(DateTime),\n `rows_where_ttl_info.max` Array(DateTime),\n `projections` Array(String),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' -CREATE TABLE system.parts_columns\n(\n `partition` String,\n `name` String,\n `uuid` UUID,\n `part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `database` String,\n `table` 
String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `column` String,\n `type` String,\n `column_position` UInt64,\n `default_kind` String,\n `default_expression` String,\n `column_bytes_on_disk` UInt64,\n `column_data_compressed_bytes` UInt64,\n `column_data_uncompressed_bytes` UInt64,\n `column_marks_bytes` UInt64,\n `serialization_kind` String,\n `subcolumns.names` Array(String),\n `subcolumns.types` Array(String),\n `subcolumns.serializations` Array(String),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemPartsColumns()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' +CREATE TABLE system.parts_columns\n(\n `partition` String,\n `name` String,\n `uuid` UUID,\n `part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `column` String,\n `type` String,\n `column_position` UInt64,\n `default_kind` String,\n `default_expression` String,\n `column_bytes_on_disk` UInt64,\n `column_data_compressed_bytes` UInt64,\n `column_data_uncompressed_bytes` UInt64,\n `column_marks_bytes` UInt64,\n `serialization_kind` String,\n `subcolumns.names` Array(String),\n `subcolumns.types` Array(String),\n `subcolumns.serializations` Array(String),\n `subcolumns.bytes_on_disk` Array(UInt64),\n `subcolumns.data_compressed_bytes` Array(UInt64),\n `subcolumns.data_uncompressed_bytes` Array(UInt64),\n `subcolumns.marks_bytes` Array(UInt64),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemPartsColumns()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.privileges\n(\n `privilege` Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, 
\'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'MOVE PARTITION BETWEEN SHARDS\' = 62, \'CREATE USER\' = 63, \'ALTER USER\' = 64, \'DROP USER\' = 65, \'CREATE ROLE\' = 66, \'ALTER ROLE\' = 67, \'DROP ROLE\' = 68, \'ROLE ADMIN\' = 69, \'CREATE ROW POLICY\' = 70, \'ALTER ROW POLICY\' = 71, \'DROP ROW POLICY\' = 72, \'CREATE QUOTA\' = 73, \'ALTER QUOTA\' = 74, \'DROP QUOTA\' = 75, \'CREATE SETTINGS PROFILE\' = 76, \'ALTER SETTINGS PROFILE\' = 77, \'DROP SETTINGS PROFILE\' = 78, \'SHOW USERS\' = 79, \'SHOW ROLES\' = 80, \'SHOW ROW POLICIES\' = 81, \'SHOW QUOTAS\' = 82, \'SHOW SETTINGS PROFILES\' = 83, \'SHOW ACCESS\' = 84, \'ACCESS MANAGEMENT\' = 85, \'SYSTEM SHUTDOWN\' = 86, \'SYSTEM DROP DNS CACHE\' = 87, \'SYSTEM DROP MARK CACHE\' = 88, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 89, \'SYSTEM DROP MMAP CACHE\' = 90, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 91, \'SYSTEM DROP CACHE\' = 92, \'SYSTEM RELOAD CONFIG\' = 93, \'SYSTEM RELOAD SYMBOLS\' = 94, \'SYSTEM RELOAD DICTIONARY\' = 95, \'SYSTEM RELOAD MODEL\' = 96, \'SYSTEM RELOAD FUNCTION\' = 97, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 98, \'SYSTEM RELOAD\' = 99, \'SYSTEM RESTART DISK\' = 100, \'SYSTEM MERGES\' = 101, \'SYSTEM TTL MERGES\' = 102, \'SYSTEM FETCHES\' = 103, \'SYSTEM MOVES\' = 104, \'SYSTEM DISTRIBUTED SENDS\' = 105, \'SYSTEM REPLICATED SENDS\' = 106, \'SYSTEM SENDS\' = 107, \'SYSTEM REPLICATION QUEUES\' = 108, \'SYSTEM DROP REPLICA\' = 109, \'SYSTEM SYNC REPLICA\' = 110, \'SYSTEM RESTART REPLICA\' = 111, \'SYSTEM RESTORE REPLICA\' = 112, \'SYSTEM FLUSH DISTRIBUTED\' = 113, \'SYSTEM FLUSH LOGS\' = 114, \'SYSTEM FLUSH\' = 115, \'SYSTEM THREAD FUZZER\' = 116, \'SYSTEM\' = 117, \'dictGet\' = 118, \'addressToLine\' = 119, \'addressToLineWithInlines\' = 120, \'addressToSymbol\' = 121, \'demangle\' = 122, \'INTROSPECTION\' = 123, \'FILE\' = 124, \'URL\' = 125, \'REMOTE\' = 126, \'MONGO\' = 127, \'MYSQL\' = 128, \'POSTGRES\' = 129, \'SQLITE\' = 130, \'ODBC\' = 131, \'JDBC\' = 132, \'HDFS\' = 133, \'S3\' = 134, \'SOURCES\' = 135, \'ALL\' = 136, \'NONE\' = 137),\n `aliases` Array(String),\n `level` Nullable(Enum8(\'GLOBAL\' = 0, \'DATABASE\' = 1, \'TABLE\' = 2, \'DICTIONARY\' = 3, \'VIEW\' = 4, \'COLUMN\' = 5)),\n `parent_group` Nullable(Enum16(\'SHOW DATABASES\' = 0, \'SHOW TABLES\' = 1, \'SHOW COLUMNS\' = 2, \'SHOW DICTIONARIES\' = 3, \'SHOW\' = 4, \'SELECT\' = 5, \'INSERT\' = 6, \'ALTER UPDATE\' = 7, \'ALTER DELETE\' = 8, \'ALTER ADD COLUMN\' = 9, \'ALTER MODIFY COLUMN\' = 10, \'ALTER DROP COLUMN\' = 11, \'ALTER COMMENT COLUMN\' = 12, \'ALTER CLEAR COLUMN\' = 13, \'ALTER RENAME COLUMN\' = 14, \'ALTER MATERIALIZE COLUMN\' = 15, \'ALTER COLUMN\' = 16, \'ALTER MODIFY COMMENT\' = 17, \'ALTER ORDER BY\' = 18, \'ALTER SAMPLE BY\' = 19, \'ALTER ADD INDEX\' = 20, \'ALTER DROP INDEX\' = 21, \'ALTER MATERIALIZE INDEX\' = 22, \'ALTER CLEAR INDEX\' = 23, \'ALTER INDEX\' = 24, \'ALTER ADD PROJECTION\' = 25, \'ALTER DROP PROJECTION\' = 26, \'ALTER MATERIALIZE PROJECTION\' = 27, \'ALTER CLEAR PROJECTION\' = 28, \'ALTER PROJECTION\' = 29, \'ALTER ADD CONSTRAINT\' = 30, \'ALTER DROP CONSTRAINT\' = 31, \'ALTER CONSTRAINT\' = 32, \'ALTER TTL\' = 33, \'ALTER MATERIALIZE TTL\' = 34, \'ALTER SETTINGS\' = 35, \'ALTER MOVE PARTITION\' = 36, \'ALTER FETCH PARTITION\' = 37, \'ALTER FREEZE PARTITION\' = 38, \'ALTER DATABASE SETTINGS\' = 39, \'ALTER TABLE\' = 40, \'ALTER DATABASE\' = 41, \'ALTER VIEW 
REFRESH\' = 42, \'ALTER VIEW MODIFY QUERY\' = 43, \'ALTER VIEW\' = 44, \'ALTER\' = 45, \'CREATE DATABASE\' = 46, \'CREATE TABLE\' = 47, \'CREATE VIEW\' = 48, \'CREATE DICTIONARY\' = 49, \'CREATE TEMPORARY TABLE\' = 50, \'CREATE FUNCTION\' = 51, \'CREATE\' = 52, \'DROP DATABASE\' = 53, \'DROP TABLE\' = 54, \'DROP VIEW\' = 55, \'DROP DICTIONARY\' = 56, \'DROP FUNCTION\' = 57, \'DROP\' = 58, \'TRUNCATE\' = 59, \'OPTIMIZE\' = 60, \'KILL QUERY\' = 61, \'MOVE PARTITION BETWEEN SHARDS\' = 62, \'CREATE USER\' = 63, \'ALTER USER\' = 64, \'DROP USER\' = 65, \'CREATE ROLE\' = 66, \'ALTER ROLE\' = 67, \'DROP ROLE\' = 68, \'ROLE ADMIN\' = 69, \'CREATE ROW POLICY\' = 70, \'ALTER ROW POLICY\' = 71, \'DROP ROW POLICY\' = 72, \'CREATE QUOTA\' = 73, \'ALTER QUOTA\' = 74, \'DROP QUOTA\' = 75, \'CREATE SETTINGS PROFILE\' = 76, \'ALTER SETTINGS PROFILE\' = 77, \'DROP SETTINGS PROFILE\' = 78, \'SHOW USERS\' = 79, \'SHOW ROLES\' = 80, \'SHOW ROW POLICIES\' = 81, \'SHOW QUOTAS\' = 82, \'SHOW SETTINGS PROFILES\' = 83, \'SHOW ACCESS\' = 84, \'ACCESS MANAGEMENT\' = 85, \'SYSTEM SHUTDOWN\' = 86, \'SYSTEM DROP DNS CACHE\' = 87, \'SYSTEM DROP MARK CACHE\' = 88, \'SYSTEM DROP UNCOMPRESSED CACHE\' = 89, \'SYSTEM DROP MMAP CACHE\' = 90, \'SYSTEM DROP COMPILED EXPRESSION CACHE\' = 91, \'SYSTEM DROP CACHE\' = 92, \'SYSTEM RELOAD CONFIG\' = 93, \'SYSTEM RELOAD SYMBOLS\' = 94, \'SYSTEM RELOAD DICTIONARY\' = 95, \'SYSTEM RELOAD MODEL\' = 96, \'SYSTEM RELOAD FUNCTION\' = 97, \'SYSTEM RELOAD EMBEDDED DICTIONARIES\' = 98, \'SYSTEM RELOAD\' = 99, \'SYSTEM RESTART DISK\' = 100, \'SYSTEM MERGES\' = 101, \'SYSTEM TTL MERGES\' = 102, \'SYSTEM FETCHES\' = 103, \'SYSTEM MOVES\' = 104, \'SYSTEM DISTRIBUTED SENDS\' = 105, \'SYSTEM REPLICATED SENDS\' = 106, \'SYSTEM SENDS\' = 107, \'SYSTEM REPLICATION QUEUES\' = 108, \'SYSTEM DROP REPLICA\' = 109, \'SYSTEM SYNC REPLICA\' = 110, \'SYSTEM RESTART REPLICA\' = 111, \'SYSTEM RESTORE REPLICA\' = 112, \'SYSTEM FLUSH DISTRIBUTED\' = 113, \'SYSTEM FLUSH LOGS\' = 114, \'SYSTEM FLUSH\' = 115, \'SYSTEM THREAD FUZZER\' = 116, \'SYSTEM\' = 117, \'dictGet\' = 118, \'addressToLine\' = 119, \'addressToLineWithInlines\' = 120, \'addressToSymbol\' = 121, \'demangle\' = 122, \'INTROSPECTION\' = 123, \'FILE\' = 124, \'URL\' = 125, \'REMOTE\' = 126, \'MONGO\' = 127, \'MYSQL\' = 128, \'POSTGRES\' = 129, \'SQLITE\' = 130, \'ODBC\' = 131, \'JDBC\' = 132, \'HDFS\' = 133, \'S3\' = 134, \'SOURCES\' = 135, \'ALL\' = 136, \'NONE\' = 137))\n)\nENGINE = SystemPrivileges()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.processes\n(\n `is_initial_query` UInt8,\n `user` String,\n `query_id` String,\n `address` IPv6,\n `port` UInt16,\n `initial_user` String,\n `initial_query_id` String,\n `initial_address` IPv6,\n `initial_port` UInt16,\n `interface` UInt8,\n `os_user` String,\n `client_hostname` String,\n `client_name` String,\n `client_revision` UInt64,\n `client_version_major` UInt64,\n `client_version_minor` UInt64,\n `client_version_patch` UInt64,\n `http_method` UInt8,\n `http_user_agent` String,\n `http_referer` String,\n `forwarded_for` String,\n `quota_key` String,\n `distributed_depth` UInt64,\n `elapsed` Float64,\n `is_cancelled` UInt8,\n `read_rows` UInt64,\n `read_bytes` UInt64,\n `total_rows_approx` UInt64,\n `written_rows` UInt64,\n `written_bytes` UInt64,\n `memory_usage` Int64,\n `peak_memory_usage` Int64,\n `query` String,\n `thread_ids` Array(UInt64),\n `ProfileEvents` Map(String, UInt64),\n `Settings` Map(String, String),\n `current_database` String,\n `ProfileEvents.Names` 
Array(String),\n `ProfileEvents.Values` Array(UInt64),\n `Settings.Names` Array(String),\n `Settings.Values` Array(String)\n)\nENGINE = SystemProcesses()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' CREATE TABLE system.projection_parts\n(\n `partition` String,\n `name` String,\n `part_type` String,\n `parent_name` String,\n `parent_uuid` UUID,\n `parent_part_type` String,\n `active` UInt8,\n `marks` UInt64,\n `rows` UInt64,\n `bytes_on_disk` UInt64,\n `data_compressed_bytes` UInt64,\n `data_uncompressed_bytes` UInt64,\n `marks_bytes` UInt64,\n `parent_marks` UInt64,\n `parent_rows` UInt64,\n `parent_bytes_on_disk` UInt64,\n `parent_data_compressed_bytes` UInt64,\n `parent_data_uncompressed_bytes` UInt64,\n `parent_marks_bytes` UInt64,\n `modification_time` DateTime,\n `remove_time` DateTime,\n `refcount` UInt32,\n `min_date` Date,\n `max_date` Date,\n `min_time` DateTime,\n `max_time` DateTime,\n `partition_id` String,\n `min_block_number` Int64,\n `max_block_number` Int64,\n `level` UInt32,\n `data_version` UInt64,\n `primary_key_bytes_in_memory` UInt64,\n `primary_key_bytes_in_memory_allocated` UInt64,\n `is_frozen` UInt8,\n `database` String,\n `table` String,\n `engine` String,\n `disk_name` String,\n `path` String,\n `hash_of_all_files` String,\n `hash_of_uncompressed_files` String,\n `uncompressed_hash_of_compressed_files` String,\n `delete_ttl_info_min` DateTime,\n `delete_ttl_info_max` DateTime,\n `move_ttl_info.expression` Array(String),\n `move_ttl_info.min` Array(DateTime),\n `move_ttl_info.max` Array(DateTime),\n `default_compression_codec` String,\n `recompression_ttl_info.expression` Array(String),\n `recompression_ttl_info.min` Array(DateTime),\n `recompression_ttl_info.max` Array(DateTime),\n `group_by_ttl_info.expression` Array(String),\n `group_by_ttl_info.min` Array(DateTime),\n `group_by_ttl_info.max` Array(DateTime),\n `rows_where_ttl_info.expression` Array(String),\n `rows_where_ttl_info.min` Array(DateTime),\n `rows_where_ttl_info.max` Array(DateTime),\n `bytes` UInt64,\n `marks_size` UInt64\n)\nENGINE = SystemProjectionParts()\nCOMMENT \'SYSTEM TABLE is built on the fly.\' diff --git a/tests/queries/0_stateless/02118_deserialize_whole_text.sh b/tests/queries/0_stateless/02118_deserialize_whole_text.sh index fe9256df329..e9f35582f15 100755 --- a/tests/queries/0_stateless/02118_deserialize_whole_text.sh +++ b/tests/queries/0_stateless/02118_deserialize_whole_text.sh @@ -41,16 +41,16 @@ $CLICKHOUSE_CLIENT -q "SELECT * FROM file('data_02118', 'CSV', 'x IPv4')" 2>&1 | echo "[\"255.255.255.255trash\"]" > $DATA_FILE $CLICKHOUSE_CLIENT -q "SELECT * FROM file('data_02118', 'JSONCompactEachRow', 'x IPv4')" 2>&1 | grep -F -q "UNEXPECTED_DATA_AFTER_PARSED_VALUE" && echo 'OK' || echo 'FAIL' -echo "[\"2a02:6ba8:2da1:40cd:31db:f9f1:fc3d:80b1trash\"]" > $DATA_FILE +echo "[\"0000:0000:0000:0000:0000:ffff:192.168.100.228b1trash\"]" > $DATA_FILE $CLICKHOUSE_CLIENT -q "SELECT * FROM file('data_02118', 'JSONCompactStringsEachRow', 'x IPv6')" 2>&1 | grep -F -q "UNEXPECTED_DATA_AFTER_PARSED_VALUE" && echo 'OK' || echo 'FAIL' -echo "2a02:6ba8:2da1:40cd:31db:f9f1:fc3d:80b1trash" > $DATA_FILE +echo "0000:0000:0000:0000:0000:ffff:192.168.100.228b1trash" > $DATA_FILE $CLICKHOUSE_CLIENT -q "SELECT * FROM file('data_02118', 'TSV', 'x IPv6')" 2>&1 | grep -F -q "UNEXPECTED_DATA_AFTER_PARSED_VALUE" && echo 'OK' || echo 'FAIL' -echo "2a02:6ba8:2da1:40cd:31db:f9f1:fc3d:80b1trash" > $DATA_FILE +echo "0000:0000:0000:0000:0000:ffff:192.168.100.228b1trash" > $DATA_FILE $CLICKHOUSE_CLIENT -q "SELECT * 
FROM file('data_02118', 'CSV', 'x IPv6')" 2>&1 | grep -F -q "UNEXPECTED_DATA_AFTER_PARSED_VALUE" && echo 'OK' || echo 'FAIL' -echo "[\"2a02:6ba8:2da1:40cd:31db:f9f1:fc3d:80b1trash\"]" > $DATA_FILE +echo "[\"0000:0000:0000:0000:0000:ffff:192.168.100.228b1trash\"]" > $DATA_FILE $CLICKHOUSE_CLIENT -q "SELECT * FROM file('data_02118', 'JSONCompactEachRow', 'x IPv6')" 2>&1 | grep -F -q "UNEXPECTED_DATA_AFTER_PARSED_VALUE" && echo 'OK' || echo 'FAIL' echo "[\"{1:2, 2:3}trash\"]" > $DATA_FILE diff --git a/tests/queries/0_stateless/02126_url_auth.python b/tests/queries/0_stateless/02126_url_auth.python index 60009624c76..57b16fb413e 100644 --- a/tests/queries/0_stateless/02126_url_auth.python +++ b/tests/queries/0_stateless/02126_url_auth.python @@ -121,18 +121,14 @@ class CSVHTTPServer(BaseHTTPRequestHandler): class HTTPServerV6(HTTPServer): address_family = socket.AF_INET6 -def start_server(requests_amount): +def start_server(): if IS_IPV6: httpd = HTTPServerV6(HTTP_SERVER_ADDRESS, CSVHTTPServer) else: httpd = HTTPServer(HTTP_SERVER_ADDRESS, CSVHTTPServer) - def real_func(): - for i in range(requests_amount): - httpd.handle_request() - - t = threading.Thread(target=real_func) - return t + t = threading.Thread(target=httpd.serve_forever) + return t, httpd # test section @@ -217,9 +213,10 @@ def main(): query : 'hello, world', } - t = start_server(len(list(select_requests_url_auth.keys()))) + t, httpd = start_server() t.start() test_select(requests=list(select_requests_url_auth.keys()), answers=list(select_requests_url_auth.values()), test_data=test_data) + httpd.shutdown() t.join() print("PASSED") diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql index 7d0e9111d9c..8fb11ac383c 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql @@ -1,3 +1,5 @@ +SET max_threads=0; + DROP TABLE IF EXISTS t_read_in_order; CREATE TABLE t_read_in_order(date Date, i UInt64, v UInt64) diff --git a/tests/queries/0_stateless/02161_addressToLineWithInlines.sql b/tests/queries/0_stateless/02161_addressToLineWithInlines.sql index 12cae6af189..b6b497b4b55 100644 --- a/tests/queries/0_stateless/02161_addressToLineWithInlines.sql +++ b/tests/queries/0_stateless/02161_addressToLineWithInlines.sql @@ -1,6 +1,6 @@ -- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug, no-cpu-aarch64 - +SET allow_introspection_functions = 0; SELECT addressToLineWithInlines(1); -- { serverError 446 } SET allow_introspection_functions = 1; diff --git a/tests/queries/0_stateless/02177_issue_31009.sql b/tests/queries/0_stateless/02177_issue_31009.sql index f4a65e3a3a0..ab4aec60ce4 100644 --- a/tests/queries/0_stateless/02177_issue_31009.sql +++ b/tests/queries/0_stateless/02177_issue_31009.sql @@ -1,5 +1,10 @@ -- Tags: long +SET max_threads=0; + +DROP TABLE IF EXISTS left; +DROP TABLE IF EXISTS right; + CREATE TABLE left ( key UInt32, value String ) ENGINE = MergeTree ORDER BY key; CREATE TABLE right ( key UInt32, value String ) ENGINE = MergeTree ORDER BY tuple(); diff --git a/tests/queries/0_stateless/02205_HTTP_user_agent.python b/tests/queries/0_stateless/02205_HTTP_user_agent.python index 8fb9cea0845..397e06cbe82 100644 --- a/tests/queries/0_stateless/02205_HTTP_user_agent.python +++ b/tests/queries/0_stateless/02205_HTTP_user_agent.python @@ -124,7 +124,8 @@ def test_select(): check_answers(query, EXPECTED_ANSWER) def main(): - t = start_server(1) + # HEAD + GET + 
t = start_server(2) t.start() test_select() t.join() diff --git a/tests/queries/0_stateless/02206_information_schema_show_database.reference b/tests/queries/0_stateless/02206_information_schema_show_database.reference index 551186fa0ab..af437aca989 100644 --- a/tests/queries/0_stateless/02206_information_schema_show_database.reference +++ b/tests/queries/0_stateless/02206_information_schema_show_database.reference @@ -1 +1,4 @@ CREATE DATABASE INFORMATION_SCHEMA\nENGINE = Memory +CREATE VIEW INFORMATION_SCHEMA.COLUMNS\n(\n `table_catalog` String,\n `table_schema` String,\n `table_name` String,\n `column_name` String,\n `ordinal_position` UInt64,\n `column_default` String,\n `is_nullable` UInt8,\n `data_type` String,\n `character_maximum_length` Nullable(UInt64),\n `character_octet_length` Nullable(UInt64),\n `numeric_precision` Nullable(UInt64),\n `numeric_precision_radix` Nullable(UInt64),\n `numeric_scale` Nullable(UInt64),\n `datetime_precision` Nullable(UInt64),\n `character_set_catalog` Nullable(String),\n `character_set_schema` Nullable(String),\n `character_set_name` Nullable(String),\n `collation_catalog` Nullable(String),\n `collation_schema` Nullable(String),\n `collation_name` Nullable(String),\n `domain_catalog` Nullable(String),\n `domain_schema` Nullable(String),\n `domain_name` Nullable(String),\n `column_comment` String,\n `column_type` String,\n `TABLE_CATALOG` String ALIAS table_catalog,\n `TABLE_SCHEMA` String ALIAS table_schema,\n `TABLE_NAME` String ALIAS table_name,\n `COLUMN_NAME` String ALIAS column_name,\n `ORDINAL_POSITION` UInt64 ALIAS ordinal_position,\n `COLUMN_DEFAULT` String ALIAS column_default,\n `IS_NULLABLE` UInt8 ALIAS is_nullable,\n `DATA_TYPE` String ALIAS data_type,\n `CHARACTER_MAXIMUM_LENGTH` Nullable(UInt64) ALIAS character_maximum_length,\n `CHARACTER_OCTET_LENGTH` Nullable(UInt64) ALIAS character_octet_length,\n `NUMERIC_PRECISION` Nullable(UInt64) ALIAS numeric_precision,\n `NUMERIC_PRECISION_RADIX` Nullable(UInt64) ALIAS numeric_precision_radix,\n `NUMERIC_SCALE` Nullable(UInt64) ALIAS numeric_scale,\n `DATETIME_PRECISION` Nullable(UInt64) ALIAS datetime_precision,\n `CHARACTER_SET_CATALOG` Nullable(String) ALIAS character_set_catalog,\n `CHARACTER_SET_SCHEMA` Nullable(String) ALIAS character_set_schema,\n `CHARACTER_SET_NAME` Nullable(String) ALIAS character_set_name,\n `COLLATION_CATALOG` Nullable(String) ALIAS collation_catalog,\n `COLLATION_SCHEMA` Nullable(String) ALIAS collation_schema,\n `COLLATION_NAME` Nullable(String) ALIAS collation_name,\n `DOMAIN_CATALOG` Nullable(String) ALIAS domain_catalog,\n `DOMAIN_SCHEMA` Nullable(String) ALIAS domain_schema,\n `DOMAIN_NAME` Nullable(String) ALIAS domain_name,\n `COLUMN_COMMENT` String ALIAS column_comment,\n `COLUMN_TYPE` String ALIAS column_type\n) AS\nSELECT\n database AS table_catalog,\n database AS table_schema,\n table AS table_name,\n name AS column_name,\n position AS ordinal_position,\n default_expression AS column_default,\n type LIKE \'Nullable(%)\' AS is_nullable,\n type AS data_type,\n character_octet_length AS character_maximum_length,\n character_octet_length,\n numeric_precision,\n numeric_precision_radix,\n numeric_scale,\n datetime_precision,\n NULL AS character_set_catalog,\n NULL AS character_set_schema,\n NULL AS character_set_name,\n NULL AS collation_catalog,\n NULL AS collation_schema,\n NULL AS collation_name,\n NULL AS domain_catalog,\n NULL AS domain_schema,\n NULL AS domain_name,\n comment AS column_comment,\n type AS column_type\nFROM system.columns +CREATE VIEW 
INFORMATION_SCHEMA.TABLES (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` Enum8(\'BASE TABLE\' = 1, \'VIEW\' = 2, \'FOREIGN TABLE\' = 3, \'LOCAL TEMPORARY\' = 4, \'SYSTEM VIEW\' = 5), `TABLE_CATALOG` String ALIAS table_catalog, `TABLE_SCHEMA` String ALIAS table_schema, `TABLE_NAME` String ALIAS table_name, `TABLE_TYPE` Enum8(\'BASE TABLE\' = 1, \'VIEW\' = 2, \'FOREIGN TABLE\' = 3, \'LOCAL TEMPORARY\' = 4, \'SYSTEM VIEW\' = 5) ALIAS table_type) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, 4, engine LIKE \'%View\', 2, engine LIKE \'System%\', 5, has_own_data = 0, 3, 1) AS table_type FROM system.tables +CREATE VIEW information_schema.tables (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` Enum8(\'BASE TABLE\' = 1, \'VIEW\' = 2, \'FOREIGN TABLE\' = 3, \'LOCAL TEMPORARY\' = 4, \'SYSTEM VIEW\' = 5), `TABLE_CATALOG` String ALIAS table_catalog, `TABLE_SCHEMA` String ALIAS table_schema, `TABLE_NAME` String ALIAS table_name, `TABLE_TYPE` Enum8(\'BASE TABLE\' = 1, \'VIEW\' = 2, \'FOREIGN TABLE\' = 3, \'LOCAL TEMPORARY\' = 4, \'SYSTEM VIEW\' = 5) ALIAS table_type) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, 4, engine LIKE \'%View\', 2, engine LIKE \'System%\', 5, has_own_data = 0, 3, 1) AS table_type FROM system.tables diff --git a/tests/queries/0_stateless/02206_information_schema_show_database.sql b/tests/queries/0_stateless/02206_information_schema_show_database.sql index de5ca495e2e..91a8a0d1dea 100644 --- a/tests/queries/0_stateless/02206_information_schema_show_database.sql +++ b/tests/queries/0_stateless/02206_information_schema_show_database.sql @@ -1 +1,3 @@ SHOW CREATE DATABASE INFORMATION_SCHEMA; +SHOW CREATE INFORMATION_SCHEMA.COLUMNS; +SELECT create_table_query FROM system.tables WHERE database ILIKE 'INFORMATION_SCHEMA' AND table ILIKE 'TABLES'; -- supress style check: database = currentDatabase() diff --git a/tests/queries/0_stateless/02207_subseconds_intervals.reference b/tests/queries/0_stateless/02207_subseconds_intervals.reference new file mode 100644 index 00000000000..f7b91ff48b8 --- /dev/null +++ b/tests/queries/0_stateless/02207_subseconds_intervals.reference @@ -0,0 +1,62 @@ +test intervals +- test nanoseconds +1980-12-12 12:12:12.123456789 +1980-12-12 12:12:12.123456700 +1980-12-12 12:12:12.123456789 +1930-12-12 12:12:12.123456789 +1930-12-12 12:12:12.123456700 +2220-12-12 12:12:12.123456789 +2220-12-12 12:12:12.123456700 +- test microseconds +1980-12-12 12:12:12.123456 +1980-12-12 12:12:12.123400 +1980-12-12 12:12:12.123456 +1980-12-12 12:12:12.123456 +1930-12-12 12:12:12.123456 +1930-12-12 12:12:12.123400 +1930-12-12 12:12:12.123457 +2220-12-12 12:12:12.123456 +2220-12-12 12:12:12.123400 +2220-12-12 12:12:12.123456 +- test milliseconds +1980-12-12 12:12:12.123 +1980-12-12 12:12:12.120 +1980-12-12 12:12:12.123 +1980-12-12 12:12:12.123 +1930-12-12 12:12:12.123 +1930-12-12 12:12:12.120 +1930-12-12 12:12:12.124 +2220-12-12 12:12:12.123 +2220-12-12 12:12:12.120 +2220-12-12 12:12:12.123 +test add[...]seconds() +- test nanoseconds +1980-12-12 12:12:12.123456790 +1980-12-12 12:12:12.123456701 +1980-12-12 12:12:12.123456790 +1930-12-12 12:12:12.123456788 +1930-12-12 12:12:12.123456699 +2220-12-12 12:12:12.123456790 +2220-12-12 12:12:12.123456701 +- test microseconds +1980-12-12 12:12:12.123457 +1980-12-12 12:12:12.123401 +1980-12-12 12:12:12.12345778 +1980-12-12 12:12:12.123457 +1930-12-12 
12:12:12.123455 +1930-12-12 12:12:12.123399 +1930-12-12 12:12:12.12345578 +2220-12-12 12:12:12.123457 +2220-12-12 12:12:12.123401 +2220-12-12 12:12:12.12345778 +- test milliseconds +1980-12-12 12:12:12.124 +1980-12-12 12:12:12.121 +1980-12-12 12:12:12.124456 +1980-12-12 12:12:12.124 +1930-12-12 12:12:12.122 +1930-12-12 12:12:12.119 +1930-12-12 12:12:12.122456 +2220-12-12 12:12:12.124 +2220-12-12 12:12:12.121 +2220-12-12 12:12:12.124456 diff --git a/tests/queries/0_stateless/02207_subseconds_intervals.sql b/tests/queries/0_stateless/02207_subseconds_intervals.sql new file mode 100644 index 00000000000..a7ce03d9330 --- /dev/null +++ b/tests/queries/0_stateless/02207_subseconds_intervals.sql @@ -0,0 +1,94 @@ +SELECT 'test intervals'; + +SELECT '- test nanoseconds'; +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.123456789', 9), INTERVAL 1 NANOSECOND); -- In normal range, source scale matches result +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.1234567', 7), INTERVAL 1 NANOSECOND); -- In normal range, source scale less than result + +select toStartOfInterval(a, INTERVAL 1 NANOSECOND) from ( select toDateTime64('1980-12-12 12:12:12.123456789', 9) AS a ); -- Non-constant argument + +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.123456789', 9), INTERVAL 1 NANOSECOND); -- Below normal range, source scale matches result +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.1234567', 7), INTERVAL 1 NANOSECOND); -- Below normal range, source scale less than result + +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.123456789', 9), INTERVAL 1 NANOSECOND); -- Above normal range, source scale matches result +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.1234567', 7), INTERVAL 1 NANOSECOND); -- Above normal range, source scale less than result + + +SELECT '- test microseconds'; +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.123456', 6), INTERVAL 1 MICROSECOND); -- In normal range, source scale matches result +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.1234', 4), INTERVAL 1 MICROSECOND); -- In normal range, source scale less than result +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.12345678', 8), INTERVAL 1 MICROSECOND); -- In normal range, source scale greater than result + +select toStartOfInterval(a, INTERVAL 1 MICROSECOND) from ( select toDateTime64('1980-12-12 12:12:12.12345678', 8) AS a ); -- Non-constant argument + +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.123456', 6), INTERVAL 1 MICROSECOND); -- Below normal range, source scale matches result +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.1234', 4), INTERVAL 1 MICROSECOND); -- Below normal range, source scale less than result +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.12345678', 8), INTERVAL 1 MICROSECOND); -- Below normal range, source scale greater than result + + +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.123456', 6), INTERVAL 1 MICROSECOND); -- Above normal range, source scale matches result +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.1234', 4), INTERVAL 1 MICROSECOND); -- Above normal range, source scale less than result +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.12345678', 8), INTERVAL 1 MICROSECOND); -- Above normal range, source scale greater than result + + +SELECT '- test milliseconds'; +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.123', 3), INTERVAL 1 MILLISECOND); -- In normal range, source scale matches result 
+select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.12', 2), INTERVAL 1 MILLISECOND); -- In normal range, source scale less than result +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.123456', 6), INTERVAL 1 MILLISECOND); -- In normal range, source scale greater than result + +select toStartOfInterval(a, INTERVAL 1 MILLISECOND) from ( select toDateTime64('1980-12-12 12:12:12.123456', 6) AS a ); -- Non-constant argument + +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.123', 3), INTERVAL 1 MILLISECOND); -- Below normal range, source scale matches result +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.12', 2), INTERVAL 1 MILLISECOND); -- Below normal range, source scale less than result +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.123456', 6), INTERVAL 1 MILLISECOND); -- Below normal range, source scale greater than result + +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.123', 3), INTERVAL 1 MILLISECOND); -- Above normal range, source scale matches result +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.12', 2), INTERVAL 1 MILLISECOND); -- Above normal range, source scale less than result +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.123456', 6), INTERVAL 1 MILLISECOND); -- Above normal range, source scale greater than result + + +SELECT 'test add[...]seconds()'; + + +SELECT '- test nanoseconds'; +select addNanoseconds(toDateTime64('1980-12-12 12:12:12.123456789', 9), 1); -- In normal range, source scale matches result +select addNanoseconds(toDateTime64('1980-12-12 12:12:12.1234567', 7), 1); -- In normal range, source scale less than result + +select addNanoseconds(a, 1) from ( select toDateTime64('1980-12-12 12:12:12.123456789', 9) AS a ); -- Non-constant argument + +select addNanoseconds(toDateTime64('1930-12-12 12:12:12.123456789', 9), 1); -- Below normal range, source scale matches result +select addNanoseconds(toDateTime64('1930-12-12 12:12:12.1234567', 7), 1); -- Below normal range, source scale less than result + +select addNanoseconds(toDateTime64('2220-12-12 12:12:12.123456789', 9), 1); -- Above normal range, source scale matches result +select addNanoseconds(toDateTime64('2220-12-12 12:12:12.1234567', 7), 1); -- Above normal range, source scale less than result + + +SELECT '- test microseconds'; +select addMicroseconds(toDateTime64('1980-12-12 12:12:12.123456', 6), 1); -- In normal range, source scale matches result +select addMicroseconds(toDateTime64('1980-12-12 12:12:12.1234', 4), 1); -- In normal range, source scale less than result +select addMicroseconds(toDateTime64('1980-12-12 12:12:12.12345678', 8), 1); -- In normal range, source scale greater than result + +select addMicroseconds(a, 1) from ( select toDateTime64('1980-12-12 12:12:12.123456', 6) AS a ); -- Non-constant argument + +select addMicroseconds(toDateTime64('1930-12-12 12:12:12.123456', 6), 1); -- Below normal range, source scale matches result +select addMicroseconds(toDateTime64('1930-12-12 12:12:12.1234', 4), 1); -- Below normal range, source scale less than result +select addMicroseconds(toDateTime64('1930-12-12 12:12:12.12345678', 8), 1); -- Below normal range, source scale greater than result + +select addMicroseconds(toDateTime64('2220-12-12 12:12:12.123456', 6), 1); -- Above normal range, source scale matches result +select addMicroseconds(toDateTime64('2220-12-12 12:12:12.1234', 4), 1); -- Above normal range, source scale less than result +select addMicroseconds(toDateTime64('2220-12-12 12:12:12.12345678', 
8), 1); -- Above normal range, source scale greater than result + + +SELECT '- test milliseconds'; +select addMilliseconds(toDateTime64('1980-12-12 12:12:12.123', 3), 1); -- In normal range, source scale matches result +select addMilliseconds(toDateTime64('1980-12-12 12:12:12.12', 2), 1); -- In normal range, source scale less than result +select addMilliseconds(toDateTime64('1980-12-12 12:12:12.123456', 6), 1); -- In normal range, source scale greater than result + +select addMilliseconds(a, 1) from ( select toDateTime64('1980-12-12 12:12:12.123', 3) AS a ); -- Non-constant argument + +select addMilliseconds(toDateTime64('1930-12-12 12:12:12.123', 3), 1); -- Below normal range, source scale matches result +select addMilliseconds(toDateTime64('1930-12-12 12:12:12.12', 2), 1); -- Below normal range, source scale less than result +select addMilliseconds(toDateTime64('1930-12-12 12:12:12.123456', 6), 1); -- Below normal range, source scale greater than result + +select addMilliseconds(toDateTime64('2220-12-12 12:12:12.123', 3), 1); -- Above normal range, source scale matches result +select addMilliseconds(toDateTime64('2220-12-12 12:12:12.12', 2), 1); -- Above normal range, source scale less than result +select addMilliseconds(toDateTime64('2220-12-12 12:12:12.123456', 6), 1); -- Above normal range, source scale greater than result diff --git a/tests/queries/0_stateless/02222_create_table_without_columns_metadata.sh b/tests/queries/0_stateless/02222_create_table_without_columns_metadata.sh index 842c32cf243..261c389c9f2 100755 --- a/tests/queries/0_stateless/02222_create_table_without_columns_metadata.sh +++ b/tests/queries/0_stateless/02222_create_table_without_columns_metadata.sh @@ -29,5 +29,5 @@ $CLICKHOUSE_CLIENT -q "create table test_dist engine=Distributed('test_shard_loc $CLICKHOUSE_CLIENT -q "detach table test_dist" $CLICKHOUSE_CLIENT -q "drop table test" $CLICKHOUSE_CLIENT -q "attach table test_dist" -$CLICKHOUSE_CLIENT -q "select * from test_dist" 2>&1 | grep -q "UNKNOWN_TABLE" && echo "OK" || echo "FAIL" +$CLICKHOUSE_CLIENT --prefer_localhost_replica=1 -q "select * from test_dist" 2>&1 | grep -q "UNKNOWN_TABLE" && echo "OK" || echo "FAIL" diff --git a/tests/queries/0_stateless/02232_partition_pruner_mixed_constant_type.reference b/tests/queries/0_stateless/02232_partition_pruner_mixed_constant_type.reference new file mode 100644 index 00000000000..6fcbc14234d --- /dev/null +++ b/tests/queries/0_stateless/02232_partition_pruner_mixed_constant_type.reference @@ -0,0 +1,4 @@ +1647353101000 +1647353101001 +1647353101002 +1647353101003 diff --git a/tests/queries/0_stateless/02232_partition_pruner_mixed_constant_type.sql b/tests/queries/0_stateless/02232_partition_pruner_mixed_constant_type.sql new file mode 100644 index 00000000000..a0b58271764 --- /dev/null +++ b/tests/queries/0_stateless/02232_partition_pruner_mixed_constant_type.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS broken; + +CREATE TABLE broken (time UInt64) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(time / 1000)) ORDER BY time; +INSERT INTO broken (time) VALUES (1647353101000), (1647353101001), (1647353101002), (1647353101003); +SELECT * FROM broken WHERE time>-1; + +DROP TABLE broken; diff --git a/tests/queries/0_stateless/02233_HTTP_ranged.python b/tests/queries/0_stateless/02233_HTTP_ranged.python new file mode 100644 index 00000000000..e0198210c16 --- /dev/null +++ b/tests/queries/0_stateless/02233_HTTP_ranged.python @@ -0,0 +1,262 @@ +#!/usr/bin/env python3 + +from http.server import BaseHTTPRequestHandler, 
HTTPServer +import socket +import sys +import re +import threading +import os +import traceback +import urllib.request +import subprocess + + +def is_ipv6(host): + try: + socket.inet_aton(host) + return False + except: + return True + + +def get_local_port(host, ipv6): + if ipv6: + family = socket.AF_INET6 + else: + family = socket.AF_INET + + with socket.socket(family) as fd: + fd.bind((host, 0)) + return fd.getsockname()[1] + + +CLICKHOUSE_HOST = os.environ.get("CLICKHOUSE_HOST", "localhost") +CLICKHOUSE_PORT_HTTP = os.environ.get("CLICKHOUSE_PORT_HTTP", "8123") + +# Server returns this JSON response. +SERVER_JSON_RESPONSE = """{ + "login": "ClickHouse", + "id": 54801242, + "name": "ClickHouse", + "company": null +}""" + +PAYLOAD_LEN = len(SERVER_JSON_RESPONSE) + +EXPECTED_ANSWER = """{\\n\\t"login": "ClickHouse",\\n\\t"id": 54801242,\\n\\t"name": "ClickHouse",\\n\\t"company": null\\n}""" + +##################################################################################### +# This test starts an HTTP server and serves data to a ClickHouse url-engine based table. +# The objective of this test is to check that the ClickHouse server downloads the data +# with HTTP Range requests when the server supports them, and falls back to plain GETs otherwise. +# In order for it to work, the ip+port of the http server (given below) should be +# accessible from the clickhouse server. +##################################################################################### + +# IP-address of this host accessible from the outside world. Get the first one +HTTP_SERVER_HOST = ( + subprocess.check_output(["hostname", "-i"]).decode("utf-8").strip().split()[0] +) +IS_IPV6 = is_ipv6(HTTP_SERVER_HOST) +HTTP_SERVER_PORT = get_local_port(HTTP_SERVER_HOST, IS_IPV6) + +# IP address and port of the HTTP server started from this script. +HTTP_SERVER_ADDRESS = (HTTP_SERVER_HOST, HTTP_SERVER_PORT) +if IS_IPV6: + HTTP_SERVER_URL_STR = ( + "http://" + + f"[{str(HTTP_SERVER_ADDRESS[0])}]:{str(HTTP_SERVER_ADDRESS[1])}" + + "/" + ) +else: + HTTP_SERVER_URL_STR = ( + "http://" + f"{str(HTTP_SERVER_ADDRESS[0])}:{str(HTTP_SERVER_ADDRESS[1])}" + "/" + ) + + +def get_ch_answer(query): + host = CLICKHOUSE_HOST + if IS_IPV6: + host = f"[{host}]" + + url = os.environ.get( + "CLICKHOUSE_URL", + "http://{host}:{port}".format(host=CLICKHOUSE_HOST, port=CLICKHOUSE_PORT_HTTP), + ) + return urllib.request.urlopen(url, data=query.encode()).read().decode() + + +def check_answers(query, answer): + ch_answer = get_ch_answer(query) + if ch_answer.strip() != answer.strip(): + print("FAIL on query:", query, file=sys.stderr) + print("Expected answer:", answer, file=sys.stderr) + print("Fetched answer :", ch_answer, file=sys.stderr) + raise Exception("Fail on query") + + +BYTE_RANGE_RE = re.compile(r"bytes=(\d+)-(\d+)?$") + + +def parse_byte_range(byte_range): + """Returns the two numbers in 'bytes=123-456' or throws ValueError. + The last number or both numbers may be None. + """ + if byte_range.strip() == "": + return None, None + + m = BYTE_RANGE_RE.match(byte_range) + if not m: + raise ValueError(f"Invalid byte range {byte_range}") + + first, last = [x and int(x) for x in m.groups()] + if last and last < first: + raise ValueError(f"Invalid byte range {byte_range}") + return first, last + + +# HTTP server with optional support for Range requests.
+class HttpProcessor(BaseHTTPRequestHandler): + allow_range = False + range_used = False + get_call_num = 0 + + def send_head(self): + if self.headers["Range"] and HttpProcessor.allow_range: + try: + self.range = parse_byte_range(self.headers["Range"]) + except ValueError as e: + self.send_error(400, "Invalid byte range") + return None + else: + self.range = None + + if self.range: + first, last = self.range + else: + first, last = None, None + + if first == None: + first = 0 + + payload = SERVER_JSON_RESPONSE.encode() + payload_len = len(payload) + if first and first >= payload_len: + self.send_error(416, "Requested Range Not Satisfiable") + return None + + self.send_response(206 if HttpProcessor.allow_range else 200) + self.send_header("Content-type", "application/json") + + if HttpProcessor.allow_range: + self.send_header("Accept-Ranges", "bytes") + + if last is None or last >= payload_len: + last = payload_len - 1 + + response_length = last - first + 1 + + if first or last: + self.send_header("Content-Range", f"bytes {first}-{last}/{payload_len}") + self.send_header( + "Content-Length", + str(response_length) if HttpProcessor.allow_range else str(payload_len), + ) + self.end_headers() + return payload + + def do_HEAD(self): + self.send_head() + + def do_GET(self): + result = self.send_head() + if result == None: + return + + HttpProcessor.get_call_num += 1 + + if not self.range: + self.wfile.write(SERVER_JSON_RESPONSE.encode()) + return + + HttpProcessor.range_used = True + payload = SERVER_JSON_RESPONSE.encode() + start, stop = self.range + if stop == None: + stop = len(payload) - 1 + if start == None: + start = 0 + self.wfile.write(SERVER_JSON_RESPONSE.encode()[start : stop + 1]) + + def log_message(self, format, *args): + return + + +class HTTPServerV6(HTTPServer): + address_family = socket.AF_INET6 + + +def start_server(): + if IS_IPV6: + httpd = HTTPServerV6(HTTP_SERVER_ADDRESS, HttpProcessor) + else: + httpd = HTTPServer(HTTP_SERVER_ADDRESS, HttpProcessor) + + t = threading.Thread(target=httpd.serve_forever) + return t, httpd + + +##################################################################### +# Testing area. +##################################################################### + + +def test_select(download_buffer_size): + global HTTP_SERVER_URL_STR + query = f"SELECT * FROM url('{HTTP_SERVER_URL_STR}','JSONAsString') SETTINGS max_download_buffer_size={download_buffer_size};" + check_answers(query, EXPECTED_ANSWER) + + +def run_test(allow_range, download_buffer_size=20): + HttpProcessor.range_used = False + HttpProcessor.get_call_num = 0 + HttpProcessor.allow_range = allow_range + + t, httpd = start_server() + t.start() + test_select(download_buffer_size) + + expected_get_call_num = (PAYLOAD_LEN - 1) // download_buffer_size + 1 + if allow_range: + if not HttpProcessor.range_used: + raise Exception("HTTP Range was not used when supported") + + if expected_get_call_num != HttpProcessor.get_call_num: + raise Exception( + f"Invalid amount of GET calls with Range. 
Expected {expected_get_call_num}, actual {HttpProcessor.get_call_num}" + ) + else: + if HttpProcessor.range_used: + raise Exception("HTTP Range used while not supported") + + httpd.shutdown() + t.join() + print("PASSED") + + +def main(): + run_test(allow_range=False) + run_test(allow_range=True, download_buffer_size=20) + run_test(allow_range=True, download_buffer_size=10) + + +if __name__ == "__main__": + try: + main() + except Exception as ex: + exc_type, exc_value, exc_traceback = sys.exc_info() + traceback.print_tb(exc_traceback, file=sys.stderr) + print(ex, file=sys.stderr) + sys.stderr.flush() + + os._exit(1) diff --git a/tests/queries/0_stateless/02233_HTTP_ranged.reference b/tests/queries/0_stateless/02233_HTTP_ranged.reference new file mode 100644 index 00000000000..17f0fff172a --- /dev/null +++ b/tests/queries/0_stateless/02233_HTTP_ranged.reference @@ -0,0 +1,3 @@ +PASSED +PASSED +PASSED diff --git a/tests/queries/0_stateless/02233_HTTP_ranged.sh b/tests/queries/0_stateless/02233_HTTP_ranged.sh new file mode 100755 index 00000000000..b6fba098d10 --- /dev/null +++ b/tests/queries/0_stateless/02233_HTTP_ranged.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +python3 "$CURDIR"/02233_HTTP_ranged.python + diff --git a/tests/queries/0_stateless/02233_setting_input_format_use_lowercase_column_name.sh b/tests/queries/0_stateless/02233_setting_input_format_use_lowercase_column_name.sh deleted file mode 100755 index b946addd01c..00000000000 --- a/tests/queries/0_stateless/02233_setting_input_format_use_lowercase_column_name.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-ubsan, no-fasttest - -CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. 
"$CUR_DIR"/../shell_config.sh - -echo "Parquet" -DATA_FILE=$CUR_DIR/data_parquet/test_setting_input_format_use_lowercase_column_name.parquet -${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (id String, score Int32) ENGINE = Memory" -cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load FORMAT Parquet SETTINGS input_format_use_lowercase_column_name=true" -${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" -${CLICKHOUSE_CLIENT} --query="drop table parquet_load" - -echo "ORC" -DATA_FILE=$CUR_DIR/data_orc/test_setting_input_format_use_lowercase_column_name.orc -${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS orc_load" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE orc_load (id String, score Int32) ENGINE = Memory" -cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO orc_load FORMAT ORC SETTINGS input_format_use_lowercase_column_name=true" -${CLICKHOUSE_CLIENT} --query="SELECT * FROM orc_load" -${CLICKHOUSE_CLIENT} --query="drop table orc_load" diff --git a/tests/queries/0_stateless/02240_tskv_schema_inference_bug.reference b/tests/queries/0_stateless/02240_tskv_schema_inference_bug.reference new file mode 100644 index 00000000000..a8abc33648e --- /dev/null +++ b/tests/queries/0_stateless/02240_tskv_schema_inference_bug.reference @@ -0,0 +1,8 @@ +b Nullable(String) +c Nullable(String) +a Nullable(String) +s1 \N 1 +} [2] 2 +\N \N \N +\N \N \N +\N [3] \N diff --git a/tests/queries/0_stateless/02240_tskv_schema_inference_bug.sh b/tests/queries/0_stateless/02240_tskv_schema_inference_bug.sh new file mode 100755 index 00000000000..8655ffd1e1f --- /dev/null +++ b/tests/queries/0_stateless/02240_tskv_schema_inference_bug.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +FILE_NAME=test_02240.data +DATA_FILE=${USER_FILES_PATH:?}/$FILE_NAME + +touch $DATA_FILE + +echo -e 'a=1\tb=s1\tc=\N +c=[2]\ta=2\tb=\N} + +a=\N +c=[3]\ta=\N' > $DATA_FILE +$CLICKHOUSE_CLIENT --max_read_buffer_size=4 -q "desc file('$FILE_NAME', 'TSKV')" +$CLICKHOUSE_CLIENT --max_read_buffer_size=4 -q "select * from file('$FILE_NAME', 'TSKV')" + diff --git a/tests/queries/0_stateless/02241_parquet_bad_column.reference b/tests/queries/0_stateless/02241_parquet_bad_column.reference index f599e28b8ab..b2f7f08c170 100644 --- a/tests/queries/0_stateless/02241_parquet_bad_column.reference +++ b/tests/queries/0_stateless/02241_parquet_bad_column.reference @@ -1 +1,2 @@ 10 +10 diff --git a/tests/queries/0_stateless/02241_parquet_bad_column.sh b/tests/queries/0_stateless/02241_parquet_bad_column.sh index a160671a088..9efd11cbbe1 100755 --- a/tests/queries/0_stateless/02241_parquet_bad_column.sh +++ b/tests/queries/0_stateless/02241_parquet_bad_column.sh @@ -5,23 +5,25 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "drop table if exists test_02241" -$CLICKHOUSE_CLIENT -q "create table test_02241 (image_path Nullable(String), - caption Nullable(String), - NSFW Nullable(String), - similarity Nullable(Float64), - LICENSE Nullable(String), - url Nullable(String), - key Nullable(UInt64), - shard_id Nullable(UInt64), - status Nullable(String), - width Nullable(UInt32), - height Nullable(UInt32), - exif Nullable(String), - original_width Nullable(UInt32), - original_height Nullable(UInt32)) engine=Memory" +for case_insensitive in "true" "false"; do + $CLICKHOUSE_CLIENT -q "drop table if exists test_02241" + $CLICKHOUSE_CLIENT -q "create table test_02241 (image_path Nullable(String), + caption Nullable(String), + NSFW Nullable(String), + similarity Nullable(Float64), + LICENSE Nullable(String), + url Nullable(String), + key Nullable(UInt64), + shard_id Nullable(UInt64), + status Nullable(String), + width Nullable(UInt32), + height Nullable(UInt32), + exif Nullable(String), + original_width Nullable(UInt32), + original_height Nullable(UInt32)) engine=Memory" -cat $CUR_DIR/data_parquet_bad_column/metadata_0.parquet | $CLICKHOUSE_CLIENT -q "insert into test_02241 format Parquet" + cat $CUR_DIR/data_parquet_bad_column/metadata_0.parquet | $CLICKHOUSE_CLIENT -q "insert into test_02241 format Parquet SETTINGS input_format_parquet_case_insensitive_column_matching=$case_insensitive" -$CLICKHOUSE_CLIENT -q "select count() from test_02241" -$CLICKHOUSE_CLIENT -q "drop table test_02241" + $CLICKHOUSE_CLIENT -q "select count() from test_02241" + $CLICKHOUSE_CLIENT -q "drop table test_02241" +done diff --git a/tests/queries/0_stateless/02241_short_circuit_short_column.reference b/tests/queries/0_stateless/02241_short_circuit_short_column.reference new file mode 100644 index 00000000000..c25c8fb59d0 --- /dev/null +++ b/tests/queries/0_stateless/02241_short_circuit_short_column.reference @@ -0,0 +1,20 @@ +1 \N +1 \N +1 \N +1 \N +1 \N +1 \N +1 \N +1 \N +1 \N +1 \N +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 diff --git a/tests/queries/0_stateless/02241_short_circuit_short_column.sql b/tests/queries/0_stateless/02241_short_circuit_short_column.sql new file mode 100644 index 00000000000..311307fe505 --- /dev/null +++ b/tests/queries/0_stateless/02241_short_circuit_short_column.sql @@ -0,0 +1,2 @@ +SELECT 65536 AND 2147483647, throwIf((((1048575 AND throwIf((0. 
AND NULL) AND (((65536 AND 257) AND (1.1754943508222875e-38 AND 1024) AND -2147483649) AND NULL) AND (10 AND NULL)) AND (((65536 AND 257) AND (1.1754943508222875e-38 AND 1024) AND -1) AND NULL) AND 65535) AND -1) AND NULL) AND (NULL AND NULL), NULL < number) FROM numbers(10); +SELECT NULL AND throwIf((0 AND NULL) AND 2147483646 AND NULL AND NULL) AND -2147483649 AND (NULL AND NULL) AND NULL FROM system.numbers LIMIT 10; diff --git a/tests/queries/0_stateless/02233_setting_input_format_use_lowercase_column_name.reference b/tests/queries/0_stateless/02242_case_insensitive_column_matching.reference similarity index 66% rename from tests/queries/0_stateless/02233_setting_input_format_use_lowercase_column_name.reference rename to tests/queries/0_stateless/02242_case_insensitive_column_matching.reference index 5c383cb3035..9732211a286 100644 --- a/tests/queries/0_stateless/02233_setting_input_format_use_lowercase_column_name.reference +++ b/tests/queries/0_stateless/02242_case_insensitive_column_matching.reference @@ -4,3 +4,6 @@ Parquet ORC 123 1 456 2 +Arrow +123 1 +456 2 diff --git a/tests/queries/0_stateless/02242_case_insensitive_column_matching.sh b/tests/queries/0_stateless/02242_case_insensitive_column_matching.sh new file mode 100755 index 00000000000..8ebf2952ab3 --- /dev/null +++ b/tests/queries/0_stateless/02242_case_insensitive_column_matching.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Tags: no-ubsan, no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +echo "Parquet" +DATA_FILE=$CUR_DIR/data_parquet/case_insensitive_column_matching.parquet +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (iD String, scOre Int32) ENGINE = Memory" +cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load FORMAT Parquet SETTINGS input_format_parquet_case_insensitive_column_matching=true" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" +${CLICKHOUSE_CLIENT} --query="drop table parquet_load" + +echo "ORC" +DATA_FILE=$CUR_DIR/data_orc/case_insensitive_column_matching.orc +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS orc_load" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE orc_load (iD String, sCorE Int32) ENGINE = Memory" +cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO orc_load FORMAT ORC SETTINGS input_format_orc_case_insensitive_column_matching=true" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM orc_load" +${CLICKHOUSE_CLIENT} --query="drop table orc_load" + +echo "Arrow" +DATA_FILE=$CUR_DIR/data_arrow/case_insensitive_column_matching.arrow +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS arrow_load" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE arrow_load (iD String, sCorE Int32) ENGINE = Memory" +cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO arrow_load FORMAT Arrow SETTINGS input_format_arrow_case_insensitive_column_matching=true" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM arrow_load" +${CLICKHOUSE_CLIENT} --query="drop table arrow_load" diff --git a/tests/queries/0_stateless/02242_case_insensitive_nested.reference b/tests/queries/0_stateless/02242_case_insensitive_nested.reference new file mode 100644 index 00000000000..58d66d3230a --- /dev/null +++ b/tests/queries/0_stateless/02242_case_insensitive_nested.reference @@ -0,0 +1,12 @@ +Arrow +[1,2,3] ['123','456','789'] [9.8,10.12,11.14] +[4,5,6] ['101112','131415','161718'] [123.8,10.2,11.414] +[7,8,9] ['101','415','118'] [13.08,1.12,0.414] 
+Parquet +[1,2,3] ['123','456','789'] [9.8,10.12,11.14] +[4,5,6] ['101112','131415','161718'] [123.8,10.2,11.414] +[7,8,9] ['101','415','118'] [13.08,1.12,0.414] +ORC +[1,2,3] ['123','456','789'] [9.8,10.12,11.14] +[4,5,6] ['101112','131415','161718'] [123.8,10.2,11.414] +[7,8,9] ['101','415','118'] [13.08,1.12,0.414] diff --git a/tests/queries/0_stateless/02242_case_insensitive_nested.sh b/tests/queries/0_stateless/02242_case_insensitive_nested.sh new file mode 100755 index 00000000000..c22f5695dc3 --- /dev/null +++ b/tests/queries/0_stateless/02242_case_insensitive_nested.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS nested_table" +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS nested_nested_table" + +${CLICKHOUSE_CLIENT} --query="CREATE TABLE nested_table (table Nested(eLeM1 Int32, elEm2 String, ELEM3 Float32)) engine=Memory" + +formats=('Arrow' 'Parquet' 'ORC') +format_files=('arrow' 'parquet' 'orc') + +for ((i = 0; i < 3; i++)) do + echo ${formats[i]} + + ${CLICKHOUSE_CLIENT} --query="TRUNCATE TABLE nested_table" + cat $CUR_DIR/data_orc_arrow_parquet_nested/nested_table.${format_files[i]} | ${CLICKHOUSE_CLIENT} -q "INSERT INTO nested_table FORMAT ${formats[i]} SETTINGS input_format_${format_files[i]}_import_nested = 1, input_format_${format_files[i]}_case_insensitive_column_matching = true" + + ${CLICKHOUSE_CLIENT} --query="SELECT * FROM nested_table" + +done + +${CLICKHOUSE_CLIENT} --query="DROP TABLE nested_table" diff --git a/tests/queries/0_stateless/02242_if_then_else_null_bug.reference b/tests/queries/0_stateless/02242_if_then_else_null_bug.reference new file mode 100644 index 00000000000..a3bd3b9256e --- /dev/null +++ b/tests/queries/0_stateless/02242_if_then_else_null_bug.reference @@ -0,0 +1,4 @@ +\N +1 +\N +2 diff --git a/tests/queries/0_stateless/02242_if_then_else_null_bug.sql b/tests/queries/0_stateless/02242_if_then_else_null_bug.sql new file mode 100644 index 00000000000..47b0f38d3dc --- /dev/null +++ b/tests/queries/0_stateless/02242_if_then_else_null_bug.sql @@ -0,0 +1,4 @@ +SELECT if(materialize(1) > 0, CAST(NULL, 'Nullable(Int64)'), materialize(toInt32(1))); +SELECT if(materialize(1) > 0, materialize(toInt32(1)), CAST(NULL, 'Nullable(Int64)')); +SELECT if(materialize(1) > 0, CAST(NULL, 'Nullable(Decimal(18, 4))'), materialize(CAST(2, 'Nullable(Decimal(9, 4))'))); +SELECT if(materialize(1) > 0, materialize(CAST(2, 'Nullable(Decimal(9, 4))')), CAST(NULL, 'Nullable(Decimal(18, 4))')); diff --git a/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.reference b/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.reference new file mode 100644 index 00000000000..0cfbf08886f --- /dev/null +++ b/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.reference @@ -0,0 +1 @@ +2 diff --git a/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.sql b/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.sql new file mode 100644 index 00000000000..e6e4663c5aa --- /dev/null +++ b/tests/queries/0_stateless/02242_optimize_to_subcolumns_no_storage.sql @@ -0,0 +1,3 @@ +SET optimize_functions_to_subcolumns = 1; +SELECT count(*) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3 +WHERE (n1.number = n2.number) AND (n2.number = n3.number); diff --git a/tests/queries/0_stateless/02242_subcolumns_sizes.reference 
b/tests/queries/0_stateless/02242_subcolumns_sizes.reference new file mode 100644 index 00000000000..124b6341116 --- /dev/null +++ b/tests/queries/0_stateless/02242_subcolumns_sizes.reference @@ -0,0 +1,8 @@ +arr size0 UInt64 1 +d k1 String 1 +d k2.k3 Array(String) 1 +d k2.k4 Array(String) 1 +d k2.k5 Array(Int8) 1 +d k2.size0 UInt64 1 +n null UInt8 1 +1 1 1 1 diff --git a/tests/queries/0_stateless/02242_subcolumns_sizes.sql b/tests/queries/0_stateless/02242_subcolumns_sizes.sql new file mode 100644 index 00000000000..8c3d8e69238 --- /dev/null +++ b/tests/queries/0_stateless/02242_subcolumns_sizes.sql @@ -0,0 +1,34 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_subcolumns_sizes; + +SET allow_experimental_object_type = 1; + +CREATE TABLE t_subcolumns_sizes (id UInt64, arr Array(UInt64), n Nullable(String), d JSON) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_subcolumns_sizes FORMAT JSONEachRow {"id": 1, "arr": [1, 2, 3], "n": null, "d": {"k1": "v1", "k2": [{"k3": 1, "k4": "v2"}, {"k3": 3}]}} +INSERT INTO t_subcolumns_sizes FORMAT JSONEachRow {"id": 2, "arr": [0], "n": "foo", "d": {"k1": "v3", "k2": [{"k4": "v4"}, {"k3": "v5", "k5": 5}]}} + +OPTIMIZE TABLE t_subcolumns_sizes FINAL; + +SELECT + column, + subcolumns.names AS sname, + subcolumns.types AS stype, + subcolumns.bytes_on_disk > 0 +FROM system.parts_columns ARRAY JOIN subcolumns +WHERE database = currentDatabase() AND table = 't_subcolumns_sizes' AND active +ORDER BY column, sname, stype; + +SELECT + any(column_bytes_on_disk) = sum(subcolumns.bytes_on_disk), + any(column_data_compressed_bytes) = sum(subcolumns.data_compressed_bytes), + any(column_data_uncompressed_bytes) = sum(subcolumns.data_uncompressed_bytes), + any(column_marks_bytes) = sum(subcolumns.marks_bytes) +FROM system.parts_columns ARRAY JOIN subcolumns +WHERE database = currentDatabase() AND table = 't_subcolumns_sizes' +AND active AND column = 'd'; + +DROP TABLE IF EXISTS t_subcolumns_sizes; diff --git a/tests/queries/0_stateless/02242_throw_if_constant_argument.reference b/tests/queries/0_stateless/02242_throw_if_constant_argument.reference new file mode 100644 index 00000000000..4521d575ff3 --- /dev/null +++ b/tests/queries/0_stateless/02242_throw_if_constant_argument.reference @@ -0,0 +1,10 @@ +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 diff --git a/tests/queries/0_stateless/02242_throw_if_constant_argument.sql b/tests/queries/0_stateless/02242_throw_if_constant_argument.sql new file mode 100644 index 00000000000..bdde059ef0f --- /dev/null +++ b/tests/queries/0_stateless/02242_throw_if_constant_argument.sql @@ -0,0 +1 @@ +SELECT throwIf(0 AND 2147483646) FROM system.numbers LIMIT 10; diff --git a/tests/queries/0_stateless/02243_in_ip_address.reference b/tests/queries/0_stateless/02243_in_ip_address.reference new file mode 100644 index 00000000000..aa47d0d46d4 --- /dev/null +++ b/tests/queries/0_stateless/02243_in_ip_address.reference @@ -0,0 +1,2 @@ +0 +0 diff --git a/tests/queries/0_stateless/02243_in_ip_address.sql b/tests/queries/0_stateless/02243_in_ip_address.sql new file mode 100644 index 00000000000..a2c8c37e585 --- /dev/null +++ b/tests/queries/0_stateless/02243_in_ip_address.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (id UInt64, value_ipv4 IPv4, value_ipv6 IPv6) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, '127.0.0.1', '127.0.0.1'); + +SELECT id FROM test_table WHERE value_ipv4 IN (SELECT value_ipv4 FROM test_table); +SELECT id FROM test_table WHERE value_ipv6 IN 
(SELECT value_ipv6 FROM test_table); + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02243_ipv6_long_parsing.reference b/tests/queries/0_stateless/02243_ipv6_long_parsing.reference new file mode 100644 index 00000000000..c09bfebe9d5 --- /dev/null +++ b/tests/queries/0_stateless/02243_ipv6_long_parsing.reference @@ -0,0 +1,3 @@ +0 ::ffff:1.12.12.12 +1 ::ffff:123.123.123.123 +2 ::ffff:192.168.100.228 diff --git a/tests/queries/0_stateless/02243_ipv6_long_parsing.sql b/tests/queries/0_stateless/02243_ipv6_long_parsing.sql new file mode 100644 index 00000000000..25225ee0fa8 --- /dev/null +++ b/tests/queries/0_stateless/02243_ipv6_long_parsing.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (id UInt64, value IPv6) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, '0000:0000:0000:0000:0000:ffff:1.12.12.12'); +INSERT INTO test_table VALUES (1, '0000:0000:0000:0000:0000:ffff:123.123.123.123'); +INSERT INTO test_table VALUES (2, '0000:0000:0000:0000:0000:ffff:192.168.100.228'); + +SELECT * FROM test_table ORDER BY id; + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02244_casewithexpression_return_type.reference b/tests/queries/0_stateless/02244_casewithexpression_return_type.reference new file mode 100644 index 00000000000..bcdeb4a290b --- /dev/null +++ b/tests/queries/0_stateless/02244_casewithexpression_return_type.reference @@ -0,0 +1,20 @@ +0 555555 +1 10 +2 555555 +3 55 +4 555555 +5 555555 +6 77 +7 555555 +8 555555 +9 95 +10 100 +11 555555 +12 555555 +13 555555 +14 555555 +15 555555 +16 555555 +17 555555 +18 555555 +19 555555 diff --git a/tests/queries/0_stateless/02244_casewithexpression_return_type.sql b/tests/queries/0_stateless/02244_casewithexpression_return_type.sql new file mode 100644 index 00000000000..02557a3ddfa --- /dev/null +++ b/tests/queries/0_stateless/02244_casewithexpression_return_type.sql @@ -0,0 +1,12 @@ + SELECT "number", CASE "number" + WHEN 3 THEN 55 + WHEN 6 THEN 77 + WHEN 9 THEN 95 + ELSE CASE + WHEN "number"=1 THEN 10 + WHEN "number"=10 THEN 100 + ELSE 555555 + END + END AS "LONG_COL_0" + FROM `system`.numbers + LIMIT 20; diff --git a/tests/queries/0_stateless/02244_issue_35598_fuse.reference b/tests/queries/0_stateless/02244_issue_35598_fuse.reference new file mode 100644 index 00000000000..6ce84b402a3 --- /dev/null +++ b/tests/queries/0_stateless/02244_issue_35598_fuse.reference @@ -0,0 +1,2 @@ +0 0 nan +0 0 nan diff --git a/tests/queries/0_stateless/02244_issue_35598_fuse.sql b/tests/queries/0_stateless/02244_issue_35598_fuse.sql new file mode 100644 index 00000000000..a590854eb6c --- /dev/null +++ b/tests/queries/0_stateless/02244_issue_35598_fuse.sql @@ -0,0 +1,5 @@ +SELECT sum(x), count(x), avg(x) FROM (SELECT number :: Decimal32(0) AS x FROM numbers(0)) +SETTINGS optimize_syntax_fuse_functions = 0, optimize_fuse_sum_count_avg = 0; + +SELECT sum(x), count(x), avg(x) FROM (SELECT number :: Decimal32(0) AS x FROM numbers(0)) +SETTINGS optimize_syntax_fuse_functions = 1, optimize_fuse_sum_count_avg = 1; diff --git a/tests/queries/0_stateless/02244_lowcardinality_hash_join.reference b/tests/queries/0_stateless/02244_lowcardinality_hash_join.reference new file mode 100644 index 00000000000..d89bbd39cdc --- /dev/null +++ b/tests/queries/0_stateless/02244_lowcardinality_hash_join.reference @@ -0,0 +1,4 @@ +x x +x x +x x +x x diff --git a/tests/queries/0_stateless/02244_lowcardinality_hash_join.sql b/tests/queries/0_stateless/02244_lowcardinality_hash_join.sql new file mode 100644 index 
00000000000..f2a601adf06 --- /dev/null +++ b/tests/queries/0_stateless/02244_lowcardinality_hash_join.sql @@ -0,0 +1,27 @@ +-- Tags: no-parallel +DROP TABLE IF EXISTS lc_table; + +CREATE TABLE lc_table +( + col LowCardinality(String) +) ENGINE=TinyLog; + +INSERT INTO lc_table VALUES('x'); + +SELECT * +FROM lc_table +INNER JOIN lc_table AS lc_table2 ON lc_table.col = lc_table2.col; + +SELECT * +FROM lc_table +INNER JOIN lc_table AS lc_table2 ON CAST(lc_table.col AS String) = CAST(lc_table2.col AS String); + +SELECT * +FROM lc_table +INNER JOIN lc_table AS lc_table2 ON (lc_table.col = lc_table2.col) OR (lc_table.col = lc_table2.col); + +SELECT * +FROM lc_table +INNER JOIN lc_table AS lc_table2 ON (CAST(lc_table.col AS String) = CAST(lc_table2.col AS String)) OR (CAST(lc_table.col AS String) = CAST(lc_table2.col AS String)); + +DROP TABLE IF EXISTS lc_table; diff --git a/tests/queries/0_stateless/02244_url_engine_headers_test.reference b/tests/queries/0_stateless/02244_url_engine_headers_test.reference new file mode 100644 index 00000000000..7b96f09d82e --- /dev/null +++ b/tests/queries/0_stateless/02244_url_engine_headers_test.reference @@ -0,0 +1 @@ +{"12":12}\n diff --git a/tests/queries/0_stateless/02244_url_engine_headers_test.sql b/tests/queries/0_stateless/02244_url_engine_headers_test.sql new file mode 100644 index 00000000000..e71a933346c --- /dev/null +++ b/tests/queries/0_stateless/02244_url_engine_headers_test.sql @@ -0,0 +1 @@ +select * from url(url_with_headers, url='http://127.0.0.1:8123?query=select+12', format='RawBLOB'); diff --git a/tests/queries/0_stateless/02245_weird_partitions_pruning.reference b/tests/queries/0_stateless/02245_weird_partitions_pruning.reference new file mode 100644 index 00000000000..cf406b417b4 --- /dev/null +++ b/tests/queries/0_stateless/02245_weird_partitions_pruning.reference @@ -0,0 +1,14 @@ +202112-0 (202112,0) +202201-0 (202201,0) +202301-0 (202301,0) +202112-0 2021-12-31 22:30:00 2021-12-31 22:30:00 2021-12-31 14:30:00 2021-12-31 14:30:00 1000 +202201-0 2022-01-01 00:30:00 2022-01-31 22:30:00 2021-12-31 16:30:00 2022-01-31 14:30:00 2000 +202301-0 2023-01-31 22:30:00 2023-01-31 22:30:00 2023-01-31 14:30:00 2023-01-31 14:30:00 1000 +202112-0 +default weird_partitions_02245 1 1000 1 +202201-0 +default weird_partitions_02245 1 2000 1 +202112-0 +202201-0 +default weird_partitions_02245 2 3000 2 +default weird_partitions_02245 0 0 0 diff --git a/tests/queries/0_stateless/02245_weird_partitions_pruning.sql b/tests/queries/0_stateless/02245_weird_partitions_pruning.sql new file mode 100644 index 00000000000..6273a9f3d59 --- /dev/null +++ b/tests/queries/0_stateless/02245_weird_partitions_pruning.sql @@ -0,0 +1,61 @@ +-- We use a hack: partition by ignore(d1). In some cases there are two columns +-- that are not fully correlated (<1) (e.g. date_begin - date_end, or datetime - datetime_in_TZ_with_DST). +-- If we partitioned by both columns instead of one, there would be twice as many partitions. +-- Partitioning by (.., ignore(d1)) partitions by the first column only, but builds +-- min_max indexes for both columns, so partition pruning works for both of them. +-- It is very similar to a min_max skip index but gives a bigger performance boost, +-- because partition pruning happens at a very early query stage.
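As an aside for readers of this patch (an editorial sketch, not part of the test itself), the technique the comment describes can be shown in isolation; the table and column names below are hypothetical:

CREATE TABLE pruning_sketch
(
    event_time DateTime,
    event_time_local DateTime DEFAULT event_time - toIntervalHour(8),
    id UInt64
)
ENGINE = MergeTree
PARTITION BY (toYYYYMM(event_time), ignore(event_time_local))  -- one partition per month of event_time, while min/max is still tracked for event_time_local
ORDER BY id;

-- A filter on either column can now skip whole partitions; EXPLAIN ESTIMATE reports how many parts and rows survive pruning.
EXPLAIN ESTIMATE
SELECT count()
FROM pruning_sketch
WHERE event_time_local >= '2021-12-31 00:00:00' AND event_time_local < '2022-01-01 00:00:00';

The test that follows exercises the same idea against weird_partitions_02245 and verifies the pruning via EXPLAIN ESTIMATE.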
+ + +DROP TABLE IF EXISTS weird_partitions_02245; + +CREATE TABLE weird_partitions_02245(d DateTime, d1 DateTime default d - toIntervalHour(8), id Int64) +Engine=MergeTree +PARTITION BY (toYYYYMM(toDateTime(d)), ignore(d1)) +ORDER BY id; + +INSERT INTO weird_partitions_02245(d, id) +SELECT + toDateTime('2021-12-31 22:30:00') AS d, + number +FROM numbers(1000); + +INSERT INTO weird_partitions_02245(d, id) +SELECT + toDateTime('2022-01-01 00:30:00') AS d, + number +FROM numbers(1000); + +INSERT INTO weird_partitions_02245(d, id) +SELECT + toDateTime('2022-01-31 22:30:00') AS d, + number +FROM numbers(1000); + +INSERT INTO weird_partitions_02245(d, id) +SELECT + toDateTime('2023-01-31 22:30:00') AS d, + number +FROM numbers(1000); + +OPTIMIZE TABLE weird_partitions_02245; +OPTIMIZE TABLE weird_partitions_02245; + +SELECT DISTINCT _partition_id, _partition_value FROM weird_partitions_02245 ORDER BY _partition_id ASC; + +SELECT _partition_id, min(d), max(d), min(d1), max(d1), count() FROM weird_partitions_02245 GROUP BY _partition_id ORDER BY _partition_id ASC; + +select DISTINCT _partition_id from weird_partitions_02245 where d >= '2021-12-31 00:00:00' and d < '2022-01-01 00:00:00' ORDER BY _partition_id; +explain estimate select DISTINCT _partition_id from weird_partitions_02245 where d >= '2021-12-31 00:00:00' and d < '2022-01-01 00:00:00'; + +select DISTINCT _partition_id from weird_partitions_02245 where d >= '2022-01-01 00:00:00' and d1 >= '2021-12-31 00:00:00' and d1 < '2022-01-01 00:00:00' ORDER BY _partition_id; +explain estimate select DISTINCT _partition_id from weird_partitions_02245 where d >= '2022-01-01 00:00:00' and d1 >= '2021-12-31 00:00:00' and d1 < '2022-01-01 00:00:00'; + +select DISTINCT _partition_id from weird_partitions_02245 where d1 >= '2021-12-31 00:00:00' and d1 < '2022-01-01 00:00:00' ORDER BY _partition_id; +explain estimate select DISTINCT _partition_id from weird_partitions_02245 where d1 >= '2021-12-31 00:00:00' and d1 < '2022-01-01 00:00:00'; + +select DISTINCT _partition_id from weird_partitions_02245 where d >= '2022-01-01 00:00:00' and d1 >= '2021-12-31 00:00:00' and d1 < '2020-01-01 00:00:00' ORDER BY _partition_id; +explain estimate select DISTINCT _partition_id from weird_partitions_02245 where d >= '2022-01-01 00:00:00' and d1 >= '2021-12-31 00:00:00' and d1 < '2020-01-01 00:00:00'; + +DROP TABLE weird_partitions_02245; + diff --git a/tests/queries/0_stateless/data_arrow/case_insensitive_column_matching.arrow b/tests/queries/0_stateless/data_arrow/case_insensitive_column_matching.arrow new file mode 100644 index 00000000000..4350d5c3e49 Binary files /dev/null and b/tests/queries/0_stateless/data_arrow/case_insensitive_column_matching.arrow differ diff --git a/tests/queries/0_stateless/data_orc/case_insensitive_column_matching.orc b/tests/queries/0_stateless/data_orc/case_insensitive_column_matching.orc new file mode 100644 index 00000000000..136f9980064 Binary files /dev/null and b/tests/queries/0_stateless/data_orc/case_insensitive_column_matching.orc differ diff --git a/tests/queries/0_stateless/data_parquet/test_setting_input_format_use_lowercase_column_name.parquet b/tests/queries/0_stateless/data_parquet/case_insensitive_column_matching.parquet similarity index 100% rename from tests/queries/0_stateless/data_parquet/test_setting_input_format_use_lowercase_column_name.parquet rename to tests/queries/0_stateless/data_parquet/case_insensitive_column_matching.parquet diff --git
a/tests/queries/0_stateless/data_parquet/test_setting_input_format_use_lowercase_column_name.parquet.columns b/tests/queries/0_stateless/data_parquet/case_insensitive_column_matching.parquet.columns similarity index 100% rename from tests/queries/0_stateless/data_parquet/test_setting_input_format_use_lowercase_column_name.parquet.columns rename to tests/queries/0_stateless/data_parquet/case_insensitive_column_matching.parquet.columns diff --git a/tests/queries/0_stateless/helpers/00900_parquet_create_table_columns.py b/tests/queries/0_stateless/helpers/00900_parquet_create_table_columns.py index 92606c9cb26..b69bf7c8d11 100755 --- a/tests/queries/0_stateless/helpers/00900_parquet_create_table_columns.py +++ b/tests/queries/0_stateless/helpers/00900_parquet_create_table_columns.py @@ -16,16 +16,19 @@ TYPE_PARQUET_PHYSICAL_TO_CLICKHOUSE = { "FLOAT": "Float32", "DOUBLE": "Float64", "BYTE_ARRAY": "String", - "INT96": "Int64", # TODO! + "INT96": "Int64", # TODO! } + def read_file(filename): with open(filename, "rb") as f: return f.read().decode("raw_unicode_escape") + def get_column_name(column): return column["Name"].split(".", 1)[0] + def resolve_clickhouse_column_type(column): column_name = get_column_name(column) logical_type = column.get("LogicalType", {}) @@ -35,23 +38,46 @@ def resolve_clickhouse_column_type(column): precision = int(logical_type["precision"]) scale = int(logical_type["scale"]) if precision < 1 or precision > 76: - raise RuntimeError("Column {} has invalid Decimal precision {}".format(column_name, precision)) + raise RuntimeError( + "Column {} has invalid Decimal precision {}".format( + column_name, precision + ) + ) if precision > 38: - raise RuntimeError("Column {} has unsupported Decimal precision {}".format(column_name, precision)) + raise RuntimeError( + "Column {} has unsupported Decimal precision {}".format( + column_name, precision + ) + ) if scale < 0 or scale > precision: - raise RuntimeError("Column {} has invalid Decimal scale {} for precision {}".format(column_name, scale, precision)) + raise RuntimeError( + "Column {} has invalid Decimal scale {} for precision {}".format( + column_name, scale, precision + ) + ) return "Decimal({}, {})".format(precision, scale) if converted_type and converted_type != "NONE": result_type = TYPE_PARQUET_CONVERTED_TO_CLICKHOUSE.get(converted_type) if result_type: return result_type - raise RuntimeError("Column {} has unknown ConvertedType: {}".format(column_name, converted_type)) + raise RuntimeError( + "Column {} has unknown ConvertedType: {}".format( + column_name, converted_type + ) + ) if physical_type and physical_type != "NONE": result_type = TYPE_PARQUET_PHYSICAL_TO_CLICKHOUSE.get(physical_type) if result_type: return result_type - raise RuntimeError("Column {} has unknown PhysicalType: {}".format(column_name, physical_type)) - raise RuntimeError("Column {} has invalid types: ConvertedType={}, PhysicalType={}".format(column_name, converted_type, physical_type)) + raise RuntimeError( + "Column {} has unknown PhysicalType: {}".format(column_name, physical_type) + ) + raise RuntimeError( + "Column {} has invalid types: ConvertedType={}, PhysicalType={}".format( + column_name, converted_type, physical_type + ) + ) + def dump_columns(obj): descr_by_column_name = {} @@ -78,11 +104,22 @@ def dump_columns(obj): else: return "Tuple({})".format(", ".join(types)) - print(", ".join(map(lambda descr: "`{}` {}".format(descr["name"], _format_type(descr["types"])), columns_descr))) + print( + ", ".join( + map( + lambda descr: "`{}` 
{}".format( + descr["name"], _format_type(descr["types"]) + ), + columns_descr, + ) + ) + ) + def dump_columns_from_file(filename): dump_columns(json.loads(read_file(filename), strict=False)) + if __name__ == "__main__": filename = sys.argv[1] dump_columns_from_file(filename) diff --git a/tests/queries/0_stateless/helpers/client.py b/tests/queries/0_stateless/helpers/client.py index 086d920d0b7..5c8589dfca1 100644 --- a/tests/queries/0_stateless/helpers/client.py +++ b/tests/queries/0_stateless/helpers/client.py @@ -8,29 +8,30 @@ sys.path.insert(0, os.path.join(CURDIR)) import uexpect -prompt = ':\) ' -end_of_block = r'.*\r\n.*\r\n' +prompt = ":\) " +end_of_block = r".*\r\n.*\r\n" + class client(object): - def __init__(self, command=None, name='', log=None): - self.client = uexpect.spawn(['/bin/bash','--noediting']) + def __init__(self, command=None, name="", log=None): + self.client = uexpect.spawn(["/bin/bash", "--noediting"]) if command is None: - command = os.environ.get('CLICKHOUSE_BINARY', 'clickhouse') + ' client' + command = os.environ.get("CLICKHOUSE_BINARY", "clickhouse") + " client" self.client.command = command - self.client.eol('\r') + self.client.eol("\r") self.client.logger(log, prefix=name) self.client.timeout(120) - self.client.expect('[#\$] ', timeout=60) + self.client.expect("[#\$] ", timeout=60) self.client.send(command) def __enter__(self): return self.client.__enter__() def __exit__(self, type, value, traceback): - self.client.reader['kill_event'].set() + self.client.reader["kill_event"].set() # send Ctrl-C - self.client.send('\x03', eol='') + self.client.send("\x03", eol="") time.sleep(0.3) - self.client.send('quit', eol='\r') - self.client.send('\x03', eol='') + self.client.send("quit", eol="\r") + self.client.send("\x03", eol="") return self.client.__exit__(type, value, traceback) diff --git a/tests/queries/0_stateless/helpers/httpclient.py b/tests/queries/0_stateless/helpers/httpclient.py index adbfbc7d287..00e6a8d164d 100644 --- a/tests/queries/0_stateless/helpers/httpclient.py +++ b/tests/queries/0_stateless/helpers/httpclient.py @@ -7,8 +7,11 @@ sys.path.insert(0, os.path.join(CURDIR)) import httpexpect -def client(request, name='', log=None): - client = httpexpect.spawn({'host':'localhost','port':8123,'timeout':30}, request) + +def client(request, name="", log=None): + client = httpexpect.spawn( + {"host": "localhost", "port": 8123, "timeout": 30}, request + ) client.logger(log, prefix=name) client.timeout(20) return client diff --git a/tests/queries/0_stateless/helpers/httpexpect.py b/tests/queries/0_stateless/helpers/httpexpect.py index 788e57499a8..6147118e793 100644 --- a/tests/queries/0_stateless/helpers/httpexpect.py +++ b/tests/queries/0_stateless/helpers/httpexpect.py @@ -23,6 +23,7 @@ import uexpect from threading import Thread, Event from queue import Queue, Empty + class IO(uexpect.IO): def __init__(self, connection, response, queue, reader): self.connection = connection @@ -33,10 +34,10 @@ class IO(uexpect.IO): raise NotImplementedError def close(self, force=True): - self.reader['kill_event'].set() + self.reader["kill_event"].set() self.connection.close() if self._logger: - self._logger.write('\n') + self._logger.write("\n") self._logger.flush() @@ -52,6 +53,7 @@ def reader(response, queue, kill_event): break raise + def spawn(connection, request): connection = http.client.HTTPConnection(**connection) connection.request(**request) @@ -63,11 +65,20 @@ def spawn(connection, request): thread.daemon = True thread.start() - return IO(connection, 
response, queue, reader={'thread':thread, 'kill_event':reader_kill_event}) + return IO( + connection, + response, + queue, + reader={"thread": thread, "kill_event": reader_kill_event}, + ) -if __name__ == '__main__': - with spawn({'host':'localhost','port':8123},{'method':'GET', 'url':'?query=SELECT%201'}) as client: + +if __name__ == "__main__": + with spawn( + {"host": "localhost", "port": 8123}, + {"method": "GET", "url": "?query=SELECT%201"}, + ) as client: client.logger(sys.stdout) client.timeout(2) print(client.response.status, client.response.reason) - client.expect('1\n') + client.expect("1\n") diff --git a/tests/queries/0_stateless/helpers/protobuf_length_delimited_encoder.py b/tests/queries/0_stateless/helpers/protobuf_length_delimited_encoder.py index 090bb119321..4a3f4613b6f 100755 --- a/tests/queries/0_stateless/helpers/protobuf_length_delimited_encoder.py +++ b/tests/queries/0_stateless/helpers/protobuf_length_delimited_encoder.py @@ -11,6 +11,7 @@ import subprocess import sys import tempfile + def read_varint(input): res = 0 multiplier = 1 @@ -27,29 +28,36 @@ def read_varint(input): multiplier *= 0x80 return res + def write_varint(output, value): while True: if value < 0x80: b = value - output.write(b.to_bytes(1, byteorder='little')) + output.write(b.to_bytes(1, byteorder="little")) break b = (value & 0x7F) + 0x80 - output.write(b.to_bytes(1, byteorder='little')) + output.write(b.to_bytes(1, byteorder="little")) value = value >> 7 + def write_hexdump(output, data): - with subprocess.Popen(["hexdump", "-C"], stdin=subprocess.PIPE, stdout=output, shell=False) as proc: + with subprocess.Popen( + ["hexdump", "-C"], stdin=subprocess.PIPE, stdout=output, shell=False + ) as proc: proc.communicate(data) if proc.returncode != 0: raise RuntimeError("hexdump returned code " + str(proc.returncode)) output.flush() + class FormatSchemaSplitted: def __init__(self, format_schema): self.format_schema = format_schema - splitted = self.format_schema.split(':') + splitted = self.format_schema.split(":") if len(splitted) < 2: - raise RuntimeError('The format schema must have the format "schemafile:MessageType"') + raise RuntimeError( + 'The format schema must have the format "schemafile:MessageType"' + ) path = splitted[0] self.schemadir = os.path.dirname(path) self.schemaname = os.path.basename(path) @@ -57,36 +65,43 @@ class FormatSchemaSplitted: self.schemaname = self.schemaname + ".proto" self.message_type = splitted[1] + def decode(input, output, format_schema, format): if not type(format_schema) is FormatSchemaSplitted: format_schema = FormatSchemaSplitted(format_schema) msgindex = 1 - if format == 'protobuflist': - read_varint(input) # envelope msg size + if format == "protobuflist": + read_varint(input) # envelope msg size while True: - if format == 'protobuflist': - read_varint(input) # wiretype and field id of nested msg + if format == "protobuflist": + read_varint(input) # wiretype and field id of nested msg sz = read_varint(input) if sz is None: break - output.write("MESSAGE #{msgindex} AT 0x{msgoffset:08X}\n".format(msgindex=msgindex, msgoffset=input.tell()).encode()) + output.write( + "MESSAGE #{msgindex} AT 0x{msgoffset:08X}\n".format( + msgindex=msgindex, msgoffset=input.tell() + ).encode() + ) output.flush() msg = input.read(sz) if len(msg) < sz: - raise EOFError('Unexpected end of file') - protoc = os.getenv('PROTOC_BINARY', 'protoc') - with subprocess.Popen([protoc, - "--decode", format_schema.message_type, format_schema.schemaname], - cwd=format_schema.schemadir, - 
stdin=subprocess.PIPE, - stdout=output, - shell=False) as proc: + raise EOFError("Unexpected end of file") + protoc = os.getenv("PROTOC_BINARY", "protoc") + with subprocess.Popen( + [protoc, "--decode", format_schema.message_type, format_schema.schemaname], + cwd=format_schema.schemadir, + stdin=subprocess.PIPE, + stdout=output, + shell=False, + ) as proc: proc.communicate(msg) if proc.returncode != 0: raise RuntimeError("protoc returned code " + str(proc.returncode)) output.flush() msgindex = msgindex + 1 + def encode(input, output, format_schema, format): if not type(format_schema) is FormatSchemaSplitted: format_schema = FormatSchemaSplitted(format_schema) @@ -97,7 +112,11 @@ def encode(input, output, format_schema, format): if len(line) == 0: break if not line.startswith(b"MESSAGE #"): - raise RuntimeError("The line at 0x{line_offset:08X} must start with the text 'MESSAGE #'".format(line_offset=line_offset)) + raise RuntimeError( + "The line at 0x{line_offset:08X} must start with the text 'MESSAGE #'".format( + line_offset=line_offset + ) + ) msg = b"" while True: line_offset = input.tell() @@ -105,27 +124,29 @@ def encode(input, output, format_schema, format): if line.startswith(b"MESSAGE #") or len(line) == 0: break msg += line - protoc = os.getenv('PROTOC_BINARY', 'protoc') - with subprocess.Popen([protoc, - "--encode", format_schema.message_type, format_schema.schemaname], - cwd=format_schema.schemadir, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - shell=False) as proc: + protoc = os.getenv("PROTOC_BINARY", "protoc") + with subprocess.Popen( + [protoc, "--encode", format_schema.message_type, format_schema.schemaname], + cwd=format_schema.schemadir, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + shell=False, + ) as proc: msgbin = proc.communicate(msg)[0] if proc.returncode != 0: raise RuntimeError("protoc returned code " + str(proc.returncode)) - if format == 'protobuflist': + if format == "protobuflist": field_number = 1 - wire_type = 2 # length-delimited + wire_type = 2 # length-delimited write_varint(buf, (field_number << 3) | wire_type) write_varint(buf, len(msgbin)) buf.write(msgbin) - if format == 'protobuflist': + if format == "protobuflist": write_varint(output, len(buf.getvalue())) output.write(buf.getvalue()) output.flush() + def decode_and_check(input, output, format_schema, format): input_data = input.read() output.write(b"Binary representation:\n") @@ -152,24 +173,56 @@ def decode_and_check(input, output, format_schema, format): output.write(b"\nBinary representation is as expected\n") output.flush() else: - output.write(b"\nBinary representation differs from the expected one (listed below):\n") + output.write( + b"\nBinary representation differs from the expected one (listed below):\n" + ) output.flush() write_hexdump(output, encoded_data) sys.exit(1) + if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Encodes or decodes length-delimited protobuf messages.') - parser.add_argument('--input', help='The input file, the standard input will be used if not specified.') - parser.add_argument('--output', help='The output file, the standard output will be used if not specified') - parser.add_argument('--format_schema', required=True, help='Format schema in the format "schemafile:MessageType"') - parser.add_argument('--format', choices=['protobuf', 'protobuflist'], default='protobuf', help='The input/output format, "protobuf" if not specified') + parser = argparse.ArgumentParser( + description="Encodes or decodes length-delimited protobuf 
messages." + ) + parser.add_argument( + "--input", + help="The input file, the standard input will be used if not specified.", + ) + parser.add_argument( + "--output", + help="The output file, the standard output will be used if not specified", + ) + parser.add_argument( + "--format_schema", + required=True, + help='Format schema in the format "schemafile:MessageType"', + ) + parser.add_argument( + "--format", + choices=["protobuf", "protobuflist"], + default="protobuf", + help='The input/output format, "protobuf" if not specified', + ) group = parser.add_mutually_exclusive_group(required=True) - group.add_argument('--encode', action='store_true', help='Specify to encode length-delimited messages.' - 'The utility will read text-format messages of the given type from the input and write it in binary to the output.') - group.add_argument('--decode', action='store_true', help='Specify to decode length-delimited messages.' - 'The utility will read messages in binary from the input and write text-format messages to the output.') - group.add_argument('--decode_and_check', action='store_true', help='The same as --decode, and the utility will then encode ' - ' the decoded data back to the binary form to check that the result of that encoding is the same as the input was.') + group.add_argument( + "--encode", + action="store_true", + help="Specify to encode length-delimited messages." + "The utility will read text-format messages of the given type from the input and write it in binary to the output.", + ) + group.add_argument( + "--decode", + action="store_true", + help="Specify to decode length-delimited messages." + "The utility will read messages in binary from the input and write text-format messages to the output.", + ) + group.add_argument( + "--decode_and_check", + action="store_true", + help="The same as --decode, and the utility will then encode " + " the decoded data back to the binary form to check that the result of that encoding is the same as the input was.", + ) args = parser.parse_args() custom_input_file = None diff --git a/tests/queries/0_stateless/helpers/pure_http_client.py b/tests/queries/0_stateless/helpers/pure_http_client.py index 3335f141bb5..0e7a4d27f4f 100644 --- a/tests/queries/0_stateless/helpers/pure_http_client.py +++ b/tests/queries/0_stateless/helpers/pure_http_client.py @@ -5,64 +5,75 @@ import requests import time import pandas as pd -CLICKHOUSE_HOST = os.environ.get('CLICKHOUSE_HOST', '127.0.0.1') -CLICKHOUSE_PORT_HTTP = os.environ.get('CLICKHOUSE_PORT_HTTP', '8123') -CLICKHOUSE_SERVER_URL_STR = 'http://' + ':'.join(str(s) for s in [CLICKHOUSE_HOST, CLICKHOUSE_PORT_HTTP]) + "/" -CLICKHOUSE_DATABASE = os.environ.get('CLICKHOUSE_DATABASE', 'test') +CLICKHOUSE_HOST = os.environ.get("CLICKHOUSE_HOST", "127.0.0.1") +CLICKHOUSE_PORT_HTTP = os.environ.get("CLICKHOUSE_PORT_HTTP", "8123") +CLICKHOUSE_SERVER_URL_STR = ( + "http://" + ":".join(str(s) for s in [CLICKHOUSE_HOST, CLICKHOUSE_PORT_HTTP]) + "/" +) +CLICKHOUSE_DATABASE = os.environ.get("CLICKHOUSE_DATABASE", "test") + class ClickHouseClient: - def __init__(self, host = CLICKHOUSE_SERVER_URL_STR): + def __init__(self, host=CLICKHOUSE_SERVER_URL_STR): self.host = host - def query(self, query, connection_timeout=1500, settings=dict(), binary_result=False): + def query( + self, query, connection_timeout=1500, settings=dict(), binary_result=False + ): NUMBER_OF_TRIES = 30 DELAY = 10 params = { - 'timeout_before_checking_execution_speed': 120, - 'max_execution_time': 6000, - 'database': CLICKHOUSE_DATABASE, + 
"timeout_before_checking_execution_speed": 120, + "max_execution_time": 6000, + "database": CLICKHOUSE_DATABASE, } # Add extra settings to params params = {**params, **settings} for i in range(NUMBER_OF_TRIES): - r = requests.post(self.host, params=params, timeout=connection_timeout, data=query) + r = requests.post( + self.host, params=params, timeout=connection_timeout, data=query + ) if r.status_code == 200: return r.content if binary_result else r.text else: - print('ATTENTION: try #%d failed' % i) - if i != (NUMBER_OF_TRIES-1): + print("ATTENTION: try #%d failed" % i) + if i != (NUMBER_OF_TRIES - 1): print(query) print(r.text) - time.sleep(DELAY*(i+1)) + time.sleep(DELAY * (i + 1)) else: raise ValueError(r.text) - def query_return_df(self, query, connection_timeout = 1500): + def query_return_df(self, query, connection_timeout=1500): data = self.query(query, connection_timeout) - df = pd.read_csv(io.StringIO(data), sep = '\t') + df = pd.read_csv(io.StringIO(data), sep="\t") return df def query_with_data(self, query, data, connection_timeout=1500, settings=dict()): params = { - 'query': query, - 'timeout_before_checking_execution_speed': 120, - 'max_execution_time': 6000, - 'database': CLICKHOUSE_DATABASE, + "query": query, + "timeout_before_checking_execution_speed": 120, + "max_execution_time": 6000, + "database": CLICKHOUSE_DATABASE, } - headers = { - "Content-Type": "application/binary" - } + headers = {"Content-Type": "application/binary"} # Add extra settings to params params = {**params, **settings} - r = requests.post(self.host, params=params, timeout=connection_timeout, data=data, headers=headers) + r = requests.post( + self.host, + params=params, + timeout=connection_timeout, + data=data, + headers=headers, + ) result = r.text if r.status_code == 200: return result else: - raise ValueError(r.text) \ No newline at end of file + raise ValueError(r.text) diff --git a/tests/queries/0_stateless/helpers/shell.py b/tests/queries/0_stateless/helpers/shell.py index 5c327a55d94..befb3dcd543 100644 --- a/tests/queries/0_stateless/helpers/shell.py +++ b/tests/queries/0_stateless/helpers/shell.py @@ -8,13 +8,14 @@ sys.path.insert(0, os.path.join(CURDIR)) import uexpect + class shell(object): - def __init__(self, command=None, name='', log=None, prompt='[#\$] '): + def __init__(self, command=None, name="", log=None, prompt="[#\$] "): if command is None: - command = ['/bin/bash', '--noediting'] + command = ["/bin/bash", "--noediting"] self.prompt = prompt self.client = uexpect.spawn(command) - self.client.eol('\r') + self.client.eol("\r") self.client.logger(log, prefix=name) self.client.timeout(20) self.client.expect(prompt, timeout=60) @@ -25,10 +26,10 @@ class shell(object): return io def __exit__(self, type, value, traceback): - self.client.reader['kill_event'].set() + self.client.reader["kill_event"].set() # send Ctrl-C - self.client.send('\x03', eol='') + self.client.send("\x03", eol="") time.sleep(0.3) - self.client.send('exit', eol='\r') - self.client.send('\x03', eol='') + self.client.send("exit", eol="\r") + self.client.send("\x03", eol="") return self.client.__exit__(type, value, traceback) diff --git a/tests/queries/0_stateless/helpers/uexpect.py b/tests/queries/0_stateless/helpers/uexpect.py index 7a633facc95..2e6d8aed19e 100644 --- a/tests/queries/0_stateless/helpers/uexpect.py +++ b/tests/queries/0_stateless/helpers/uexpect.py @@ -21,12 +21,14 @@ from threading import Thread, Event from subprocess import Popen from queue import Queue, Empty + class TimeoutError(Exception): def 
__init__(self, timeout): self.timeout = timeout def __str__(self): - return 'Timeout %.3fs' % float(self.timeout) + return "Timeout %.3fs" % float(self.timeout) + class ExpectTimeoutError(Exception): def __init__(self, pattern, timeout, buffer): @@ -35,14 +37,15 @@ class ExpectTimeoutError(Exception): self.buffer = buffer def __str__(self): - s = 'Timeout %.3fs ' % float(self.timeout) + s = "Timeout %.3fs " % float(self.timeout) if self.pattern: - s += 'for %s ' % repr(self.pattern.pattern) + s += "for %s " % repr(self.pattern.pattern) if self.buffer: - s += 'buffer %s' % repr(self.buffer[:]) - #s += ' or \'%s\'' % ','.join(['%x' % ord(c) for c in self.buffer[:]]) + s += "buffer %s" % repr(self.buffer[:]) + # s += ' or \'%s\'' % ','.join(['%x' % ord(c) for c in self.buffer[:]]) return s + class IO(object): class EOF(object): pass @@ -54,12 +57,12 @@ class IO(object): TIMEOUT = Timeout class Logger(object): - def __init__(self, logger, prefix=''): + def __init__(self, logger, prefix=""): self._logger = logger self._prefix = prefix def write(self, data): - self._logger.write(('\n' + data).replace('\n','\n' + self._prefix)) + self._logger.write(("\n" + data).replace("\n", "\n" + self._prefix)) def flush(self): self._logger.flush() @@ -76,7 +79,7 @@ class IO(object): self.reader = reader self._timeout = None self._logger = None - self._eol = '' + self._eol = "" def __enter__(self): return self @@ -84,7 +87,7 @@ class IO(object): def __exit__(self, type, value, traceback): self.close() - def logger(self, logger=None, prefix=''): + def logger(self, logger=None, prefix=""): if logger: self._logger = self.Logger(logger, prefix=prefix) return self._logger @@ -100,15 +103,15 @@ class IO(object): return self._eol def close(self, force=True): - self.reader['kill_event'].set() - os.system('pkill -TERM -P %d' % self.process.pid) + self.reader["kill_event"].set() + os.system("pkill -TERM -P %d" % self.process.pid) if force: self.process.kill() else: self.process.terminate() os.close(self.master) if self._logger: - self._logger.write('\n') + self._logger.write("\n") self._logger.flush() def send(self, data, eol=None): @@ -134,9 +137,9 @@ class IO(object): if self.buffer is not None: self.match = pattern.search(self.buffer, 0) if self.match is not None: - self.after = self.buffer[self.match.start():self.match.end()] - self.before = self.buffer[:self.match.start()] - self.buffer = self.buffer[self.match.end():] + self.after = self.buffer[self.match.start() : self.match.end()] + self.before = self.buffer[: self.match.start()] + self.buffer = self.buffer[self.match.end() :] break if timeleft < 0: break @@ -144,16 +147,16 @@ class IO(object): data = self.read(timeout=timeleft, raise_exception=True) except TimeoutError: if self._logger: - self._logger.write((self.buffer or '') + '\n') + self._logger.write((self.buffer or "") + "\n") self._logger.flush() exception = ExpectTimeoutError(pattern, timeout, self.buffer) self.buffer = None raise exception - timeleft -= (time.time() - start_time) + timeleft -= time.time() - start_time if data: self.buffer = (self.buffer + data) if self.buffer else data if self._logger: - self._logger.write((self.before or '') + (self.after or '')) + self._logger.write((self.before or "") + (self.after or "")) self._logger.flush() if self.match is None: exception = ExpectTimeoutError(pattern, timeout, self.buffer) @@ -162,15 +165,15 @@ class IO(object): return self.match def read(self, timeout=0, raise_exception=False): - data = '' + data = "" timeleft = timeout try: - while timeleft >= 
0 : + while timeleft >= 0: start_time = time.time() data += self.queue.get(timeout=timeleft) if data: break - timeleft -= (time.time() - start_time) + timeleft -= time.time() - start_time except Empty: if data: return data @@ -182,9 +185,17 @@ class IO(object): return data + def spawn(command): master, slave = pty.openpty() - process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1) + process = Popen( + command, + preexec_fn=os.setsid, + stdout=slave, + stdin=slave, + stderr=slave, + bufsize=1, + ) os.close(slave) queue = Queue() @@ -193,12 +204,18 @@ def spawn(command): thread.daemon = True thread.start() - return IO(process, master, queue, reader={'thread':thread, 'kill_event':reader_kill_event}) + return IO( + process, + master, + queue, + reader={"thread": thread, "kill_event": reader_kill_event}, + ) + def reader(process, out, queue, kill_event): while True: try: - data = os.read(out, 65536).decode(errors='replace') + data = os.read(out, 65536).decode(errors="replace") queue.put(data) except: if kill_event.is_set(): diff --git a/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql b/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql index 63a833af114..c7a34c493c9 100644 --- a/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql +++ b/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql @@ -1,4 +1,4 @@ --- Tags: replica, distributed +-- Tags: replica, distributed, no-random-settings SET max_parallel_replicas = 2; SELECT EventTime::DateTime('Asia/Dubai') FROM remote('127.0.0.{1|2}', test, hits) ORDER BY EventTime DESC LIMIT 10 diff --git a/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh index a1136a47319..d14a174d3a0 100755 --- a/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh +++ b/tests/queries/1_stateful/00161_parallel_parsing_with_names.sh @@ -13,9 +13,9 @@ do $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime('Asia/Dubai'), b String) ENGINE=Memory()" echo "$format, false"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ + $CLICKHOUSE_CLIENT --max_block_size=65505 --output_format_parallel_formatting=false -q \ "SELECT URLRegions as d, toTimeZone(ClientEventTime, 'Asia/Dubai') as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \ - $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names FORMAT $format" + $CLICKHOUSE_CLIENT --max_block_size=65505 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=false -q "INSERT INTO parsing_with_names FORMAT $format" $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" @@ -23,9 +23,9 @@ do $CLICKHOUSE_CLIENT -q "CREATE TABLE parsing_with_names(c FixedString(16), a DateTime('Asia/Dubai'), b String) ENGINE=Memory()" echo "$format, true"; - $CLICKHOUSE_CLIENT --output_format_parallel_formatting=false -q \ + $CLICKHOUSE_CLIENT --max_block_size=65505 --output_format_parallel_formatting=false -q \ "SELECT URLRegions as d, toTimeZone(ClientEventTime, 'Asia/Dubai') as a, MobilePhoneModel as b, ParamPrice as e, ClientIP6 as c FROM test.hits LIMIT 50000 Format $format" | \ - $CLICKHOUSE_CLIENT --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO 
parsing_with_names FORMAT $format" + $CLICKHOUSE_CLIENT --max_block_size=65505 --input_format_skip_unknown_fields=1 --input_format_parallel_parsing=true -q "INSERT INTO parsing_with_names FORMAT $format" $CLICKHOUSE_CLIENT -q "SELECT * FROM parsing_with_names;" | md5sum $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parsing_with_names" diff --git a/tests/testflows/aes_encryption/regression.py b/tests/testflows/aes_encryption/regression.py index 1ba4ca85ba3..c12aaca861d 100755 --- a/tests/testflows/aes_encryption/regression.py +++ b/tests/testflows/aes_encryption/regression.py @@ -16,68 +16,75 @@ issue_24029 = "https://github.com/ClickHouse/ClickHouse/issues/24029" xfails = { # encrypt - "encrypt/invalid key or iv length for mode/mode=\"'aes-???-gcm'\", key_len=??, iv_len=12, aad=True/iv is too short": - [(Fail, "known issue")], - "encrypt/invalid key or iv length for mode/mode=\"'aes-???-gcm'\", key_len=??, iv_len=12, aad=True/iv is too long": - [(Fail, "known issue")], - "encrypt/invalid plaintext data type/data_type='IPv6', value=\"toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001')\"": - [(Fail, "known issue as IPv6 is implemented as FixedString(16)")], + "encrypt/invalid key or iv length for mode/mode=\"'aes-???-gcm'\", key_len=??, iv_len=12, aad=True/iv is too short": [ + (Fail, "known issue") + ], + "encrypt/invalid key or iv length for mode/mode=\"'aes-???-gcm'\", key_len=??, iv_len=12, aad=True/iv is too long": [ + (Fail, "known issue") + ], + "encrypt/invalid plaintext data type/data_type='IPv6', value=\"toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001')\"": [ + (Fail, "known issue as IPv6 is implemented as FixedString(16)") + ], # encrypt_mysql - "encrypt_mysql/key or iv length for mode/mode=\"'aes-???-ecb'\", key_len=??, iv_len=None": - [(Fail, issue_18251)], - "encrypt_mysql/invalid parameters/iv not valid for mode": - [(Fail, issue_18251)], - "encrypt_mysql/invalid plaintext data type/data_type='IPv6', value=\"toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001')\"": - [(Fail, "known issue as IPv6 is implemented as FixedString(16)")], + "encrypt_mysql/key or iv length for mode/mode=\"'aes-???-ecb'\", key_len=??, iv_len=None": [ + (Fail, issue_18251) + ], + "encrypt_mysql/invalid parameters/iv not valid for mode": [(Fail, issue_18251)], + "encrypt_mysql/invalid plaintext data type/data_type='IPv6', value=\"toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001')\"": [ + (Fail, "known issue as IPv6 is implemented as FixedString(16)") + ], # decrypt_mysql - "decrypt_mysql/key or iv length for mode/mode=\"'aes-???-ecb'\", key_len=??, iv_len=None:": - [(Fail, issue_18251)], + "decrypt_mysql/key or iv length for mode/mode=\"'aes-???-ecb'\", key_len=??, iv_len=None:": [ + (Fail, issue_18251) + ], # compatibility - "compatibility/insert/encrypt using materialized view/:": - [(Fail, issue_18249)], - "compatibility/insert/decrypt using materialized view/:": - [(Error, issue_18249)], - "compatibility/insert/aes encrypt mysql using materialized view/:": - [(Fail, issue_18249)], - "compatibility/insert/aes decrypt mysql using materialized view/:": - [(Error, issue_18249)], - "compatibility/select/decrypt unique": - [(Fail, issue_18249)], - "compatibility/mysql/:engine/decrypt/mysql_datatype='TEXT'/:": - [(Fail, issue_18250)], - "compatibility/mysql/:engine/decrypt/mysql_datatype='VARCHAR(100)'/:": - [(Fail, issue_18250)], - "compatibility/mysql/:engine/encrypt/mysql_datatype='TEXT'/:": - [(Fail, issue_18250)], - "compatibility/mysql/:engine/encrypt/mysql_datatype='VARCHAR(100)'/:": - [(Fail, issue_18250)], 
+ "compatibility/insert/encrypt using materialized view/:": [(Fail, issue_18249)], + "compatibility/insert/decrypt using materialized view/:": [(Error, issue_18249)], + "compatibility/insert/aes encrypt mysql using materialized view/:": [ + (Fail, issue_18249) + ], + "compatibility/insert/aes decrypt mysql using materialized view/:": [ + (Error, issue_18249) + ], + "compatibility/select/decrypt unique": [(Fail, issue_18249)], + "compatibility/mysql/:engine/decrypt/mysql_datatype='TEXT'/:": [ + (Fail, issue_18250) + ], + "compatibility/mysql/:engine/decrypt/mysql_datatype='VARCHAR(100)'/:": [ + (Fail, issue_18250) + ], + "compatibility/mysql/:engine/encrypt/mysql_datatype='TEXT'/:": [ + (Fail, issue_18250) + ], + "compatibility/mysql/:engine/encrypt/mysql_datatype='VARCHAR(100)'/:": [ + (Fail, issue_18250) + ], # reinterpretAsFixedString for UUID stopped working - "decrypt/decryption/mode=:datatype=UUID:": - [(Fail, issue_24029)], - "encrypt/:/mode=:datatype=UUID:": - [(Fail, issue_24029)], - "decrypt/invalid ciphertext/mode=:/invalid ciphertext=reinterpretAsFixedString(toUUID:": - [(Fail, issue_24029)], - "encrypt_mysql/encryption/mode=:datatype=UUID:": - [(Fail, issue_24029)], - "decrypt_mysql/decryption/mode=:datatype=UUID:": - [(Fail, issue_24029)], - "decrypt_mysql/invalid ciphertext/mode=:/invalid ciphertext=reinterpretAsFixedString(toUUID:": - [(Fail, issue_24029)], + "decrypt/decryption/mode=:datatype=UUID:": [(Fail, issue_24029)], + "encrypt/:/mode=:datatype=UUID:": [(Fail, issue_24029)], + "decrypt/invalid ciphertext/mode=:/invalid ciphertext=reinterpretAsFixedString(toUUID:": [ + (Fail, issue_24029) + ], + "encrypt_mysql/encryption/mode=:datatype=UUID:": [(Fail, issue_24029)], + "decrypt_mysql/decryption/mode=:datatype=UUID:": [(Fail, issue_24029)], + "decrypt_mysql/invalid ciphertext/mode=:/invalid ciphertext=reinterpretAsFixedString(toUUID:": [ + (Fail, issue_24029) + ], } + @TestFeature @Name("aes encryption") @ArgumentParser(argparser) @Specifications(SRS_008_ClickHouse_AES_Encryption_Functions) @Requirements( - RQ_SRS008_AES_Functions("1.0"), - RQ_SRS008_AES_Functions_DifferentModes("1.0") + RQ_SRS008_AES_Functions("1.0"), RQ_SRS008_AES_Functions_DifferentModes("1.0") ) @XFails(xfails) -def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None): - """ClickHouse AES encryption functions regression module. 
- """ +def regression( + self, local, clickhouse_binary_path, clickhouse_version=None, stress=None +): + """ClickHouse AES encryption functions regression module.""" nodes = { "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"), } @@ -86,19 +93,49 @@ def regression(self, local, clickhouse_binary_path, clickhouse_version=None, str self.context.stress = stress self.context.clickhouse_version = clickhouse_version - with Cluster(local, clickhouse_binary_path, nodes=nodes, - docker_compose_project_dir=os.path.join(current_dir(), "aes_encryption_env")) as cluster: + with Cluster( + local, + clickhouse_binary_path, + nodes=nodes, + docker_compose_project_dir=os.path.join(current_dir(), "aes_encryption_env"), + ) as cluster: self.context.cluster = cluster with Pool(5) as pool: try: - Feature(run=load("aes_encryption.tests.encrypt", "feature"), flags=TE, parallel=True, executor=pool) - Feature(run=load("aes_encryption.tests.decrypt", "feature"), flags=TE, parallel=True, executor=pool) - Feature(run=load("aes_encryption.tests.encrypt_mysql", "feature"), flags=TE, parallel=True, executor=pool) - Feature(run=load("aes_encryption.tests.decrypt_mysql", "feature"), flags=TE, parallel=True, executor=pool) - Feature(run=load("aes_encryption.tests.compatibility.feature", "feature"), flags=TE, parallel=True, executor=pool) + Feature( + run=load("aes_encryption.tests.encrypt", "feature"), + flags=TE, + parallel=True, + executor=pool, + ) + Feature( + run=load("aes_encryption.tests.decrypt", "feature"), + flags=TE, + parallel=True, + executor=pool, + ) + Feature( + run=load("aes_encryption.tests.encrypt_mysql", "feature"), + flags=TE, + parallel=True, + executor=pool, + ) + Feature( + run=load("aes_encryption.tests.decrypt_mysql", "feature"), + flags=TE, + parallel=True, + executor=pool, + ) + Feature( + run=load("aes_encryption.tests.compatibility.feature", "feature"), + flags=TE, + parallel=True, + executor=pool, + ) finally: join() + if main(): regression() diff --git a/tests/testflows/aes_encryption/requirements/requirements.py b/tests/testflows/aes_encryption/requirements/requirements.py index 22259aef65e..0fbbea7e85a 100644 --- a/tests/testflows/aes_encryption/requirements/requirements.py +++ b/tests/testflows/aes_encryption/requirements/requirements.py @@ -9,1699 +9,1782 @@ from testflows.core import Requirement Heading = Specification.Heading RQ_SRS008_AES_Functions = Requirement( - name='RQ.SRS008.AES.Functions', - version='1.0', + name="RQ.SRS008.AES.Functions", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [AES] encryption functions to encrypt and decrypt data.\n' - '\n' - ), + "[ClickHouse] SHALL support [AES] encryption functions to encrypt and decrypt data.\n" + "\n" + ), link=None, level=3, - num='4.1.1') + num="4.1.1", +) RQ_SRS008_AES_Functions_Compatibility_MySQL = Requirement( - name='RQ.SRS008.AES.Functions.Compatibility.MySQL', - version='1.0', + name="RQ.SRS008.AES.Functions.Compatibility.MySQL", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [AES] encryption functions compatible with [MySQL 5.7].\n' - '\n' - ), + "[ClickHouse] SHALL support [AES] encryption functions compatible with [MySQL 5.7].\n" + "\n" + ), link=None, level=3, - num='4.2.1') + num="4.2.1", +) RQ_SRS008_AES_Functions_Compatibility_Dictionaries = Requirement( - name='RQ.SRS008.AES.Functions.Compatibility.Dictionaries', - version='1.0', + 
name="RQ.SRS008.AES.Functions.Compatibility.Dictionaries", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support encryption and decryption of data accessed on remote\n' - '[MySQL] servers using [MySQL Dictionary].\n' - '\n' - ), + "[ClickHouse] SHALL support encryption and decryption of data accessed on remote\n" + "[MySQL] servers using [MySQL Dictionary].\n" + "\n" + ), link=None, level=3, - num='4.2.2') + num="4.2.2", +) RQ_SRS008_AES_Functions_Compatibility_Engine_Database_MySQL = Requirement( - name='RQ.SRS008.AES.Functions.Compatibility.Engine.Database.MySQL', - version='1.0', + name="RQ.SRS008.AES.Functions.Compatibility.Engine.Database.MySQL", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Database Engine],\n' - '\n' - ), + "[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Database Engine],\n" + "\n" + ), link=None, level=3, - num='4.2.3') + num="4.2.3", +) RQ_SRS008_AES_Functions_Compatibility_Engine_Table_MySQL = Requirement( - name='RQ.SRS008.AES.Functions.Compatibility.Engine.Table.MySQL', - version='1.0', + name="RQ.SRS008.AES.Functions.Compatibility.Engine.Table.MySQL", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Engine].\n' - '\n' - ), + "[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Engine].\n" + "\n" + ), link=None, level=3, - num='4.2.4') + num="4.2.4", +) RQ_SRS008_AES_Functions_Compatibility_TableFunction_MySQL = Requirement( - name='RQ.SRS008.AES.Functions.Compatibility.TableFunction.MySQL', - version='1.0', + name="RQ.SRS008.AES.Functions.Compatibility.TableFunction.MySQL", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Function].\n' - '\n' - ), + "[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Function].\n" + "\n" + ), link=None, level=3, - num='4.2.5') + num="4.2.5", +) RQ_SRS008_AES_Functions_DifferentModes = Requirement( - name='RQ.SRS008.AES.Functions.DifferentModes', - version='1.0', + name="RQ.SRS008.AES.Functions.DifferentModes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL allow different modes to be supported in a single SQL statement\n' - 'using explicit function parameters.\n' - '\n' - ), + "[ClickHouse] SHALL allow different modes to be supported in a single SQL statement\n" + "using explicit function parameters.\n" + "\n" + ), link=None, level=3, - num='4.3.1') + num="4.3.1", +) RQ_SRS008_AES_Functions_DataFromMultipleSources = Requirement( - name='RQ.SRS008.AES.Functions.DataFromMultipleSources', - version='1.0', + name="RQ.SRS008.AES.Functions.DataFromMultipleSources", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support handling encryption and decryption of data from multiple sources\n' - 'in the `SELECT` statement, including [ClickHouse] [MergeTree] table as well as [MySQL Dictionary],\n' - '[MySQL Database Engine], [MySQL Table Engine], and [MySQL Table Function]\n' - 'with possibly different encryption schemes.\n' - '\n' - ), + "[ClickHouse] SHALL support handling encryption and decryption of data from 
multiple sources\n" + "in the `SELECT` statement, including [ClickHouse] [MergeTree] table as well as [MySQL Dictionary],\n" + "[MySQL Database Engine], [MySQL Table Engine], and [MySQL Table Function]\n" + "with possibly different encryption schemes.\n" + "\n" + ), link=None, level=3, - num='4.4.1') + num="4.4.1", +) RQ_SRS008_AES_Functions_SuppressOutputOfSensitiveValues = Requirement( - name='RQ.SRS008.AES.Functions.SuppressOutputOfSensitiveValues', - version='1.0', + name="RQ.SRS008.AES.Functions.SuppressOutputOfSensitiveValues", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL suppress output of [AES] `string` and `key` parameters to the system log,\n' - 'error log, and `query_log` table to prevent leakage of sensitive values.\n' - '\n' - ), + "[ClickHouse] SHALL suppress output of [AES] `string` and `key` parameters to the system log,\n" + "error log, and `query_log` table to prevent leakage of sensitive values.\n" + "\n" + ), link=None, level=3, - num='4.5.1') + num="4.5.1", +) RQ_SRS008_AES_Functions_InvalidParameters = Requirement( - name='RQ.SRS008.AES.Functions.InvalidParameters', - version='1.0', + name="RQ.SRS008.AES.Functions.InvalidParameters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when parameters are invalid.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when parameters are invalid.\n" "\n" + ), link=None, level=3, - num='4.6.1') + num="4.6.1", +) RQ_SRS008_AES_Functions_Mismatched_Key = Requirement( - name='RQ.SRS008.AES.Functions.Mismatched.Key', - version='1.0', + name="RQ.SRS008.AES.Functions.Mismatched.Key", + version="1.0", priority=None, group=None, type=None, uid=None, - description=( - '[ClickHouse] SHALL return garbage for mismatched keys.\n' - '\n' - ), + description=("[ClickHouse] SHALL return garbage for mismatched keys.\n" "\n"), link=None, level=3, - num='4.7.1') + num="4.7.1", +) RQ_SRS008_AES_Functions_Mismatched_IV = Requirement( - name='RQ.SRS008.AES.Functions.Mismatched.IV', - version='1.0', + name="RQ.SRS008.AES.Functions.Mismatched.IV", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return garbage for mismatched initialization vector for the modes that use it.\n' - '\n' - ), + "[ClickHouse] SHALL return garbage for mismatched initialization vector for the modes that use it.\n" + "\n" + ), link=None, level=3, - num='4.7.2') + num="4.7.2", +) RQ_SRS008_AES_Functions_Mismatched_AAD = Requirement( - name='RQ.SRS008.AES.Functions.Mismatched.AAD', - version='1.0', + name="RQ.SRS008.AES.Functions.Mismatched.AAD", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return garbage for mismatched additional authentication data for the modes that use it.\n' - '\n' - ), + "[ClickHouse] SHALL return garbage for mismatched additional authentication data for the modes that use it.\n" + "\n" + ), link=None, level=3, - num='4.7.3') + num="4.7.3", +) RQ_SRS008_AES_Functions_Mismatched_Mode = Requirement( - name='RQ.SRS008.AES.Functions.Mismatched.Mode', - version='1.0', + name="RQ.SRS008.AES.Functions.Mismatched.Mode", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error or garbage for mismatched mode.\n' - '\n' - ), + "[ClickHouse] SHALL return an error or garbage for mismatched mode.\n" "\n" + ), link=None, level=3, - num='4.7.4') + num="4.7.4", +) 
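To make the mismatched-key requirement above concrete, here is a minimal SQL sketch (an editorial illustration, not part of the requirements file; the mode, keys, and IV are arbitrary examples). With a stream mode such as aes-256-ofb, decrypting with the wrong key succeeds but yields garbage bytes instead of the plaintext; padded modes may raise an error instead.

-- Encrypt with one 32-byte key, then decrypt with a different 32-byte key.
WITH encrypt('aes-256-ofb', 'secret payload', 'keykeykeykeykeykeykeykeykeykey12', '0123456789abcdef') AS ct
SELECT
    hex(ct) AS ciphertext,
    hex(decrypt('aes-256-ofb', ct, 'anotherkeyanotherkeyanotherkey12', '0123456789abcdef')) AS mismatched_key_output;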
RQ_SRS008_AES_Functions_Check_Performance = Requirement( - name='RQ.SRS008.AES.Functions.Check.Performance', - version='1.0', + name="RQ.SRS008.AES.Functions.Check.Performance", + version="1.0", priority=None, group=None, type=None, uid=None, - description=( - 'Performance of [AES] encryption functions SHALL be measured.\n' - '\n' - ), + description=("Performance of [AES] encryption functions SHALL be measured.\n" "\n"), link=None, level=3, - num='4.8.1') + num="4.8.1", +) RQ_SRS008_AES_Function_Check_Performance_BestCase = Requirement( - name='RQ.SRS008.AES.Function.Check.Performance.BestCase', - version='1.0', + name="RQ.SRS008.AES.Function.Check.Performance.BestCase", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 'Performance of [AES] encryption functions SHALL be checked for the best case\n' - 'scenario where there is one key, one initialization vector, and one large stream of data.\n' - '\n' - ), + "Performance of [AES] encryption functions SHALL be checked for the best case\n" + "scenario where there is one key, one initialization vector, and one large stream of data.\n" + "\n" + ), link=None, level=3, - num='4.8.2') + num="4.8.2", +) RQ_SRS008_AES_Function_Check_Performance_WorstCase = Requirement( - name='RQ.SRS008.AES.Function.Check.Performance.WorstCase', - version='1.0', + name="RQ.SRS008.AES.Function.Check.Performance.WorstCase", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 'Performance of [AES] encryption functions SHALL be checked for the worst case\n' - 'where there are `N` keys, `N` initialization vectors and `N` very small streams of data.\n' - '\n' - ), + "Performance of [AES] encryption functions SHALL be checked for the worst case\n" + "where there are `N` keys, `N` initialization vectors and `N` very small streams of data.\n" + "\n" + ), link=None, level=3, - num='4.8.3') + num="4.8.3", +) RQ_SRS008_AES_Functions_Check_Compression = Requirement( - name='RQ.SRS008.AES.Functions.Check.Compression', - version='1.0', + name="RQ.SRS008.AES.Functions.Check.Compression", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 'Effect of [AES] encryption on column compression SHALL be measured.\n' - '\n' - ), + "Effect of [AES] encryption on column compression SHALL be measured.\n" "\n" + ), link=None, level=3, - num='4.8.4') + num="4.8.4", +) RQ_SRS008_AES_Functions_Check_Compression_LowCardinality = Requirement( - name='RQ.SRS008.AES.Functions.Check.Compression.LowCardinality', - version='1.0', + name="RQ.SRS008.AES.Functions.Check.Compression.LowCardinality", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 'Effect of [AES] encryption on the compression of a column with [LowCardinality] data type\n' - 'SHALL be measured.\n' - '\n' - ), + "Effect of [AES] encryption on the compression of a column with [LowCardinality] data type\n" + "SHALL be measured.\n" + "\n" + ), link=None, level=3, - num='4.8.5') + num="4.8.5", +) RQ_SRS008_AES_Encrypt_Function = Requirement( - name='RQ.SRS008.AES.Encrypt.Function', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `encrypt` function to encrypt data using [AES].\n' - '\n' - ), + "[ClickHouse] SHALL support `encrypt` function to encrypt data using [AES].\n" + "\n" + ), link=None, level=3, - num='4.9.1') + num="4.9.1", +) RQ_SRS008_AES_Encrypt_Function_Syntax = Requirement( - 
name='RQ.SRS008.AES.Encrypt.Function.Syntax', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `encrypt` function\n' - '\n' - '```sql\n' - 'encrypt(mode, plaintext, key, [iv, aad])\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `encrypt` function\n" + "\n" + "```sql\n" + "encrypt(mode, plaintext, key, [iv, aad])\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.9.2') + num="4.9.2", +) RQ_SRS008_AES_Encrypt_Function_NIST_TestVectors = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.NIST.TestVectors', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.NIST.TestVectors", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] `encrypt` function output SHALL produce output that matches [NIST test vectors].\n' - '\n' - ), + "[ClickHouse] `encrypt` function output SHALL produce output that matches [NIST test vectors].\n" + "\n" + ), link=None, level=3, - num='4.9.3') + num="4.9.3", +) RQ_SRS008_AES_Encrypt_Function_Parameters_PlainText = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.Parameters.PlainText', - version='2.0', + name="RQ.SRS008.AES.Encrypt.Function.Parameters.PlainText", + version="2.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `plaintext` with `String`, `FixedString`, `Nullable(String)`,\n' - '`Nullable(FixedString)`, `LowCardinality(String)`, or `LowCardinality(FixedString(N))` data types as\n' - 'the second parameter to the `encrypt` function that SHALL specify the data to be encrypted.\n' - '\n' - '\n' - ), + "[ClickHouse] SHALL support `plaintext` with `String`, `FixedString`, `Nullable(String)`,\n" + "`Nullable(FixedString)`, `LowCardinality(String)`, or `LowCardinality(FixedString(N))` data types as\n" + "the second parameter to the `encrypt` function that SHALL specify the data to be encrypted.\n" + "\n" + "\n" + ), link=None, level=3, - num='4.9.4') + num="4.9.4", +) RQ_SRS008_AES_Encrypt_Function_Parameters_Key = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.Parameters.Key', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.Parameters.Key", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n' - 'as the parameter to the `encrypt` function that SHALL specify the encryption key.\n' - '\n' - ), + "[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n" + "as the parameter to the `encrypt` function that SHALL specify the encryption key.\n" + "\n" + ), link=None, level=3, - num='4.9.5') + num="4.9.5", +) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.Parameters.Mode", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the first parameter\n' - 'to the `encrypt` function that SHALL specify encryption key length and block encryption mode.\n' - '\n' - ), + "[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the first parameter\n" + "to the `encrypt` function that SHALL specify encryption key length and block encryption mode.\n" + "\n" + ), link=None, level=3, - num='4.9.6') + num="4.9.6", 
+) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.ValuesFormat', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.ValuesFormat", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n' - 'of the `encrypt` function where\n' - 'the `key_length` SHALL specifies the length of the key and SHALL accept\n' - '`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n' - 'mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as\n' - '[CTR] and [GCM] as the values. For example, `aes-256-ofb`.\n' - '\n' - ), + "[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n" + "of the `encrypt` function where\n" + "the `key_length` SHALL specifies the length of the key and SHALL accept\n" + "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" + "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as\n" + "[CTR] and [GCM] as the values. For example, `aes-256-ofb`.\n" + "\n" + ), link=None, level=3, - num='4.9.7') + num="4.9.7", +) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_Invalid = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `encrypt`\n' - 'function is not valid with the exception where such a mode is supported by the underlying\n' - '[OpenSSL] implementation.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `encrypt`\n" + "function is not valid with the exception where such a mode is supported by the underlying\n" + "[OpenSSL] implementation.\n" + "\n" + ), link=None, level=3, - num='4.9.8') + num="4.9.8", +) RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Values = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Values', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Values", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n' - 'of the `encrypt` function:\n' - '\n' - '* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n' - '* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n' - '* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n' - '* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n' - '* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n' - '* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n' - '* `aes-128-cfb128` that SHALL use [CFB128] block mode encryption with 128 bit key\n' - '* `aes-192-cfb128` that SHALL use [CFB128] block mode encryption with 192 bit key\n' - '* `aes-256-cfb128` that SHALL use [CFB128] block mode encryption with 256 bit key\n' - '* `aes-128-ofb` that SHALL use [OFB] block mode encryption with 128 bit key\n' - '* `aes-192-ofb` that SHALL use [OFB] block mode encryption with 192 bit key\n' - '* `aes-256-ofb` that SHALL use [OFB] 
block mode encryption with 256 bit key\n' - '* `aes-128-gcm` that SHALL use [GCM] block mode encryption with 128 bit key\n' - ' and [AEAD] 16-byte tag is appended to the resulting ciphertext according to\n' - ' the [RFC5116]\n' - '* `aes-192-gcm` that SHALL use [GCM] block mode encryption with 192 bit key\n' - ' and [AEAD] 16-byte tag is appended to the resulting ciphertext according to\n' - ' the [RFC5116]\n' - '* `aes-256-gcm` that SHALL use [GCM] block mode encryption with 256 bit key\n' - ' and [AEAD] 16-byte tag is appended to the resulting ciphertext according to\n' - ' the [RFC5116]\n' - '* `aes-128-ctr` that SHALL use [CTR] block mode encryption with 128 bit key\n' - '* `aes-192-ctr` that SHALL use [CTR] block mode encryption with 192 bit key\n' - '* `aes-256-ctr` that SHALL use [CTR] block mode encryption with 256 bit key\n' - '\n' - ), + "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" + "of the `encrypt` function:\n" + "\n" + "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" + "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" + "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" + "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" + "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" + "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" + "* `aes-128-cfb128` that SHALL use [CFB128] block mode encryption with 128 bit key\n" + "* `aes-192-cfb128` that SHALL use [CFB128] block mode encryption with 192 bit key\n" + "* `aes-256-cfb128` that SHALL use [CFB128] block mode encryption with 256 bit key\n" + "* `aes-128-ofb` that SHALL use [OFB] block mode encryption with 128 bit key\n" + "* `aes-192-ofb` that SHALL use [OFB] block mode encryption with 192 bit key\n" + "* `aes-256-ofb` that SHALL use [OFB] block mode encryption with 256 bit key\n" + "* `aes-128-gcm` that SHALL use [GCM] block mode encryption with 128 bit key\n" + " and [AEAD] 16-byte tag is appended to the resulting ciphertext according to\n" + " the [RFC5116]\n" + "* `aes-192-gcm` that SHALL use [GCM] block mode encryption with 192 bit key\n" + " and [AEAD] 16-byte tag is appended to the resulting ciphertext according to\n" + " the [RFC5116]\n" + "* `aes-256-gcm` that SHALL use [GCM] block mode encryption with 256 bit key\n" + " and [AEAD] 16-byte tag is appended to the resulting ciphertext according to\n" + " the [RFC5116]\n" + "* `aes-128-ctr` that SHALL use [CTR] block mode encryption with 128 bit key\n" + "* `aes-192-ctr` that SHALL use [CTR] block mode encryption with 192 bit key\n" + "* `aes-256-ctr` that SHALL use [CTR] block mode encryption with 256 bit key\n" + "\n" + ), link=None, level=3, - num='4.9.9') + num="4.9.9", +) RQ_SRS008_AES_Encrypt_Function_Parameters_InitializationVector = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.Parameters.InitializationVector', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.Parameters.InitializationVector", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n' - 'parameter to the `encrypt` function that SHALL specify the initialization vector for block modes that require\n' - 'it.\n' - '\n' - ), + "[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n" + 
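Aside: for the [GCM] entries above, the [RFC5116] framing means the 16-byte [AEAD] tag travels appended to the ciphertext. A minimal sketch of that layout, assuming the third-party `cryptography` package (not implied by the patch), also showing that the same `iv`/`aad` must be supplied to open the result:

```python
# Illustration only: uses the third-party `cryptography` package to show the
# RFC 5116 AEAD layout the GCM requirements refer to (ciphertext || 16-byte tag).
import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=256)  # aes-256-gcm sized key
iv = os.urandom(12)                        # GCM initialization vector (nonce)
aad = b"additional authenticated data"

sealed = AESGCM(key).encrypt(iv, b"secret data", aad)

# The AEAD tag occupies the last 16 bytes of the sealed output.
ciphertext, tag = sealed[:-16], sealed[-16:]
assert len(tag) == 16

# Decryption expects the tag still appended and the same iv/aad.
assert AESGCM(key).decrypt(iv, ciphertext + tag, aad) == b"secret data"
```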
"parameter to the `encrypt` function that SHALL specify the initialization vector for block modes that require\n" + "it.\n" + "\n" + ), link=None, level=3, - num='4.9.10') + num="4.9.10", +) RQ_SRS008_AES_Encrypt_Function_Parameters_AdditionalAuthenticatedData = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.Parameters.AdditionalAuthenticatedData', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.Parameters.AdditionalAuthenticatedData", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `aad` with `String` or `FixedString` data types as the optional fifth\n' - 'parameter to the `encrypt` function that SHALL specify the additional authenticated data\n' - 'for block modes that require it.\n' - '\n' - ), + "[ClickHouse] SHALL support `aad` with `String` or `FixedString` data types as the optional fifth\n" + "parameter to the `encrypt` function that SHALL specify the additional authenticated data\n" + "for block modes that require it.\n" + "\n" + ), link=None, level=3, - num='4.9.11') + num="4.9.11", +) RQ_SRS008_AES_Encrypt_Function_Parameters_ReturnValue = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.Parameters.ReturnValue', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.Parameters.ReturnValue", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return the encrypted value of the data\n' - 'using `String` data type as the result of `encrypt` function.\n' - '\n' - ), + "[ClickHouse] SHALL return the encrypted value of the data\n" + "using `String` data type as the result of `encrypt` function.\n" + "\n" + ), link=None, level=3, - num='4.9.12') + num="4.9.12", +) RQ_SRS008_AES_Encrypt_Function_Key_Length_InvalidLengthError = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.Key.Length.InvalidLengthError', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.Key.Length.InvalidLengthError", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `key` length is not exact for the `encrypt` function for a given block mode.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `key` length is not exact for the `encrypt` function for a given block mode.\n" + "\n" + ), link=None, level=3, - num='4.9.13') + num="4.9.13", +) RQ_SRS008_AES_Encrypt_Function_InitializationVector_Length_InvalidLengthError = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.InitializationVector.Length.InvalidLengthError', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.InitializationVector.Length.InvalidLengthError", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `iv` length is specified and not of the exact size for the `encrypt` function for a given block mode.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `iv` length is specified and not of the exact size for the `encrypt` function for a given block mode.\n" + "\n" + ), link=None, level=3, - num='4.9.14') + num="4.9.14", +) RQ_SRS008_AES_Encrypt_Function_InitializationVector_NotValidForMode = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.InitializationVector.NotValidForMode', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.InitializationVector.NotValidForMode", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `iv` is specified for the `encrypt` function for a mode that does 
not need it.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `iv` is specified for the `encrypt` function for a mode that does not need it.\n" + "\n" + ), link=None, level=3, - num='4.9.15') + num="4.9.15", +) RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_NotValidForMode = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.NotValidForMode', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.NotValidForMode", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `aad` is specified for the `encrypt` function for a mode that does not need it.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `aad` is specified for the `encrypt` function for a mode that does not need it.\n" + "\n" + ), link=None, level=3, - num='4.9.16') + num="4.9.16", +) RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_Length = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.Length', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.Length", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not limit the size of the `aad` parameter passed to the `encrypt` function.\n' - '\n' - ), + "[ClickHouse] SHALL not limit the size of the `aad` parameter passed to the `encrypt` function.\n" + "\n" + ), link=None, level=3, - num='4.9.17') + num="4.9.17", +) RQ_SRS008_AES_Encrypt_Function_NonGCMMode_KeyAndInitializationVector_Length = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.NonGCMMode.KeyAndInitializationVector.Length', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.NonGCMMode.KeyAndInitializationVector.Length", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when the `encrypt` function is called with the following parameter values\n' - 'when using non-GCM modes\n' - '\n' - '* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified\n' - '* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified\n' - '* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified\n' - '* `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-128-cfb1` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-192-cfb1` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-256-cfb1` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-128-cfb8` mode and `key` is not 16 bytes and if specified `iv` is not 16 bytes\n' - '* `aes-192-cfb8` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-256-cfb8` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-128-cfb128` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-192-cfb128` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-256-cfb128` mode and `key` is 
not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-128-ofb` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-192-ofb` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-256-ofb` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-128-ctr` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes\n' - '* `aes-192-ctr` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes\n' - '* `aes-256-ctr` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes\n' - '\n' - ), + "[ClickHouse] SHALL return an error when the `encrypt` function is called with the following parameter values\n" + "when using non-GCM modes\n" + "\n" + "* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified\n" + "* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified\n" + "* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified\n" + "* `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-128-cfb1` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-192-cfb1` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-256-cfb1` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-128-cfb8` mode and `key` is not 16 bytes and if specified `iv` is not 16 bytes\n" + "* `aes-192-cfb8` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-256-cfb8` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-128-cfb128` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-192-cfb128` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-256-cfb128` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-128-ofb` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-192-ofb` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-256-ofb` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-128-ctr` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes\n" + "* `aes-192-ctr` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes\n" + "* `aes-256-ctr` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes\n" + "\n" + ), link=None, level=3, - num='4.9.18') + num="4.9.18", +) RQ_SRS008_AES_Encrypt_Function_GCMMode_KeyAndInitializationVector_Length = Requirement( - name='RQ.SRS008.AES.Encrypt.Function.GCMMode.KeyAndInitializationVector.Length', - version='1.0', + name="RQ.SRS008.AES.Encrypt.Function.GCMMode.KeyAndInitializationVector.Length", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when the `encrypt` function is called with the following 
parameter values\n' - 'when using GCM modes\n' - '\n' - '* `aes-128-gcm` mode and `key` is not 16 bytes or `iv` is not specified\n' - '* `aes-192-gcm` mode and `key` is not 24 bytes or `iv` is not specified\n' - '* `aes-256-gcm` mode and `key` is not 32 bytes or `iv` is not specified\n' - '\n' - ), + "[ClickHouse] SHALL return an error when the `encrypt` function is called with the following parameter values\n" + "when using GCM modes\n" + "\n" + "* `aes-128-gcm` mode and `key` is not 16 bytes or `iv` is not specified\n" + "* `aes-192-gcm` mode and `key` is not 24 bytes or `iv` is not specified\n" + "* `aes-256-gcm` mode and `key` is not 32 bytes or `iv` is not specified\n" + "\n" + ), link=None, level=3, - num='4.9.19') + num="4.9.19", +) RQ_SRS008_AES_Decrypt_Function = Requirement( - name='RQ.SRS008.AES.Decrypt.Function', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `decrypt` function to decrypt data using [AES].\n' - '\n' - ), + "[ClickHouse] SHALL support `decrypt` function to decrypt data using [AES].\n" + "\n" + ), link=None, level=3, - num='4.10.1') + num="4.10.1", +) RQ_SRS008_AES_Decrypt_Function_Syntax = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.Syntax', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `decrypt` function\n' - '\n' - '```sql\n' - 'decrypt(mode, ciphertext, key, [iv, aad])\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `decrypt` function\n" + "\n" + "```sql\n" + "decrypt(mode, ciphertext, key, [iv, aad])\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.10.2') + num="4.10.2", +) RQ_SRS008_AES_Decrypt_Function_Parameters_CipherText = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.Parameters.CipherText', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.Parameters.CipherText", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `ciphertext` accepting `FixedString` or `String` data types as\n' - 'the second parameter to the `decrypt` function that SHALL specify the data to be decrypted.\n' - '\n' - ), + "[ClickHouse] SHALL support `ciphertext` accepting `FixedString` or `String` data types as\n" + "the second parameter to the `decrypt` function that SHALL specify the data to be decrypted.\n" + "\n" + ), link=None, level=3, - num='4.10.3') + num="4.10.3", +) RQ_SRS008_AES_Decrypt_Function_Parameters_Key = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.Parameters.Key', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.Parameters.Key", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n' - 'as the third parameter to the `decrypt` function that SHALL specify the encryption key.\n' - '\n' - ), + "[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n" + "as the third parameter to the `decrypt` function that SHALL specify the encryption key.\n" + "\n" + ), link=None, level=3, - num='4.10.4') + num="4.10.4", +) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.Parameters.Mode", + version="1.0", priority=None, group=None, 
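Aside: the key/`iv` bullet lists above follow a mechanical pattern (`aes-128-*` needs a 16-byte key, `aes-192-*` 24 bytes, `aes-256-*` 32 bytes; ECB takes no `iv`, GCM requires one, and the remaining non-GCM modes use a 16-byte `iv`). A hypothetical helper summarizing those bullets for test authors; the name `required_lengths` and its shape are mine, not from the patch:

```python
# Hypothetical helper restating the key/iv length bullets above for the
# encrypt/decrypt modes: key size follows the number in the mode name,
# ECB takes no iv, GCM requires one, and the other modes use a 16-byte iv.
KEY_BYTES = {"128": 16, "192": 24, "256": 32}

def required_lengths(mode: str):
    """Return (key_bytes, iv_bytes) for an 'aes-<key length>-<block mode>' string.

    iv_bytes is None when the mode takes no iv (ECB) and 'required' for GCM,
    whose iv length is not pinned down by these requirements.
    """
    _, bits, block_mode = mode.split("-", 2)
    key_bytes = KEY_BYTES[bits]
    if block_mode == "ecb":
        return key_bytes, None
    if block_mode == "gcm":
        return key_bytes, "required"
    return key_bytes, 16

assert required_lengths("aes-256-ofb") == (32, 16)
assert required_lengths("aes-128-ecb") == (16, None)
```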
type=None, uid=None, description=( - '[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the first parameter\n' - 'to the `decrypt` function that SHALL specify encryption key length and block encryption mode.\n' - '\n' - ), + "[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the first parameter\n" + "to the `decrypt` function that SHALL specify encryption key length and block encryption mode.\n" + "\n" + ), link=None, level=3, - num='4.10.5') + num="4.10.5", +) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.ValuesFormat', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.ValuesFormat", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n' - 'of the `decrypt` function where\n' - 'the `key_length` SHALL specifies the length of the key and SHALL accept\n' - '`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n' - 'mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as\n' - '[CTR] and [GCM] as the values. For example, `aes-256-ofb`.\n' - '\n' - ), + "[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n" + "of the `decrypt` function where\n" + "the `key_length` SHALL specifies the length of the key and SHALL accept\n" + "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" + "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as\n" + "[CTR] and [GCM] as the values. For example, `aes-256-ofb`.\n" + "\n" + ), link=None, level=3, - num='4.10.6') + num="4.10.6", +) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_Invalid = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `decrypt`\n' - 'function is not valid with the exception where such a mode is supported by the underlying\n' - '[OpenSSL] implementation.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `decrypt`\n" + "function is not valid with the exception where such a mode is supported by the underlying\n" + "[OpenSSL] implementation.\n" + "\n" + ), link=None, level=3, - num='4.10.7') + num="4.10.7", +) RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Values = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Values', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Values", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n' - 'of the `decrypt` function:\n' - '\n' - '* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n' - '* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n' - '* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n' - '* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n' - '* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n' - '* `aes-192-cbc` that 
SHALL use [CBC] block mode encryption with 256 bit key\n' - '* `aes-128-cfb128` that SHALL use [CFB128] block mode encryption with 128 bit key\n' - '* `aes-192-cfb128` that SHALL use [CFB128] block mode encryption with 192 bit key\n' - '* `aes-256-cfb128` that SHALL use [CFB128] block mode encryption with 256 bit key\n' - '* `aes-128-ofb` that SHALL use [OFB] block mode encryption with 128 bit key\n' - '* `aes-192-ofb` that SHALL use [OFB] block mode encryption with 192 bit key\n' - '* `aes-256-ofb` that SHALL use [OFB] block mode encryption with 256 bit key\n' - '* `aes-128-gcm` that SHALL use [GCM] block mode encryption with 128 bit key\n' - ' and [AEAD] 16-byte tag is expected present at the end of the ciphertext according to\n' - ' the [RFC5116]\n' - '* `aes-192-gcm` that SHALL use [GCM] block mode encryption with 192 bit key\n' - ' and [AEAD] 16-byte tag is expected present at the end of the ciphertext according to\n' - ' the [RFC5116]\n' - '* `aes-256-gcm` that SHALL use [GCM] block mode encryption with 256 bit key\n' - ' and [AEAD] 16-byte tag is expected present at the end of the ciphertext according to\n' - ' the [RFC5116]\n' - '* `aes-128-ctr` that SHALL use [CTR] block mode encryption with 128 bit key\n' - '* `aes-192-ctr` that SHALL use [CTR] block mode encryption with 192 bit key\n' - '* `aes-256-ctr` that SHALL use [CTR] block mode encryption with 256 bit key\n' - '\n' - ), + "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" + "of the `decrypt` function:\n" + "\n" + "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" + "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" + "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" + "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" + "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" + "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" + "* `aes-128-cfb128` that SHALL use [CFB128] block mode encryption with 128 bit key\n" + "* `aes-192-cfb128` that SHALL use [CFB128] block mode encryption with 192 bit key\n" + "* `aes-256-cfb128` that SHALL use [CFB128] block mode encryption with 256 bit key\n" + "* `aes-128-ofb` that SHALL use [OFB] block mode encryption with 128 bit key\n" + "* `aes-192-ofb` that SHALL use [OFB] block mode encryption with 192 bit key\n" + "* `aes-256-ofb` that SHALL use [OFB] block mode encryption with 256 bit key\n" + "* `aes-128-gcm` that SHALL use [GCM] block mode encryption with 128 bit key\n" + " and [AEAD] 16-byte tag is expected present at the end of the ciphertext according to\n" + " the [RFC5116]\n" + "* `aes-192-gcm` that SHALL use [GCM] block mode encryption with 192 bit key\n" + " and [AEAD] 16-byte tag is expected present at the end of the ciphertext according to\n" + " the [RFC5116]\n" + "* `aes-256-gcm` that SHALL use [GCM] block mode encryption with 256 bit key\n" + " and [AEAD] 16-byte tag is expected present at the end of the ciphertext according to\n" + " the [RFC5116]\n" + "* `aes-128-ctr` that SHALL use [CTR] block mode encryption with 128 bit key\n" + "* `aes-192-ctr` that SHALL use [CTR] block mode encryption with 192 bit key\n" + "* `aes-256-ctr` that SHALL use [CTR] block mode encryption with 256 bit key\n" + "\n" + ), link=None, level=3, - num='4.10.8') + num="4.10.8", +) RQ_SRS008_AES_Decrypt_Function_Parameters_InitializationVector = Requirement( - 
name='RQ.SRS008.AES.Decrypt.Function.Parameters.InitializationVector', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.Parameters.InitializationVector", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n' - 'parameter to the `decrypt` function that SHALL specify the initialization vector for block modes that require\n' - 'it.\n' - '\n' - ), + "[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n" + "parameter to the `decrypt` function that SHALL specify the initialization vector for block modes that require\n" + "it.\n" + "\n" + ), link=None, level=3, - num='4.10.9') + num="4.10.9", +) RQ_SRS008_AES_Decrypt_Function_Parameters_AdditionalAuthenticatedData = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.Parameters.AdditionalAuthenticatedData', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.Parameters.AdditionalAuthenticatedData", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `aad` with `String` or `FixedString` data types as the optional fifth\n' - 'parameter to the `decrypt` function that SHALL specify the additional authenticated data\n' - 'for block modes that require it.\n' - '\n' - ), + "[ClickHouse] SHALL support `aad` with `String` or `FixedString` data types as the optional fifth\n" + "parameter to the `decrypt` function that SHALL specify the additional authenticated data\n" + "for block modes that require it.\n" + "\n" + ), link=None, level=3, - num='4.10.10') + num="4.10.10", +) RQ_SRS008_AES_Decrypt_Function_Parameters_ReturnValue = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.Parameters.ReturnValue', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.Parameters.ReturnValue", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return the decrypted value of the data\n' - 'using `String` data type as the result of `decrypt` function.\n' - '\n' - ), + "[ClickHouse] SHALL return the decrypted value of the data\n" + "using `String` data type as the result of `decrypt` function.\n" + "\n" + ), link=None, level=3, - num='4.10.11') + num="4.10.11", +) RQ_SRS008_AES_Decrypt_Function_Key_Length_InvalidLengthError = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.Key.Length.InvalidLengthError', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.Key.Length.InvalidLengthError", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `key` length is not exact for the `decrypt` function for a given block mode.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `key` length is not exact for the `decrypt` function for a given block mode.\n" + "\n" + ), link=None, level=3, - num='4.10.12') + num="4.10.12", +) RQ_SRS008_AES_Decrypt_Function_InitializationVector_Length_InvalidLengthError = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.InitializationVector.Length.InvalidLengthError', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.InitializationVector.Length.InvalidLengthError", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `iv` is specified and the length is not exact for the `decrypt` function for a given block mode.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `iv` is specified and the 
length is not exact for the `decrypt` function for a given block mode.\n" + "\n" + ), link=None, level=3, - num='4.10.13') + num="4.10.13", +) RQ_SRS008_AES_Decrypt_Function_InitializationVector_NotValidForMode = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.InitializationVector.NotValidForMode', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.InitializationVector.NotValidForMode", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `iv` is specified for the `decrypt` function\n' - 'for a mode that does not need it.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `iv` is specified for the `decrypt` function\n" + "for a mode that does not need it.\n" + "\n" + ), link=None, level=3, - num='4.10.14') + num="4.10.14", +) RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_NotValidForMode = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.NotValidForMode', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.NotValidForMode", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `aad` is specified for the `decrypt` function\n' - 'for a mode that does not need it.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `aad` is specified for the `decrypt` function\n" + "for a mode that does not need it.\n" + "\n" + ), link=None, level=3, - num='4.10.15') + num="4.10.15", +) RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_Length = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.Length', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.Length", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not limit the size of the `aad` parameter passed to the `decrypt` function.\n' - '\n' - ), + "[ClickHouse] SHALL not limit the size of the `aad` parameter passed to the `decrypt` function.\n" + "\n" + ), link=None, level=3, - num='4.10.16') + num="4.10.16", +) RQ_SRS008_AES_Decrypt_Function_NonGCMMode_KeyAndInitializationVector_Length = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.NonGCMMode.KeyAndInitializationVector.Length', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.NonGCMMode.KeyAndInitializationVector.Length", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when the `decrypt` function is called with the following parameter values\n' - 'when using non-GCM modes\n' - '\n' - '* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified\n' - '* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified\n' - '* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified\n' - '* `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-128-cfb1` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-192-cfb1` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-256-cfb1` mode and `key` is not 32 bytes or if specified `iv` is not 16 
bytes or `aad` is specified\n' - '* `aes-128-cfb8` mode and `key` is not 16 bytes and if specified `iv` is not 16 bytes\n' - '* `aes-192-cfb8` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-256-cfb8` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-128-cfb128` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-192-cfb128` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-256-cfb128` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-128-ofb` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-192-ofb` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-256-ofb` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n' - '* `aes-128-ctr` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes\n' - '* `aes-192-ctr` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes\n' - '* `aes-256-ctr` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes\n' - '\n' - ), + "[ClickHouse] SHALL return an error when the `decrypt` function is called with the following parameter values\n" + "when using non-GCM modes\n" + "\n" + "* `aes-128-ecb` mode and `key` is not 16 bytes or `iv` or `aad` is specified\n" + "* `aes-192-ecb` mode and `key` is not 24 bytes or `iv` or `aad` is specified\n" + "* `aes-256-ecb` mode and `key` is not 32 bytes or `iv` or `aad` is specified\n" + "* `aes-128-cbc` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-192-cbc` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-256-cbc` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-128-cfb1` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-192-cfb1` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-256-cfb1` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-128-cfb8` mode and `key` is not 16 bytes and if specified `iv` is not 16 bytes\n" + "* `aes-192-cfb8` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-256-cfb8` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-128-cfb128` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-192-cfb128` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-256-cfb128` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-128-ofb` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-192-ofb` mode and `key` is not 24 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-256-ofb` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes or `aad` is specified\n" + "* `aes-128-ctr` mode and `key` is not 16 bytes or if specified `iv` is not 16 bytes\n" + "* `aes-192-ctr` mode and `key` is not 24 bytes or if specified `iv` is 
not 16 bytes\n" + "* `aes-256-ctr` mode and `key` is not 32 bytes or if specified `iv` is not 16 bytes\n" + "\n" + ), link=None, level=3, - num='4.10.17') + num="4.10.17", +) RQ_SRS008_AES_Decrypt_Function_GCMMode_KeyAndInitializationVector_Length = Requirement( - name='RQ.SRS008.AES.Decrypt.Function.GCMMode.KeyAndInitializationVector.Length', - version='1.0', + name="RQ.SRS008.AES.Decrypt.Function.GCMMode.KeyAndInitializationVector.Length", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when the `decrypt` function is called with the following parameter values\n' - 'when using GCM modes\n' - '\n' - '* `aes-128-gcm` mode and `key` is not 16 bytes or `iv` is not specified\n' - '* `aes-192-gcm` mode and `key` is not 24 bytes or `iv` is not specified\n' - '* `aes-256-gcm` mode and `key` is not 32 bytes or `iv` is not specified\n' - '\n' - ), + "[ClickHouse] SHALL return an error when the `decrypt` function is called with the following parameter values\n" + "when using GCM modes\n" + "\n" + "* `aes-128-gcm` mode and `key` is not 16 bytes or `iv` is not specified\n" + "* `aes-192-gcm` mode and `key` is not 24 bytes or `iv` is not specified\n" + "* `aes-256-gcm` mode and `key` is not 32 bytes or `iv` is not specified\n" + "\n" + ), link=None, level=3, - num='4.10.18') + num="4.10.18", +) RQ_SRS008_AES_MySQL_Encrypt_Function = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `aes_encrypt_mysql` function to encrypt data using [AES].\n' - '\n' - ), + "[ClickHouse] SHALL support `aes_encrypt_mysql` function to encrypt data using [AES].\n" + "\n" + ), link=None, level=3, - num='4.11.1') + num="4.11.1", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Syntax = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Syntax', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `aes_encrypt_mysql` function\n' - '\n' - '```sql\n' - 'aes_encrypt_mysql(mode, plaintext, key, [iv])\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `aes_encrypt_mysql` function\n" + "\n" + "```sql\n" + "aes_encrypt_mysql(mode, plaintext, key, [iv])\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.11.2') + num="4.11.2", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_PlainText = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.PlainText', - version='2.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.PlainText", + version="2.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `plaintext` with `String`, `FixedString`, `Nullable(String)`,\n' - '`Nullable(FixedString)`, `LowCardinality(String)`, or `LowCardinality(FixedString(N))` data types as\n' - 'the second parameter to the `aes_encrypt_mysql` function that SHALL specify the data to be encrypted.\n' - '\n' - ), + "[ClickHouse] SHALL support `plaintext` with `String`, `FixedString`, `Nullable(String)`,\n" + "`Nullable(FixedString)`, `LowCardinality(String)`, or `LowCardinality(FixedString(N))` data types as\n" + "the second parameter to the `aes_encrypt_mysql` function that SHALL specify the data to be encrypted.\n" + "\n" + ), link=None, level=3, - num='4.11.3') + 
num="4.11.3", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Key = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Key', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Key", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n' - 'as the third parameter to the `aes_encrypt_mysql` function that SHALL specify the encryption key.\n' - '\n' - ), + "[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n" + "as the third parameter to the `aes_encrypt_mysql` function that SHALL specify the encryption key.\n" + "\n" + ), link=None, level=3, - num='4.11.4') + num="4.11.4", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the first parameter\n' - 'to the `aes_encrypt_mysql` function that SHALL specify encryption key length and block encryption mode.\n' - '\n' - ), + "[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the first parameter\n" + "to the `aes_encrypt_mysql` function that SHALL specify encryption key length and block encryption mode.\n" + "\n" + ), link=None, level=3, - num='4.11.5') + num="4.11.5", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.ValuesFormat', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.ValuesFormat", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n' - 'of the `aes_encrypt_mysql` function where\n' - 'the `key_length` SHALL specifies the length of the key and SHALL accept\n' - '`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n' - 'mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`.\n' - '\n' - ), + "[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n" + "of the `aes_encrypt_mysql` function where\n" + "the `key_length` SHALL specifies the length of the key and SHALL accept\n" + "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" + "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. 
For example, `aes-256-ofb`.\n" + "\n" + ), link=None, level=3, - num='4.11.6') + num="4.11.6", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_Invalid = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_encrypt_mysql`\n' - 'function is not valid with the exception where such a mode is supported by the underlying\n' - '[OpenSSL] implementation.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_encrypt_mysql`\n" + "function is not valid with the exception where such a mode is supported by the underlying\n" + "[OpenSSL] implementation.\n" + "\n" + ), link=None, level=3, - num='4.11.7') + num="4.11.7", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Values = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n' - 'of the `aes_encrypt_mysql` function:\n' - '\n' - '* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n' - '* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n' - '* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n' - '* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n' - '* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n' - '* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n' - '* `aes-128-cfb128` that SHALL use [CFB128] block mode encryption with 128 bit key\n' - '* `aes-192-cfb128` that SHALL use [CFB128] block mode encryption with 192 bit key\n' - '* `aes-256-cfb128` that SHALL use [CFB128] block mode encryption with 256 bit key\n' - '* `aes-128-ofb` that SHALL use [OFB] block mode encryption with 128 bit key\n' - '* `aes-192-ofb` that SHALL use [OFB] block mode encryption with 192 bit key\n' - '* `aes-256-ofb` that SHALL use [OFB] block mode encryption with 256 bit key\n' - '\n' - ), + "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" + "of the `aes_encrypt_mysql` function:\n" + "\n" + "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" + "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" + "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" + "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" + "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" + "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" + "* `aes-128-cfb128` that SHALL use [CFB128] block mode encryption with 128 bit key\n" + "* `aes-192-cfb128` that SHALL use [CFB128] block mode encryption with 192 bit key\n" + "* `aes-256-cfb128` that SHALL use [CFB128] block mode encryption with 256 bit key\n" + "* `aes-128-ofb` that SHALL use [OFB] block mode encryption with 128 bit key\n" + "* `aes-192-ofb` that SHALL use [OFB] block mode encryption with 
192 bit key\n" + "* `aes-256-ofb` that SHALL use [OFB] block mode encryption with 256 bit key\n" + "\n" + ), link=None, level=3, - num='4.11.8') + num="4.11.8", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Values_GCM_Error = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values.GCM.Error', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values.GCM.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if any of the following [GCM] modes are specified as the value \n' - 'for the `mode` parameter of the `aes_encrypt_mysql` function\n' - '\n' - '* `aes-128-gcm`\n' - '* `aes-192-gcm`\n' - '* `aes-256-gcm`\n' - '\n' - ), + "[ClickHouse] SHALL return an error if any of the following [GCM] modes are specified as the value \n" + "for the `mode` parameter of the `aes_encrypt_mysql` function\n" + "\n" + "* `aes-128-gcm`\n" + "* `aes-192-gcm`\n" + "* `aes-256-gcm`\n" + "\n" + ), link=None, level=3, - num='4.11.9') + num="4.11.9", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Values_CTR_Error = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values.CTR.Error', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values.CTR.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if any of the following [CTR] modes are specified as the value \n' - 'for the `mode` parameter of the `aes_encrypt_mysql` function\n' - '\n' - '* `aes-128-ctr`\n' - '* `aes-192-ctr`\n' - '* `aes-256-ctr`\n' - '\n' - ), + "[ClickHouse] SHALL return an error if any of the following [CTR] modes are specified as the value \n" + "for the `mode` parameter of the `aes_encrypt_mysql` function\n" + "\n" + "* `aes-128-ctr`\n" + "* `aes-192-ctr`\n" + "* `aes-256-ctr`\n" + "\n" + ), link=None, level=3, - num='4.11.10') + num="4.11.10", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_InitializationVector = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.InitializationVector', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.InitializationVector", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n' - 'parameter to the `aes_encrypt_mysql` function that SHALL specify the initialization vector for block modes that require\n' - 'it.\n' - '\n' - ), + "[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n" + "parameter to the `aes_encrypt_mysql` function that SHALL specify the initialization vector for block modes that require\n" + "it.\n" + "\n" + ), link=None, level=3, - num='4.11.11') + num="4.11.11", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_ReturnValue = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.ReturnValue', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.ReturnValue", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return the encrypted value of the data\n' - 'using `String` data type as the result of `aes_encrypt_mysql` function.\n' - '\n' - ), + "[ClickHouse] SHALL return the encrypted value of the data\n" + "using `String` data type as the result of `aes_encrypt_mysql` function.\n" + "\n" + ), link=None, level=3, - num='4.11.12') + 
num="4.11.12", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Key_Length_TooShortError = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooShortError', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooShortError", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `key` length is less than the minimum for the `aes_encrypt_mysql`\n' - 'function for a given block mode.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `key` length is less than the minimum for the `aes_encrypt_mysql`\n" + "function for a given block mode.\n" + "\n" + ), link=None, level=3, - num='4.11.13') + num="4.11.13", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Key_Length_TooLong = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooLong', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooLong", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use folding algorithm specified below if the `key` length is longer than required\n' - 'for the `aes_encrypt_mysql` function for a given block mode.\n' - '\n' - '```python\n' - 'def fold_key(key, cipher_key_size):\n' - ' key = list(key) if not isinstance(key, (list, tuple)) else key\n' - '\t folded_key = key[:cipher_key_size]\n' - '\t for i in range(cipher_key_size, len(key)):\n' - '\t\t print(i % cipher_key_size, i)\n' - '\t\t folded_key[i % cipher_key_size] ^= key[i]\n' - '\t return folded_key\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL use folding algorithm specified below if the `key` length is longer than required\n" + "for the `aes_encrypt_mysql` function for a given block mode.\n" + "\n" + "```python\n" + "def fold_key(key, cipher_key_size):\n" + " key = list(key) if not isinstance(key, (list, tuple)) else key\n" + "\t folded_key = key[:cipher_key_size]\n" + "\t for i in range(cipher_key_size, len(key)):\n" + "\t\t print(i % cipher_key_size, i)\n" + "\t\t folded_key[i % cipher_key_size] ^= key[i]\n" + "\t return folded_key\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.11.14') + num="4.11.14", +) RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_Length_TooShortError = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooShortError', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooShortError", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `iv` length is specified and is less than the minimum\n' - 'that is required for the `aes_encrypt_mysql` function for a given block mode.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `iv` length is specified and is less than the minimum\n" + "that is required for the `aes_encrypt_mysql` function for a given block mode.\n" + "\n" + ), link=None, level=3, - num='4.11.15') + num="4.11.15", +) RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_Length_TooLong = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooLong', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooLong", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use the first `N` bytes that are required if the `iv` is specified and\n' - 'its length is longer than required for the `aes_encrypt_mysql` function for a given block mode.\n' - '\n' - ), 
+ "[ClickHouse] SHALL use the first `N` bytes that are required if the `iv` is specified and\n" + "its length is longer than required for the `aes_encrypt_mysql` function for a given block mode.\n" + "\n" + ), link=None, level=3, - num='4.11.16') + num="4.11.16", +) RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_NotValidForMode = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.NotValidForMode', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.NotValidForMode", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_encrypt_mysql`\n' - 'function for a mode that does not need it.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_encrypt_mysql`\n" + "function for a mode that does not need it.\n" + "\n" + ), link=None, level=3, - num='4.11.17') + num="4.11.17", +) RQ_SRS008_AES_MySQL_Encrypt_Function_Mode_KeyAndInitializationVector_Length = Requirement( - name='RQ.SRS008.AES.MySQL.Encrypt.Function.Mode.KeyAndInitializationVector.Length', - version='1.0', + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Mode.KeyAndInitializationVector.Length", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when the `aes_encrypt_mysql` function is called with the following parameter values\n' - '\n' - '* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified\n' - '* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified\n' - '* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified\n' - '* `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-128-cfb1` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-192-cfb1` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-256-cfb1` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-128-cfb8` mode and `key` is less than 16 bytes and if specified `iv` is less than 16 bytes\n' - '* `aes-192-cfb8` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-256-cfb8` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-128-cfb128` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-192-cfb128` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-256-cfb128` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-128-ofb` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-192-ofb` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-256-ofb` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n' - '\n' - ), + "[ClickHouse] SHALL return an error when the `aes_encrypt_mysql` function is called with the following parameter values\n" + "\n" + "* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified\n" + "* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is 
specified\n" + "* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified\n" + "* `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-128-cfb1` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-192-cfb1` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-256-cfb1` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-128-cfb8` mode and `key` is less than 16 bytes and if specified `iv` is less than 16 bytes\n" + "* `aes-192-cfb8` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-256-cfb8` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-128-cfb128` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-192-cfb128` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-256-cfb128` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-128-ofb` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-192-ofb` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-256-ofb` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" + "\n" + ), link=None, level=3, - num='4.11.18') + num="4.11.18", +) RQ_SRS008_AES_MySQL_Decrypt_Function = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `aes_decrypt_mysql` function to decrypt data using [AES].\n' - '\n' - ), + "[ClickHouse] SHALL support `aes_decrypt_mysql` function to decrypt data using [AES].\n" + "\n" + ), link=None, level=3, - num='4.12.1') + num="4.12.1", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Syntax = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Syntax', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `aes_decrypt_mysql` function\n' - '\n' - '```sql\n' - 'aes_decrypt_mysql(mode, ciphertext, key, [iv])\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `aes_decrypt_mysql` function\n" + "\n" + "```sql\n" + "aes_decrypt_mysql(mode, ciphertext, key, [iv])\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.12.2') + num="4.12.2", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_CipherText = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.CipherText', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.CipherText", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `ciphertext` accepting any data type as\n' - 'the second parameter to the `aes_decrypt_mysql` function that SHALL specify the data to be decrypted.\n' - '\n' - ), + "[ClickHouse] SHALL support `ciphertext` accepting any data type as\n" + "the second parameter to the `aes_decrypt_mysql` function that SHALL specify 
the data to be decrypted.\n" + "\n" + ), link=None, level=3, - num='4.12.3') + num="4.12.3", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Key = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Key', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Key", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n' - 'as the third parameter to the `aes_decrypt_mysql` function that SHALL specify the encryption key.\n' - '\n' - ), + "[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n" + "as the third parameter to the `aes_decrypt_mysql` function that SHALL specify the encryption key.\n" + "\n" + ), link=None, level=3, - num='4.12.4') + num="4.12.4", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the first parameter\n' - 'to the `aes_decrypt_mysql` function that SHALL specify encryption key length and block encryption mode.\n' - '\n' - ), + "[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the first parameter\n" + "to the `aes_decrypt_mysql` function that SHALL specify encryption key length and block encryption mode.\n" + "\n" + ), link=None, level=3, - num='4.12.5') + num="4.12.5", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_ValuesFormat = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.ValuesFormat', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.ValuesFormat", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n' - 'of the `aes_decrypt_mysql` function where\n' - 'the `key_length` SHALL specifies the length of the key and SHALL accept\n' - '`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n' - 'mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. For example, `aes-256-ofb`.\n' - '\n' - ), + "[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n" + "of the `aes_decrypt_mysql` function where\n" + "the `key_length` SHALL specifies the length of the key and SHALL accept\n" + "`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n" + "mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB]. 
For example, `aes-256-ofb`.\n" + "\n" + ), link=None, level=3, - num='4.12.6') + num="4.12.6", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_Invalid = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_decrypt_mysql`\n' - 'function is not valid with the exception where such a mode is supported by the underlying\n' - '[OpenSSL] implementation.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `aes_decrypt_mysql`\n" + "function is not valid with the exception where such a mode is supported by the underlying\n" + "[OpenSSL] implementation.\n" + "\n" + ), link=None, level=3, - num='4.12.7') + num="4.12.7", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Values = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n' - 'of the `aes_decrypt_mysql` function:\n' - '\n' - '* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n' - '* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n' - '* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n' - '* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n' - '* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n' - '* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n' - '* `aes-128-cfb128` that SHALL use [CFB128] block mode encryption with 128 bit key\n' - '* `aes-192-cfb128` that SHALL use [CFB128] block mode encryption with 192 bit key\n' - '* `aes-256-cfb128` that SHALL use [CFB128] block mode encryption with 256 bit key\n' - '* `aes-128-ofb` that SHALL use [OFB] block mode encryption with 128 bit key\n' - '* `aes-192-ofb` that SHALL use [OFB] block mode encryption with 192 bit key\n' - '* `aes-256-ofb` that SHALL use [OFB] block mode encryption with 256 bit key\n' - '\n' - ), + "[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n" + "of the `aes_decrypt_mysql` function:\n" + "\n" + "* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n" + "* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n" + "* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n" + "* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n" + "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n" + "* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n" + "* `aes-128-cfb128` that SHALL use [CFB128] block mode encryption with 128 bit key\n" + "* `aes-192-cfb128` that SHALL use [CFB128] block mode encryption with 192 bit key\n" + "* `aes-256-cfb128` that SHALL use [CFB128] block mode encryption with 256 bit key\n" + "* `aes-128-ofb` that SHALL use [OFB] block mode encryption with 128 bit key\n" + "* `aes-192-ofb` that SHALL use [OFB] block mode encryption with 
192 bit key\n" + "* `aes-256-ofb` that SHALL use [OFB] block mode encryption with 256 bit key\n" + "\n" + ), link=None, level=3, - num='4.12.8') + num="4.12.8", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Values_GCM_Error = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values.GCM.Error', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values.GCM.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if any of the following [GCM] modes are specified as the value \n' - 'for the `mode` parameter of the `aes_decrypt_mysql` function\n' - '\n' - '* `aes-128-gcm`\n' - '* `aes-192-gcm`\n' - '* `aes-256-gcm`\n' - '\n' - ), + "[ClickHouse] SHALL return an error if any of the following [GCM] modes are specified as the value \n" + "for the `mode` parameter of the `aes_decrypt_mysql` function\n" + "\n" + "* `aes-128-gcm`\n" + "* `aes-192-gcm`\n" + "* `aes-256-gcm`\n" + "\n" + ), link=None, level=3, - num='4.12.9') + num="4.12.9", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Values_CTR_Error = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values.CTR.Error', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values.CTR.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if any of the following [CTR] modes are specified as the value \n' - 'for the `mode` parameter of the `aes_decrypt_mysql` function\n' - '\n' - '* `aes-128-ctr`\n' - '* `aes-192-ctr`\n' - '* `aes-256-ctr`\n' - '\n' - ), + "[ClickHouse] SHALL return an error if any of the following [CTR] modes are specified as the value \n" + "for the `mode` parameter of the `aes_decrypt_mysql` function\n" + "\n" + "* `aes-128-ctr`\n" + "* `aes-192-ctr`\n" + "* `aes-256-ctr`\n" + "\n" + ), link=None, level=3, - num='4.12.10') + num="4.12.10", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_InitializationVector = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.InitializationVector', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.InitializationVector", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n' - 'parameter to the `aes_decrypt_mysql` function that SHALL specify the initialization vector for block modes that require\n' - 'it.\n' - '\n' - ), + "[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n" + "parameter to the `aes_decrypt_mysql` function that SHALL specify the initialization vector for block modes that require\n" + "it.\n" + "\n" + ), link=None, level=3, - num='4.12.11') + num="4.12.11", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_ReturnValue = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.ReturnValue', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.ReturnValue", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return the decrypted value of the data\n' - 'using `String` data type as the result of `aes_decrypt_mysql` function.\n' - '\n' - ), + "[ClickHouse] SHALL return the decrypted value of the data\n" + "using `String` data type as the result of `aes_decrypt_mysql` function.\n" + "\n" + ), link=None, level=3, - num='4.12.12') + 
num="4.12.12", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Key_Length_TooShortError = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooShortError', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooShortError", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `key` length is less than the minimum for the `aes_decrypt_mysql`\n' - 'function for a given block mode.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `key` length is less than the minimum for the `aes_decrypt_mysql`\n" + "function for a given block mode.\n" + "\n" + ), link=None, level=3, - num='4.12.13') + num="4.12.13", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Key_Length_TooLong = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooLong', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooLong", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use folding algorithm specified below if the `key` length is longer than required\n' - 'for the `aes_decrypt_mysql` function for a given block mode.\n' - '\n' - '```python\n' - 'def fold_key(key, cipher_key_size):\n' - ' key = list(key) if not isinstance(key, (list, tuple)) else key\n' - '\t folded_key = key[:cipher_key_size]\n' - '\t for i in range(cipher_key_size, len(key)):\n' - '\t\t print(i % cipher_key_size, i)\n' - '\t\t folded_key[i % cipher_key_size] ^= key[i]\n' - '\t return folded_key\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL use folding algorithm specified below if the `key` length is longer than required\n" + "for the `aes_decrypt_mysql` function for a given block mode.\n" + "\n" + "```python\n" + "def fold_key(key, cipher_key_size):\n" + " key = list(key) if not isinstance(key, (list, tuple)) else key\n" + "\t folded_key = key[:cipher_key_size]\n" + "\t for i in range(cipher_key_size, len(key)):\n" + "\t\t print(i % cipher_key_size, i)\n" + "\t\t folded_key[i % cipher_key_size] ^= key[i]\n" + "\t return folded_key\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.12.14') + num="4.12.14", +) RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooShortError = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooShortError', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooShortError", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `iv` length is specified and is less than the minimum\n' - 'that is required for the `aes_decrypt_mysql` function for a given block mode.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `iv` length is specified and is less than the minimum\n" + "that is required for the `aes_decrypt_mysql` function for a given block mode.\n" + "\n" + ), link=None, level=3, - num='4.12.15') + num="4.12.15", +) RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooLong = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooLong', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooLong", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use the first `N` bytes that are required if the `iv` is specified and\n' - 'its length is longer than required for the `aes_decrypt_mysql` function for a given block mode.\n' - '\n' - ), 
+ "[ClickHouse] SHALL use the first `N` bytes that are required if the `iv` is specified and\n" + "its length is longer than required for the `aes_decrypt_mysql` function for a given block mode.\n" + "\n" + ), link=None, level=3, - num='4.12.16') + num="4.12.16", +) RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_NotValidForMode = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.NotValidForMode', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.NotValidForMode", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_decrypt_mysql`\n' - 'function for a mode that does not need it.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `iv` is specified for the `aes_decrypt_mysql`\n" + "function for a mode that does not need it.\n" + "\n" + ), link=None, level=3, - num='4.12.17') + num="4.12.17", +) RQ_SRS008_AES_MySQL_Decrypt_Function_Mode_KeyAndInitializationVector_Length = Requirement( - name='RQ.SRS008.AES.MySQL.Decrypt.Function.Mode.KeyAndInitializationVector.Length', - version='1.0', + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Mode.KeyAndInitializationVector.Length", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when the `aes_decrypt_mysql` function is called with the following parameter values\n' - '\n' - '* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified\n' - '* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is specified\n' - '* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified\n' - '* `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-128-cfb1` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-192-cfb1` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-256-cfb1` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-128-cfb8` mode and `key` is less than 16 bytes and if specified `iv` is less than 16 bytes\n' - '* `aes-192-cfb8` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-256-cfb8` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-128-cfb128` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-192-cfb128` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-256-cfb128` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-128-ofb` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-192-ofb` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n' - '* `aes-256-ofb` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n' - '\n' - ), + "[ClickHouse] SHALL return an error when the `aes_decrypt_mysql` function is called with the following parameter values\n" + "\n" + "* `aes-128-ecb` mode and `key` is less than 16 bytes or `iv` is specified\n" + "* `aes-192-ecb` mode and `key` is less than 24 bytes or `iv` is 
specified\n" + "* `aes-256-ecb` mode and `key` is less than 32 bytes or `iv` is specified\n" + "* `aes-128-cbc` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-192-cbc` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-256-cbc` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-128-cfb1` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-192-cfb1` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-256-cfb1` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-128-cfb8` mode and `key` is less than 16 bytes and if specified `iv` is less than 16 bytes\n" + "* `aes-192-cfb8` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-256-cfb8` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-128-cfb128` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-192-cfb128` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-256-cfb128` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-128-ofb` mode and `key` is less than 16 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-192-ofb` mode and `key` is less than 24 bytes or if specified `iv` is less than 16 bytes\n" + "* `aes-256-ofb` mode and `key` is less than 32 bytes or if specified `iv` is less than 16 bytes\n" + "\n" + ), link=None, level=3, - num='4.12.18') + num="4.12.18", +) SRS_008_ClickHouse_AES_Encryption_Functions = Specification( - name='SRS-008 ClickHouse AES Encryption Functions', + name="SRS-008 ClickHouse AES Encryption Functions", description=None, author=None, - date=None, - status=None, + date=None, + status=None, approved_by=None, approved_date=None, approved_version=None, @@ -1713,118 +1796,408 @@ SRS_008_ClickHouse_AES_Encryption_Functions = Specification( parent=None, children=None, headings=( - Heading(name='Revision History', level=1, num='1'), - Heading(name='Introduction', level=1, num='2'), - Heading(name='Terminology', level=1, num='3'), - Heading(name='AES', level=2, num='3.1'), - Heading(name='AEAD', level=2, num='3.2'), - Heading(name='Requirements', level=1, num='4'), - Heading(name='Generic', level=2, num='4.1'), - Heading(name='RQ.SRS008.AES.Functions', level=3, num='4.1.1'), - Heading(name='Compatibility', level=2, num='4.2'), - Heading(name='RQ.SRS008.AES.Functions.Compatibility.MySQL', level=3, num='4.2.1'), - Heading(name='RQ.SRS008.AES.Functions.Compatibility.Dictionaries', level=3, num='4.2.2'), - Heading(name='RQ.SRS008.AES.Functions.Compatibility.Engine.Database.MySQL', level=3, num='4.2.3'), - Heading(name='RQ.SRS008.AES.Functions.Compatibility.Engine.Table.MySQL', level=3, num='4.2.4'), - Heading(name='RQ.SRS008.AES.Functions.Compatibility.TableFunction.MySQL', level=3, num='4.2.5'), - Heading(name='Different Modes', level=2, num='4.3'), - Heading(name='RQ.SRS008.AES.Functions.DifferentModes', level=3, num='4.3.1'), - Heading(name='Multiple Sources', level=2, num='4.4'), - Heading(name='RQ.SRS008.AES.Functions.DataFromMultipleSources', level=3, num='4.4.1'), - Heading(name='Suppressing Sensitive Values', level=2, num='4.5'), - Heading(name='RQ.SRS008.AES.Functions.SuppressOutputOfSensitiveValues', level=3, num='4.5.1'), - Heading(name='Invalid 
Parameters', level=2, num='4.6'), - Heading(name='RQ.SRS008.AES.Functions.InvalidParameters', level=3, num='4.6.1'), - Heading(name='Mismatched Values', level=2, num='4.7'), - Heading(name='RQ.SRS008.AES.Functions.Mismatched.Key', level=3, num='4.7.1'), - Heading(name='RQ.SRS008.AES.Functions.Mismatched.IV', level=3, num='4.7.2'), - Heading(name='RQ.SRS008.AES.Functions.Mismatched.AAD', level=3, num='4.7.3'), - Heading(name='RQ.SRS008.AES.Functions.Mismatched.Mode', level=3, num='4.7.4'), - Heading(name='Performance', level=2, num='4.8'), - Heading(name='RQ.SRS008.AES.Functions.Check.Performance', level=3, num='4.8.1'), - Heading(name='RQ.SRS008.AES.Function.Check.Performance.BestCase', level=3, num='4.8.2'), - Heading(name='RQ.SRS008.AES.Function.Check.Performance.WorstCase', level=3, num='4.8.3'), - Heading(name='RQ.SRS008.AES.Functions.Check.Compression', level=3, num='4.8.4'), - Heading(name='RQ.SRS008.AES.Functions.Check.Compression.LowCardinality', level=3, num='4.8.5'), - Heading(name='Encrypt Function', level=2, num='4.9'), - Heading(name='RQ.SRS008.AES.Encrypt.Function', level=3, num='4.9.1'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.Syntax', level=3, num='4.9.2'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.NIST.TestVectors', level=3, num='4.9.3'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.Parameters.PlainText', level=3, num='4.9.4'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.Parameters.Key', level=3, num='4.9.5'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode', level=3, num='4.9.6'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.ValuesFormat', level=3, num='4.9.7'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid', level=3, num='4.9.8'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Values', level=3, num='4.9.9'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.Parameters.InitializationVector', level=3, num='4.9.10'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.Parameters.AdditionalAuthenticatedData', level=3, num='4.9.11'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.Parameters.ReturnValue', level=3, num='4.9.12'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.Key.Length.InvalidLengthError', level=3, num='4.9.13'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.InitializationVector.Length.InvalidLengthError', level=3, num='4.9.14'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.InitializationVector.NotValidForMode', level=3, num='4.9.15'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.NotValidForMode', level=3, num='4.9.16'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.Length', level=3, num='4.9.17'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.NonGCMMode.KeyAndInitializationVector.Length', level=3, num='4.9.18'), - Heading(name='RQ.SRS008.AES.Encrypt.Function.GCMMode.KeyAndInitializationVector.Length', level=3, num='4.9.19'), - Heading(name='Decrypt Function', level=2, num='4.10'), - Heading(name='RQ.SRS008.AES.Decrypt.Function', level=3, num='4.10.1'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.Syntax', level=3, num='4.10.2'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.Parameters.CipherText', level=3, num='4.10.3'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.Parameters.Key', level=3, num='4.10.4'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode', level=3, num='4.10.5'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.ValuesFormat', level=3, num='4.10.6'), - 
Heading(name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid', level=3, num='4.10.7'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Values', level=3, num='4.10.8'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.Parameters.InitializationVector', level=3, num='4.10.9'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.Parameters.AdditionalAuthenticatedData', level=3, num='4.10.10'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.Parameters.ReturnValue', level=3, num='4.10.11'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.Key.Length.InvalidLengthError', level=3, num='4.10.12'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.InitializationVector.Length.InvalidLengthError', level=3, num='4.10.13'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.InitializationVector.NotValidForMode', level=3, num='4.10.14'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.NotValidForMode', level=3, num='4.10.15'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.Length', level=3, num='4.10.16'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.NonGCMMode.KeyAndInitializationVector.Length', level=3, num='4.10.17'), - Heading(name='RQ.SRS008.AES.Decrypt.Function.GCMMode.KeyAndInitializationVector.Length', level=3, num='4.10.18'), - Heading(name='MySQL Encrypt Function', level=2, num='4.11'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function', level=3, num='4.11.1'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Syntax', level=3, num='4.11.2'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.PlainText', level=3, num='4.11.3'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Key', level=3, num='4.11.4'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode', level=3, num='4.11.5'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.ValuesFormat', level=3, num='4.11.6'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid', level=3, num='4.11.7'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values', level=3, num='4.11.8'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values.GCM.Error', level=3, num='4.11.9'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values.CTR.Error', level=3, num='4.11.10'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.InitializationVector', level=3, num='4.11.11'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.ReturnValue', level=3, num='4.11.12'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooShortError', level=3, num='4.11.13'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooLong', level=3, num='4.11.14'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooShortError', level=3, num='4.11.15'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooLong', level=3, num='4.11.16'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.NotValidForMode', level=3, num='4.11.17'), - Heading(name='RQ.SRS008.AES.MySQL.Encrypt.Function.Mode.KeyAndInitializationVector.Length', level=3, num='4.11.18'), - Heading(name='MySQL Decrypt Function', level=2, num='4.12'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function', level=3, num='4.12.1'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Syntax', level=3, num='4.12.2'), - 
Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.CipherText', level=3, num='4.12.3'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Key', level=3, num='4.12.4'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode', level=3, num='4.12.5'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.ValuesFormat', level=3, num='4.12.6'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid', level=3, num='4.12.7'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values', level=3, num='4.12.8'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values.GCM.Error', level=3, num='4.12.9'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values.CTR.Error', level=3, num='4.12.10'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.InitializationVector', level=3, num='4.12.11'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.ReturnValue', level=3, num='4.12.12'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooShortError', level=3, num='4.12.13'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooLong', level=3, num='4.12.14'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooShortError', level=3, num='4.12.15'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooLong', level=3, num='4.12.16'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.NotValidForMode', level=3, num='4.12.17'), - Heading(name='RQ.SRS008.AES.MySQL.Decrypt.Function.Mode.KeyAndInitializationVector.Length', level=3, num='4.12.18'), - Heading(name='References', level=1, num='5'), + Heading(name="Revision History", level=1, num="1"), + Heading(name="Introduction", level=1, num="2"), + Heading(name="Terminology", level=1, num="3"), + Heading(name="AES", level=2, num="3.1"), + Heading(name="AEAD", level=2, num="3.2"), + Heading(name="Requirements", level=1, num="4"), + Heading(name="Generic", level=2, num="4.1"), + Heading(name="RQ.SRS008.AES.Functions", level=3, num="4.1.1"), + Heading(name="Compatibility", level=2, num="4.2"), + Heading( + name="RQ.SRS008.AES.Functions.Compatibility.MySQL", level=3, num="4.2.1" ), + Heading( + name="RQ.SRS008.AES.Functions.Compatibility.Dictionaries", + level=3, + num="4.2.2", + ), + Heading( + name="RQ.SRS008.AES.Functions.Compatibility.Engine.Database.MySQL", + level=3, + num="4.2.3", + ), + Heading( + name="RQ.SRS008.AES.Functions.Compatibility.Engine.Table.MySQL", + level=3, + num="4.2.4", + ), + Heading( + name="RQ.SRS008.AES.Functions.Compatibility.TableFunction.MySQL", + level=3, + num="4.2.5", + ), + Heading(name="Different Modes", level=2, num="4.3"), + Heading(name="RQ.SRS008.AES.Functions.DifferentModes", level=3, num="4.3.1"), + Heading(name="Multiple Sources", level=2, num="4.4"), + Heading( + name="RQ.SRS008.AES.Functions.DataFromMultipleSources", level=3, num="4.4.1" + ), + Heading(name="Suppressing Sensitive Values", level=2, num="4.5"), + Heading( + name="RQ.SRS008.AES.Functions.SuppressOutputOfSensitiveValues", + level=3, + num="4.5.1", + ), + Heading(name="Invalid Parameters", level=2, num="4.6"), + Heading(name="RQ.SRS008.AES.Functions.InvalidParameters", level=3, num="4.6.1"), + Heading(name="Mismatched Values", level=2, num="4.7"), + Heading(name="RQ.SRS008.AES.Functions.Mismatched.Key", level=3, num="4.7.1"), + Heading(name="RQ.SRS008.AES.Functions.Mismatched.IV", 
level=3, num="4.7.2"), + Heading(name="RQ.SRS008.AES.Functions.Mismatched.AAD", level=3, num="4.7.3"), + Heading(name="RQ.SRS008.AES.Functions.Mismatched.Mode", level=3, num="4.7.4"), + Heading(name="Performance", level=2, num="4.8"), + Heading(name="RQ.SRS008.AES.Functions.Check.Performance", level=3, num="4.8.1"), + Heading( + name="RQ.SRS008.AES.Function.Check.Performance.BestCase", + level=3, + num="4.8.2", + ), + Heading( + name="RQ.SRS008.AES.Function.Check.Performance.WorstCase", + level=3, + num="4.8.3", + ), + Heading(name="RQ.SRS008.AES.Functions.Check.Compression", level=3, num="4.8.4"), + Heading( + name="RQ.SRS008.AES.Functions.Check.Compression.LowCardinality", + level=3, + num="4.8.5", + ), + Heading(name="Encrypt Function", level=2, num="4.9"), + Heading(name="RQ.SRS008.AES.Encrypt.Function", level=3, num="4.9.1"), + Heading(name="RQ.SRS008.AES.Encrypt.Function.Syntax", level=3, num="4.9.2"), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.NIST.TestVectors", level=3, num="4.9.3" + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.Parameters.PlainText", + level=3, + num="4.9.4", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.Parameters.Key", level=3, num="4.9.5" + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.Parameters.Mode", level=3, num="4.9.6" + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.ValuesFormat", + level=3, + num="4.9.7", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid", + level=3, + num="4.9.8", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Values", + level=3, + num="4.9.9", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.Parameters.InitializationVector", + level=3, + num="4.9.10", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.Parameters.AdditionalAuthenticatedData", + level=3, + num="4.9.11", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.Parameters.ReturnValue", + level=3, + num="4.9.12", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.Key.Length.InvalidLengthError", + level=3, + num="4.9.13", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.InitializationVector.Length.InvalidLengthError", + level=3, + num="4.9.14", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.InitializationVector.NotValidForMode", + level=3, + num="4.9.15", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.NotValidForMode", + level=3, + num="4.9.16", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.AdditionalAuthenticationData.Length", + level=3, + num="4.9.17", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.NonGCMMode.KeyAndInitializationVector.Length", + level=3, + num="4.9.18", + ), + Heading( + name="RQ.SRS008.AES.Encrypt.Function.GCMMode.KeyAndInitializationVector.Length", + level=3, + num="4.9.19", + ), + Heading(name="Decrypt Function", level=2, num="4.10"), + Heading(name="RQ.SRS008.AES.Decrypt.Function", level=3, num="4.10.1"), + Heading(name="RQ.SRS008.AES.Decrypt.Function.Syntax", level=3, num="4.10.2"), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.Parameters.CipherText", + level=3, + num="4.10.3", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.Parameters.Key", level=3, num="4.10.4" + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.Parameters.Mode", level=3, num="4.10.5" + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.ValuesFormat", + level=3, + num="4.10.6", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Value.Invalid", 
+ level=3, + num="4.10.7", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.Parameters.Mode.Values", + level=3, + num="4.10.8", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.Parameters.InitializationVector", + level=3, + num="4.10.9", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.Parameters.AdditionalAuthenticatedData", + level=3, + num="4.10.10", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.Parameters.ReturnValue", + level=3, + num="4.10.11", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.Key.Length.InvalidLengthError", + level=3, + num="4.10.12", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.InitializationVector.Length.InvalidLengthError", + level=3, + num="4.10.13", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.InitializationVector.NotValidForMode", + level=3, + num="4.10.14", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.NotValidForMode", + level=3, + num="4.10.15", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.AdditionalAuthenticationData.Length", + level=3, + num="4.10.16", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.NonGCMMode.KeyAndInitializationVector.Length", + level=3, + num="4.10.17", + ), + Heading( + name="RQ.SRS008.AES.Decrypt.Function.GCMMode.KeyAndInitializationVector.Length", + level=3, + num="4.10.18", + ), + Heading(name="MySQL Encrypt Function", level=2, num="4.11"), + Heading(name="RQ.SRS008.AES.MySQL.Encrypt.Function", level=3, num="4.11.1"), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Syntax", level=3, num="4.11.2" + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.PlainText", + level=3, + num="4.11.3", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Key", + level=3, + num="4.11.4", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode", + level=3, + num="4.11.5", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.ValuesFormat", + level=3, + num="4.11.6", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Value.Invalid", + level=3, + num="4.11.7", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values", + level=3, + num="4.11.8", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values.GCM.Error", + level=3, + num="4.11.9", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.Mode.Values.CTR.Error", + level=3, + num="4.11.10", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.InitializationVector", + level=3, + num="4.11.11", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Parameters.ReturnValue", + level=3, + num="4.11.12", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooShortError", + level=3, + num="4.11.13", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Key.Length.TooLong", + level=3, + num="4.11.14", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooShortError", + level=3, + num="4.11.15", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.Length.TooLong", + level=3, + num="4.11.16", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.InitializationVector.NotValidForMode", + level=3, + num="4.11.17", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Encrypt.Function.Mode.KeyAndInitializationVector.Length", + level=3, + num="4.11.18", + ), + Heading(name="MySQL Decrypt Function", level=2, 
num="4.12"), + Heading(name="RQ.SRS008.AES.MySQL.Decrypt.Function", level=3, num="4.12.1"), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Syntax", level=3, num="4.12.2" + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.CipherText", + level=3, + num="4.12.3", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Key", + level=3, + num="4.12.4", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode", + level=3, + num="4.12.5", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.ValuesFormat", + level=3, + num="4.12.6", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Value.Invalid", + level=3, + num="4.12.7", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values", + level=3, + num="4.12.8", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values.GCM.Error", + level=3, + num="4.12.9", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.Mode.Values.CTR.Error", + level=3, + num="4.12.10", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.InitializationVector", + level=3, + num="4.12.11", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Parameters.ReturnValue", + level=3, + num="4.12.12", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooShortError", + level=3, + num="4.12.13", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Key.Length.TooLong", + level=3, + num="4.12.14", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooShortError", + level=3, + num="4.12.15", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.Length.TooLong", + level=3, + num="4.12.16", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.InitializationVector.NotValidForMode", + level=3, + num="4.12.17", + ), + Heading( + name="RQ.SRS008.AES.MySQL.Decrypt.Function.Mode.KeyAndInitializationVector.Length", + level=3, + num="4.12.18", + ), + Heading(name="References", level=1, num="5"), + ), requirements=( RQ_SRS008_AES_Functions, RQ_SRS008_AES_Functions_Compatibility_MySQL, @@ -1918,8 +2291,8 @@ SRS_008_ClickHouse_AES_Encryption_Functions = Specification( RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooLong, RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_NotValidForMode, RQ_SRS008_AES_MySQL_Decrypt_Function_Mode_KeyAndInitializationVector_Length, - ), - content=''' + ), + content=""" # SRS-008 ClickHouse AES Encryption Functions # Software Requirements Specification @@ -2892,4 +3265,5 @@ version: 1.0 [Revision history]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/aes_encryption/requirements/requirements.md [Git]: https://git-scm.com/ [NIST test vectors]: https://csrc.nist.gov/Projects/Cryptographic-Algorithm-Validation-Program -''') +""", +) diff --git a/tests/testflows/aes_encryption/tests/common.py b/tests/testflows/aes_encryption/tests/common.py index be1c0b98851..f0a10d34411 100644 --- a/tests/testflows/aes_encryption/tests/common.py +++ b/tests/testflows/aes_encryption/tests/common.py @@ -107,39 +107,51 @@ plaintexts = [ ("Decimal32", "reinterpretAsFixedString(toDecimal32(2, 4))"), ("Decimal64", "reinterpretAsFixedString(toDecimal64(2, 4))"), ("Decimal128", "reinterpretAsFixedString(toDecimal128(2, 4))"), - ("UUID", "reinterpretAsFixedString(toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'))"), + ( + "UUID", + 
"reinterpretAsFixedString(toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'))", + ), ("Date", "reinterpretAsFixedString(toDate('2020-01-01'))"), ("DateTime", "reinterpretAsFixedString(toDateTime('2020-01-01 20:01:02'))"), - ("DateTime64", "reinterpretAsFixedString(toDateTime64('2020-01-01 20:01:02.123', 3))"), + ( + "DateTime64", + "reinterpretAsFixedString(toDateTime64('2020-01-01 20:01:02.123', 3))", + ), ("LowCardinality", "toLowCardinality('1')"), ("LowCardinalityFixedString", "toLowCardinality(toFixedString('1',2))"), - #("Array", "[1,2]"), - not supported - #("Tuple", "(1,'a')") - not supported + # ("Array", "[1,2]"), - not supported + # ("Tuple", "(1,'a')") - not supported ("NULL", "reinterpretAsFixedString(toDateOrNull('foo'))"), ("NullableString", "toNullable('1')"), ("NullableStringNull", "toNullable(NULL)"), ("NullableFixedString", "toNullable(toFixedString('1',2))"), ("NullableFixedStringNull", "toNullable(toFixedString(NULL,2))"), ("IPv4", "reinterpretAsFixedString(toIPv4('171.225.130.45'))"), - ("IPv6", "reinterpretAsFixedString(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'))"), + ( + "IPv6", + "reinterpretAsFixedString(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'))", + ), ("Enum8", r"reinterpretAsFixedString(CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)'))"), ("Enum16", r"reinterpretAsFixedString(CAST('a', 'Enum16(\'a\' = 1, \'b\' = 2)'))"), ] _hex = hex + def hex(s): - """Convert string to hex. - """ + """Convert string to hex.""" if isinstance(s, str): - return "".join(['%X' % ord(c) for c in s]) + return "".join(["%X" % ord(c) for c in s]) if isinstance(s, bytes): - return "".join(['%X' % c for c in s]) + return "".join(["%X" % c for c in s]) return _hex(s) + def getuid(): if current().subtype == TestSubType.Example: - testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}" + testname = ( + f"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}" + ) else: testname = f"{basename(current().name).replace(' ', '_').replace(',','')}" - return testname + "_" + str(uuid.uuid1()).replace('-', '_') + return testname + "_" + str(uuid.uuid1()).replace("-", "_") diff --git a/tests/testflows/aes_encryption/tests/compatibility/feature.py b/tests/testflows/aes_encryption/tests/compatibility/feature.py index 5ef547e43f4..509bc894374 100644 --- a/tests/testflows/aes_encryption/tests/compatibility/feature.py +++ b/tests/testflows/aes_encryption/tests/compatibility/feature.py @@ -2,16 +2,17 @@ from testflows.core import * from aes_encryption.requirements import * + @TestFeature @Name("compatibility") -@Requirements( - RQ_SRS008_AES_Functions_DataFromMultipleSources("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_DataFromMultipleSources("1.0")) def feature(self, node="clickhouse1"): - """Check encryption functions usage compatibility. 
- """ + """Check encryption functions usage compatibility.""" self.context.node = self.context.cluster.node(node) Feature(run=load("aes_encryption.tests.compatibility.insert", "feature"), flags=TE) Feature(run=load("aes_encryption.tests.compatibility.select", "feature"), flags=TE) - Feature(run=load("aes_encryption.tests.compatibility.mysql.feature", "feature"), flags=TE) \ No newline at end of file + Feature( + run=load("aes_encryption.tests.compatibility.mysql.feature", "feature"), + flags=TE, + ) diff --git a/tests/testflows/aes_encryption/tests/compatibility/insert.py b/tests/testflows/aes_encryption/tests/compatibility/insert.py index 6ddcc11b584..c4d80c85896 100644 --- a/tests/testflows/aes_encryption/tests/compatibility/insert.py +++ b/tests/testflows/aes_encryption/tests/compatibility/insert.py @@ -10,6 +10,7 @@ from testflows.asserts import values, error, snapshot from aes_encryption.tests.common import modes, mysql_modes + @contextmanager def table(name): node = current().context.node @@ -33,6 +34,7 @@ def table(name): with Finally("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {name}") + @contextmanager def mv_transform(table, transform): node = current().context.node @@ -70,6 +72,7 @@ def mv_transform(table, transform): with And("dropping Null input table", flags=TE): node.query(f"DROP TABLE IF EXISTS {table}_input") + @TestScenario def encrypt_using_materialized_view(self): """Check that we can use `encrypt` function when inserting @@ -82,7 +85,9 @@ def encrypt_using_materialized_view(self): aad = "some random aad" for mode, key_len, iv_len, aad_len in modes: - with Example(f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}""") as example: + with Example( + f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}""" + ) as example: example_key = f"'{key[:key_len]}'" example_mode = mode example_iv = None if not iv_len else f"'{iv[:iv_len]}'" @@ -92,21 +97,32 @@ def encrypt_using_materialized_view(self): with table("user_data"): with mv_transform("user_data", example_transform): with When("I insert encrypted data"): - node.query(f""" + node.query( + f""" INSERT INTO user_data_input (date, name, secret, mode, key) VALUES ('2020-01-01', 'user0', 'user0_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}), ('2020-01-02', 'user1', 'user1_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}), ('2020-01-03', 'user2', 'user2_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}) - """) + """ + ) with And("I read inserted data back"): - node.query("SELECT date, name, hex(secret) FROM user_data ORDER BY date") + node.query( + "SELECT date, name, hex(secret) FROM user_data ORDER BY date" + ) with Then("output must match the snapshot"): with values() as that: - assert that(snapshot(r.output.strip(), "insert", name=f"encrypt_mv_example_{varname(basename(self.name))}")), error() + assert that( + snapshot( + r.output.strip(), + "insert", + name=f"encrypt_mv_example_{varname(basename(self.name))}", + ) + ), error() + @TestScenario def aes_encrypt_mysql_using_materialized_view(self): @@ -120,30 +136,45 @@ def aes_encrypt_mysql_using_materialized_view(self): aad = "some random aad" for mode, key_len, iv_len in mysql_modes: - with Example(f"""mode={mode.strip("'")} key={key_len} iv={iv_len}""") as example: + with Example( + f"""mode={mode.strip("'")} 
key={key_len} iv={iv_len}""" + ) as example: example_key = f"'{key[:key_len]}'" example_mode = mode example_iv = None if not iv_len else f"'{iv[:iv_len]}'" - example_transform = f"aes_encrypt_mysql(mode, secret, key{', iv' if example_iv else ''})" + example_transform = ( + f"aes_encrypt_mysql(mode, secret, key{', iv' if example_iv else ''})" + ) with table("user_data"): with mv_transform("user_data", example_transform): with When("I insert encrypted data"): - node.query(f""" + node.query( + f""" INSERT INTO user_data_input (date, name, secret, mode, key) VALUES ('2020-01-01', 'user0', 'user0_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}), ('2020-01-02', 'user1', 'user1_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}), ('2020-01-03', 'user2', 'user2_secret', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}) - """) + """ + ) with And("I read inserted data back"): - node.query("SELECT date, name, hex(secret) FROM user_data ORDER BY date") + node.query( + "SELECT date, name, hex(secret) FROM user_data ORDER BY date" + ) with Then("output must match the snapshot"): with values() as that: - assert that(snapshot(r.output.strip(), "insert", name=f"aes_encrypt_mysql_mv_example_{varname(basename(self.name))}")), error() + assert that( + snapshot( + r.output.strip(), + "insert", + name=f"aes_encrypt_mysql_mv_example_{varname(basename(self.name))}", + ) + ), error() + @TestScenario def encrypt_using_input_table_function(self): @@ -157,7 +188,9 @@ def encrypt_using_input_table_function(self): aad = "some random aad" for mode, key_len, iv_len, aad_len in modes: - with Example(f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}""") as example: + with Example( + f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}""" + ) as example: example_key = f"'{key[:key_len]}'" example_mode = mode example_iv = None if not iv_len else f"'{iv[:iv_len]}'" @@ -166,7 +199,8 @@ def encrypt_using_input_table_function(self): with table("user_data"): with When("I insert encrypted data"): - node.query(f""" + node.query( + f""" INSERT INTO user_data SELECT @@ -174,14 +208,24 @@ def encrypt_using_input_table_function(self): FROM input('date Date, name String, secret String') FORMAT Values ('2020-01-01', 'user0', 'user0_secret'), ('2020-01-02', 'user1', 'user1_secret'), ('2020-01-03', 'user2', 'user2_secret') - """) + """ + ) with And("I read inserted data back"): - r = node.query("SELECT date, name, hex(secret) FROM user_data ORDER BY date") + r = node.query( + "SELECT date, name, hex(secret) FROM user_data ORDER BY date" + ) with Then("output must match the snapshot"): with values() as that: - assert that(snapshot(r.output.strip(), "insert", name=f"encrypt_input_example_{varname(basename(example.name))}")), error() + assert that( + snapshot( + r.output.strip(), + "insert", + name=f"encrypt_input_example_{varname(basename(example.name))}", + ) + ), error() + @TestScenario def aes_encrypt_mysql_using_input_table_function(self): @@ -195,7 +239,9 @@ def aes_encrypt_mysql_using_input_table_function(self): aad = "some random aad" for mode, key_len, iv_len in mysql_modes: - with Example(f"""mode={mode.strip("'")} key={key_len} iv={iv_len}""") as example: + with Example( + f"""mode={mode.strip("'")} key={key_len} iv={iv_len}""" + ) as example: example_key = f"'{key[:key_len]}'" example_mode = mode example_iv = None if not iv_len else f"'{iv[:iv_len]}'" @@ -203,7 +249,8 @@ def aes_encrypt_mysql_using_input_table_function(self): with 
table("user_data"): with When("I insert encrypted data"): - node.query(f""" + node.query( + f""" INSERT INTO user_data SELECT @@ -211,14 +258,24 @@ def aes_encrypt_mysql_using_input_table_function(self): FROM input('date Date, name String, secret String') FORMAT Values ('2020-01-01', 'user0', 'user0_secret'), ('2020-01-02', 'user1', 'user1_secret'), ('2020-01-03', 'user2', 'user2_secret') - """) + """ + ) with And("I read inserted data back"): - r = node.query("SELECT date, name, hex(secret) FROM user_data ORDER BY date") + r = node.query( + "SELECT date, name, hex(secret) FROM user_data ORDER BY date" + ) with Then("output must match the snapshot"): with values() as that: - assert that(snapshot(r.output.strip(), "insert", name=f"aes_encrypt_mysql_input_example_{varname(basename(example.name))}")), error() + assert that( + snapshot( + r.output.strip(), + "insert", + name=f"aes_encrypt_mysql_input_example_{varname(basename(example.name))}", + ) + ), error() + @TestScenario def decrypt_using_materialized_view(self): @@ -232,10 +289,15 @@ def decrypt_using_materialized_view(self): aad = "some random aad" with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot"), + ).load_module() for mode, key_len, iv_len, aad_len in modes: - with Example(f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}""") as example: + with Example( + f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}""" + ) as example: example_key = f"'{key[:key_len]}'" example_mode = mode example_iv = None if not iv_len else f"'{iv[:iv_len]}'" @@ -244,28 +306,38 @@ def decrypt_using_materialized_view(self): with Given("I have ciphertexts"): example_name = basename(example.name) - ciphertexts = getattr(snapshot_module, varname(f"encrypt_mv_example_{example_name}")) - example_ciphertexts = ["'{}'".format(l.split("\t")[-1].strup("'")) for l in ciphertexts.split("\n")] + ciphertexts = getattr( + snapshot_module, varname(f"encrypt_mv_example_{example_name}") + ) + example_ciphertexts = [ + "'{}'".format(l.split("\t")[-1].strup("'")) + for l in ciphertexts.split("\n") + ] with table("user_data"): with mv_transform("user_data", example_transform): with When("I insert encrypted data"): - node.query(f""" + node.query( + f""" INSERT INTO user_data_input (date, name, secret, mode, key) VALUES ('2020-01-01', 'user0', 'unhex({example_ciphertexts[0]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}), ('2020-01-02', 'user1', 'unhex({example_ciphertexts[1]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}), ('2020-01-03', 'user2', 'unhex({example_ciphertexts[2]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}) - """) + """ + ) with And("I read inserted data back"): - r = node.query("SELECT date, name, secret FROM user_data ORDER BY date") + r = node.query( + "SELECT date, name, secret FROM user_data ORDER BY date" + ) with Then("output must match the expected"): expected = r"""'2020-01-01\tuser0\tuser0_secret\n2020-01-02\tuser1\tuser1_secret\n2020-01-03\tuser2\tuser2_secret'""" assert r.output == expected, error() + @TestScenario def aes_decrypt_mysql_using_materialized_view(self): """Check that we 
can use `aes_decrypt_mysql` function when inserting @@ -278,40 +350,58 @@ def aes_decrypt_mysql_using_materialized_view(self): aad = "some random aad" with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot"), + ).load_module() for mode, key_len, iv_len, aad_len in modes: - with Example(f"""mode={mode.strip("'")} key={key_len} iv={iv_len}""") as example: + with Example( + f"""mode={mode.strip("'")} key={key_len} iv={iv_len}""" + ) as example: example_key = f"'{key[:key_len]}'" example_mode = mode example_iv = None if not iv_len else f"'{iv[:iv_len]}'" example_aad = None if not aad_len else f"'{aad}'" - example_transform = f"aes_decrypt_mysql(mode, secret, key{', iv' if example_iv else ''})" + example_transform = ( + f"aes_decrypt_mysql(mode, secret, key{', iv' if example_iv else ''})" + ) with Given("I have ciphertexts"): example_name = basename(example.name) - ciphertexts = getattr(snapshot_module, varname(f"aes_encrypt_mysql_mv_example_{example_name}")) - example_ciphertexts = ["'{}'".format(l.split("\t")[-1].strup("'")) for l in ciphertexts.split("\n")] + ciphertexts = getattr( + snapshot_module, + varname(f"aes_encrypt_mysql_mv_example_{example_name}"), + ) + example_ciphertexts = [ + "'{}'".format(l.split("\t")[-1].strup("'")) + for l in ciphertexts.split("\n") + ] with table("user_data"): with mv_transform("user_data", example_transform): with When("I insert encrypted data"): - node.query(f""" + node.query( + f""" INSERT INTO user_data_input (date, name, secret, mode, key) VALUES ('2020-01-01', 'user0', 'unhex({example_ciphertexts[0]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}), ('2020-01-02', 'user1', 'unhex({example_ciphertexts[1]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}), ('2020-01-03', 'user2', 'unhex({example_ciphertexts[2]})', {example_mode}, {example_key}{(", " + example_iv) if example_iv else ""}) - """) + """ + ) with And("I read inserted data back"): - r = node.query("SELECT date, name, secret FROM user_data ORDER BY date") + r = node.query( + "SELECT date, name, secret FROM user_data ORDER BY date" + ) with Then("output must match the expected"): expected = r"""'2020-01-01\tuser0\tuser0_secret\n2020-01-02\tuser1\tuser1_secret\n2020-01-03\tuser2\tuser2_secret'""" assert r.output == expected, error() + @TestScenario def decrypt_using_input_table_function(self): """Check that we can use `decrypt` function when inserting @@ -324,10 +414,15 @@ def decrypt_using_input_table_function(self): aad = "some random aad" with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot"), + ).load_module() for mode, key_len, iv_len, aad_len in modes: - with Example(f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}""") as example: + with Example( + f"""mode={mode.strip("'")} iv={iv_len} aad={aad_len}""" + ) as example: example_key = f"'{key[:key_len]}'" example_mode = mode example_iv = None if not iv_len else f"'{iv[:iv_len]}'" @@ -336,12 +431,18 @@ def decrypt_using_input_table_function(self): with Given("I have ciphertexts"): example_name = 
basename(example.name) - ciphertexts = getattr(snapshot_module, varname(f"encrypt_input_example_{example_name}")) - example_ciphertexts = [l.split("\\t")[-1].strip("'") for l in ciphertexts.split("\\n")] + ciphertexts = getattr( + snapshot_module, varname(f"encrypt_input_example_{example_name}") + ) + example_ciphertexts = [ + l.split("\\t")[-1].strip("'") for l in ciphertexts.split("\\n") + ] with table("user_data"): with When("I insert decrypted data"): - node.query(textwrap.dedent(f""" + node.query( + textwrap.dedent( + f""" INSERT INTO user_data SELECT @@ -349,15 +450,20 @@ def decrypt_using_input_table_function(self): FROM input('date Date, name String, secret String') FORMAT Values ('2020-01-01', 'user0', '{example_ciphertexts[0]}'), ('2020-01-02', 'user1', '{example_ciphertexts[1]}'), ('2020-01-03', 'user2', '{example_ciphertexts[2]}') - """)) + """ + ) + ) with And("I read inserted data back"): - r = node.query("SELECT date, name, secret FROM user_data ORDER BY date") + r = node.query( + "SELECT date, name, secret FROM user_data ORDER BY date" + ) expected = """2020-01-01\tuser0\tuser0_secret\n2020-01-02\tuser1\tuser1_secret\n2020-01-03\tuser2\tuser2_secret""" with Then("output must match the expected", description=expected): assert r.output == expected, error() + @TestScenario def aes_decrypt_mysql_using_input_table_function(self): """Check that we can use `aes_decrypt_mysql` function when inserting @@ -370,10 +476,15 @@ def aes_decrypt_mysql_using_input_table_function(self): aad = "some random aad" with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join(current_dir(), "snapshots", "insert.py.insert.snapshot"), + ).load_module() for mode, key_len, iv_len in mysql_modes: - with Example(f"""mode={mode.strip("'")} key={key_len} iv={iv_len}""") as example: + with Example( + f"""mode={mode.strip("'")} key={key_len} iv={iv_len}""" + ) as example: example_key = f"'{key[:key_len]}'" example_mode = mode example_iv = None if not iv_len else f"'{iv[:iv_len]}'" @@ -381,12 +492,19 @@ def aes_decrypt_mysql_using_input_table_function(self): with Given("I have ciphertexts"): example_name = basename(example.name) - ciphertexts = getattr(snapshot_module, varname(f"aes_encrypt_mysql_input_example_{example_name}")) - example_ciphertexts = [l.split("\\t")[-1].strip("'") for l in ciphertexts.split("\\n")] + ciphertexts = getattr( + snapshot_module, + varname(f"aes_encrypt_mysql_input_example_{example_name}"), + ) + example_ciphertexts = [ + l.split("\\t")[-1].strip("'") for l in ciphertexts.split("\\n") + ] with table("user_data"): with When("I insert decrypted data"): - node.query(textwrap.dedent(f""" + node.query( + textwrap.dedent( + f""" INSERT INTO user_data SELECT @@ -394,20 +512,24 @@ def aes_decrypt_mysql_using_input_table_function(self): FROM input('date Date, name String, secret String') FORMAT Values ('2020-01-01', 'user0', '{example_ciphertexts[0]}'), ('2020-01-02', 'user1', '{example_ciphertexts[1]}'), ('2020-01-03', 'user2', '{example_ciphertexts[2]}') - """)) + """ + ) + ) with And("I read inserted data back"): - r = node.query("SELECT date, name, secret FROM user_data ORDER BY date") + r = node.query( + "SELECT date, name, secret FROM user_data ORDER BY date" + ) expected = """2020-01-01\tuser0\tuser0_secret\n2020-01-02\tuser1\tuser1_secret\n2020-01-03\tuser2\tuser2_secret""" with Then("output must match 
the expected", description=expected): assert r.output == expected, error() + @TestFeature @Name("insert") def feature(self, node="clickhouse1"): - """Check encryption functions when used during data insertion into a table. - """ + """Check encryption functions when used during data insertion into a table.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario): diff --git a/tests/testflows/aes_encryption/tests/compatibility/mysql/database_engine.py b/tests/testflows/aes_encryption/tests/compatibility/mysql/database_engine.py index 612e8bc450e..27884eb7cb3 100644 --- a/tests/testflows/aes_encryption/tests/compatibility/mysql/database_engine.py +++ b/tests/testflows/aes_encryption/tests/compatibility/mysql/database_engine.py @@ -7,10 +7,10 @@ from testflows.asserts import error from aes_encryption.requirements import * from aes_encryption.tests.common import mysql_modes, hex + @contextmanager def table(name, node, mysql_node, secret_type): - """Create a table that can be accessed using MySQL database engine. - """ + """Create a table that can be accessed using MySQL database engine.""" try: with Given("table in MySQL"): sql = f""" @@ -23,9 +23,15 @@ def table(name, node, mysql_node, secret_type): ); """ with When("I drop the table if exists"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) with And("I create a table"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("I create a database using MySQL database engine"): sql = f""" @@ -43,15 +49,22 @@ def table(name, node, mysql_node, secret_type): node.query(f"DROP DATABASE IF EXISTS mysql_db") with And("I drop a table in MySQL", flags=TE): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) + @TestOutline(Scenario) -@Examples("mysql_datatype", [ - ("VARBINARY(100)",), - #("VARCHAR(100)",), - ("BLOB", ), - #("TEXT",) -]) +@Examples( + "mysql_datatype", + [ + ("VARBINARY(100)",), + # ("VARCHAR(100)",), + ("BLOB",), + # ("TEXT",) + ], +) def decrypt(self, mysql_datatype): """Check that when using a table provided by MySQL database engine that contains a column encrypted in MySQL stored using specified data type @@ -65,7 +78,7 @@ def decrypt(self, mysql_datatype): for func in ["decrypt", "aes_decrypt_mysql"]: for mode, key_len, iv_len in mysql_modes: - exact_key_size = int(mode.split("-")[1])//8 + exact_key_size = int(mode.split("-")[1]) // 8 if "ecb" not in mode and not iv_len: continue @@ -75,7 +88,9 @@ def decrypt(self, mysql_datatype): if key_len != exact_key_size: continue - with Example(f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}"""): + with Example( + f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}""" + ): with table("user_data", node, mysql_node, mysql_datatype): example_mode = mode example_key = f"'{key[:key_len]}'" @@ -86,34 +101,51 @@ def decrypt(self, mysql_datatype): SET block_encryption_mode = {example_mode}; INSERT INTO user_data VALUES (NULL, '2020-01-01', 'user0', AES_ENCRYPT('secret', {example_key}{(", " + 
example_iv) if example_iv else ", ''"})); """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("I read encrypted data in MySQL to make sure it is valid"): sql = f""" SET block_encryption_mode = {example_mode}; SELECT id, date, name, AES_DECRYPT(secret, {example_key}{(", " + example_iv) if example_iv else ", ''"}) AS secret FROM user_data; """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("I read raw encrypted data in MySQL"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"SELECT id, date, name, hex(secret) as secret FROM user_data;\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "SELECT id, date, name, hex(secret) as secret FROM user_data;"', + exitcode=0, + ) with And("I read raw data using MySQL database engine"): - output = node.query("SELECT id, date, name, hex(secret) AS secret FROM mysql_db.user_data") + output = node.query( + "SELECT id, date, name, hex(secret) AS secret FROM mysql_db.user_data" + ) with And("I read decrypted data using MySQL database engine"): - output = node.query(f"""SELECT hex({func}({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM mysql_db.user_data""").output.strip() + output = node.query( + f"""SELECT hex({func}({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM mysql_db.user_data""" + ).output.strip() with Then("output should match the original plain text"): assert output == hex("secret"), error() + @TestOutline(Scenario) -@Examples("mysql_datatype", [ - ("VARBINARY(100)",), - #("VARCHAR(100)",), - ("BLOB", ), - #("TEXT",) -]) +@Examples( + "mysql_datatype", + [ + ("VARBINARY(100)",), + # ("VARCHAR(100)",), + ("BLOB",), + # ("TEXT",) + ], +) def encrypt(self, mysql_datatype): """Check that when using a table provided by MySQL database engine that we can encrypt data during insert using the `aes_encrypt_mysql` function @@ -126,7 +158,7 @@ def encrypt(self, mysql_datatype): for func in ["encrypt", "aes_encrypt_mysql"]: for mode, key_len, iv_len in mysql_modes: - exact_key_size = int(mode.split("-")[1])//8 + exact_key_size = int(mode.split("-")[1]) // 8 if "ecb" not in mode and not iv_len: continue @@ -136,15 +168,21 @@ def encrypt(self, mysql_datatype): if key_len != exact_key_size: continue - with Example(f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}"""): + with Example( + f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}""" + ): with table("user_data", node, mysql_node, mysql_datatype): example_mode = mode example_key = f"'{key[:key_len]}'" example_iv = None if not iv_len else f"'{iv[:iv_len]}'" example_transform = f"{func}({mode}, secret, {example_key}{(', ' + example_iv) if example_iv else ''})" - with When("I insert encrypted data into a table provided by MySQL database engine"): - node.query(textwrap.dedent(f""" + with When( + "I insert encrypted data into a table provided by MySQL database engine" + ): + node.query( + textwrap.dedent( + f""" INSERT INTO mysql_db.user_data SELECT @@ -152,21 +190,36 @@ def encrypt(self, mysql_datatype): FROM input('id Int32, date Date, name String, secret String') FORMAT Values (1, 
'2020-01-01', 'user0', 'secret') - """)) + """ + ) + ) with And("I read decrypted data using MySQL database engine"): - output = node.query(f"""SELECT hex(aes_decrypt_mysql({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM mysql_db.user_data""").output.strip() + output = node.query( + f"""SELECT hex(aes_decrypt_mysql({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM mysql_db.user_data""" + ).output.strip() - with Then("decrypted data from MySQL database engine should should match the original plain text"): + with Then( + "decrypted data from MySQL database engine should should match the original plain text" + ): assert output == hex("secret"), error() - with And("I read raw data using MySQL database engine to get expected raw data"): - expected_raw_data = node.query("SELECT hex(secret) AS secret FROM mysql_db.user_data").output.strip() + with And( + "I read raw data using MySQL database engine to get expected raw data" + ): + expected_raw_data = node.query( + "SELECT hex(secret) AS secret FROM mysql_db.user_data" + ).output.strip() with And("I read raw encrypted data in MySQL"): - output = mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"SELECT hex(secret) as secret FROM user_data;\"", exitcode=0).output.strip() + output = mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "SELECT hex(secret) as secret FROM user_data;"', + exitcode=0, + ).output.strip() - with Then("check that raw encryted data in MySQL matches the expected"): + with Then( + "check that raw encryted data in MySQL matches the expected" + ): assert expected_raw_data in output, error() with And("I decrypt data in MySQL to make sure it is valid"): @@ -174,16 +227,20 @@ def encrypt(self, mysql_datatype): SET block_encryption_mode = {example_mode}; SELECT id, date, name, hex(AES_DECRYPT(secret, {example_key}{(", " + example_iv) if example_iv else ", ''"})) AS secret FROM user_data; """ - output = mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0).output.strip() + output = mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ).output.strip() - with Then("decryted data in MySQL should match the original plain text"): + with Then( + "decryted data in MySQL should match the original plain text" + ): assert hex("secret") in output, error() + @TestFeature @Name("database engine") -@Requirements( - RQ_SRS008_AES_Functions_Compatibility_Engine_Database_MySQL("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Compatibility_Engine_Database_MySQL("1.0")) def feature(self, node="clickhouse1", mysql_node="mysql1"): """Check usage of encryption functions with [MySQL database engine]. diff --git a/tests/testflows/aes_encryption/tests/compatibility/mysql/dictionary.py b/tests/testflows/aes_encryption/tests/compatibility/mysql/dictionary.py index 812e0222866..89adcabd701 100644 --- a/tests/testflows/aes_encryption/tests/compatibility/mysql/dictionary.py +++ b/tests/testflows/aes_encryption/tests/compatibility/mysql/dictionary.py @@ -7,10 +7,10 @@ from testflows.asserts import error from aes_encryption.requirements import * from aes_encryption.tests.common import mysql_modes, hex + @contextmanager def dictionary(name, node, mysql_node, secret_type): - """Create a table in MySQL and use it a source for a dictionary. 
- """ + """Create a table in MySQL and use it a source for a dictionary.""" try: with Given("table in MySQL"): sql = f""" @@ -23,9 +23,15 @@ def dictionary(name, node, mysql_node, secret_type): ); """ with When("I drop the table if exists"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) with And("I create a table"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("dictionary that uses MySQL table as the external source"): with When("I drop the dictionary if exists"): @@ -59,7 +65,11 @@ def dictionary(name, node, mysql_node, secret_type): node.query(f"DROP DICTIONARY IF EXISTS dict_{name}") with And("I drop a table in MySQL", flags=TE): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) + @contextmanager def parameters_dictionary(name, node, mysql_node): @@ -80,9 +90,15 @@ def parameters_dictionary(name, node, mysql_node): ); """ with When("I drop the table if exists"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) with And("I create a table"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("dictionary that uses MySQL table as the external source"): with When("I drop the dictionary if exists"): @@ -118,7 +134,11 @@ def parameters_dictionary(name, node, mysql_node): node.query(f"DROP DICTIONARY IF EXISTS dict_{name}") with And("I drop a table in MySQL", flags=TE): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) + @TestScenario def parameter_values(self): @@ -134,16 +154,24 @@ def parameter_values(self): plaintext = "'secret'" for encrypt, decrypt in [ - ("encrypt", "decrypt"), - ("aes_encrypt_mysql", "aes_decrypt_mysql") - ]: - with Example(f"{encrypt} and {decrypt}", description=f"Check using dictionary for parameters of {encrypt} and {decrypt} functions."): - with parameters_dictionary("parameters_data", node, mysql_node) as dict_name: + ("encrypt", "decrypt"), + ("aes_encrypt_mysql", "aes_decrypt_mysql"), + ]: + with Example( + f"{encrypt} and {decrypt}", + description=f"Check using dictionary for parameters of {encrypt} and {decrypt} functions.", + ): + with parameters_dictionary( + "parameters_data", node, mysql_node + ) as dict_name: with When("I insert parameters values in MySQL"): sql = f""" INSERT INTO parameters_data VALUES (1, 'user0', {mode}, {key}, {iv}, {plaintext}); """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + 
exitcode=0, + ) with And("I use dictionary values as parameters"): sql = f""" @@ -164,13 +192,17 @@ def parameter_values(self): with Then("output should match the plain text"): assert f"'{output}'" == plaintext, error() + @TestOutline(Scenario) -@Examples("mysql_datatype", [ - ("VARBINARY(100)",), - #("VARCHAR(100)",), - ("BLOB", ), - #("TEXT",) -]) +@Examples( + "mysql_datatype", + [ + ("VARBINARY(100)",), + # ("VARCHAR(100)",), + ("BLOB",), + # ("TEXT",) + ], +) def decrypt(self, mysql_datatype): """Check that when using a dictionary that uses MySQL table as a source and contains a data encrypted in MySQL and stored using specified data type @@ -184,7 +216,7 @@ def decrypt(self, mysql_datatype): for func in ["decrypt", "aes_decrypt_mysql"]: for mode, key_len, iv_len in mysql_modes: - exact_key_size = int(mode.split("-")[1])//8 + exact_key_size = int(mode.split("-")[1]) // 8 if "ecb" not in mode and not iv_len: continue @@ -194,8 +226,12 @@ def decrypt(self, mysql_datatype): if key_len != exact_key_size: continue - with Example(f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}"""): - with dictionary("user_data", node, mysql_node, mysql_datatype) as dict_name: + with Example( + f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}""" + ): + with dictionary( + "user_data", node, mysql_node, mysql_datatype + ) as dict_name: example_mode = mode example_key = f"'{key[:key_len]}'" example_iv = None if not iv_len else f"'{iv[:iv_len]}'" @@ -205,23 +241,36 @@ def decrypt(self, mysql_datatype): SET block_encryption_mode = {example_mode}; INSERT INTO user_data VALUES (NULL, '2020-01-01', 'user0', AES_ENCRYPT('secret', {example_key}{(", " + example_iv) if example_iv else ", ''"})); """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("I read encrypted data in MySQL to make sure it is valid"): sql = f""" SET block_encryption_mode = {example_mode}; SELECT id, date, name, AES_DECRYPT(secret, {example_key}{(", " + example_iv) if example_iv else ", ''"}) AS secret FROM user_data; """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("I read raw encrypted data in MySQL"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"SELECT id, date, name, hex(secret) as secret FROM user_data;\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "SELECT id, date, name, hex(secret) as secret FROM user_data;"', + exitcode=0, + ) with And("I read raw data using MySQL dictionary"): - output = node.query(f"SELECT hex(dictGet('default.{dict_name}', 'secret', toUInt64(1))) AS secret") + output = node.query( + f"SELECT hex(dictGet('default.{dict_name}', 'secret', toUInt64(1))) AS secret" + ) with And("I read decrypted data using MySQL dictionary"): - output = node.query(textwrap.dedent(f""" + output = node.query( + textwrap.dedent( + f""" SELECT hex( {func}( {example_mode}, @@ -229,16 +278,17 @@ def decrypt(self, mysql_datatype): {example_key}{(", " + example_iv) if example_iv else ""} ) ) - """)).output.strip() + """ + ) + ).output.strip() with Then("output should match the original plain text"): assert output == hex("secret"), error() + @TestFeature @Name("dictionary") -@Requirements( - 
RQ_SRS008_AES_Functions_Compatibility_Dictionaries("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Compatibility_Dictionaries("1.0")) def feature(self, node="clickhouse1", mysql_node="mysql1"): """Check usage of encryption functions with [MySQL dictionary]. diff --git a/tests/testflows/aes_encryption/tests/compatibility/mysql/feature.py b/tests/testflows/aes_encryption/tests/compatibility/mysql/feature.py index bc470dd13a7..ed5f47ee991 100644 --- a/tests/testflows/aes_encryption/tests/compatibility/mysql/feature.py +++ b/tests/testflows/aes_encryption/tests/compatibility/mysql/feature.py @@ -2,17 +2,27 @@ from testflows.core import * from aes_encryption.requirements import * + @TestFeature @Name("mysql") -@Requirements( - RQ_SRS008_AES_Functions_Compatibility_MySQL("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Compatibility_MySQL("1.0")) def feature(self, node="clickhouse1"): - """Check encryption functions usage compatibility with MySQL. - """ + """Check encryption functions usage compatibility with MySQL.""" self.context.node = self.context.cluster.node(node) - Feature(run=load("aes_encryption.tests.compatibility.mysql.table_engine", "feature"), flags=TE) - Feature(run=load("aes_encryption.tests.compatibility.mysql.database_engine", "feature"), flags=TE) - Feature(run=load("aes_encryption.tests.compatibility.mysql.table_function", "feature"), flags=TE) - Feature(run=load("aes_encryption.tests.compatibility.mysql.dictionary", "feature"), flags=TE) + Feature( + run=load("aes_encryption.tests.compatibility.mysql.table_engine", "feature"), + flags=TE, + ) + Feature( + run=load("aes_encryption.tests.compatibility.mysql.database_engine", "feature"), + flags=TE, + ) + Feature( + run=load("aes_encryption.tests.compatibility.mysql.table_function", "feature"), + flags=TE, + ) + Feature( + run=load("aes_encryption.tests.compatibility.mysql.dictionary", "feature"), + flags=TE, + ) diff --git a/tests/testflows/aes_encryption/tests/compatibility/mysql/table_engine.py b/tests/testflows/aes_encryption/tests/compatibility/mysql/table_engine.py index afc8b607a6f..7f7d5ada559 100644 --- a/tests/testflows/aes_encryption/tests/compatibility/mysql/table_engine.py +++ b/tests/testflows/aes_encryption/tests/compatibility/mysql/table_engine.py @@ -7,10 +7,10 @@ from testflows.asserts import error from aes_encryption.requirements import * from aes_encryption.tests.common import mysql_modes, hex + @contextmanager def table(name, node, mysql_node, secret_type): - """Create a table that can be accessed using MySQL table engine. 
- """ + """Create a table that can be accessed using MySQL table engine.""" try: with Given("table in MySQL"): sql = f""" @@ -23,9 +23,15 @@ def table(name, node, mysql_node, secret_type): ); """ with When("I drop the table if exists"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) with And("I create a table"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("I create a table using MySQL table engine"): sql = f""" @@ -49,15 +55,22 @@ def table(name, node, mysql_node, secret_type): node.query(f"DROP TABLE IF EXISTS mysql_{name}") with And("I drop a table in MySQL", flags=TE): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) + @TestOutline(Scenario) -@Examples("mysql_datatype", [ - ("VARBINARY(100)",), - #("VARCHAR(100)",), - ("BLOB", ), - #("TEXT",) -]) +@Examples( + "mysql_datatype", + [ + ("VARBINARY(100)",), + # ("VARCHAR(100)",), + ("BLOB",), + # ("TEXT",) + ], +) def decrypt(self, mysql_datatype): """Check that when using a table with MySQL table engine that contains a column encrypted in MySQL stored using specified data type @@ -71,7 +84,7 @@ def decrypt(self, mysql_datatype): for func in ["decrypt", "aes_decrypt_mysql"]: for mode, key_len, iv_len in mysql_modes: - exact_key_size = int(mode.split("-")[1])//8 + exact_key_size = int(mode.split("-")[1]) // 8 if "ecb" not in mode and not iv_len: continue @@ -81,7 +94,9 @@ def decrypt(self, mysql_datatype): if key_len != exact_key_size: continue - with Example(f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}"""): + with Example( + f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}""" + ): with table("user_data", node, mysql_node, mysql_datatype): example_mode = mode example_key = f"'{key[:key_len]}'" @@ -92,34 +107,51 @@ def decrypt(self, mysql_datatype): SET block_encryption_mode = {example_mode}; INSERT INTO user_data VALUES (NULL, '2020-01-01', 'user0', AES_ENCRYPT('secret', {example_key}{(", " + example_iv) if example_iv else ", ''"})); """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("I read encrypted data in MySQL to make sure it is valid"): sql = f""" SET block_encryption_mode = {example_mode}; SELECT id, date, name, AES_DECRYPT(secret, {example_key}{(", " + example_iv) if example_iv else ", ''"}) AS secret FROM user_data; """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("I read raw encrypted data in MySQL"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"SELECT id, date, name, hex(secret) as secret FROM user_data;\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "SELECT id, date, name, hex(secret) as secret FROM user_data;"', 
+ exitcode=0, + ) with And("I read raw data using MySQL table engine"): - output = node.query("SELECT id, date, name, hex(secret) AS secret FROM mysql_user_data") + output = node.query( + "SELECT id, date, name, hex(secret) AS secret FROM mysql_user_data" + ) with And("I read decrypted data via MySQL table engine"): - output = node.query(f"""SELECT hex({func}({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM mysql_user_data""").output.strip() + output = node.query( + f"""SELECT hex({func}({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM mysql_user_data""" + ).output.strip() with Then("the output should match the original plain text"): assert output == hex("secret"), error() + @TestOutline(Scenario) -@Examples("mysql_datatype", [ - ("VARBINARY(100)",), - #("VARCHAR(100)",), - ("BLOB", ), - #("TEXT",) -]) +@Examples( + "mysql_datatype", + [ + ("VARBINARY(100)",), + # ("VARCHAR(100)",), + ("BLOB",), + # ("TEXT",) + ], +) def encrypt(self, mysql_datatype): """Check that when using a table with MySQL table engine that we can encrypt data during insert using the `encrypt` and `aes_encrypt_mysql` @@ -132,7 +164,7 @@ def encrypt(self, mysql_datatype): for func in ["encrypt", "aes_encrypt_mysql"]: for mode, key_len, iv_len in mysql_modes: - exact_key_size = int(mode.split("-")[1])//8 + exact_key_size = int(mode.split("-")[1]) // 8 if "ecb" not in mode and not iv_len: continue @@ -150,7 +182,9 @@ def encrypt(self, mysql_datatype): example_transform = f"{func}({mode}, secret, {example_key}{(', ' + example_iv) if example_iv else ''})" with When("I insert encrypted data into MySQL table engine"): - node.query(textwrap.dedent(f""" + node.query( + textwrap.dedent( + f""" INSERT INTO mysql_user_data SELECT @@ -158,21 +192,36 @@ def encrypt(self, mysql_datatype): FROM input('id Nullable(Int32), date Date, name String, secret String') FORMAT Values (null, '2020-01-01', 'user0', 'secret') - """)) + """ + ) + ) with And("I read decrypted data via MySQL table engine"): - output = node.query(f"""SELECT hex(aes_decrypt_mysql({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM mysql_user_data""").output.strip() + output = node.query( + f"""SELECT hex(aes_decrypt_mysql({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM mysql_user_data""" + ).output.strip() - with Then("decrypted data from MySQL table engine should should match the original plain text"): + with Then( + "decrypted data from MySQL table engine should should match the original plain text" + ): assert output == hex("secret"), error() - with And("I read raw data using MySQL table engine to get expected raw data"): - expected_raw_data = node.query("SELECT hex(secret) AS secret FROM mysql_user_data").output.strip() + with And( + "I read raw data using MySQL table engine to get expected raw data" + ): + expected_raw_data = node.query( + "SELECT hex(secret) AS secret FROM mysql_user_data" + ).output.strip() with And("I read raw encrypted data in MySQL"): - output = mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"SELECT hex(secret) as secret FROM user_data;\"", exitcode=0).output.strip() + output = mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "SELECT hex(secret) as secret FROM user_data;"', + exitcode=0, + ).output.strip() - with Then("check that raw encryted data in MySQL matches the expected"): + with Then( + "check that raw encryted data in MySQL matches the expected" + 
): assert expected_raw_data in output, error() with And("I decrypt data in MySQL to make sure it is valid"): @@ -180,16 +229,20 @@ def encrypt(self, mysql_datatype): SET block_encryption_mode = {example_mode}; SELECT id, date, name, hex(AES_DECRYPT(secret, {example_key}{(", " + example_iv) if example_iv else ", ''"})) AS secret FROM user_data; """ - output = mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0).output.strip() + output = mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ).output.strip() - with Then("decryted data in MySQL should match the original plain text"): + with Then( + "decryted data in MySQL should match the original plain text" + ): assert hex("secret") in output, error() + @TestFeature @Name("table engine") -@Requirements( - RQ_SRS008_AES_Functions_Compatibility_Engine_Table_MySQL("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Compatibility_Engine_Table_MySQL("1.0")) def feature(self, node="clickhouse1", mysql_node="mysql1"): """Check usage of encryption functions with [MySQL table engine]. diff --git a/tests/testflows/aes_encryption/tests/compatibility/mysql/table_function.py b/tests/testflows/aes_encryption/tests/compatibility/mysql/table_function.py index 91ea8956cad..9c38efd2807 100644 --- a/tests/testflows/aes_encryption/tests/compatibility/mysql/table_function.py +++ b/tests/testflows/aes_encryption/tests/compatibility/mysql/table_function.py @@ -7,10 +7,10 @@ from testflows.asserts import error from aes_encryption.requirements import * from aes_encryption.tests.common import mysql_modes, hex + @contextmanager def table(name, node, mysql_node, secret_type): - """Create a table that can be accessed using MySQL table function. 
- """ + """Create a table that can be accessed using MySQL table function.""" try: with Given("table in MySQL"): sql = f""" @@ -23,22 +23,35 @@ def table(name, node, mysql_node, secret_type): ); """ with When("I drop the table if exists"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) with And("I create a table"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) yield f"mysql('{mysql_node.name}:3306', 'db', 'user_data', 'user', 'password')" finally: with And("I drop a table in MySQL", flags=TE): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) + @TestOutline(Scenario) -@Examples("mysql_datatype", [ - ("VARBINARY(100)",), - #("VARCHAR(100)",), - ("BLOB", ), - #("TEXT",) -]) +@Examples( + "mysql_datatype", + [ + ("VARBINARY(100)",), + # ("VARCHAR(100)",), + ("BLOB",), + # ("TEXT",) + ], +) def decrypt(self, mysql_datatype): """Check that when using a table accessed through MySQL table function that contains a column encrypted in MySQL stored using specified data type @@ -52,7 +65,7 @@ def decrypt(self, mysql_datatype): for func in ["decrypt", "aes_decrypt_mysql"]: for mode, key_len, iv_len in mysql_modes: - exact_key_size = int(mode.split("-")[1])//8 + exact_key_size = int(mode.split("-")[1]) // 8 if "ecb" not in mode and not iv_len: continue @@ -62,8 +75,12 @@ def decrypt(self, mysql_datatype): if key_len != exact_key_size: continue - with Example(f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}"""): - with table("user_data", node, mysql_node, mysql_datatype) as table_function: + with Example( + f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}""" + ): + with table( + "user_data", node, mysql_node, mysql_datatype + ) as table_function: example_mode = mode example_key = f"'{key[:key_len]}'" example_iv = None if not iv_len else f"'{iv[:iv_len]}'" @@ -73,34 +90,51 @@ def decrypt(self, mysql_datatype): SET block_encryption_mode = {example_mode}; INSERT INTO user_data VALUES (NULL, '2020-01-01', 'user0', AES_ENCRYPT('secret', {example_key}{(", " + example_iv) if example_iv else ", ''"})); """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("I read encrypted data in MySQL to make sure it is valid"): sql = f""" SET block_encryption_mode = {example_mode}; SELECT id, date, name, AES_DECRYPT(secret, {example_key}{(", " + example_iv) if example_iv else ", ''"}) AS secret FROM user_data; """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("I read raw encrypted data in MySQL"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"SELECT id, date, name, hex(secret) as secret FROM user_data;\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password 
mysql -D db -u user -e "SELECT id, date, name, hex(secret) as secret FROM user_data;"', + exitcode=0, + ) with And("I read raw data using MySQL table function"): - output = node.query(f"SELECT id, date, name, hex(secret) AS secret FROM {table_function}") + output = node.query( + f"SELECT id, date, name, hex(secret) AS secret FROM {table_function}" + ) with And("I read decrypted data using MySQL table function"): - output = node.query(f"""SELECT hex({func}({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM {table_function}""").output.strip() + output = node.query( + f"""SELECT hex({func}({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM {table_function}""" + ).output.strip() with Then("output should match the original plain text"): assert output == hex("secret"), error() + @TestOutline(Scenario) -@Examples("mysql_datatype", [ - ("VARBINARY(100)",), - #("VARCHAR(100)",), - ("BLOB", ), - #("TEXT",) -]) +@Examples( + "mysql_datatype", + [ + ("VARBINARY(100)",), + # ("VARCHAR(100)",), + ("BLOB",), + # ("TEXT",) + ], +) def encrypt(self, mysql_datatype): """Check that when using a table accessed through MySQL table function that we can encrypt data during insert using the `aes_encrypt_mysql` function @@ -113,7 +147,7 @@ def encrypt(self, mysql_datatype): for func in ["encrypt", "aes_encrypt_mysql"]: for mode, key_len, iv_len in mysql_modes: - exact_key_size = int(mode.split("-")[1])//8 + exact_key_size = int(mode.split("-")[1]) // 8 if "ecb" not in mode and not iv_len: continue @@ -123,15 +157,23 @@ def encrypt(self, mysql_datatype): if key_len != exact_key_size: continue - with Example(f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}"""): - with table("user_data", node, mysql_node, mysql_datatype) as table_function: + with Example( + f"""{func} mode={mode.strip("'")} key={key_len} iv={iv_len}""" + ): + with table( + "user_data", node, mysql_node, mysql_datatype + ) as table_function: example_mode = mode example_key = f"'{key[:key_len]}'" example_iv = None if not iv_len else f"'{iv[:iv_len]}'" example_transform = f"{func}({mode}, secret, {example_key}{(', ' + example_iv) if example_iv else ''})" - with When("I insert encrypted data into a table provided by MySQL database engine"): - node.query(textwrap.dedent(f""" + with When( + "I insert encrypted data into a table provided by MySQL database engine" + ): + node.query( + textwrap.dedent( + f""" INSERT INTO TABLE FUNCTION {table_function} SELECT @@ -139,21 +181,36 @@ def encrypt(self, mysql_datatype): FROM input('id Int32, date Date, name String, secret String') FORMAT Values (1, '2020-01-01', 'user0', 'secret') - """)) + """ + ) + ) with And("I read decrypted data using MySQL database engine"): - output = node.query(f"""SELECT hex(aes_decrypt_mysql({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM {table_function}""").output.strip() + output = node.query( + f"""SELECT hex(aes_decrypt_mysql({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""})) FROM {table_function}""" + ).output.strip() - with Then("decrypted data from MySQL database engine should should match the original plain text"): + with Then( + "decrypted data from MySQL database engine should should match the original plain text" + ): assert output == hex("secret"), error() - with And("I read raw data using MySQL database engine to get expected raw data"): - expected_raw_data = node.query(f"SELECT hex(secret) AS secret FROM 
{table_function}").output.strip() + with And( + "I read raw data using MySQL database engine to get expected raw data" + ): + expected_raw_data = node.query( + f"SELECT hex(secret) AS secret FROM {table_function}" + ).output.strip() with And("I read raw encrypted data in MySQL"): - output = mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"SELECT hex(secret) as secret FROM user_data;\"", exitcode=0).output.strip() + output = mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "SELECT hex(secret) as secret FROM user_data;"', + exitcode=0, + ).output.strip() - with Then("check that raw encryted data in MySQL matches the expected"): + with Then( + "check that raw encryted data in MySQL matches the expected" + ): assert expected_raw_data in output, error() with And("I decrypt data in MySQL to make sure it is valid"): @@ -161,16 +218,20 @@ def encrypt(self, mysql_datatype): SET block_encryption_mode = {example_mode}; SELECT id, date, name, hex(AES_DECRYPT(secret, {example_key}{(", " + example_iv) if example_iv else ", ''"})) AS secret FROM user_data; """ - output = mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0).output.strip() + output = mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ).output.strip() - with Then("decryted data in MySQL should match the original plain text"): + with Then( + "decryted data in MySQL should match the original plain text" + ): assert hex("secret") in output, error() + @TestFeature @Name("table function") -@Requirements( - RQ_SRS008_AES_Functions_Compatibility_TableFunction_MySQL("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Compatibility_TableFunction_MySQL("1.0")) def feature(self, node="clickhouse1", mysql_node="mysql1"): """Check usage of encryption functions with [MySQL table function]. diff --git a/tests/testflows/aes_encryption/tests/compatibility/select.py b/tests/testflows/aes_encryption/tests/compatibility/select.py index f81920c65d3..057eb2947bd 100644 --- a/tests/testflows/aes_encryption/tests/compatibility/select.py +++ b/tests/testflows/aes_encryption/tests/compatibility/select.py @@ -7,6 +7,7 @@ from testflows.asserts import values, error, snapshot from aes_encryption.tests.common import modes, mysql_modes + @contextmanager def table(name, sql): node = current().context.node @@ -22,18 +23,22 @@ def table(name, sql): with Finally("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {name}") + @TestScenario def decrypt(self): - """Check decrypting column when reading data from a table. 
- """ + """Check decrypting column when reading data from a table.""" node = self.context.node key = f"{'1' * 64}" iv = f"{'2' * 64}" aad = "some random aad" for mode, key_len, iv_len, aad_len in modes: - with Example(f"""mode={mode.strip("'")} key={key_len} iv={iv_len} aad={aad_len}""") as example: - with table("user_table", """ + with Example( + f"""mode={mode.strip("'")} key={key_len} iv={iv_len} aad={aad_len}""" + ) as example: + with table( + "user_table", + """ CREATE TABLE {name} ( date Nullable(Date), @@ -41,7 +46,8 @@ def decrypt(self): secret Nullable(String) ) ENGINE = Memory() - """): + """, + ): example_mode = mode example_key = f"'{key[:key_len]}'" @@ -49,20 +55,29 @@ def decrypt(self): example_aad = None if not aad_len else f"'{aad}'" with When("I insert encrypted data"): - encrypted_secret = node.query(f"""SELECT hex(encrypt({example_mode}, 'secret', {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}))""").output.strip() - node.query(textwrap.dedent(f""" + encrypted_secret = node.query( + f"""SELECT hex(encrypt({example_mode}, 'secret', {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}))""" + ).output.strip() + node.query( + textwrap.dedent( + f""" INSERT INTO user_table (date, name, secret) VALUES ('2020-01-01', 'user0', unhex('{encrypted_secret}')) - """)) + """ + ) + ) with And("I decrypt data during query"): - output = node.query(f"""SELECT name, decrypt({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}) AS secret FROM user_table FORMAT JSONEachRow""").output.strip() + output = node.query( + f"""SELECT name, decrypt({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}) AS secret FROM user_table FORMAT JSONEachRow""" + ).output.strip() with Then("I should get back the original plain text"): assert output == '{"name":"user0","secret":"secret"}', error() + @TestScenario def decrypt_multiple(self, count=1000): """Check decrypting column when reading multiple entries @@ -75,8 +90,12 @@ def decrypt_multiple(self, count=1000): aad = "some random aad" for mode, key_len, iv_len, aad_len in modes: - with Example(f"""mode={mode.strip("'")} key={key_len} iv={iv_len} aad={aad_len}""") as example: - with table("user_table", """ + with Example( + f"""mode={mode.strip("'")} key={key_len} iv={iv_len} aad={aad_len}""" + ) as example: + with table( + "user_table", + """ CREATE TABLE {name} ( date Nullable(Date), @@ -84,7 +103,8 @@ def decrypt_multiple(self, count=1000): secret Nullable(String) ) ENGINE = Memory() - """): + """, + ): example_mode = mode example_key = f"'{key[:key_len]}'" @@ -92,19 +112,32 @@ def decrypt_multiple(self, count=1000): example_aad = None if not aad_len else f"'{aad}'" with When("I insert encrypted data"): - encrypted_secret = node.query(f"""SELECT hex(encrypt({example_mode}, 'secret', {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}))""").output.strip() - values = [f"('2020-01-01', 'user0', unhex('{encrypted_secret}'))"] * count + encrypted_secret = node.query( + f"""SELECT hex(encrypt({example_mode}, 'secret', {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}))""" + ).output.strip() + values = [ + f"('2020-01-01', 'user0', unhex('{encrypted_secret}'))" + ] * count node.query( "INSERT INTO user_table\n" " 
(date, name, secret)\n" - f"VALUES {', '.join(values)}") + f"VALUES {', '.join(values)}" + ) - with And("I decrypt data", description="using a subquery and get the number of entries that match the plaintext"): - output = node.query(f"""SELECT count() AS count FROM (SELECT name, decrypt({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}) AS secret FROM user_table) WHERE secret = 'secret' FORMAT JSONEachRow""").output.strip() + with And( + "I decrypt data", + description="using a subquery and get the number of entries that match the plaintext", + ): + output = node.query( + f"""SELECT count() AS count FROM (SELECT name, decrypt({example_mode}, secret, {example_key}{(", " + example_iv) if example_iv else ""}{(", " + example_aad) if example_aad else ""}) AS secret FROM user_table) WHERE secret = 'secret' FORMAT JSONEachRow""" + ).output.strip() - with Then("I should get back the expected result", description=f"{count}"): + with Then( + "I should get back the expected result", description=f"{count}" + ): assert output == f'{{"count":"{count}"}}', error() + @TestScenario def decrypt_unique(self): """Check decrypting column when reading multiple entries @@ -116,7 +149,9 @@ def decrypt_unique(self): iv = f"{'2' * 64}" aad = "some random aad" - with table("user_table", """ + with table( + "user_table", + """ CREATE TABLE {name} ( id UInt64, @@ -125,7 +160,8 @@ def decrypt_unique(self): secret Nullable(String) ) ENGINE = Memory() - """): + """, + ): user_modes = [] user_keys = [] @@ -142,9 +178,11 @@ def decrypt_unique(self): with When(f"I get encrypted data for user {user_id}"): encrypted_secret = node.query( - f"""SELECT hex(encrypt({user_modes[-1]}, 'secret', {user_keys[-1]}))""" - ).output.strip() - values.append(f"({user_id}, '2020-01-01', 'user{user_id}', unhex('{encrypted_secret}'))") + f"""SELECT hex(encrypt({user_modes[-1]}, 'secret', {user_keys[-1]}))""" + ).output.strip() + values.append( + f"({user_id}, '2020-01-01', 'user{user_id}', unhex('{encrypted_secret}'))" + ) user_id += 1 @@ -152,10 +190,13 @@ def decrypt_unique(self): node.query( "INSERT INTO user_table\n" " (id, date, name, secret)\n" - f"VALUES {', '.join(values)}") + f"VALUES {', '.join(values)}" + ) with And("I read decrypted data for all users"): - output = node.query(textwrap.dedent(f""" + output = node.query( + textwrap.dedent( + f""" SELECT count() AS count FROM @@ -170,16 +211,18 @@ def decrypt_unique(self): WHERE secret = 'secret' FORMAT JSONEachRow - """)).output.strip() + """ + ) + ).output.strip() with Then("I should get back the expected result", description=f"{count}"): assert output == f'{{"count":"{count}"}}', error() + @TestFeature @Name("select") def feature(self, node="clickhouse1"): - """Check encryption functions when used during table querying. 
- """ + """Check encryption functions when used during table querying.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario): diff --git a/tests/testflows/aes_encryption/tests/decrypt.py b/tests/testflows/aes_encryption/tests/decrypt.py index 6c99c4d9d41..1c7d958737c 100644 --- a/tests/testflows/aes_encryption/tests/decrypt.py +++ b/tests/testflows/aes_encryption/tests/decrypt.py @@ -10,10 +10,24 @@ from testflows.asserts import error from aes_encryption.requirements.requirements import * from aes_encryption.tests.common import * + @TestOutline -def decrypt(self, ciphertext=None, key=None, mode=None, iv=None, aad=None, exitcode=0, message=None, step=When, cast=None, endcast=None, compare=None, no_checks=False): - """Execute `decrypt` function with the specified parameters. - """ +def decrypt( + self, + ciphertext=None, + key=None, + mode=None, + iv=None, + aad=None, + exitcode=0, + message=None, + step=When, + cast=None, + endcast=None, + compare=None, + no_checks=False, +): + """Execute `decrypt` function with the specified parameters.""" params = [] if mode is not None: params.append(mode) @@ -33,7 +47,10 @@ def decrypt(self, ciphertext=None, key=None, mode=None, iv=None, aad=None, exitc sql = f"{compare} = {sql}" sql = f"SELECT {sql}" - return current().context.node.query(sql, step=step, exitcode=exitcode, message=message, no_checks=no_checks) + return current().context.node.query( + sql, step=step, exitcode=exitcode, message=message, no_checks=no_checks + ) + @TestScenario @Requirements( @@ -58,19 +75,33 @@ def invalid_ciphertext(self): continue with When(f"invalid ciphertext={ciphertext}"): if "cfb" in mode or "ofb" in mode or "ctr" in mode: - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len]}'", mode=mode, iv=d_iv, aad=d_aad, cast="hex") + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + mode=mode, + iv=d_iv, + aad=d_aad, + cast="hex", + ) else: with When("I execute decrypt function"): - r = decrypt(ciphertext=ciphertext, key=f"'{key[:key_len]}'", mode=mode, iv=d_iv, aad=d_aad, no_checks=True, step=By) + r = decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + mode=mode, + iv=d_iv, + aad=d_aad, + no_checks=True, + step=By, + ) with Then("exitcode is not zero"): assert r.exitcode in [198, 36] with And("exception is present in the output"): assert "DB::Exception:" in r.output + @TestScenario -@Requirements( - RQ_SRS008_AES_Functions_InvalidParameters("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_InvalidParameters("1.0")) def invalid_parameters(self): """Check that `decrypt` function returns an error when we call it with invalid parameters. 
@@ -78,110 +109,236 @@ def invalid_parameters(self): ciphertext = "unhex('AA1826B5F66A903C888D5DCDA9FB63D1D9CCA10EC55F59D6C00D37')" with Example("no parameters"): - decrypt(exitcode=42, message="DB::Exception: Incorrect number of arguments for function decrypt provided 0, expected 3 to 5") + decrypt( + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function decrypt provided 0, expected 3 to 5", + ) with Example("missing key and mode"): - decrypt(ciphertext=ciphertext, exitcode=42, - message="DB::Exception: Incorrect number of arguments for function decrypt provided 1") + decrypt( + ciphertext=ciphertext, + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function decrypt provided 1", + ) with Example("missing mode"): - decrypt(ciphertext=ciphertext, key="'123'", exitcode=42, - message="DB::Exception: Incorrect number of arguments for function decrypt provided 2") + decrypt( + ciphertext=ciphertext, + key="'123'", + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function decrypt provided 2", + ) with Example("bad key type - UInt8"): - decrypt(ciphertext=ciphertext, key="123", mode="'aes-128-ecb'", exitcode=43, - message="DB::Exception: Received from localhost:9000. DB::Exception: Illegal type of argument #3") + decrypt( + ciphertext=ciphertext, + key="123", + mode="'aes-128-ecb'", + exitcode=43, + message="DB::Exception: Received from localhost:9000. DB::Exception: Illegal type of argument #3", + ) with Example("bad mode type - forgot quotes"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="aes-128-ecb", exitcode=47, - message="DB::Exception: Missing columns: 'ecb' 'aes' while processing query") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="aes-128-ecb", + exitcode=47, + message="DB::Exception: Missing columns: 'ecb' 'aes' while processing query", + ) with Example("bad mode type - UInt8"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="128", exitcode=43, - message="DB::Exception: Illegal type of argument #1 'mode'") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="128", + exitcode=43, + message="DB::Exception: Illegal type of argument #1 'mode'", + ) with Example("bad iv type - UInt8"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128-cbc'", iv='128', exitcode=43, - message="DB::Exception: Illegal type of argument") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128-cbc'", + iv="128", + exitcode=43, + message="DB::Exception: Illegal type of argument", + ) with Example("bad aad type - UInt8"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128-gcm'", iv="'012345678912'", aad="123", exitcode=43, - message="DB::Exception: Illegal type of argument") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128-gcm'", + iv="'012345678912'", + aad="123", + exitcode=43, + message="DB::Exception: Illegal type of argument", + ) - with Example("iv not valid for mode", requirements=[RQ_SRS008_AES_Decrypt_Function_InitializationVector_NotValidForMode("1.0")]): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128-ecb'", iv="'012345678912'", exitcode=36, - message="DB::Exception: aes-128-ecb does not support IV") + with Example( + "iv not valid for mode", + requirements=[ + RQ_SRS008_AES_Decrypt_Function_InitializationVector_NotValidForMode("1.0") + ], + ): + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + 
mode="'aes-128-ecb'", + iv="'012345678912'", + exitcode=36, + message="DB::Exception: aes-128-ecb does not support IV", + ) - with Example("iv not valid for mode - size 0", requirements=[RQ_SRS008_AES_Decrypt_Function_InitializationVector_NotValidForMode("1.0")]): - decrypt(ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", key="'0123456789123456'", mode="'aes-128-ecb'", iv="''", exitcode=36, - message="DB::Exception: aes-128-ecb does not support IV") + with Example( + "iv not valid for mode - size 0", + requirements=[ + RQ_SRS008_AES_Decrypt_Function_InitializationVector_NotValidForMode("1.0") + ], + ): + decrypt( + ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", + key="'0123456789123456'", + mode="'aes-128-ecb'", + iv="''", + exitcode=36, + message="DB::Exception: aes-128-ecb does not support IV", + ) - with Example("aad not valid for mode", requirements=[RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_NotValidForMode("1.0")]): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128-cbc'", iv="'0123456789123456'", aad="'aad'", exitcode=36, - message="DB::Exception: AAD can be only set for GCM-mode") + with Example( + "aad not valid for mode", + requirements=[ + RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_NotValidForMode( + "1.0" + ) + ], + ): + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128-cbc'", + iv="'0123456789123456'", + aad="'aad'", + exitcode=36, + message="DB::Exception: AAD can be only set for GCM-mode", + ) - with Example("invalid mode value", requirements=[RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_Invalid("1.0")]): + with Example( + "invalid mode value", + requirements=[ + RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Value_Invalid("1.0") + ], + ): with When("using unsupported cfb1 mode"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128-cfb1'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128-cfb1") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128-cfb1'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128-cfb1", + ) with When("using unsupported cfb8 mode"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128-cfb8'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128-cfb8") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128-cfb8'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128-cfb8", + ) with When("typo in the block algorithm"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128-eeb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128-eeb") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128-eeb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128-eeb", + ) with When("typo in the key size"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-127-ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-127-ecb") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-127-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-127-ecb", + ) with When("typo in the aes prefix"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'aee-128-ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aee-128-ecb") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aee-128-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aee-128-ecb", + ) with 
When("missing last dash"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128ecb") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128ecb", + ) with When("missing first dash"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes128-ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aes128-ecb") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes128-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes128-ecb", + ) with When("all capitals"): - decrypt(ciphertext=ciphertext, key="'0123456789123456'", mode="'AES-128-ECB'", exitcode=36, - message="DB::Exception: Invalid mode: AES-128-ECB") + decrypt( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'AES-128-ECB'", + exitcode=36, + message="DB::Exception: Invalid mode: AES-128-ECB", + ) + @TestOutline(Scenario) @Requirements( RQ_SRS008_AES_Decrypt_Function_Key_Length_InvalidLengthError("1.0"), - RQ_SRS008_AES_Decrypt_Function_InitializationVector_Length_InvalidLengthError("1.0"), + RQ_SRS008_AES_Decrypt_Function_InitializationVector_Length_InvalidLengthError( + "1.0" + ), RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_NotValidForMode("1.0"), - RQ_SRS008_AES_Decrypt_Function_NonGCMMode_KeyAndInitializationVector_Length("1.0") + RQ_SRS008_AES_Decrypt_Function_NonGCMMode_KeyAndInitializationVector_Length("1.0"), +) +@Examples( + "mode key_len iv_len aad", + [ + # ECB + ("'aes-128-ecb'", 16, None, None), + ("'aes-192-ecb'", 24, None, None), + ("'aes-256-ecb'", 32, None, None), + # CBC + ("'aes-128-cbc'", 16, 16, None), + ("'aes-192-cbc'", 24, 16, None), + ("'aes-256-cbc'", 32, 16, None), + # CFB128 + ("'aes-128-cfb128'", 16, 16, None), + ("'aes-192-cfb128'", 24, 16, None), + ("'aes-256-cfb128'", 32, 16, None), + # OFB + ("'aes-128-ofb'", 16, 16, None), + ("'aes-192-ofb'", 24, 16, None), + ("'aes-256-ofb'", 32, 16, None), + # CTR + ("'aes-128-ctr'", 16, 16, None), + ("'aes-192-ctr'", 24, 16, None), + ("'aes-256-ctr'", 32, 16, None), + ], + "%-16s %-10s %-10s %-10s", ) -@Examples("mode key_len iv_len aad", [ - # ECB - ("'aes-128-ecb'", 16, None, None), - ("'aes-192-ecb'", 24, None, None), - ("'aes-256-ecb'", 32, None, None), - # CBC - ("'aes-128-cbc'", 16, 16, None), - ("'aes-192-cbc'", 24, 16, None), - ("'aes-256-cbc'", 32, 16, None), - # CFB128 - ("'aes-128-cfb128'", 16, 16, None), - ("'aes-192-cfb128'", 24, 16, None), - ("'aes-256-cfb128'", 32, 16, None), - # OFB - ("'aes-128-ofb'", 16, 16, None), - ("'aes-192-ofb'", 24, 16, None), - ("'aes-256-ofb'", 32, 16, None), - # CTR - ("'aes-128-ctr'", 16, 16, None), - ("'aes-192-ctr'", 24, 16, None), - ("'aes-256-ctr'", 32, 16, None) -], "%-16s %-10s %-10s %-10s") def invalid_key_or_iv_length_for_mode_non_gcm(self, mode, key_len, iv_len, aad): """Check that an error is returned when key or iv length does not match the expected value for the mode. 
@@ -191,42 +348,90 @@ def invalid_key_or_iv_length_for_mode_non_gcm(self, mode, key_len, iv_len, aad): iv = "0123456789" * 4 with When("key is too short"): - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len-1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid key size") + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len-1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid key size", + ) with When("key is too long"): - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len+1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid key size") + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len+1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid key size", + ) if iv_len is not None: with When("iv is too short"): - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len-1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid IV size") + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len-1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid IV size", + ) with When("iv is too long"): - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len+1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid IV size") + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len+1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid IV size", + ) if aad is None: with When("aad is specified but not needed"): - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len+1] if iv_len is not None else ''}'", aad="'AAD'", mode=mode, exitcode=36, message="DB::Exception: AAD can be only set for GCM-mode") + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len+1] if iv_len is not None else ''}'", + aad="'AAD'", + mode=mode, + exitcode=36, + message="DB::Exception: AAD can be only set for GCM-mode", + ) else: with When("iv is specified but not needed"): - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len]}'", iv=f"'{iv}'", mode=mode, exitcode=36, message="DB::Exception: {} does not support IV".format(mode.strip("'"))) + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + iv=f"'{iv}'", + mode=mode, + exitcode=36, + message="DB::Exception: {} does not support IV".format(mode.strip("'")), + ) + @TestOutline(Scenario) @Requirements( RQ_SRS008_AES_Decrypt_Function_Key_Length_InvalidLengthError("1.0"), - RQ_SRS008_AES_Decrypt_Function_InitializationVector_Length_InvalidLengthError("1.0"), + RQ_SRS008_AES_Decrypt_Function_InitializationVector_Length_InvalidLengthError( + "1.0" + ), RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_NotValidForMode("1.0"), - RQ_SRS008_AES_Decrypt_Function_GCMMode_KeyAndInitializationVector_Length("1.0") + RQ_SRS008_AES_Decrypt_Function_GCMMode_KeyAndInitializationVector_Length("1.0"), +) +@Examples( + "mode key_len iv_len aad", + [ + # GCM + ("'aes-128-gcm'", 16, 8, "'hello there aad'"), + ("'aes-128-gcm'", 16, None, "'hello there aad'"), + ("'aes-192-gcm'", 24, 8, "''"), + ("'aes-192-gcm'", 24, None, "''"), + ("'aes-256-gcm'", 32, 8, "'a'"), + ("'aes-256-gcm'", 32, None, "'a'"), + ], + "%-16s %-10s %-10s %-10s", ) -@Examples("mode key_len iv_len aad", [ - # GCM - ("'aes-128-gcm'", 16, 8, "'hello there aad'"), - ("'aes-128-gcm'", 16, None, "'hello there aad'"), - ("'aes-192-gcm'", 24, 8, "''"), - ("'aes-192-gcm'", 24, None, "''"), - ("'aes-256-gcm'", 32, 8, "'a'"), - ("'aes-256-gcm'", 32, None, "'a'") -], "%-16s %-10s %-10s %-10s") 
def invalid_key_or_iv_length_for_gcm(self, mode, key_len, iv_len, aad): """Check that an error is returned when key or iv length does not match the expected value for the GCM mode. @@ -238,25 +443,57 @@ def invalid_key_or_iv_length_for_gcm(self, mode, key_len, iv_len, aad): with When("key is too short"): ciphertext = "unhex('AA1826B5F66A903C888D5DCDA9FB63D1D9CCA10EC55F59D6C00D37')" - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len-1]}'", iv=f"'{iv[:iv_len]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid key size") + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len-1]}'", + iv=f"'{iv[:iv_len]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid key size", + ) with When("key is too long"): ciphertext = "unhex('24AEBFEA049D6F4CF85AAB8CADEDF39CCCAA1C3C2AFF99E194789D')" - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len+1]}'", iv=f"'{iv[:iv_len]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid key size") + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len+1]}'", + iv=f"'{iv[:iv_len]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid key size", + ) if iv_len is not None: with When(f"iv is too short"): - ciphertext = "unhex('24AEBFEA049D6F4CF85AAB8CADEDF39CCCAA1C3C2AFF99E194789D')" - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len-1]}'", mode=mode, exitcode=198, message="DB::Exception:") + ciphertext = ( + "unhex('24AEBFEA049D6F4CF85AAB8CADEDF39CCCAA1C3C2AFF99E194789D')" + ) + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len-1]}'", + mode=mode, + exitcode=198, + message="DB::Exception:", + ) else: with When("iv is not specified"): - ciphertext = "unhex('1CD4EC93A4B0C687926E8F8C2AA3B4CE1943D006DAE3A774CB1AE5')" - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid IV size 0 != expected size 12") + ciphertext = ( + "unhex('1CD4EC93A4B0C687926E8F8C2AA3B4CE1943D006DAE3A774CB1AE5')" + ) + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid IV size 0 != expected size 12", + ) + @TestScenario @Requirements( RQ_SRS008_AES_Decrypt_Function_Parameters_AdditionalAuthenticatedData("1.0"), - RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_Length("1.0") + RQ_SRS008_AES_Decrypt_Function_AdditionalAuthenticationData_Length("1.0"), ) def aad_parameter_types_and_length(self): """Check that `decrypt` function accepts `aad` parameter as the fifth argument @@ -269,36 +506,84 @@ def aad_parameter_types_and_length(self): with When("aad is specified using String type"): ciphertext = "unhex('19A1183335B374C626B24208AAEC97F148732CE05621AC87B21526')" - decrypt(ciphertext=ciphertext, key=key, mode=mode, iv=iv, aad="'aad'", message=plaintext) + decrypt( + ciphertext=ciphertext, + key=key, + mode=mode, + iv=iv, + aad="'aad'", + message=plaintext, + ) with When("aad is specified using String with UTF8 characters"): ciphertext = "unhex('19A1183335B374C626B242C68D9618A8C2664D7B6A3FE978104B39')" - decrypt(ciphertext=ciphertext, key=key, mode=mode, iv=iv, aad="'Gãńdåłf_Thê_Gręât'", message=plaintext) + decrypt( + ciphertext=ciphertext, + key=key, + mode=mode, + iv=iv, + aad="'Gãńdåłf_Thê_Gręât'", + message=plaintext, + ) with When("aad is specified using FixedString type"): ciphertext = "unhex('19A1183335B374C626B24208AAEC97F148732CE05621AC87B21526')" - decrypt(ciphertext=ciphertext, key=key, mode=mode, iv=iv, aad="toFixedString('aad', 3)", 
message=plaintext) + decrypt( + ciphertext=ciphertext, + key=key, + mode=mode, + iv=iv, + aad="toFixedString('aad', 3)", + message=plaintext, + ) with When("aad is specified using FixedString with UTF8 characters"): ciphertext = "unhex('19A1183335B374C626B242C68D9618A8C2664D7B6A3FE978104B39')" - decrypt(ciphertext=ciphertext, key=key, mode=mode, iv=iv, aad="toFixedString('Gãńdåłf_Thê_Gręât', 24)", message=plaintext) + decrypt( + ciphertext=ciphertext, + key=key, + mode=mode, + iv=iv, + aad="toFixedString('Gãńdåłf_Thê_Gręât', 24)", + message=plaintext, + ) with When("aad is 0 bytes"): ciphertext = "unhex('19A1183335B374C626B242DF92BB3F57F5D82BEDF41FD5D49F8BC9')" - decrypt(ciphertext=ciphertext, key=key, mode=mode, iv=iv, aad="''", message=plaintext) + decrypt( + ciphertext=ciphertext, + key=key, + mode=mode, + iv=iv, + aad="''", + message=plaintext, + ) with When("aad is 1 byte"): ciphertext = "unhex('19A1183335B374C626B242D1BCFC63B09CFE9EAD20285044A01035')" - decrypt(ciphertext=ciphertext, key=key, mode=mode, iv=iv, aad="'1'", message=plaintext) + decrypt( + ciphertext=ciphertext, + key=key, + mode=mode, + iv=iv, + aad="'1'", + message=plaintext, + ) with When("aad is 256 bytes"): ciphertext = "unhex('19A1183335B374C626B242355AD3DD2C5D7E36AEECBB847BF9E8A7')" - decrypt(ciphertext=ciphertext, key=key, mode=mode, iv=iv, aad=f"'{'1' * 256}'", message=plaintext) + decrypt( + ciphertext=ciphertext, + key=key, + mode=mode, + iv=iv, + aad=f"'{'1' * 256}'", + message=plaintext, + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_Decrypt_Function_Parameters_InitializationVector("1.0") -) +@Requirements(RQ_SRS008_AES_Decrypt_Function_Parameters_InitializationVector("1.0")) def iv_parameter_types(self): """Check that `decrypt` function accepts `iv` parameter as the fourth argument of either `String` or `FixedString` types. 
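The `return_value` scenario further down pins the expected round-trip for this ciphertext/key/IV combination; spelled out as plain SQL:

    SELECT decrypt('aes-128-cbc', unhex('F024F9372FA0D8B974894D29FFB8A7F7'), '0123456789123456', '0123456789123456')
    -- returns 'hello there'

The IV scenarios in the hunk below exercise the same decryption with the IV passed as String, UTF8 String, and FixedString; the FixedString variants wrap the IV (and, in key_parameter_types, the key) in toFixedString(..., 16) and expect the same plaintext.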
@@ -308,21 +593,44 @@ def iv_parameter_types(self): key = "'0123456789123456'" with When("iv is specified using String type"): - decrypt(ciphertext="unhex('F024F9372FA0D8B974894D29FFB8A7F7')", key=key, mode=mode, iv=iv, message="hello there") + decrypt( + ciphertext="unhex('F024F9372FA0D8B974894D29FFB8A7F7')", + key=key, + mode=mode, + iv=iv, + message="hello there", + ) with When("iv is specified using String with UTF8 characters"): - decrypt(ciphertext="unhex('7A4EC0FF3796F46BED281F4778ACE1DC')", key=key, mode=mode, iv="'Gãńdåłf_Thê'", message="hello there") + decrypt( + ciphertext="unhex('7A4EC0FF3796F46BED281F4778ACE1DC')", + key=key, + mode=mode, + iv="'Gãńdåłf_Thê'", + message="hello there", + ) with When("iv is specified using FixedString type"): - decrypt(ciphertext="unhex('F024F9372FA0D8B974894D29FFB8A7F7')", key=key, mode=mode, iv=f"toFixedString({iv}, 16)", message="hello there") + decrypt( + ciphertext="unhex('F024F9372FA0D8B974894D29FFB8A7F7')", + key=key, + mode=mode, + iv=f"toFixedString({iv}, 16)", + message="hello there", + ) with When("iv is specified using FixedString with UTF8 characters"): - decrypt(ciphertext="unhex('7A4EC0FF3796F46BED281F4778ACE1DC')", key=key, mode=mode, iv=f"toFixedString('Gãńdåłf_Thê', 16)", message="hello there") + decrypt( + ciphertext="unhex('7A4EC0FF3796F46BED281F4778ACE1DC')", + key=key, + mode=mode, + iv=f"toFixedString('Gãńdåłf_Thê', 16)", + message="hello there", + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_Decrypt_Function_Parameters_Key("1.0") -) +@Requirements(RQ_SRS008_AES_Decrypt_Function_Parameters_Key("1.0")) def key_parameter_types(self): """Check that `decrypt` function accepts `key` parameter as the second argument of either `String` or `FixedString` types. @@ -332,16 +640,37 @@ def key_parameter_types(self): key = "'0123456789123456'" with When("key is specified using String type"): - decrypt(ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", key=key, mode=mode, message="hello there") + decrypt( + ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", + key=key, + mode=mode, + message="hello there", + ) with When("key is specified using String with UTF8 characters"): - decrypt(ciphertext="unhex('180086AA42AD57B71C706EEC372D0C3D')", key="'Gãńdåłf_Thê'", mode=mode, message="hello there") + decrypt( + ciphertext="unhex('180086AA42AD57B71C706EEC372D0C3D')", + key="'Gãńdåłf_Thê'", + mode=mode, + message="hello there", + ) with When("key is specified using FixedString type"): - decrypt(ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", key=f"toFixedString({key}, 16)", mode=mode, message="hello there") + decrypt( + ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", + key=f"toFixedString({key}, 16)", + mode=mode, + message="hello there", + ) with When("key is specified using FixedString with UTF8 characters"): - decrypt(ciphertext="unhex('180086AA42AD57B71C706EEC372D0C3D')", key=f"toFixedString('Gãńdåłf_Thê', 16)", mode=mode, message="hello there") + decrypt( + ciphertext="unhex('180086AA42AD57B71C706EEC372D0C3D')", + key=f"toFixedString('Gãńdåłf_Thê', 16)", + mode=mode, + message="hello there", + ) + @TestScenario @Requirements( @@ -355,25 +684,43 @@ def mode_parameter_types(self): key = "'0123456789123456'" with When("mode is specified using String type"): - decrypt(ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", key=key, mode=mode, message="hello there") + decrypt( + ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", + key=key, + mode=mode, + message="hello there", + ) with When("mode is specified 
using FixedString type"): - decrypt(ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", key=key, mode=f"toFixedString({mode}, 12)", message="hello there") + decrypt( + ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", + key=key, + mode=f"toFixedString({mode}, 12)", + message="hello there", + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_Decrypt_Function_Parameters_ReturnValue("1.0") -) +@Requirements(RQ_SRS008_AES_Decrypt_Function_Parameters_ReturnValue("1.0")) def return_value(self): - """Check that `decrypt` functions returns String data type. - """ + """Check that `decrypt` functions returns String data type.""" ciphertext = "unhex('F024F9372FA0D8B974894D29FFB8A7F7')" iv = "'0123456789123456'" mode = "'aes-128-cbc'" key = "'0123456789123456'" with When("I get type of the return value"): - sql = "SELECT toTypeName(decrypt(" + mode + "," + ciphertext + "," + key + "," + iv + "))" + sql = ( + "SELECT toTypeName(decrypt(" + + mode + + "," + + ciphertext + + "," + + key + + "," + + iv + + "))" + ) r = self.context.node.query(sql) with Then("type should be String"): @@ -382,6 +729,7 @@ def return_value(self): with When("I get the return value"): decrypt(ciphertext=ciphertext, key=key, mode=mode, iv=iv, message="hello there") + @TestScenario @Requirements( RQ_SRS008_AES_Decrypt_Function_Syntax("1.0"), @@ -397,12 +745,13 @@ def syntax(self): sql = f"SELECT decrypt('aes-128-gcm', unhex('{ciphertext}'), '0123456789123456', '012345678912', 'AAD')" self.context.node.query(sql, step=When, message="hello there") + @TestScenario @Requirements( RQ_SRS008_AES_Decrypt_Function_Parameters_CipherText("1.0"), RQ_SRS008_AES_Decrypt_Function_Parameters_Mode("1.0"), RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_ValuesFormat("1.0"), - RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Values("1.0") + RQ_SRS008_AES_Decrypt_Function_Parameters_Mode_Values("1.0"), ) def decryption(self): """Check that `decrypt` functions accepts `ciphertext` as the second parameter @@ -414,17 +763,23 @@ def decryption(self): aad = "some random aad" with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), - "snapshots", "encrypt.py.encrypt.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join(current_dir(), "snapshots", "encrypt.py.encrypt.snapshot"), + ).load_module() for mode, key_len, iv_len, aad_len in modes: for datatype, plaintext in plaintexts: - with Example(f"""mode={mode.strip("'")} datatype={datatype.strip("'")} iv={iv_len} aad={aad_len}""") as example: + with Example( + f"""mode={mode.strip("'")} datatype={datatype.strip("'")} iv={iv_len} aad={aad_len}""" + ) as example: with Given("I have ciphertext"): example_name = basename(example.name) - ciphertext = getattr(snapshot_module, varname(f"example_{example_name}")) + ciphertext = getattr( + snapshot_module, varname(f"example_{example_name}") + ) cast = None endcast = None @@ -436,18 +791,23 @@ def decryption(self): cast = "isNull" compare = None - decrypt(ciphertext=ciphertext, key=f"'{key[:key_len]}'", mode=mode, + decrypt( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + mode=mode, iv=(None if not iv_len else f"'{iv[:iv_len]}'"), aad=(None if not aad_len else f"'{aad}'"), - cast=cast, endcast=endcast, compare=compare, message="1") + cast=cast, + endcast=endcast, + compare=compare, + message="1", + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_Functions_Mismatched_Key("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Mismatched_Key("1.0")) def 
mismatched_key(self): - """Check that `decrypt` function returns garbage or an error when key parameter does not match. - """ + """Check that `decrypt` function returns garbage or an error when key parameter does not match.""" key = f"{'1' * 36}" iv = f"{'2' * 16}" aad = "some random aad" @@ -455,34 +815,46 @@ def mismatched_key(self): plaintext = "'1'" with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), - "snapshots", "encrypt.py.encrypt.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join(current_dir(), "snapshots", "encrypt.py.encrypt.snapshot"), + ).load_module() for mode, key_len, iv_len, aad_len in modes: - with Example(f"""mode={mode.strip("'")} datatype={datatype.strip("'")} iv={iv_len} aad={aad_len}""") as example: + with Example( + f"""mode={mode.strip("'")} datatype={datatype.strip("'")} iv={iv_len} aad={aad_len}""" + ) as example: with Given("I have ciphertext"): example_name = basename(example.name) - ciphertext = getattr(snapshot_module, varname(f"example_{example_name}")) + ciphertext = getattr( + snapshot_module, varname(f"example_{example_name}") + ) with When("I decrypt using a mismatched key"): - r = decrypt(ciphertext=f"unhex({ciphertext})", key=f"'a{key[:key_len-1]}'", mode=mode, + r = decrypt( + ciphertext=f"unhex({ciphertext})", + key=f"'a{key[:key_len-1]}'", + mode=mode, iv=(None if not iv_len else f"'{iv[:iv_len]}'"), - aad=(None if not aad_len else f"'{aad}'"), no_checks=True, cast="hex") + aad=(None if not aad_len else f"'{aad}'"), + no_checks=True, + cast="hex", + ) with Then("exitcode shoud be 0 or 198"): assert r.exitcode in [0, 198], error() with And("output should be garbage or an error"): output = r.output.strip() - assert "Exception: Failed to decrypt" in output or output != "31", error() + assert ( + "Exception: Failed to decrypt" in output or output != "31" + ), error() + @TestScenario -@Requirements( - RQ_SRS008_AES_Functions_Mismatched_IV("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Mismatched_IV("1.0")) def mismatched_iv(self): - """Check that `decrypt` function returns garbage or an error when iv parameter does not match. 
- """ + """Check that `decrypt` function returns garbage or an error when iv parameter does not match.""" key = f"{'1' * 36}" iv = f"{'2' * 16}" aad = "some random aad" @@ -490,35 +862,48 @@ def mismatched_iv(self): plaintext = "'1'" with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), "snapshots", "encrypt.py.encrypt.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join(current_dir(), "snapshots", "encrypt.py.encrypt.snapshot"), + ).load_module() for mode, key_len, iv_len, aad_len in modes: if not iv_len: continue - with Example(f"""mode={mode.strip("'")} datatype={datatype.strip("'")} iv={iv_len} aad={aad_len}""") as example: + with Example( + f"""mode={mode.strip("'")} datatype={datatype.strip("'")} iv={iv_len} aad={aad_len}""" + ) as example: with Given("I have ciphertext"): example_name = basename(example.name) - ciphertext = getattr(snapshot_module, varname(f"example_{example_name}")) + ciphertext = getattr( + snapshot_module, varname(f"example_{example_name}") + ) with When("I decrypt using a mismatched iv"): - r = decrypt(ciphertext=f"unhex({ciphertext})", key=f"'{key[:key_len]}'", mode=mode, + r = decrypt( + ciphertext=f"unhex({ciphertext})", + key=f"'{key[:key_len]}'", + mode=mode, iv=f"'a{iv[:iv_len-1]}'", - aad=(None if not aad_len else f"'{aad}'"), no_checks=True, cast="hex") + aad=(None if not aad_len else f"'{aad}'"), + no_checks=True, + cast="hex", + ) with Then("exitcode shoud be 0 or 198"): assert r.exitcode in [0, 198], error() with And("output should be garbage or an error"): output = r.output.strip() - assert "Exception: Failed to decrypt" in output or output != "31", error() + assert ( + "Exception: Failed to decrypt" in output or output != "31" + ), error() + @TestScenario -@Requirements( - RQ_SRS008_AES_Functions_Mismatched_AAD("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Mismatched_AAD("1.0")) def mismatched_aad(self): - """Check that `decrypt` function returns garbage or an error when aad parameter does not match. 
- """ + """Check that `decrypt` function returns garbage or an error when aad parameter does not match.""" key = f"{'1' * 36}" iv = f"{'2' * 16}" aad = "some random aad" @@ -526,76 +911,102 @@ def mismatched_aad(self): plaintext = "'1'" with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), "snapshots", "encrypt.py.encrypt.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join(current_dir(), "snapshots", "encrypt.py.encrypt.snapshot"), + ).load_module() for mode, key_len, iv_len, aad_len in modes: if not aad_len: continue - with Example(f"""mode={mode.strip("'")} datatype={datatype.strip("'")} iv={iv_len} aad={aad_len}""") as example: + with Example( + f"""mode={mode.strip("'")} datatype={datatype.strip("'")} iv={iv_len} aad={aad_len}""" + ) as example: with Given("I have ciphertext"): example_name = basename(example.name) - ciphertext = getattr(snapshot_module, varname(f"example_{example_name}")) + ciphertext = getattr( + snapshot_module, varname(f"example_{example_name}") + ) with When("I decrypt using a mismatched aad"): - r = decrypt(ciphertext=f"unhex({ciphertext})", key=f"'{key[:key_len]}'", mode=mode, + r = decrypt( + ciphertext=f"unhex({ciphertext})", + key=f"'{key[:key_len]}'", + mode=mode, iv=(None if not iv_len else f"'{iv[:iv_len]}'"), - aad=(None if not aad_len else f"'a{aad}'"), no_checks=True, cast="hex") + aad=(None if not aad_len else f"'a{aad}'"), + no_checks=True, + cast="hex", + ) with Then("exitcode shoud be 0 or 198"): assert r.exitcode in [0, 198], error() with And("output should be garbage or an error"): output = r.output.strip() - assert "Exception: Failed to decrypt" in output or output != "31", error() + assert ( + "Exception: Failed to decrypt" in output or output != "31" + ), error() + @TestScenario -@Requirements( - RQ_SRS008_AES_Functions_Mismatched_Mode("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Mismatched_Mode("1.0")) def mismatched_mode(self): - """Check that `decrypt` function returns garbage or an error when mode parameter does not match. 
- """ + """Check that `decrypt` function returns garbage or an error when mode parameter does not match.""" key = f"{'1' * 36}" iv = f"{'2' * 16}" aad = "some random aad" - plaintext = hex('Gãńdåłf_Thê_Gręât'.encode("utf-8")) + plaintext = hex("Gãńdåłf_Thê_Gręât".encode("utf-8")) with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), "snapshots", "encrypt.py.encrypt.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join(current_dir(), "snapshots", "encrypt.py.encrypt.snapshot"), + ).load_module() for mode, key_len, iv_len, aad_len in modes: - with Example(f"""mode={mode.strip("'")} datatype=utf8string iv={iv_len} aad={aad_len}""") as example: + with Example( + f"""mode={mode.strip("'")} datatype=utf8string iv={iv_len} aad={aad_len}""" + ) as example: with Given("I have ciphertext"): example_name = basename(example.name) - ciphertext = getattr(snapshot_module, varname(f"example_{example_name}")) + ciphertext = getattr( + snapshot_module, varname(f"example_{example_name}") + ) for mismatched_mode, _, _, _ in modes: if mismatched_mode == mode: continue with When(f"I decrypt using mismatched mode {mismatched_mode}"): - r = decrypt(ciphertext=f"unhex({ciphertext})", key=f"'{key[:key_len]}'", mode=mismatched_mode, + r = decrypt( + ciphertext=f"unhex({ciphertext})", + key=f"'{key[:key_len]}'", + mode=mismatched_mode, iv=(None if not iv_len else f"'{iv[:iv_len]}'"), - aad=(None if not aad_len else f"'{aad}'"), no_checks=True, cast="hex") + aad=(None if not aad_len else f"'{aad}'"), + no_checks=True, + cast="hex", + ) with Then("exitcode shoud be 0 or 36 or 198"): assert r.exitcode in [0, 36, 198], error() with And("output should be garbage or an error"): output = r.output.strip() - condition = "Exception: Failed to decrypt" in output \ - or 'Exception: Invalid key size' in output \ + condition = ( + "Exception: Failed to decrypt" in output + or "Exception: Invalid key size" in output or output != plaintext + ) assert condition, error() + @TestFeature @Name("decrypt") -@Requirements( - RQ_SRS008_AES_Decrypt_Function("1.0") -) +@Requirements(RQ_SRS008_AES_Decrypt_Function("1.0")) def feature(self, node="clickhouse1"): - """Check the behavior of the `decrypt` function. - """ + """Check the behavior of the `decrypt` function.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario): diff --git a/tests/testflows/aes_encryption/tests/decrypt_mysql.py b/tests/testflows/aes_encryption/tests/decrypt_mysql.py index 52236ae0910..1a8f53464b7 100644 --- a/tests/testflows/aes_encryption/tests/decrypt_mysql.py +++ b/tests/testflows/aes_encryption/tests/decrypt_mysql.py @@ -10,11 +10,24 @@ from testflows.asserts import error from aes_encryption.requirements.requirements import * from aes_encryption.tests.common import * + @TestOutline -def aes_decrypt_mysql(self, ciphertext=None, key=None, mode=None, iv=None, aad=None, exitcode=0, message=None, - step=When, cast=None, endcast=None, compare=None, no_checks=False): - """Execute `aes_decrypt_mysql` function with the specified parameters. 
- """ +def aes_decrypt_mysql( + self, + ciphertext=None, + key=None, + mode=None, + iv=None, + aad=None, + exitcode=0, + message=None, + step=When, + cast=None, + endcast=None, + compare=None, + no_checks=False, +): + """Execute `aes_decrypt_mysql` function with the specified parameters.""" params = [] if mode is not None: params.append(mode) @@ -34,7 +47,10 @@ def aes_decrypt_mysql(self, ciphertext=None, key=None, mode=None, iv=None, aad=N sql = f"{compare} = {sql}" sql = f"SELECT {sql}" - return current().context.node.query(sql, step=step, exitcode=exitcode, message=message, no_checks=no_checks) + return current().context.node.query( + sql, step=step, exitcode=exitcode, message=message, no_checks=no_checks + ) + @TestScenario @Requirements( @@ -57,39 +73,60 @@ def invalid_ciphertext(self): continue with When(f"invalid ciphertext={ciphertext}"): if "cfb" in mode or "ofb" in mode or "ctr" in mode: - aes_decrypt_mysql(ciphertext=ciphertext, key=f"'{key[:key_len]}'", mode=mode, iv=d_iv, cast="hex") + aes_decrypt_mysql( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + mode=mode, + iv=d_iv, + cast="hex", + ) else: with When("I execute aes_decrypt_mysql function"): - r = aes_decrypt_mysql(ciphertext=ciphertext, key=f"'{key[:key_len]}'", mode=mode, iv=d_iv, no_checks=True, step=By) + r = aes_decrypt_mysql( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + mode=mode, + iv=d_iv, + no_checks=True, + step=By, + ) with Then("exitcode is not zero"): assert r.exitcode in [198, 36] with And("exception is present in the output"): assert "DB::Exception:" in r.output + @TestOutline(Scenario) @Requirements( RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Values_GCM_Error("1.0"), - RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Values_CTR_Error("1.0") + RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Values_CTR_Error("1.0"), +) +@Examples( + "mode", + [ + ("'aes-128-gcm'",), + ("'aes-192-gcm'",), + ("'aes-256-gcm'",), + ("'aes-128-ctr'",), + ("'aes-192-ctr'",), + ("'aes-256-ctr'",), + ], ) -@Examples("mode", [ - ("'aes-128-gcm'",), - ("'aes-192-gcm'",), - ("'aes-256-gcm'",), - ("'aes-128-ctr'",), - ("'aes-192-ctr'",), - ("'aes-256-ctr'",) -]) def unsupported_modes(self, mode): - """Check that `aes_decrypt_mysql` function returns an error when unsupported modes are specified. - """ + """Check that `aes_decrypt_mysql` function returns an error when unsupported modes are specified.""" ciphertext = "unhex('AA1826B5F66A903C888D5DCDA9FB63D1D9CCA10EC55F59D6C00D37')" - aes_decrypt_mysql(ciphertext=ciphertext, mode=mode, key=f"'{'1'* 32}'", exitcode=36, message="DB::Exception: Unsupported cipher mode") + aes_decrypt_mysql( + ciphertext=ciphertext, + mode=mode, + key=f"'{'1'* 32}'", + exitcode=36, + message="DB::Exception: Unsupported cipher mode", + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_Functions_InvalidParameters("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_InvalidParameters("1.0")) def invalid_parameters(self): """Check that `aes_decrypt_mysql` function returns an error when we call it with invalid parameters. 
@@ -97,103 +134,215 @@ def invalid_parameters(self): ciphertext = "unhex('AA1826B5F66A903C888D5DCDA9FB63D1D9CCA10EC55F59D6C00D37')" with Example("no parameters"): - aes_decrypt_mysql(exitcode=42, message="DB::Exception: Incorrect number of arguments for function aes_decrypt_mysql provided 0, expected 3 to 4") + aes_decrypt_mysql( + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function aes_decrypt_mysql provided 0, expected 3 to 4", + ) with Example("missing key and mode"): - aes_decrypt_mysql(ciphertext=ciphertext, exitcode=42, - message="DB::Exception: Incorrect number of arguments for function aes_decrypt_mysql provided 1") + aes_decrypt_mysql( + ciphertext=ciphertext, + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function aes_decrypt_mysql provided 1", + ) with Example("missing mode"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'123'", exitcode=42, - message="DB::Exception: Incorrect number of arguments for function aes_decrypt_mysql provided 2") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'123'", + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function aes_decrypt_mysql provided 2", + ) with Example("bad key type - UInt8"): - aes_decrypt_mysql(ciphertext=ciphertext, key="123", mode="'aes-128-ecb'", exitcode=43, - message="DB::Exception: Received from localhost:9000. DB::Exception: Illegal type of argument #3") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="123", + mode="'aes-128-ecb'", + exitcode=43, + message="DB::Exception: Received from localhost:9000. DB::Exception: Illegal type of argument #3", + ) with Example("bad mode type - forgot quotes"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'0123456789123456'", mode="aes-128-ecb", exitcode=47, - message="DB::Exception: Missing columns: 'ecb' 'aes' while processing query") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="aes-128-ecb", + exitcode=47, + message="DB::Exception: Missing columns: 'ecb' 'aes' while processing query", + ) with Example("bad mode type - UInt8"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'0123456789123456'", mode="128", exitcode=43, - message="DB::Exception: Illegal type of argument #1 'mode'") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="128", + exitcode=43, + message="DB::Exception: Illegal type of argument #1 'mode'", + ) with Example("bad iv type - UInt8"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128-cbc'", iv='128', exitcode=43, - message="DB::Exception: Illegal type of argument") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128-cbc'", + iv="128", + exitcode=43, + message="DB::Exception: Illegal type of argument", + ) - with Example("iv not valid for mode", requirements=[RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_NotValidForMode("1.0")]): - aes_decrypt_mysql(ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", key="'0123456789123456'", mode="'aes-128-ecb'", iv="'012345678912'", exitcode=0, - message=None) + with Example( + "iv not valid for mode", + requirements=[ + RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_NotValidForMode( + "1.0" + ) + ], + ): + aes_decrypt_mysql( + ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", + key="'0123456789123456'", + mode="'aes-128-ecb'", + iv="'012345678912'", + exitcode=0, + message=None, + ) - with Example("iv not valid for mode - size 0", 
requirements=[RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_NotValidForMode("1.0")]): - aes_decrypt_mysql(ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", key="'0123456789123456'", mode="'aes-128-ecb'", iv="''", exitcode=0, - message=None) + with Example( + "iv not valid for mode - size 0", + requirements=[ + RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_NotValidForMode( + "1.0" + ) + ], + ): + aes_decrypt_mysql( + ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", + key="'0123456789123456'", + mode="'aes-128-ecb'", + iv="''", + exitcode=0, + message=None, + ) with Example("aad passed by mistake"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128-cbc'", iv="'0123456789123456'", aad="'aad'", exitcode=42, - message="DB::Exception: Incorrect number of arguments for function aes_decrypt_mysql provided 5") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128-cbc'", + iv="'0123456789123456'", + aad="'aad'", + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function aes_decrypt_mysql provided 5", + ) with Example("aad passed by mistake type - UInt8"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128-gcm'", iv="'012345678912'", aad="123", exitcode=42, - message="DB::Exception: Incorrect number of arguments for function aes_decrypt_mysql provided 5") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128-gcm'", + iv="'012345678912'", + aad="123", + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function aes_decrypt_mysql provided 5", + ) - with Example("invalid mode value", requirements=[RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_Invalid("1.0")]): + with Example( + "invalid mode value", + requirements=[ + RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Value_Invalid("1.0") + ], + ): with When("typo in the block algorithm"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128-eeb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128-eeb") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128-eeb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128-eeb", + ) with When("typo in the key size"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-127-ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-127-ecb") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-127-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-127-ecb", + ) with When("typo in the aes prefix"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'0123456789123456'", mode="'aee-128-ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aee-128-ecb") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aee-128-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aee-128-ecb", + ) with When("missing last dash"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes-128ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128ecb") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes-128ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128ecb", + ) with When("missing first dash"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'0123456789123456'", mode="'aes128-ecb'", exitcode=36, - 
message="DB::Exception: Invalid mode: aes128-ecb") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'aes128-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes128-ecb", + ) with When("all capitals"): - aes_decrypt_mysql(ciphertext=ciphertext, key="'0123456789123456'", mode="'AES-128-ECB'", exitcode=36, - message="DB::Exception: Invalid mode: AES-128-ECB") + aes_decrypt_mysql( + ciphertext=ciphertext, + key="'0123456789123456'", + mode="'AES-128-ECB'", + exitcode=36, + message="DB::Exception: Invalid mode: AES-128-ECB", + ) + @TestOutline(Scenario) @Requirements( RQ_SRS008_AES_MySQL_Decrypt_Function_Key_Length_TooShortError("1.0"), RQ_SRS008_AES_MySQL_Decrypt_Function_Key_Length_TooLong("1.0"), - RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooShortError("1.0"), + RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooShortError( + "1.0" + ), RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_Length_TooLong("1.0"), RQ_SRS008_AES_MySQL_Decrypt_Function_InitializationVector_NotValidForMode("1.0"), - RQ_SRS008_AES_MySQL_Decrypt_Function_Mode_KeyAndInitializationVector_Length("1.0") + RQ_SRS008_AES_MySQL_Decrypt_Function_Mode_KeyAndInitializationVector_Length("1.0"), +) +@Examples( + "mode key_len iv_len", + [ + # ECB + ("'aes-128-ecb'", 16, None), + ("'aes-192-ecb'", 24, None), + ("'aes-256-ecb'", 32, None), + # CBC + ("'aes-128-cbc'", 16, 16), + ("'aes-192-cbc'", 24, 16), + ("'aes-256-cbc'", 32, 16), + # CFB128 + ("'aes-128-cfb128'", 16, 16), + ("'aes-192-cfb128'", 24, 16), + ("'aes-256-cfb128'", 32, 16), + # OFB + ("'aes-128-ofb'", 16, 16), + ("'aes-192-ofb'", 24, 16), + ("'aes-256-ofb'", 32, 16), + ], + "%-16s %-10s %-10s", ) -@Examples("mode key_len iv_len", [ - # ECB - ("'aes-128-ecb'", 16, None), - ("'aes-192-ecb'", 24, None), - ("'aes-256-ecb'", 32, None), - # CBC - ("'aes-128-cbc'", 16, 16), - ("'aes-192-cbc'", 24, 16), - ("'aes-256-cbc'", 32, 16), - # CFB128 - ("'aes-128-cfb128'", 16, 16), - ("'aes-192-cfb128'", 24, 16), - ("'aes-256-cfb128'", 32, 16), - # OFB - ("'aes-128-ofb'", 16, 16), - ("'aes-192-ofb'", 24, 16), - ("'aes-256-ofb'", 32, 16) -], "%-16s %-10s %-10s") def key_or_iv_length_for_mode(self, mode, key_len, iv_len): - """Check that key or iv length for mode. 
- """ + """Check that key or iv length for mode.""" ciphertext = "unhex('31F4C847CAB873AB34584368E3E85E3A')" if mode == "'aes-128-ecb'": ciphertext = "unhex('31F4C847CAB873AB34584368E3E85E3B')" @@ -205,29 +354,76 @@ def key_or_iv_length_for_mode(self, mode, key_len, iv_len): iv = "0123456789" * 4 with When("key is too short"): - aes_decrypt_mysql(ciphertext=ciphertext, key=f"'{key[:key_len-1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid key size") + aes_decrypt_mysql( + ciphertext=ciphertext, + key=f"'{key[:key_len-1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid key size", + ) with When("key is too long"): if "ecb" in mode or "cbc" in mode: - aes_decrypt_mysql(ciphertext=ciphertext, key=f"'{key[:key_len+1]}'", mode=mode, exitcode=198, message="DB::Exception: Failed to decrypt") + aes_decrypt_mysql( + ciphertext=ciphertext, + key=f"'{key[:key_len+1]}'", + mode=mode, + exitcode=198, + message="DB::Exception: Failed to decrypt", + ) else: - aes_decrypt_mysql(ciphertext=ciphertext, key=f"'{key[:key_len+1]}'", mode=mode, cast="hex") + aes_decrypt_mysql( + ciphertext=ciphertext, key=f"'{key[:key_len+1]}'", mode=mode, cast="hex" + ) if iv_len is not None: with When("iv is too short"): - aes_decrypt_mysql(ciphertext=ciphertext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len-1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid IV size") + aes_decrypt_mysql( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len-1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid IV size", + ) with When("iv is too long"): if "ecb" in mode or "cbc" in mode: - aes_decrypt_mysql(ciphertext=ciphertext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len+1]}'", mode=mode, exitcode=198, message="DB::Exception: Failed to decrypt") + aes_decrypt_mysql( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len+1]}'", + mode=mode, + exitcode=198, + message="DB::Exception: Failed to decrypt", + ) else: - aes_decrypt_mysql(ciphertext=ciphertext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len+1]}'", mode=mode, cast="hex") + aes_decrypt_mysql( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len+1]}'", + mode=mode, + cast="hex", + ) else: with When("iv is specified but not needed"): if "ecb" in mode or "cbc" in mode: - aes_decrypt_mysql(ciphertext=ciphertext, key=f"'{key[:key_len]}'", iv=f"'{iv}'", mode=mode, exitcode=198, message="DB::Exception: Failed to decrypt") + aes_decrypt_mysql( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + iv=f"'{iv}'", + mode=mode, + exitcode=198, + message="DB::Exception: Failed to decrypt", + ) else: - aes_decrypt_mysql(ciphertext=ciphertext, key=f"'{key[:key_len]}'", iv=f"'{iv}'", mode=mode) + aes_decrypt_mysql( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + iv=f"'{iv}'", + mode=mode, + ) + @TestScenario @Requirements( @@ -242,21 +438,44 @@ def iv_parameter_types(self): key = "'0123456789123456'" with When("iv is specified using String type"): - aes_decrypt_mysql(ciphertext="unhex('F024F9372FA0D8B974894D29FFB8A7F7')", key=key, mode=mode, iv=iv, message="hello there") + aes_decrypt_mysql( + ciphertext="unhex('F024F9372FA0D8B974894D29FFB8A7F7')", + key=key, + mode=mode, + iv=iv, + message="hello there", + ) with When("iv is specified using String with UTF8 characters"): - aes_decrypt_mysql(ciphertext="unhex('7A4EC0FF3796F46BED281F4778ACE1DC')", key=key, mode=mode, iv="'Gãńdåłf_Thê'", message="hello there") + aes_decrypt_mysql( + ciphertext="unhex('7A4EC0FF3796F46BED281F4778ACE1DC')", + 
key=key, + mode=mode, + iv="'Gãńdåłf_Thê'", + message="hello there", + ) with When("iv is specified using FixedString type"): - aes_decrypt_mysql(ciphertext="unhex('F024F9372FA0D8B974894D29FFB8A7F7')", key=key, mode=mode, iv=f"toFixedString({iv}, 16)", message="hello there") + aes_decrypt_mysql( + ciphertext="unhex('F024F9372FA0D8B974894D29FFB8A7F7')", + key=key, + mode=mode, + iv=f"toFixedString({iv}, 16)", + message="hello there", + ) with When("iv is specified using FixedString with UTF8 characters"): - aes_decrypt_mysql(ciphertext="unhex('7A4EC0FF3796F46BED281F4778ACE1DC')", key=key, mode=mode, iv=f"toFixedString('Gãńdåłf_Thê', 16)", message="hello there") + aes_decrypt_mysql( + ciphertext="unhex('7A4EC0FF3796F46BED281F4778ACE1DC')", + key=key, + mode=mode, + iv=f"toFixedString('Gãńdåłf_Thê', 16)", + message="hello there", + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Key("1.0") -) +@Requirements(RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Key("1.0")) def key_parameter_types(self): """Check that `aes_decrypt` function accepts `key` parameter as the second argument of either `String` or `FixedString` types. @@ -266,16 +485,37 @@ def key_parameter_types(self): key = "'0123456789123456'" with When("key is specified using String type"): - aes_decrypt_mysql(ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", key=key, mode=mode, message="hello there") + aes_decrypt_mysql( + ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", + key=key, + mode=mode, + message="hello there", + ) with When("key is specified using String with UTF8 characters"): - aes_decrypt_mysql(ciphertext="unhex('180086AA42AD57B71C706EEC372D0C3D')", key="'Gãńdåłf_Thê'", mode=mode, message="hello there") + aes_decrypt_mysql( + ciphertext="unhex('180086AA42AD57B71C706EEC372D0C3D')", + key="'Gãńdåłf_Thê'", + mode=mode, + message="hello there", + ) with When("key is specified using FixedString type"): - aes_decrypt_mysql(ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", key=f"toFixedString({key}, 16)", mode=mode, message="hello there") + aes_decrypt_mysql( + ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", + key=f"toFixedString({key}, 16)", + mode=mode, + message="hello there", + ) with When("key is specified using FixedString with UTF8 characters"): - aes_decrypt_mysql(ciphertext="unhex('180086AA42AD57B71C706EEC372D0C3D')", key=f"toFixedString('Gãńdåłf_Thê', 16)", mode=mode, message="hello there") + aes_decrypt_mysql( + ciphertext="unhex('180086AA42AD57B71C706EEC372D0C3D')", + key=f"toFixedString('Gãńdåłf_Thê', 16)", + mode=mode, + message="hello there", + ) + @TestScenario @Requirements( @@ -289,32 +529,53 @@ def mode_parameter_types(self): key = "'0123456789123456'" with When("mode is specified using String type"): - aes_decrypt_mysql(ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", key=key, mode=mode, message="hello there") + aes_decrypt_mysql( + ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", + key=key, + mode=mode, + message="hello there", + ) with When("mode is specified using FixedString type"): - aes_decrypt_mysql(ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", key=key, mode=f"toFixedString({mode}, 12)", message="hello there") + aes_decrypt_mysql( + ciphertext="unhex('49C9ADB81BA9B58C485E7ADB90E70576')", + key=key, + mode=f"toFixedString({mode}, 12)", + message="hello there", + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_ReturnValue("1.0") -) 
+@Requirements(RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_ReturnValue("1.0")) def return_value(self): - """Check that `aes_decrypt_mysql` functions returns String data type. - """ + """Check that `aes_decrypt_mysql` functions returns String data type.""" ciphertext = "unhex('F024F9372FA0D8B974894D29FFB8A7F7')" iv = "'0123456789123456'" mode = "'aes-128-cbc'" key = "'0123456789123456'" with When("I get type of the return value"): - sql = "SELECT toTypeName(aes_decrypt_mysql(" + mode + "," + ciphertext + "," + key + "," + iv + "))" + sql = ( + "SELECT toTypeName(aes_decrypt_mysql(" + + mode + + "," + + ciphertext + + "," + + key + + "," + + iv + + "))" + ) r = self.context.node.query(sql) with Then("type should be String"): assert r.output.strip() == "String", error() with When("I get the return value"): - aes_decrypt_mysql(ciphertext=ciphertext, key=key, mode=mode, iv=iv, message="hello there") + aes_decrypt_mysql( + ciphertext=ciphertext, key=key, mode=mode, iv=iv, message="hello there" + ) + @TestScenario @Requirements( @@ -331,12 +592,13 @@ def syntax(self): sql = f"SELECT aes_decrypt_mysql('aes-128-ofb', unhex('{ciphertext}'), '0123456789123456', '0123456789123456')" self.context.node.query(sql, step=When, message="hello there") + @TestScenario @Requirements( RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_CipherText("1.0"), RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode("1.0"), RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_ValuesFormat("1.0"), - RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Values("1.0") + RQ_SRS008_AES_MySQL_Decrypt_Function_Parameters_Mode_Values("1.0"), ) def decryption(self): """Check that `aes_decrypt_mysql` functions accepts `mode` as the first parameter @@ -347,17 +609,25 @@ def decryption(self): iv = f"{'2' * 64}" with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), - "snapshots", "encrypt_mysql.py.encrypt_mysql.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join( + current_dir(), "snapshots", "encrypt_mysql.py.encrypt_mysql.snapshot" + ), + ).load_module() for mode, key_len, iv_len in mysql_modes: for datatype, plaintext in plaintexts: - with Example(f"""mode={mode.strip("'")} datatype={datatype.strip("'")} key={key_len} iv={iv_len}""") as example: + with Example( + f"""mode={mode.strip("'")} datatype={datatype.strip("'")} key={key_len} iv={iv_len}""" + ) as example: with Given("I have ciphertext"): example_name = basename(example.name) - ciphertext = getattr(snapshot_module, varname(f"example_{example_name}")) + ciphertext = getattr( + snapshot_module, varname(f"example_{example_name}") + ) cast = None endcast = None @@ -369,124 +639,169 @@ def decryption(self): cast = "isNull" compare = None - aes_decrypt_mysql(ciphertext=ciphertext, key=f"'{key[:key_len]}'", mode=mode, + aes_decrypt_mysql( + ciphertext=ciphertext, + key=f"'{key[:key_len]}'", + mode=mode, iv=(None if not iv_len else f"'{iv[:iv_len]}'"), - cast=cast, endcast=endcast, compare=compare, message="1") + cast=cast, + endcast=endcast, + compare=compare, + message="1", + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_Functions_Mismatched_Key("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Mismatched_Key("1.0")) def mismatched_key(self): - """Check that `aes_decrypt_mysql` function returns garbage or an error when key parameter does not match. 
- """ + """Check that `aes_decrypt_mysql` function returns garbage or an error when key parameter does not match.""" key = f"{'1' * 64}" iv = f"{'2' * 64}" with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), - "snapshots", "encrypt_mysql.py.encrypt_mysql.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join( + current_dir(), "snapshots", "encrypt_mysql.py.encrypt_mysql.snapshot" + ), + ).load_module() for mode, key_len, iv_len in mysql_modes: - with Example(f"""mode={mode.strip("'")} datatype=String key={key_len} iv={iv_len}""") as example: + with Example( + f"""mode={mode.strip("'")} datatype=String key={key_len} iv={iv_len}""" + ) as example: with Given("I have ciphertext"): example_name = basename(example.name) - ciphertext = getattr(snapshot_module, varname(f"example_{example_name}")) + ciphertext = getattr( + snapshot_module, varname(f"example_{example_name}") + ) with When("I decrypt using a mismatched key"): - r = aes_decrypt_mysql(ciphertext=f"unhex({ciphertext})", key=f"'a{key[:key_len-1]}'", mode=mode, + r = aes_decrypt_mysql( + ciphertext=f"unhex({ciphertext})", + key=f"'a{key[:key_len-1]}'", + mode=mode, iv=(None if not iv_len else f"'{iv[:iv_len]}'"), - cast="hex", no_checks=True) + cast="hex", + no_checks=True, + ) with Then("exitcode shoud be 0 or 198"): assert r.exitcode in [0, 198], error() with And("output should be garbage or an error"): output = r.output.strip() - assert "Exception: Failed to decrypt" in output or output != "31", error() + assert ( + "Exception: Failed to decrypt" in output or output != "31" + ), error() + @TestScenario -@Requirements( - RQ_SRS008_AES_Functions_Mismatched_IV("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Mismatched_IV("1.0")) def mismatched_iv(self): - """Check that `aes_decrypt_mysql` function returns garbage or an error when iv parameter does not match. 
- """ + """Check that `aes_decrypt_mysql` function returns garbage or an error when iv parameter does not match.""" key = f"{'1' * 64}" iv = f"{'2' * 64}" with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), - "snapshots", "encrypt_mysql.py.encrypt_mysql.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join( + current_dir(), "snapshots", "encrypt_mysql.py.encrypt_mysql.snapshot" + ), + ).load_module() for mode, key_len, iv_len in mysql_modes: if not iv_len: continue - with Example(f"""mode={mode.strip("'")} datatype=String key={key_len} iv={iv_len}""") as example: + with Example( + f"""mode={mode.strip("'")} datatype=String key={key_len} iv={iv_len}""" + ) as example: with Given("I have ciphertext"): example_name = basename(example.name) - ciphertext = getattr(snapshot_module, varname(f"example_{example_name}")) + ciphertext = getattr( + snapshot_module, varname(f"example_{example_name}") + ) with When("I decrypt using a mismatched key"): - r = aes_decrypt_mysql(ciphertext=f"unhex({ciphertext})", key=f"'{key[:key_len]}'", mode=mode, + r = aes_decrypt_mysql( + ciphertext=f"unhex({ciphertext})", + key=f"'{key[:key_len]}'", + mode=mode, iv=f"'a{iv[:iv_len-1]}'", - cast="hex", no_checks=True) + cast="hex", + no_checks=True, + ) with Then("exitcode shoud be 0 or 198"): assert r.exitcode in [0, 198], error() with And("output should be garbage or an error"): output = r.output.strip() - assert "Exception: Failed to decrypt" in output or output != "31", error() + assert ( + "Exception: Failed to decrypt" in output or output != "31" + ), error() + @TestScenario -@Requirements( - RQ_SRS008_AES_Functions_Mismatched_Mode("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_Mismatched_Mode("1.0")) def mismatched_mode(self): - """Check that `aes_decrypt_mysql` function returns garbage or an error when mode parameter does not match. 
- """ + """Check that `aes_decrypt_mysql` function returns garbage or an error when mode parameter does not match.""" key = f"{'1' * 64}" iv = f"{'2' * 64}" - plaintext = hex('Gãńdåłf_Thê_Gręât'.encode("utf-8")) + plaintext = hex("Gãńdåłf_Thê_Gręât".encode("utf-8")) with Given("I load encrypt snapshots"): - snapshot_module = SourceFileLoader("snapshot", os.path.join(current_dir(), - "snapshots", "encrypt_mysql.py.encrypt_mysql.snapshot")).load_module() + snapshot_module = SourceFileLoader( + "snapshot", + os.path.join( + current_dir(), "snapshots", "encrypt_mysql.py.encrypt_mysql.snapshot" + ), + ).load_module() for mode, key_len, iv_len in mysql_modes: if not iv_len: continue - with Example(f"""mode={mode.strip("'")} datatype=utf8string key={key_len} iv={iv_len}""") as example: + with Example( + f"""mode={mode.strip("'")} datatype=utf8string key={key_len} iv={iv_len}""" + ) as example: with Given("I have ciphertext"): example_name = basename(example.name) - ciphertext = getattr(snapshot_module, varname(f"example_{example_name}")) + ciphertext = getattr( + snapshot_module, varname(f"example_{example_name}") + ) for mismatched_mode, _, _ in mysql_modes: if mismatched_mode == mode: continue with When(f"I decrypt using a mismatched mode {mismatched_mode}"): - r = aes_decrypt_mysql(ciphertext=f"unhex({ciphertext})", key=f"'{key[:key_len]}'", mode=mismatched_mode, + r = aes_decrypt_mysql( + ciphertext=f"unhex({ciphertext})", + key=f"'{key[:key_len]}'", + mode=mismatched_mode, iv=f"'{iv[:iv_len]}'", - cast="hex", no_checks=True) + cast="hex", + no_checks=True, + ) with Then("exitcode shoud be 0 or 36 or 198"): assert r.exitcode in [0, 36, 198], error() with And("output should be garbage or an error"): output = r.output.strip() - assert "Exception: Failed to decrypt" in output or output != plaintext, error() + assert ( + "Exception: Failed to decrypt" in output + or output != plaintext + ), error() + @TestFeature @Name("decrypt_mysql") -@Requirements( - RQ_SRS008_AES_MySQL_Decrypt_Function("1.0") -) +@Requirements(RQ_SRS008_AES_MySQL_Decrypt_Function("1.0")) def feature(self, node="clickhouse1"): - """Check the behavior of the `aes_decrypt_mysql` function. - """ + """Check the behavior of the `aes_decrypt_mysql` function.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario): diff --git a/tests/testflows/aes_encryption/tests/encrypt.py b/tests/testflows/aes_encryption/tests/encrypt.py index dde27c9d454..b18a721d297 100644 --- a/tests/testflows/aes_encryption/tests/encrypt.py +++ b/tests/testflows/aes_encryption/tests/encrypt.py @@ -6,10 +6,20 @@ from testflows.asserts import values, error, snapshot from aes_encryption.requirements.requirements import * from aes_encryption.tests.common import * + @TestOutline -def encrypt(self, plaintext=None, key=None, mode=None, iv=None, aad=None, exitcode=0, message=None, step=When): - """Execute `encrypt` function with the specified parameters. 
- """ +def encrypt( + self, + plaintext=None, + key=None, + mode=None, + iv=None, + aad=None, + exitcode=0, + message=None, + step=When, +): + """Execute `encrypt` function with the specified parameters.""" params = [] if mode is not None: params.append(mode) @@ -24,156 +34,296 @@ def encrypt(self, plaintext=None, key=None, mode=None, iv=None, aad=None, exitco sql = "SELECT hex(encrypt(" + ", ".join(params) + "))" - return current().context.node.query(sql, step=step, exitcode=exitcode, message=message) + return current().context.node.query( + sql, step=step, exitcode=exitcode, message=message + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_Functions_InvalidParameters("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_InvalidParameters("1.0")) def invalid_parameters(self): """Check that `encrypt` function returns an error when we call it with invalid parameters. """ with Example("no parameters"): - encrypt(exitcode=42, message="DB::Exception: Incorrect number of arguments for function encrypt provided 0, expected 3 to 5") + encrypt( + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function encrypt provided 0, expected 3 to 5", + ) with Example("missing key and mode"): - encrypt(plaintext="'hello there'", exitcode=42, message="DB::Exception: Incorrect number of arguments for function encrypt provided 1") + encrypt( + plaintext="'hello there'", + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function encrypt provided 1", + ) with Example("missing mode"): - encrypt(plaintext="'hello there'", key="'123'", exitcode=42, message="DB::Exception: Incorrect number of arguments for function encrypt provided 2") + encrypt( + plaintext="'hello there'", + key="'123'", + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function encrypt provided 2", + ) with Example("bad key type - UInt8"): - encrypt(plaintext="'hello there'", key="123", mode="'aes-128-ecb'", exitcode=43, - message="DB::Exception: Received from localhost:9000. DB::Exception: Illegal type of argument #3") + encrypt( + plaintext="'hello there'", + key="123", + mode="'aes-128-ecb'", + exitcode=43, + message="DB::Exception: Received from localhost:9000. 
DB::Exception: Illegal type of argument #3", + ) with Example("bad mode type - forgot quotes"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="aes-128-ecb", exitcode=47, - message="DB::Exception: Missing columns: 'ecb' 'aes' while processing query") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="aes-128-ecb", + exitcode=47, + message="DB::Exception: Missing columns: 'ecb' 'aes' while processing query", + ) with Example("bad mode type - UInt8"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="128", exitcode=43, - message="DB::Exception: Illegal type of argument #1 'mode'") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="128", + exitcode=43, + message="DB::Exception: Illegal type of argument #1 'mode'", + ) with Example("bad iv type - UInt8"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-cbc'", iv='128', exitcode=43, - message="DB::Exception: Illegal type of argument") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-cbc'", + iv="128", + exitcode=43, + message="DB::Exception: Illegal type of argument", + ) with Example("bad aad type - UInt8"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-gcm'", iv="'012345678912'", aad="123", exitcode=43, - message="DB::Exception: Illegal type of argument") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-gcm'", + iv="'012345678912'", + aad="123", + exitcode=43, + message="DB::Exception: Illegal type of argument", + ) - with Example("iv not valid for mode", requirements=[RQ_SRS008_AES_Encrypt_Function_InitializationVector_NotValidForMode("1.0")]): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-ecb'", iv="'012345678912'", exitcode=36, - message="DB::Exception: aes-128-ecb does not support IV") + with Example( + "iv not valid for mode", + requirements=[ + RQ_SRS008_AES_Encrypt_Function_InitializationVector_NotValidForMode("1.0") + ], + ): + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-ecb'", + iv="'012345678912'", + exitcode=36, + message="DB::Exception: aes-128-ecb does not support IV", + ) - with Example("iv not valid for mode - size 0", requirements=[RQ_SRS008_AES_Encrypt_Function_InitializationVector_NotValidForMode("1.0")]): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-ecb'", iv="''", exitcode=36, - message="DB::Exception: aes-128-ecb does not support IV") + with Example( + "iv not valid for mode - size 0", + requirements=[ + RQ_SRS008_AES_Encrypt_Function_InitializationVector_NotValidForMode("1.0") + ], + ): + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-ecb'", + iv="''", + exitcode=36, + message="DB::Exception: aes-128-ecb does not support IV", + ) - with Example("aad not valid for mode", requirements=[RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_NotValidForMode("1.0")]): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-cbc'", iv="'0123456789123456'", aad="'aad'", exitcode=36, - message="DB::Exception: AAD can be only set for GCM-mode") + with Example( + "aad not valid for mode", + requirements=[ + RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_NotValidForMode( + "1.0" + ) + ], + ): + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-cbc'", + iv="'0123456789123456'", + aad="'aad'", + 
exitcode=36, + message="DB::Exception: AAD can be only set for GCM-mode", + ) - with Example("invalid mode value", requirements=[RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_Invalid("1.0")]): + with Example( + "invalid mode value", + requirements=[ + RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_Invalid("1.0") + ], + ): with When("using unsupported cfb1 mode"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-cfb1'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128-cfb1") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-cfb1'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128-cfb1", + ) with When("using unsupported cfb8 mode"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-cfb8'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128-cfb8") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-cfb8'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128-cfb8", + ) with When("typo in the block algorithm"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-eeb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128-eeb") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-eeb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128-eeb", + ) with When("typo in the key size"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-127-ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-127-ecb") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-127-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-127-ecb", + ) with When("typo in the aes prefix"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aee-128-ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aee-128-ecb") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aee-128-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aee-128-ecb", + ) with When("missing last dash"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128ecb") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128ecb", + ) with When("missing first dash"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'aes128-ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aes128-ecb") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes128-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes128-ecb", + ) with When("all capitals"): - encrypt(plaintext="'hello there'", key="'0123456789123456'", mode="'AES-128-ECB'", exitcode=36, - message="DB::Exception: Invalid mode: AES-128-ECB") + encrypt( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'AES-128-ECB'", + exitcode=36, + message="DB::Exception: Invalid mode: AES-128-ECB", + ) + @TestOutline(Scenario) -@Requirements( - RQ_SRS008_AES_Functions_InvalidParameters("1.0") +@Requirements(RQ_SRS008_AES_Functions_InvalidParameters("1.0")) +@Examples( + "data_type, value", + [ + ("UInt8", "toUInt8('1')"), + ("UInt16", "toUInt16('1')"), + ("UInt32", "toUInt32('1')"), + ("UInt64", "toUInt64('1')"), + ("Int8", "toInt8('1')"), + ("Int16", "toInt16('1')"), + ("Int32", 
"toInt32('1')"), + ("Int64", "toInt64('1')"), + ("Float32", "toFloat32('1.0')"), + ("Float64", "toFloat64('1.0')"), + ("Decimal32", "toDecimal32(2, 4)"), + ("Decimal64", "toDecimal64(2, 4)"), + ("Decimal128", "toDecimal128(2, 4)"), + ("UUID", "toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0')"), + ("Date", "toDate('2020-01-01')"), + ("DateTime", "toDateTime('2020-01-01 20:01:02')"), + ("DateTime64", "toDateTime64('2020-01-01 20:01:02.123', 3)"), + ("Array", "[1,2]"), + ("Tuple", "(1,'a')"), + ("IPv4", "toIPv4('171.225.130.45')"), + ("IPv6", "toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001')"), + ("Enum8", r"CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)')"), + ("Enum16", r"CAST('a', 'Enum16(\'a\' = 1, \'b\' = 2)')"), + ], ) -@Examples("data_type, value", [ - ("UInt8", "toUInt8('1')"), - ("UInt16", "toUInt16('1')"), - ("UInt32", "toUInt32('1')"), - ("UInt64", "toUInt64('1')"), - ("Int8", "toInt8('1')"), - ("Int16", "toInt16('1')"), - ("Int32", "toInt32('1')"), - ("Int64", "toInt64('1')"), - ("Float32", "toFloat32('1.0')"), - ("Float64", "toFloat64('1.0')"), - ("Decimal32", "toDecimal32(2, 4)"), - ("Decimal64", "toDecimal64(2, 4)"), - ("Decimal128", "toDecimal128(2, 4)"), - ("UUID", "toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0')"), - ("Date", "toDate('2020-01-01')"), - ("DateTime", "toDateTime('2020-01-01 20:01:02')"), - ("DateTime64", "toDateTime64('2020-01-01 20:01:02.123', 3)"), - ("Array", "[1,2]"), - ("Tuple", "(1,'a')"), - ("IPv4", "toIPv4('171.225.130.45')"), - ("IPv6", "toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001')"), - ("Enum8", r"CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)')"), - ("Enum16", r"CAST('a', 'Enum16(\'a\' = 1, \'b\' = 2)')") -]) def invalid_plaintext_data_type(self, data_type, value): """Check that encrypt function returns an error if the plaintext parameter has invalid data type. 
""" - with When("I try to encrypt plaintext with invalid data type", description=f"{data_type} with value {value}"): - encrypt(plaintext=value, key="'0123456789123456'", mode="'aes-128-cbc'", iv="'0123456789123456'", - exitcode=43, message="DB::Exception: Illegal type of argument") + with When( + "I try to encrypt plaintext with invalid data type", + description=f"{data_type} with value {value}", + ): + encrypt( + plaintext=value, + key="'0123456789123456'", + mode="'aes-128-cbc'", + iv="'0123456789123456'", + exitcode=43, + message="DB::Exception: Illegal type of argument", + ) + @TestOutline(Scenario) @Requirements( RQ_SRS008_AES_Encrypt_Function_Key_Length_InvalidLengthError("1.0"), - RQ_SRS008_AES_Encrypt_Function_InitializationVector_Length_InvalidLengthError("1.0"), + RQ_SRS008_AES_Encrypt_Function_InitializationVector_Length_InvalidLengthError( + "1.0" + ), RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_NotValidForMode("1.0"), - RQ_SRS008_AES_Encrypt_Function_NonGCMMode_KeyAndInitializationVector_Length("1.0") + RQ_SRS008_AES_Encrypt_Function_NonGCMMode_KeyAndInitializationVector_Length("1.0"), +) +@Examples( + "mode key_len iv_len aad", + [ + # ECB + ("'aes-128-ecb'", 16, None, None), + ("'aes-192-ecb'", 24, None, None), + ("'aes-256-ecb'", 32, None, None), + # CBC + ("'aes-128-cbc'", 16, 16, None), + ("'aes-192-cbc'", 24, 16, None), + ("'aes-256-cbc'", 32, 16, None), + # CFB128 + ("'aes-128-cfb128'", 16, 16, None), + ("'aes-192-cfb128'", 24, 16, None), + ("'aes-256-cfb128'", 32, 16, None), + # OFB + ("'aes-128-ofb'", 16, 16, None), + ("'aes-192-ofb'", 24, 16, None), + ("'aes-256-ofb'", 32, 16, None), + # CTR + ("'aes-128-ctr'", 16, 16, None), + ("'aes-192-ctr'", 24, 16, None), + ("'aes-256-ctr'", 32, 16, None), + ], + "%-16s %-10s %-10s %-10s", ) -@Examples("mode key_len iv_len aad", [ - # ECB - ("'aes-128-ecb'", 16, None, None), - ("'aes-192-ecb'", 24, None, None), - ("'aes-256-ecb'", 32, None, None), - # CBC - ("'aes-128-cbc'", 16, 16, None), - ("'aes-192-cbc'", 24, 16, None), - ("'aes-256-cbc'", 32, 16, None), - # CFB128 - ("'aes-128-cfb128'", 16, 16, None), - ("'aes-192-cfb128'", 24, 16, None), - ("'aes-256-cfb128'", 32, 16, None), - # OFB - ("'aes-128-ofb'", 16, 16, None), - ("'aes-192-ofb'", 24, 16, None), - ("'aes-256-ofb'", 32, 16, None), - # CTR - ("'aes-128-ctr'", 16, 16, None), - ("'aes-192-ctr'", 24, 16, None), - ("'aes-256-ctr'", 32, 16, None), -], "%-16s %-10s %-10s %-10s") def invalid_key_or_iv_length_for_mode_non_gcm(self, mode, key_len, iv_len, aad): """Check that an error is returned when key or iv length does not match the expected value for the mode. 
@@ -183,39 +333,87 @@ def invalid_key_or_iv_length_for_mode_non_gcm(self, mode, key_len, iv_len, aad): iv = "0123456789" * 4 with When("key is too short"): - encrypt(plaintext=plaintext, key=f"'{key[:key_len-1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid key size") + encrypt( + plaintext=plaintext, + key=f"'{key[:key_len-1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid key size", + ) with When("key is too long"): - encrypt(plaintext=plaintext, key=f"'{key[:key_len+1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid key size") + encrypt( + plaintext=plaintext, + key=f"'{key[:key_len+1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid key size", + ) if iv_len is not None: with When("iv is too short"): - encrypt(plaintext=plaintext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len-1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid IV size") + encrypt( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len-1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid IV size", + ) with When("iv is too long"): - encrypt(plaintext=plaintext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len+1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid IV size") + encrypt( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len+1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid IV size", + ) if aad is None: with When("aad is specified but not needed"): - encrypt(plaintext=plaintext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len+1] if iv_len is not None else ''}'", aad="'AAD'", mode=mode, exitcode=36, message="DB::Exception: AAD can be only set for GCM-mode") + encrypt( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len+1] if iv_len is not None else ''}'", + aad="'AAD'", + mode=mode, + exitcode=36, + message="DB::Exception: AAD can be only set for GCM-mode", + ) else: with When("iv is specified but not needed"): - encrypt(plaintext=plaintext, key=f"'{key[:key_len]}'", iv=f"'{iv}'", mode=mode, exitcode=36, message="DB::Exception: {} does not support IV".format(mode.strip("'"))) + encrypt( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + iv=f"'{iv}'", + mode=mode, + exitcode=36, + message="DB::Exception: {} does not support IV".format(mode.strip("'")), + ) + @TestOutline(Scenario) @Requirements( RQ_SRS008_AES_Encrypt_Function_Key_Length_InvalidLengthError("1.0"), - RQ_SRS008_AES_Encrypt_Function_InitializationVector_Length_InvalidLengthError("1.0"), + RQ_SRS008_AES_Encrypt_Function_InitializationVector_Length_InvalidLengthError( + "1.0" + ), RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_NotValidForMode("1.0"), - RQ_SRS008_AES_Encrypt_Function_GCMMode_KeyAndInitializationVector_Length("1.0") + RQ_SRS008_AES_Encrypt_Function_GCMMode_KeyAndInitializationVector_Length("1.0"), +) +@Examples( + "mode key_len iv_len aad", + [ + # GCM + ("'aes-128-gcm'", 16, 8, "'hello there aad'"), + ("'aes-192-gcm'", 24, 8, "''"), + ("'aes-256-gcm'", 32, 8, "'a'"), + ], + "%-16s %-10s %-10s %-10s", ) -@Examples("mode key_len iv_len aad", [ - # GCM - ("'aes-128-gcm'", 16, 8, "'hello there aad'"), - ("'aes-192-gcm'", 24, 8, "''"), - ("'aes-256-gcm'", 32, 8, "'a'"), -], "%-16s %-10s %-10s %-10s") def invalid_key_or_iv_length_for_gcm(self, mode, key_len, iv_len, aad): """Check that an error is returned when key or iv length does not match the expected value for the GCM mode. 
@@ -225,26 +423,59 @@ def invalid_key_or_iv_length_for_gcm(self, mode, key_len, iv_len, aad): iv = "0123456789" * 4 with When("key is too short"): - encrypt(plaintext=plaintext, key=f"'{key[:key_len-1]}'", iv=f"'{iv[:iv_len]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid key size") + encrypt( + plaintext=plaintext, + key=f"'{key[:key_len-1]}'", + iv=f"'{iv[:iv_len]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid key size", + ) with When("key is too long"): - encrypt(plaintext=plaintext, key=f"'{key[:key_len+1]}'", iv=f"'{iv[:iv_len]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid key size") + encrypt( + plaintext=plaintext, + key=f"'{key[:key_len+1]}'", + iv=f"'{iv[:iv_len]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid key size", + ) if iv_len is not None: with When(f"iv is too short"): - encrypt(plaintext=plaintext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len-1]}'", mode=mode, exitcode=0) + encrypt( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len-1]}'", + mode=mode, + exitcode=0, + ) else: with When("iv is not specified"): - encrypt(plaintext=plaintext, key=f"'{key[:key_len]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid IV size") + encrypt( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid IV size", + ) if aad is not None: with When(f"aad is {aad}"): - encrypt(plaintext=plaintext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len]}'", aad=f"{aad}", mode=mode) + encrypt( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len]}'", + aad=f"{aad}", + mode=mode, + ) + @TestScenario @Requirements( RQ_SRS008_AES_Encrypt_Function_Parameters_AdditionalAuthenticatedData("1.0"), - RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_Length("1.0") + RQ_SRS008_AES_Encrypt_Function_AdditionalAuthenticationData_Length("1.0"), ) def aad_parameter_types_and_length(self): """Check that `encrypt` function accepts `aad` parameter as the fifth argument @@ -256,30 +487,78 @@ def aad_parameter_types_and_length(self): key = "'0123456789123456'" with When("aad is specified using String type"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv=iv, aad="'aad'", message="19A1183335B374C626B24208AAEC97F148732CE05621AC87B21526") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv=iv, + aad="'aad'", + message="19A1183335B374C626B24208AAEC97F148732CE05621AC87B21526", + ) with When("aad is specified using String with UTF8 characters"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv=iv, aad="'Gãńdåłf_Thê_Gręât'", message="19A1183335B374C626B242C68D9618A8C2664D7B6A3FE978104B39") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv=iv, + aad="'Gãńdåłf_Thê_Gręât'", + message="19A1183335B374C626B242C68D9618A8C2664D7B6A3FE978104B39", + ) with When("aad is specified using FixedString type"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv=iv, aad="toFixedString('aad', 3)", message="19A1183335B374C626B24208AAEC97F148732CE05621AC87B21526") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv=iv, + aad="toFixedString('aad', 3)", + message="19A1183335B374C626B24208AAEC97F148732CE05621AC87B21526", + ) with When("aad is specified using FixedString with UTF8 characters"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv=iv, aad="toFixedString('Gãńdåłf_Thê_Gręât', 24)", message="19A1183335B374C626B242C68D9618A8C2664D7B6A3FE978104B39") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv=iv, + 
aad="toFixedString('Gãńdåłf_Thê_Gręât', 24)", + message="19A1183335B374C626B242C68D9618A8C2664D7B6A3FE978104B39", + ) with When("aad is 0 bytes"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv=iv, aad="''", message="19A1183335B374C626B242DF92BB3F57F5D82BEDF41FD5D49F8BC9") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv=iv, + aad="''", + message="19A1183335B374C626B242DF92BB3F57F5D82BEDF41FD5D49F8BC9", + ) with When("aad is 1 byte"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv=iv, aad="'1'", message="19A1183335B374C626B242D1BCFC63B09CFE9EAD20285044A01035") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv=iv, + aad="'1'", + message="19A1183335B374C626B242D1BCFC63B09CFE9EAD20285044A01035", + ) with When("aad is 256 bytes"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv=iv, aad=f"'{'1' * 256}'", message="19A1183335B374C626B242355AD3DD2C5D7E36AEECBB847BF9E8A7") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv=iv, + aad=f"'{'1' * 256}'", + message="19A1183335B374C626B242355AD3DD2C5D7E36AEECBB847BF9E8A7", + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_Encrypt_Function_Parameters_InitializationVector("1.0") -) +@Requirements(RQ_SRS008_AES_Encrypt_Function_Parameters_InitializationVector("1.0")) def iv_parameter_types(self): """Check that `encrypt` function accepts `iv` parameter as the fourth argument of either `String` or `FixedString` types. @@ -290,21 +569,44 @@ def iv_parameter_types(self): key = "'0123456789123456'" with When("iv is specified using String type"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv=iv, message="F024F9372FA0D8B974894D29FFB8A7F7") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv=iv, + message="F024F9372FA0D8B974894D29FFB8A7F7", + ) with When("iv is specified using String with UTF8 characters"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv="'Gãńdåłf_Thê'", message="7A4EC0FF3796F46BED281F4778ACE1DC") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv="'Gãńdåłf_Thê'", + message="7A4EC0FF3796F46BED281F4778ACE1DC", + ) with When("iv is specified using FixedString type"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv=f"toFixedString({iv}, 16)", message="F024F9372FA0D8B974894D29FFB8A7F7") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv=f"toFixedString({iv}, 16)", + message="F024F9372FA0D8B974894D29FFB8A7F7", + ) with When("iv is specified using FixedString with UTF8 characters"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv="toFixedString('Gãńdåłf_Thê', 16)", message="7A4EC0FF3796F46BED281F4778ACE1DC") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv="toFixedString('Gãńdåłf_Thê', 16)", + message="7A4EC0FF3796F46BED281F4778ACE1DC", + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_Encrypt_Function_Parameters_Key("1.0") -) +@Requirements(RQ_SRS008_AES_Encrypt_Function_Parameters_Key("1.0")) def key_parameter_types(self): """Check that `encrypt` function accepts `key` parameter as the second argument of either `String` or `FixedString` types. 
@@ -315,16 +617,37 @@ def key_parameter_types(self): key = "'0123456789123456'" with When("key is specified using String type"): - encrypt(plaintext=plaintext, key=key, mode=mode, message="49C9ADB81BA9B58C485E7ADB90E70576") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + message="49C9ADB81BA9B58C485E7ADB90E70576", + ) with When("key is specified using String with UTF8 characters"): - encrypt(plaintext=plaintext, key="'Gãńdåłf_Thê'", mode=mode, message="180086AA42AD57B71C706EEC372D0C3D") + encrypt( + plaintext=plaintext, + key="'Gãńdåłf_Thê'", + mode=mode, + message="180086AA42AD57B71C706EEC372D0C3D", + ) with When("key is specified using FixedString type"): - encrypt(plaintext=plaintext, key=f"toFixedString({key}, 16)", mode=mode, message="49C9ADB81BA9B58C485E7ADB90E70576") + encrypt( + plaintext=plaintext, + key=f"toFixedString({key}, 16)", + mode=mode, + message="49C9ADB81BA9B58C485E7ADB90E70576", + ) with When("key is specified using FixedString with UTF8 characters"): - encrypt(plaintext=plaintext, key="toFixedString('Gãńdåłf_Thê', 16)", mode=mode, message="180086AA42AD57B71C706EEC372D0C3D") + encrypt( + plaintext=plaintext, + key="toFixedString('Gãńdåłf_Thê', 16)", + mode=mode, + message="180086AA42AD57B71C706EEC372D0C3D", + ) + @TestScenario @Requirements( @@ -339,17 +662,28 @@ def mode_parameter_types(self): key = "'0123456789123456'" with When("mode is specified using String type"): - encrypt(plaintext=plaintext, key=key, mode=mode, message="49C9ADB81BA9B58C485E7ADB90E70576") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + message="49C9ADB81BA9B58C485E7ADB90E70576", + ) with When("mode is specified using FixedString type"): - encrypt(plaintext=plaintext, key=key, mode=f"toFixedString({mode}, 12)", message="49C9ADB81BA9B58C485E7ADB90E70576") + encrypt( + plaintext=plaintext, + key=key, + mode=f"toFixedString({mode}, 12)", + message="49C9ADB81BA9B58C485E7ADB90E70576", + ) + @TestScenario @Requirements( RQ_SRS008_AES_Encrypt_Function_Parameters_PlainText("2.0"), RQ_SRS008_AES_Encrypt_Function_Parameters_Mode("1.0"), RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_ValuesFormat("1.0"), - RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Values("1.0") + RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Values("1.0"), ) def encryption(self): """Check that `encrypt` functions accepts `plaintext` as the second parameter @@ -361,37 +695,65 @@ def encryption(self): for mode, key_len, iv_len, aad_len in modes: for datatype, plaintext in plaintexts: - with Example(f"""mode={mode.strip("'")} datatype={datatype.strip("'")} iv={iv_len} aad={aad_len}""") as example: + with Example( + f"""mode={mode.strip("'")} datatype={datatype.strip("'")} iv={iv_len} aad={aad_len}""" + ) as example: - r = encrypt(plaintext=plaintext, key=f"'{key[:key_len]}'", mode=mode, - iv=(None if not iv_len else f"'{iv[:iv_len]}'"), aad=(None if not aad_len else f"'{aad}'")) + r = encrypt( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + mode=mode, + iv=(None if not iv_len else f"'{iv[:iv_len]}'"), + aad=(None if not aad_len else f"'{aad}'"), + ) with Then("I check output against snapshot"): with values() as that: example_name = basename(example.name) - assert that(snapshot(r.output.strip(), "encrypt", name=f"example_{example_name.replace(' ', '_')}")), error() + assert that( + snapshot( + r.output.strip(), + "encrypt", + name=f"example_{example_name.replace(' ', '_')}", + ) + ), error() + @TestScenario -@Requirements( - RQ_SRS008_AES_Encrypt_Function_Parameters_ReturnValue("1.0") -) 
+@Requirements(RQ_SRS008_AES_Encrypt_Function_Parameters_ReturnValue("1.0")) def return_value(self): - """Check that `encrypt` functions returns String data type. - """ + """Check that `encrypt` functions returns String data type.""" plaintext = "'hello there'" iv = "'0123456789123456'" mode = "'aes-128-cbc'" key = "'0123456789123456'" with When("I get type of the return value"): - sql = "SELECT toTypeName(encrypt(" + mode + "," + plaintext + "," + key + "," + iv + "))" + sql = ( + "SELECT toTypeName(encrypt(" + + mode + + "," + + plaintext + + "," + + key + + "," + + iv + + "))" + ) r = self.context.node.query(sql) with Then("type should be String"): assert r.output.strip() == "String", error() with When("I get return ciphertext as hex"): - encrypt(plaintext=plaintext, key=key, mode=mode, iv=iv, message="F024F9372FA0D8B974894D29FFB8A7F7") + encrypt( + plaintext=plaintext, + key=key, + mode=mode, + iv=iv, + message="F024F9372FA0D8B974894D29FFB8A7F7", + ) + @TestScenario @Requirements( @@ -405,16 +767,16 @@ def syntax(self): ``` """ sql = "SELECT hex(encrypt('aes-128-gcm', 'hello there', '0123456789123456', '012345678912', 'AAD'))" - self.context.node.query(sql, step=When, message="19A1183335B374C626B242A6F6E8712E2B64DCDC6A468B2F654614") + self.context.node.query( + sql, step=When, message="19A1183335B374C626B242A6F6E8712E2B64DCDC6A468B2F654614" + ) + @TestFeature @Name("encrypt") -@Requirements( - RQ_SRS008_AES_Encrypt_Function("1.0") -) +@Requirements(RQ_SRS008_AES_Encrypt_Function("1.0")) def feature(self, node="clickhouse1"): - """Check the behavior of the `encrypt` function. - """ + """Check the behavior of the `encrypt` function.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario): diff --git a/tests/testflows/aes_encryption/tests/encrypt_mysql.py b/tests/testflows/aes_encryption/tests/encrypt_mysql.py index b831d6dda85..f43abfd28a2 100644 --- a/tests/testflows/aes_encryption/tests/encrypt_mysql.py +++ b/tests/testflows/aes_encryption/tests/encrypt_mysql.py @@ -5,10 +5,19 @@ from testflows.asserts import values, error, snapshot from aes_encryption.requirements.requirements import * from aes_encryption.tests.common import * + @TestOutline -def aes_encrypt_mysql(self, plaintext=None, key=None, mode=None, iv=None, exitcode=0, message=None, step=When): - """Execute `aes_encrypt_mysql` function with the specified parameters. 
- """ +def aes_encrypt_mysql( + self, + plaintext=None, + key=None, + mode=None, + iv=None, + exitcode=0, + message=None, + step=When, +): + """Execute `aes_encrypt_mysql` function with the specified parameters.""" params = [] if mode is not None: params.append(mode) @@ -21,178 +30,325 @@ def aes_encrypt_mysql(self, plaintext=None, key=None, mode=None, iv=None, exitco sql = "SELECT hex(aes_encrypt_mysql(" + ", ".join(params) + "))" - return current().context.node.query(sql, step=step, exitcode=exitcode, message=message) + return current().context.node.query( + sql, step=step, exitcode=exitcode, message=message + ) + @TestOutline(Scenario) @Requirements( RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Values_GCM_Error("1.0"), - RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Values_CTR_Error("1.0") + RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Values_CTR_Error("1.0"), +) +@Examples( + "mode", + [ + ("'aes-128-gcm'",), + ("'aes-192-gcm'",), + ("'aes-256-gcm'",), + ("'aes-128-ctr'",), + ("'aes-192-ctr'",), + ("'aes-256-ctr'",), + ], ) -@Examples("mode", [ - ("'aes-128-gcm'",), - ("'aes-192-gcm'",), - ("'aes-256-gcm'",), - ("'aes-128-ctr'",), - ("'aes-192-ctr'",), - ("'aes-256-ctr'",), -]) def unsupported_modes(self, mode): - """Check that `aes_encrypt_mysql` function returns an error when unsupported modes are specified. - """ - aes_encrypt_mysql(plaintext="'hello there'", mode=mode, key=f"'{'1'* 32}'", exitcode=36, message="DB::Exception: Unsupported cipher mode") + """Check that `aes_encrypt_mysql` function returns an error when unsupported modes are specified.""" + aes_encrypt_mysql( + plaintext="'hello there'", + mode=mode, + key=f"'{'1'* 32}'", + exitcode=36, + message="DB::Exception: Unsupported cipher mode", + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_Functions_InvalidParameters("1.0") -) +@Requirements(RQ_SRS008_AES_Functions_InvalidParameters("1.0")) def invalid_parameters(self): """Check that `aes_encrypt_mysql` function returns an error when we call it with invalid parameters. """ with Example("no parameters"): - aes_encrypt_mysql(exitcode=42, message="DB::Exception: Incorrect number of arguments for function aes_encrypt_mysql provided 0, expected 3 to 4") + aes_encrypt_mysql( + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function aes_encrypt_mysql provided 0, expected 3 to 4", + ) with Example("missing key and mode"): - aes_encrypt_mysql(plaintext="'hello there'", exitcode=42, message="DB::Exception: Incorrect number of arguments for function aes_encrypt_mysql provided 1") + aes_encrypt_mysql( + plaintext="'hello there'", + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function aes_encrypt_mysql provided 1", + ) with Example("missing mode"): - aes_encrypt_mysql(plaintext="'hello there'", key="'123'", exitcode=42, message="DB::Exception: Incorrect number of arguments for function aes_encrypt_mysql provided 2") + aes_encrypt_mysql( + plaintext="'hello there'", + key="'123'", + exitcode=42, + message="DB::Exception: Incorrect number of arguments for function aes_encrypt_mysql provided 2", + ) with Example("bad key type - UInt8"): - aes_encrypt_mysql(plaintext="'hello there'", key="123", mode="'aes-128-ecb'", exitcode=43, - message="DB::Exception: Received from localhost:9000. DB::Exception: Illegal type of argument #3") + aes_encrypt_mysql( + plaintext="'hello there'", + key="123", + mode="'aes-128-ecb'", + exitcode=43, + message="DB::Exception: Received from localhost:9000. 
DB::Exception: Illegal type of argument #3", + ) with Example("bad mode type - forgot quotes"): - aes_encrypt_mysql(plaintext="'hello there'", key="'0123456789123456'", mode="aes-128-ecb", exitcode=47, - message="DB::Exception: Missing columns: 'ecb' 'aes' while processing query") + aes_encrypt_mysql( + plaintext="'hello there'", + key="'0123456789123456'", + mode="aes-128-ecb", + exitcode=47, + message="DB::Exception: Missing columns: 'ecb' 'aes' while processing query", + ) with Example("bad mode type - UInt8"): - aes_encrypt_mysql(plaintext="'hello there'", key="'0123456789123456'", mode="128", exitcode=43, - message="DB::Exception: Illegal type of argument #1 'mode'") + aes_encrypt_mysql( + plaintext="'hello there'", + key="'0123456789123456'", + mode="128", + exitcode=43, + message="DB::Exception: Illegal type of argument #1 'mode'", + ) with Example("bad iv type - UInt8"): - aes_encrypt_mysql(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-cbc'", iv='128', exitcode=43, - message="DB::Exception: Illegal type of argument") + aes_encrypt_mysql( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-cbc'", + iv="128", + exitcode=43, + message="DB::Exception: Illegal type of argument", + ) - with Example("iv not valid for mode", requirements=[RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_NotValidForMode("1.0")]): - aes_encrypt_mysql(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-ecb'", iv="'012345678912'", exitcode=36, - message="DB::Exception: aes-128-ecb does not support IV") + with Example( + "iv not valid for mode", + requirements=[ + RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_NotValidForMode( + "1.0" + ) + ], + ): + aes_encrypt_mysql( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-ecb'", + iv="'012345678912'", + exitcode=36, + message="DB::Exception: aes-128-ecb does not support IV", + ) - with Example("iv not valid for mode - size 0", requirements=[RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_NotValidForMode("1.0")]): - aes_encrypt_mysql(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-ecb'", iv="''", exitcode=0, - message=None) + with Example( + "iv not valid for mode - size 0", + requirements=[ + RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_NotValidForMode( + "1.0" + ) + ], + ): + aes_encrypt_mysql( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-ecb'", + iv="''", + exitcode=0, + message=None, + ) - with Example("invalid mode value", requirements=[RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_Invalid("1.0")]): + with Example( + "invalid mode value", + requirements=[ + RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Value_Invalid("1.0") + ], + ): with When("typo in the block algorithm"): - aes_encrypt_mysql(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128-eeb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128-eeb") + aes_encrypt_mysql( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128-eeb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128-eeb", + ) with When("typo in the key size"): - aes_encrypt_mysql(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-127-ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-127-ecb") + aes_encrypt_mysql( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-127-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-127-ecb", + ) 
with When("typo in the aes prefix"): - aes_encrypt_mysql(plaintext="'hello there'", key="'0123456789123456'", mode="'aee-128-ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aee-128-ecb") + aes_encrypt_mysql( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aee-128-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aee-128-ecb", + ) with When("missing last dash"): - aes_encrypt_mysql(plaintext="'hello there'", key="'0123456789123456'", mode="'aes-128ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aes-128ecb") + aes_encrypt_mysql( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes-128ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes-128ecb", + ) with When("missing first dash"): - aes_encrypt_mysql(plaintext="'hello there'", key="'0123456789123456'", mode="'aes128-ecb'", exitcode=36, - message="DB::Exception: Invalid mode: aes128-ecb") + aes_encrypt_mysql( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'aes128-ecb'", + exitcode=36, + message="DB::Exception: Invalid mode: aes128-ecb", + ) with When("all capitals"): - aes_encrypt_mysql(plaintext="'hello there'", key="'0123456789123456'", mode="'AES-128-ECB'", exitcode=36, - message="DB::Exception: Invalid mode: AES-128-ECB") + aes_encrypt_mysql( + plaintext="'hello there'", + key="'0123456789123456'", + mode="'AES-128-ECB'", + exitcode=36, + message="DB::Exception: Invalid mode: AES-128-ECB", + ) + @TestOutline(Scenario) -@Requirements( - RQ_SRS008_AES_Functions_InvalidParameters("1.0") +@Requirements(RQ_SRS008_AES_Functions_InvalidParameters("1.0")) +@Examples( + "data_type, value", + [ + ("UInt8", "toUInt8('1')"), + ("UInt16", "toUInt16('1')"), + ("UInt32", "toUInt32('1')"), + ("UInt64", "toUInt64('1')"), + ("Int8", "toInt8('1')"), + ("Int16", "toInt16('1')"), + ("Int32", "toInt32('1')"), + ("Int64", "toInt64('1')"), + ("Float32", "toFloat32('1.0')"), + ("Float64", "toFloat64('1.0')"), + ("Decimal32", "toDecimal32(2, 4)"), + ("Decimal64", "toDecimal64(2, 4)"), + ("Decimal128", "toDecimal128(2, 4)"), + ("UUID", "toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0')"), + ("Date", "toDate('2020-01-01')"), + ("DateTime", "toDateTime('2020-01-01 20:01:02')"), + ("DateTime64", "toDateTime64('2020-01-01 20:01:02.123', 3)"), + ("Array", "[1,2]"), + ("Tuple", "(1,'a')"), + ("IPv4", "toIPv4('171.225.130.45')"), + ("IPv6", "toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001')"), + ("Enum8", r"CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)')"), + ("Enum16", r"CAST('a', 'Enum16(\'a\' = 1, \'b\' = 2)')"), + ], ) -@Examples("data_type, value", [ - ("UInt8", "toUInt8('1')"), - ("UInt16", "toUInt16('1')"), - ("UInt32", "toUInt32('1')"), - ("UInt64", "toUInt64('1')"), - ("Int8", "toInt8('1')"), - ("Int16", "toInt16('1')"), - ("Int32", "toInt32('1')"), - ("Int64", "toInt64('1')"), - ("Float32", "toFloat32('1.0')"), - ("Float64", "toFloat64('1.0')"), - ("Decimal32", "toDecimal32(2, 4)"), - ("Decimal64", "toDecimal64(2, 4)"), - ("Decimal128", "toDecimal128(2, 4)"), - ("UUID", "toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0')"), - ("Date", "toDate('2020-01-01')"), - ("DateTime", "toDateTime('2020-01-01 20:01:02')"), - ("DateTime64", "toDateTime64('2020-01-01 20:01:02.123', 3)"), - ("Array", "[1,2]"), - ("Tuple", "(1,'a')"), - ("IPv4", "toIPv4('171.225.130.45')"), - ("IPv6", "toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001')"), - ("Enum8", r"CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)')"), - ("Enum16", r"CAST('a', 'Enum16(\'a\' = 1, \'b\' = 2)')") -]) def 
invalid_plaintext_data_type(self, data_type, value): """Check that aes_encrypt_mysql function returns an error if the plaintext parameter has invalid data type. """ - with When("I try to encrypt plaintext with invalid data type", description=f"{data_type} with value {value}"): - aes_encrypt_mysql(plaintext=value, key="'0123456789123456'", mode="'aes-128-cbc'", iv="'0123456789123456'", - exitcode=43, message="DB::Exception: Illegal type of argument") + with When( + "I try to encrypt plaintext with invalid data type", + description=f"{data_type} with value {value}", + ): + aes_encrypt_mysql( + plaintext=value, + key="'0123456789123456'", + mode="'aes-128-cbc'", + iv="'0123456789123456'", + exitcode=43, + message="DB::Exception: Illegal type of argument", + ) + @TestOutline(Scenario) @Requirements( RQ_SRS008_AES_MySQL_Encrypt_Function_Key_Length_TooShortError("1.0"), RQ_SRS008_AES_MySQL_Encrypt_Function_Key_Length_TooLong("1.0"), - RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_Length_TooShortError("1.0"), + RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_Length_TooShortError( + "1.0" + ), RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_Length_TooLong("1.0"), RQ_SRS008_AES_MySQL_Encrypt_Function_InitializationVector_NotValidForMode("1.0"), - RQ_SRS008_AES_MySQL_Encrypt_Function_Mode_KeyAndInitializationVector_Length("1.0") + RQ_SRS008_AES_MySQL_Encrypt_Function_Mode_KeyAndInitializationVector_Length("1.0"), +) +@Examples( + "mode key_len iv_len", + [ + # ECB + ("'aes-128-ecb'", 16, None), + ("'aes-192-ecb'", 24, None), + ("'aes-256-ecb'", 32, None), + # CBC + ("'aes-128-cbc'", 16, 16), + ("'aes-192-cbc'", 24, 16), + ("'aes-256-cbc'", 32, 16), + # CFB128 + ("'aes-128-cfb128'", 16, 16), + ("'aes-192-cfb128'", 24, 16), + ("'aes-256-cfb128'", 32, 16), + # OFB + ("'aes-128-ofb'", 16, 16), + ("'aes-192-ofb'", 24, 16), + ("'aes-256-ofb'", 32, 16), + ], + "%-16s %-10s %-10s", ) -@Examples("mode key_len iv_len", [ - # ECB - ("'aes-128-ecb'", 16, None), - ("'aes-192-ecb'", 24, None), - ("'aes-256-ecb'", 32, None), - # CBC - ("'aes-128-cbc'", 16, 16), - ("'aes-192-cbc'", 24, 16), - ("'aes-256-cbc'", 32, 16), - # CFB128 - ("'aes-128-cfb128'", 16, 16), - ("'aes-192-cfb128'", 24, 16), - ("'aes-256-cfb128'", 32, 16), - # OFB - ("'aes-128-ofb'", 16, 16), - ("'aes-192-ofb'", 24, 16), - ("'aes-256-ofb'", 32, 16) -], "%-16s %-10s %-10s") def key_or_iv_length_for_mode(self, mode, key_len, iv_len): - """Check that key or iv length for mode. 
- """ + """Check that key or iv length for mode.""" plaintext = "'hello there'" key = "0123456789" * 4 iv = "0123456789" * 4 with When("key is too short"): - aes_encrypt_mysql(plaintext=plaintext, key=f"'{key[:key_len-1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid key size") + aes_encrypt_mysql( + plaintext=plaintext, + key=f"'{key[:key_len-1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid key size", + ) with When("key is too long"): aes_encrypt_mysql(plaintext=plaintext, key=f"'{key[:key_len+1]}'", mode=mode) if iv_len is not None: with When("iv is too short"): - aes_encrypt_mysql(plaintext=plaintext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len-1]}'", mode=mode, exitcode=36, message="DB::Exception: Invalid IV size") + aes_encrypt_mysql( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len-1]}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid IV size", + ) with When("iv is too long"): - aes_encrypt_mysql(plaintext=plaintext, key=f"'{key[:key_len]}'", iv=f"'{iv[:iv_len+1]}'", mode=mode) + aes_encrypt_mysql( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + iv=f"'{iv[:iv_len+1]}'", + mode=mode, + ) else: with When("iv is specified but not needed"): - aes_encrypt_mysql(plaintext=plaintext, key=f"'{key[:key_len]}'", iv=f"'{iv}'", mode=mode, exitcode=36, message="DB::Exception: Invalid IV size") + aes_encrypt_mysql( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + iv=f"'{iv}'", + mode=mode, + exitcode=36, + message="DB::Exception: Invalid IV size", + ) + @TestScenario @Requirements( @@ -208,22 +364,44 @@ def iv_parameter_types(self): key = "'0123456789123456'" with When("iv is specified using String type"): - aes_encrypt_mysql(plaintext=plaintext, key=key, mode=mode, iv=iv, message="F024F9372FA0D8B974894D29FFB8A7F7") + aes_encrypt_mysql( + plaintext=plaintext, + key=key, + mode=mode, + iv=iv, + message="F024F9372FA0D8B974894D29FFB8A7F7", + ) with When("iv is specified using String with UTF8 characters"): - aes_encrypt_mysql(plaintext=plaintext, key=key, mode=mode, iv="'Gãńdåłf_Thê'", message="7A4EC0FF3796F46BED281F4778ACE1DC") + aes_encrypt_mysql( + plaintext=plaintext, + key=key, + mode=mode, + iv="'Gãńdåłf_Thê'", + message="7A4EC0FF3796F46BED281F4778ACE1DC", + ) with When("iv is specified using FixedString type"): - aes_encrypt_mysql(plaintext=plaintext, key=key, mode=mode, iv=f"toFixedString({iv}, 16)", message="F024F9372FA0D8B974894D29FFB8A7F7") + aes_encrypt_mysql( + plaintext=plaintext, + key=key, + mode=mode, + iv=f"toFixedString({iv}, 16)", + message="F024F9372FA0D8B974894D29FFB8A7F7", + ) with When("iv is specified using FixedString with UTF8 characters"): - aes_encrypt_mysql(plaintext=plaintext, key=key, mode=mode, iv="toFixedString('Gãńdåłf_Thê', 16)", message="7A4EC0FF3796F46BED281F4778ACE1DC") + aes_encrypt_mysql( + plaintext=plaintext, + key=key, + mode=mode, + iv="toFixedString('Gãńdåłf_Thê', 16)", + message="7A4EC0FF3796F46BED281F4778ACE1DC", + ) @TestScenario -@Requirements( - RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Key("1.0") -) +@Requirements(RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Key("1.0")) def key_parameter_types(self): """Check that `aes_encrypt_mysql` function accepts `key` parameter as the second argument of either `String` or `FixedString` types. 
@@ -234,16 +412,36 @@ def key_parameter_types(self): key = "'0123456789123456'" with When("key is specified using String type"): - aes_encrypt_mysql(plaintext=plaintext, key=key, mode=mode, message="49C9ADB81BA9B58C485E7ADB90E70576") + aes_encrypt_mysql( + plaintext=plaintext, + key=key, + mode=mode, + message="49C9ADB81BA9B58C485E7ADB90E70576", + ) with When("key is specified using String with UTF8 characters"): - aes_encrypt_mysql(plaintext=plaintext, key="'Gãńdåłf_Thê'", mode=mode, message="180086AA42AD57B71C706EEC372D0C3D") + aes_encrypt_mysql( + plaintext=plaintext, + key="'Gãńdåłf_Thê'", + mode=mode, + message="180086AA42AD57B71C706EEC372D0C3D", + ) with When("key is specified using FixedString type"): - aes_encrypt_mysql(plaintext=plaintext, key=f"toFixedString({key}, 16)", mode=mode, message="49C9ADB81BA9B58C485E7ADB90E70576") + aes_encrypt_mysql( + plaintext=plaintext, + key=f"toFixedString({key}, 16)", + mode=mode, + message="49C9ADB81BA9B58C485E7ADB90E70576", + ) with When("key is specified using FixedString with UTF8 characters"): - aes_encrypt_mysql(plaintext=plaintext, key="toFixedString('Gãńdåłf_Thê', 16)", mode=mode, message="180086AA42AD57B71C706EEC372D0C3D") + aes_encrypt_mysql( + plaintext=plaintext, + key="toFixedString('Gãńdåłf_Thê', 16)", + mode=mode, + message="180086AA42AD57B71C706EEC372D0C3D", + ) @TestScenario @@ -259,32 +457,57 @@ def mode_parameter_types(self): key = "'0123456789123456'" with When("mode is specified using String type"): - aes_encrypt_mysql(plaintext=plaintext, key=key, mode=mode, message="49C9ADB81BA9B58C485E7ADB90E70576") + aes_encrypt_mysql( + plaintext=plaintext, + key=key, + mode=mode, + message="49C9ADB81BA9B58C485E7ADB90E70576", + ) with When("mode is specified using FixedString type"): - aes_encrypt_mysql(plaintext=plaintext, key=key, mode=f"toFixedString({mode}, 12)", message="49C9ADB81BA9B58C485E7ADB90E70576") + aes_encrypt_mysql( + plaintext=plaintext, + key=key, + mode=f"toFixedString({mode}, 12)", + message="49C9ADB81BA9B58C485E7ADB90E70576", + ) + @TestScenario -@Requirements( - RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_ReturnValue("1.0") -) +@Requirements(RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_ReturnValue("1.0")) def return_value(self): - """Check that `aes_encrypt_mysql` functions returns String data type. 
- """ + """Check that `aes_encrypt_mysql` functions returns String data type.""" plaintext = "'hello there'" iv = "'0123456789123456'" mode = "'aes-128-cbc'" key = "'0123456789123456'" with When("I get type of the return value"): - sql = "SELECT toTypeName(aes_encrypt_mysql("+ mode + "," + plaintext + "," + key + "," + iv + "))" + sql = ( + "SELECT toTypeName(aes_encrypt_mysql(" + + mode + + "," + + plaintext + + "," + + key + + "," + + iv + + "))" + ) r = self.context.node.query(sql) with Then("type should be String"): assert r.output.strip() == "String", error() with When("I get return ciphertext as hex"): - aes_encrypt_mysql(plaintext=plaintext, key=key, mode=mode, iv=iv, message="F024F9372FA0D8B974894D29FFB8A7F7") + aes_encrypt_mysql( + plaintext=plaintext, + key=key, + mode=mode, + iv=iv, + message="F024F9372FA0D8B974894D29FFB8A7F7", + ) + @TestScenario @Requirements( @@ -300,12 +523,13 @@ def syntax(self): sql = "SELECT hex(aes_encrypt_mysql('aes-128-ofb', 'hello there', '0123456789123456', '0123456789123456'))" self.context.node.query(sql, step=When, message="70FE78410D6EE237C2DE4A") + @TestScenario @Requirements( RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_PlainText("2.0"), RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode("1.0"), RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_ValuesFormat("1.0"), - RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Values("1.0") + RQ_SRS008_AES_MySQL_Encrypt_Function_Parameters_Mode_Values("1.0"), ) def encryption(self): """Check that `aes_encrypt_mysql` functions accepts `plaintext` as the second parameter @@ -317,24 +541,34 @@ def encryption(self): for mode, key_len, iv_len in mysql_modes: for datatype, plaintext in plaintexts: - with Example(f"""mode={mode.strip("'")} datatype={datatype.strip("'")} key={key_len} iv={iv_len}""") as example: + with Example( + f"""mode={mode.strip("'")} datatype={datatype.strip("'")} key={key_len} iv={iv_len}""" + ) as example: - r = aes_encrypt_mysql(plaintext=plaintext, key=f"'{key[:key_len]}'", mode=mode, - iv=(None if not iv_len else f"'{iv[:iv_len]}'")) + r = aes_encrypt_mysql( + plaintext=plaintext, + key=f"'{key[:key_len]}'", + mode=mode, + iv=(None if not iv_len else f"'{iv[:iv_len]}'"), + ) with Then("I check output against snapshot"): with values() as that: example_name = basename(example.name) - assert that(snapshot(r.output.strip(), "encrypt_mysql", name=f"example_{example_name.replace(' ', '_')}")), error() + assert that( + snapshot( + r.output.strip(), + "encrypt_mysql", + name=f"example_{example_name.replace(' ', '_')}", + ) + ), error() + @TestFeature @Name("encrypt_mysql") -@Requirements( - RQ_SRS008_AES_MySQL_Encrypt_Function("1.0") -) +@Requirements(RQ_SRS008_AES_MySQL_Encrypt_Function("1.0")) def feature(self, node="clickhouse1"): - """Check the behavior of the `aes_encrypt_mysql` function. 
- """ + """Check the behavior of the `aes_encrypt_mysql` function.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario): diff --git a/tests/testflows/datetime64_extended_range/regression.py b/tests/testflows/datetime64_extended_range/regression.py index bafe4d685d7..69c4021df4c 100755 --- a/tests/testflows/datetime64_extended_range/regression.py +++ b/tests/testflows/datetime64_extended_range/regression.py @@ -28,42 +28,88 @@ from datetime64_extended_range.common import * # Juba and Monrovia timezones are damaged - probably, due to wrong DST shifts lookup tables xfails = { - "type conversion/to int 8 16 32 64 128 256/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/16581#issuecomment-804360350")], - "type conversion/to uint 8 16 32 64 256/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/16581#issuecomment-804360350")], - "non existent time/leap seconds/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/17079#issuecomment-783396589")], - "date time funcs/date diff/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22824")], - "date time funcs/format date time/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22852")], - "date time funcs/time slot/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22854")], - "date time funcs/to monday/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22854")], - "date time funcs/time slots/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/16260")], - "date time funcs/to relative :/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22927#issuecomment-816574952")], - "date time funcs/to start of :/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22928")], - "date time funcs/to unix timestamp/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22929")], - "date time funcs/to week/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22930")], - "date time funcs/to year week/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22948")], - "type conversion/to unix timestamp64 */:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22959")], - "type conversion/from unix timestamp64 */:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22959")], - "type conversion/to int 8 16 32 64 128 256/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/16581#issuecomment-804360350")], + "type conversion/to int 8 16 32 64 128 256/:": [ + ( + Fail, + "https://github.com/ClickHouse/ClickHouse/issues/16581#issuecomment-804360350", + ) + ], + "type conversion/to uint 8 16 32 64 256/:": [ + ( + Fail, + "https://github.com/ClickHouse/ClickHouse/issues/16581#issuecomment-804360350", + ) + ], + "non existent time/leap seconds/:": [ + ( + Fail, + "https://github.com/ClickHouse/ClickHouse/issues/17079#issuecomment-783396589", + ) + ], + "date time funcs/date diff/:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22824") + ], + "date time funcs/format date time/:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22852") + ], + "date time funcs/time slot/:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22854") + ], + "date time funcs/to monday/:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22854") + ], + "date time funcs/time slots/:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/16260") + ], + "date time funcs/to relative :/:": [ + ( + Fail, + 
"https://github.com/ClickHouse/ClickHouse/issues/22927#issuecomment-816574952", + ) + ], + "date time funcs/to start of :/:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22928") + ], + "date time funcs/to unix timestamp/:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22929") + ], + "date time funcs/to week/:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22930") + ], + "date time funcs/to year week/:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22948") + ], + "type conversion/to unix timestamp64 */:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22959") + ], + "type conversion/from unix timestamp64 */:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22959") + ], + "type conversion/to int 8 16 32 64 128 256/:": [ + ( + Fail, + "https://github.com/ClickHouse/ClickHouse/issues/16581#issuecomment-804360350", + ) + ], "reference times/:": [(Fail, "check procedure unclear")], # need to investigate "type conversion/to datetime/cast=True": [(Fail, "need to investigate")], - "date time funcs/today": [(Fail, "need to investigate")] + "date time funcs/today": [(Fail, "need to investigate")], } @TestModule @Name("datetime64 extended range") @ArgumentParser(argparser) -@Specifications( - SRS_010_ClickHouse_DateTime64_Extended_Range -) +@Specifications(SRS_010_ClickHouse_DateTime64_Extended_Range) @Requirements( RQ_SRS_010_DateTime64_ExtendedRange("1.0"), ) @XFails(xfails) -def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=False): - """ClickHouse DateTime64 Extended Range regression module. - """ +def regression( + self, local, clickhouse_binary_path, clickhouse_version=None, stress=False +): + """ClickHouse DateTime64 Extended Range regression module.""" nodes = { "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"), } @@ -72,19 +118,57 @@ def regression(self, local, clickhouse_binary_path, clickhouse_version=None, str self.context.stress = stress self.context.clickhouse_version = clickhouse_version - with Cluster(local, clickhouse_binary_path, nodes=nodes, - docker_compose_project_dir=os.path.join(current_dir(), "datetime64_extended_range_env")) as cluster: + with Cluster( + local, + clickhouse_binary_path, + nodes=nodes, + docker_compose_project_dir=os.path.join( + current_dir(), "datetime64_extended_range_env" + ), + ) as cluster: self.context.cluster = cluster with Pool(2) as pool: try: - Scenario(run=load("datetime64_extended_range.tests.generic", "generic"), parallel=True, executor=pool) - Scenario(run=load("datetime64_extended_range.tests.non_existent_time", "feature"), parallel=True, executor=pool) - Scenario(run=load("datetime64_extended_range.tests.reference_times", "reference_times"), parallel=True, executor=pool) - Scenario(run=load("datetime64_extended_range.tests.date_time_functions", "date_time_funcs"), parallel=True, executor=pool) - Scenario(run=load("datetime64_extended_range.tests.type_conversion", "type_conversion"), parallel=True, executor=pool) + Scenario( + run=load("datetime64_extended_range.tests.generic", "generic"), + parallel=True, + executor=pool, + ) + Scenario( + run=load( + "datetime64_extended_range.tests.non_existent_time", "feature" + ), + parallel=True, + executor=pool, + ) + Scenario( + run=load( + "datetime64_extended_range.tests.reference_times", + "reference_times", + ), + parallel=True, + executor=pool, + ) + Scenario( + run=load( + "datetime64_extended_range.tests.date_time_functions", + "date_time_funcs", + ), + parallel=True, + 
executor=pool, + ) + Scenario( + run=load( + "datetime64_extended_range.tests.type_conversion", + "type_conversion", + ), + parallel=True, + executor=pool, + ) finally: join() + if main(): regression() diff --git a/tests/testflows/datetime64_extended_range/requirements/requirements.py b/tests/testflows/datetime64_extended_range/requirements/requirements.py index a9ba2c235f2..1bbaf3547d9 100644 --- a/tests/testflows/datetime64_extended_range/requirements/requirements.py +++ b/tests/testflows/datetime64_extended_range/requirements/requirements.py @@ -9,1631 +9,1730 @@ from testflows.core import Requirement Heading = Specification.Heading RQ_SRS_010_DateTime64_ExtendedRange = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support extended range for the [DateTime64] data type that includes dates from the year **1925** to **2238**.\n' - '\n' - ), + "[ClickHouse] SHALL support extended range for the [DateTime64] data type that includes dates from the year **1925** to **2238**.\n" + "\n" + ), link=None, level=4, - num='4.1.0.1') + num="4.1.0.1", +) RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_Start = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper time handling around the normal date range that starts at `1970-01-01 00:00:00.000`\n' - 'expressed using the [ISO 8601 format].\n' - '\n' - ), + "[ClickHouse] SHALL support proper time handling around the normal date range that starts at `1970-01-01 00:00:00.000`\n" + "expressed using the [ISO 8601 format].\n" + "\n" + ), link=None, level=4, - num='4.1.0.2') + num="4.1.0.2", +) RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_Start_BeforeEpochForTimeZone = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start.BeforeEpochForTimeZone', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start.BeforeEpochForTimeZone", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper time handling around the start of the [normal date range]\n' - 'when this time for the time zone is before the start of the [normal date range].\n' - '\n' - ), + "[ClickHouse] SHALL support proper time handling around the start of the [normal date range]\n" + "when this time for the time zone is before the start of the [normal date range].\n" + "\n" + ), link=None, level=4, - num='4.1.0.3') + num="4.1.0.3", +) RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_End = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper time handling around the normal date range that ends at `2105-12-31T23:59:59.99999`\n' - 'expressed using the [ISO 8601 format].\n' - '\n' - ), + "[ClickHouse] SHALL support proper time handling around the normal date range that ends at `2105-12-31T23:59:59.99999`\n" + "expressed using the [ISO 8601 format].\n" + "\n" + ), link=None, level=4, - num='4.1.0.4') + num="4.1.0.4", +) RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_End_AfterEpochForTimeZone = Requirement( 
- name='RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End.AfterEpochForTimeZone', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End.AfterEpochForTimeZone", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper time handling around the end of the [normal date range]\n' - 'when this time for the time zone is after the end of the [normal date range].\n' - '\n' - ), + "[ClickHouse] SHALL support proper time handling around the end of the [normal date range]\n" + "when this time for the time zone is after the end of the [normal date range].\n" + "\n" + ), link=None, level=4, - num='4.1.0.5') + num="4.1.0.5", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper conversion to and from [DateTime64] data type from other data types.\n' - '\n' - ), + "[ClickHouse] SHALL support proper conversion to and from [DateTime64] data type from other data types.\n" + "\n" + ), link=None, level=4, - num='4.1.0.6') + num="4.1.0.6", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [Dates and Times Functions] with the [DateTime64] data type\n' - 'when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [Dates and Times Functions] with the [DateTime64] data type\n" + "when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=4, - num='4.1.0.7') + num="4.1.0.7", +) RQ_SRS_010_DateTime64_ExtendedRange_TimeZones = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TimeZones', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TimeZones", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation with the [DateTime64] extended range data type\n' - 'when combined with a supported time zone.\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation with the [DateTime64] extended range data type\n" + "when combined with a supported time zone.\n" + "\n" + ), link=None, level=4, - num='4.1.0.8') + num="4.1.0.8", +) RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper handling of non-existent times when using [DateTime64] extended range data type.\n' - '\n' - ), + "[ClickHouse] SHALL support proper handling of non-existent times when using [DateTime64] extended range data type.\n" + "\n" + ), link=None, level=4, - num='4.1.0.9') + num="4.1.0.9", +) RQ_SRS_010_DateTime64_ExtendedRange_Comparison = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.Comparison', - version='1.0', + 
name="RQ.SRS-010.DateTime64.ExtendedRange.Comparison", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper handling of time comparison when using [DateTime64] extended range data type.\n' + "[ClickHouse] SHALL support proper handling of time comparison when using [DateTime64] extended range data type.\n" "For example, `SELECT toDateTime64('2019-05-05 20:20:12.050', 3) < now()`.\n" - '\n' - ), + "\n" + ), link=None, level=4, - num='4.1.0.10') + num="4.1.0.10", +) RQ_SRS_010_DateTime64_ExtendedRange_SpecificTimestamps = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.SpecificTimestamps', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.SpecificTimestamps", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL properly work with the following timestamps in all supported timezones:\n' - '```\n' - '[9961200,73476000,325666800,354675600,370400400,386125200,388566010,401850000,417574811,496803600,528253200,624423614,636516015,671011200,717555600,752047218,859683600,922582800,1018173600,1035705600,1143334800,1162105223,1174784400,1194156000,1206838823,1224982823,1236495624,1319936400,1319936424,1425798025,1459040400,1509872400,2090451627,2140668000]\n' - '```\n' - '\n' - '\n' - ), + "[ClickHouse] SHALL properly work with the following timestamps in all supported timezones:\n" + "```\n" + "[9961200,73476000,325666800,354675600,370400400,386125200,388566010,401850000,417574811,496803600,528253200,624423614,636516015,671011200,717555600,752047218,859683600,922582800,1018173600,1035705600,1143334800,1162105223,1174784400,1194156000,1206838823,1224982823,1236495624,1319936400,1319936424,1425798025,1459040400,1509872400,2090451627,2140668000]\n" + "```\n" + "\n" + "\n" + ), link=None, level=4, - num='4.1.0.11') + num="4.1.0.11", +) RQ_SRS_010_DateTime64_ExtendedRange_Start = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.Start', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.Start", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support extended range for the [DateTime64] data type that starts at `1925-01-01T00:00:00.000000`\n' - 'expressed using the [ISO 8601 format].\n' - '\n' - ), + "[ClickHouse] SHALL support extended range for the [DateTime64] data type that starts at `1925-01-01T00:00:00.000000`\n" + "expressed using the [ISO 8601 format].\n" + "\n" + ), link=None, level=4, - num='4.2.0.1') + num="4.2.0.1", +) RQ_SRS_010_DateTime64_ExtendedRange_End = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.End', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.End", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support extended range for the [DateTime64] data type that ends at `2238-12-31T23:59:59.999999`\n' - 'expressed using the [ISO 8601 format].\n' - '\n' - ), + "[ClickHouse] SHALL support extended range for the [DateTime64] data type that ends at `2238-12-31T23:59:59.999999`\n" + "expressed using the [ISO 8601 format].\n" + "\n" + ), link=None, level=4, - num='4.2.0.2') + num="4.2.0.2", +) RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_InvalidDate = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidDate', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidDate", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 
'[ClickHouse] SHALL support proper handling of invalid dates when using [DateTime64] extended range data type,\n' - 'such as:\n' - '\n' - '* `YYYY-04-31, YYYY-06-31, YYYY-09-31, YYYY-11-31`\n' - '* `1990-02-30 00:00:02`\n' - '\n' - ), + "[ClickHouse] SHALL support proper handling of invalid dates when using [DateTime64] extended range data type,\n" + "such as:\n" + "\n" + "* `YYYY-04-31, YYYY-06-31, YYYY-09-31, YYYY-11-31`\n" + "* `1990-02-30 00:00:02`\n" + "\n" + ), link=None, level=5, - num='4.2.0.3.1') + num="4.2.0.3.1", +) RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_InvalidTime = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidTime', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidTime", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper handling of invalid time for a timezone\n' - 'when using [DateTime64] extended range data type, for example,\n' - '\n' - '* `2002-04-07 02:30:00` never happened at all in the US/Eastern timezone ([Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime))\n' - '\n' - '\n' - ), + "[ClickHouse] SHALL support proper handling of invalid time for a timezone\n" + "when using [DateTime64] extended range data type, for example,\n" + "\n" + "* `2002-04-07 02:30:00` never happened at all in the US/Eastern timezone ([Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime))\n" + "\n" + "\n" + ), link=None, level=5, - num='4.2.0.3.2') + num="4.2.0.3.2", +) RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_TimeZoneSwitch = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.TimeZoneSwitch', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.TimeZoneSwitch", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper handling of invalid time when using [DateTime64] extended range data type\n' - 'when the invalid time is caused when *countries switch timezone definitions with no\n' - 'daylight savings time switch* [Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime).\n' - '\n' - '>\n' - '> For example, in 1915 Warsaw switched from Warsaw time to Central European time with\n' - '> no daylight savings transition. So at the stroke of midnight on August 5th 1915 the clocks\n' - '> were wound back 24 minutes creating an ambiguous time period that cannot be specified without\n' - '> referring to the timezone abbreviation or the actual UTC offset. In this case midnight happened twice,\n' - '> neither time during a daylight saving time period. pytz handles this transition by treating the ambiguous\n' - '> period before the switch as daylight savings time, and the ambiguous period after as standard time.\n' - '>\n' - '> [Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime)\n' - '\n' - ), + "[ClickHouse] SHALL support proper handling of invalid time when using [DateTime64] extended range data type\n" + "when the invalid time is caused when *countries switch timezone definitions with no\n" + "daylight savings time switch* [Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime).\n" + "\n" + ">\n" + "> For example, in 1915 Warsaw switched from Warsaw time to Central European time with\n" + "> no daylight savings transition. 
So at the stroke of midnight on August 5th 1915 the clocks\n" + "> were wound back 24 minutes creating an ambiguous time period that cannot be specified without\n" + "> referring to the timezone abbreviation or the actual UTC offset. In this case midnight happened twice,\n" + "> neither time during a daylight saving time period. pytz handles this transition by treating the ambiguous\n" + "> period before the switch as daylight savings time, and the ambiguous period after as standard time.\n" + ">\n" + "> [Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime)\n" + "\n" + ), link=None, level=5, - num='4.2.0.3.3') + num="4.2.0.3.3", +) RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_DaylightSavingTime = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper handling of invalid time when using [DateTime64] extended range data type\n' - 'when for a given timezone time switches from standard to daylight saving.\n' - '\n' - '> For example, in the US/Eastern timezone on the last Sunday morning in October, the following sequence happens:\n' - '>\n' - '> 01:00 EDT occurs\n' - '> 1 hour later, instead of 2:00am the clock is turned back 1 hour and 01:00 happens again (this time 01:00 EST)\n' - '> In fact, every instant between 01:00 and 02:00 occurs twice.\n' - '> [Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime)\n' - '\n' - ), + "[ClickHouse] SHALL support proper handling of invalid time when using [DateTime64] extended range data type\n" + "when for a given timezone time switches from standard to daylight saving.\n" + "\n" + "> For example, in the US/Eastern timezone on the last Sunday morning in October, the following sequence happens:\n" + ">\n" + "> 01:00 EDT occurs\n" + "> 1 hour later, instead of 2:00am the clock is turned back 1 hour and 01:00 happens again (this time 01:00 EST)\n" + "> In fact, every instant between 01:00 and 02:00 occurs twice.\n" + "> [Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime)\n" + "\n" + ), link=None, level=5, - num='4.2.0.3.4') + num="4.2.0.3.4", +) RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_DaylightSavingTime_Disappeared = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime.Disappeared', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime.Disappeared", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper handling of invalid time when using [DateTime64] extended range data type\n' - 'for a given timezone when transition from the standard to daylight saving time causes an hour to disappear.\n' - '\n' + "[ClickHouse] SHALL support proper handling of invalid time when using [DateTime64] extended range data type\n" + "for a given timezone when transition from the standard to daylight saving time causes an hour to disappear.\n" + "\n" "Expected behavior: if DateTime64 initialized by a skipped time value, it is being treated as DST and resulting value will be an hour earlier, e.g. 
`SELECT toDateTime64('2020-03-08 02:34:00', 0, 'America/Denver')` returns `2020-03-08 01:34:00`.\n" - '\n' - ), + "\n" + ), link=None, level=5, - num='4.2.0.3.5') + num="4.2.0.3.5", +) RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_LeapSeconds = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.LeapSeconds', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.LeapSeconds", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support proper handling of leap seconds adjustments when using [DateTime64] extended range data type.\n' - '\n' - ), + "[ClickHouse] SHALL support proper handling of leap seconds adjustments when using [DateTime64] extended range data type.\n" + "\n" + ), link=None, level=5, - num='4.2.0.3.6') + num="4.2.0.3.6", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toTimeZone = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTimeZone', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTimeZone", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toTimeZone](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#totimezone)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toTimeZone](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#totimezone)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.1') + num="4.2.0.4.1", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toYear = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYear', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYear", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toYear](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toyear)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toYear](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toyear)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.2') + num="4.2.0.4.2", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toQuarter = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toQuarter', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toQuarter", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toQuarter](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toquarter)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the 
[toQuarter](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toquarter)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.3') + num="4.2.0.4.3", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toMonth = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonth', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonth", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toMonth](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tomonth)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toMonth](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tomonth)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.4') + num="4.2.0.4.4", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toDayOfYear = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfYear', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfYear", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toDayOfYear](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#todayofyear)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toDayOfYear](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#todayofyear)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.5') + num="4.2.0.4.5", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toDayOfMonth = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfMonth', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfMonth", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toDayOfMonth](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#todayofmonth)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toDayOfMonth](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#todayofmonth)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.6') + num="4.2.0.4.6", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toDayOfWeek = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfWeek', - version='1.0', + 
name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfWeek", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toDayOfWeek](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#todayofweek)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toDayOfWeek](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#todayofweek)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.7') + num="4.2.0.4.7", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toHour = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toHour', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toHour", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toHour](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tohour)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toHour](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tohour)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.8') + num="4.2.0.4.8", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toMinute = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMinute', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMinute", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toMinute](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tominute)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toMinute](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tominute)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.9') + num="4.2.0.4.9", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toSecond = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toSecond', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toSecond", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toSecond](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tosecond)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the 
[toSecond](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tosecond)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.10') + num="4.2.0.4.10", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toUnixTimestamp = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toUnixTimestamp', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toUnixTimestamp", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toUnitTimestamp](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#to-unix-timestamp)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - 'Timestamp value expected to be negative when DateTime64 value is prior to `1970-01-01` and positine otherwise.\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toUnixTimestamp](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#to-unix-timestamp)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "Timestamp value is expected to be negative when DateTime64 value is prior to `1970-01-01` and positive otherwise.\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.11') + num="4.2.0.4.11", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfYear = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfYear', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfYear", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfYear](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofyear)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toStartOfYear](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofyear)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.12') + num="4.2.0.4.12", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfISOYear = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfISOYear', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfISOYear", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfISOYear](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofisoyear)\n' - 'function used with the [DateTime64] data type when it stores dates 
within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.13') + num="4.2.0.4.13", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfQuarter = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfQuarter', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfQuarter", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfQuarter](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofquarter)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toStartOfQuarter](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofquarter)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.14') + num="4.2.0.4.14", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfMonth = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMonth', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMonth", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfMonth](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofmonth)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toStartOfMonth](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofmonth)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.15') + num="4.2.0.4.15", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toMonday = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonday', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonday", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toMonday](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tomonday)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toMonday](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tomonday)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.16') + num="4.2.0.4.16", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfWeek = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfWeek', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfWeek", + version="1.0", priority=None, 
group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfWeek](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofweektmode)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toStartOfWeek](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofweektmode)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.17') + num="4.2.0.4.17", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfDay = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfDay', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfDay", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfDay](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofday)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toStartOfDay](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofday)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.18') + num="4.2.0.4.18", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfHour = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfHour', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfHour", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfHour](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofhour)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toStartOfHour](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofhour)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.19') + num="4.2.0.4.19", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfMinute = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMinute', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMinute", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfMinute](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofminute)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the 
[toStartOfMinute](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofminute)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.20') + num="4.2.0.4.20", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfSecond = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfSecond', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfSecond", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfSecond](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofsecond)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toStartOfSecond](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofsecond)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.21') + num="4.2.0.4.21", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFiveMinute = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfFiveMinute](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoffiveminute)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toStartOfFiveMinute](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoffiveminute)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.22') + num="4.2.0.4.22", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfTenMinutes = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfTenMinutes', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfTenMinutes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfTenMinutes](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoftenminutes)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toStartOfTenMinutes](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoftenminutes)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.23') + num="4.2.0.4.23", +) 
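The `toStartOf*` requirements above all follow the same pattern: truncation to the start of an interval must keep working when the DateTime64 value lies in the extended range, for example before `1970-01-01`. Below is a minimal sketch of the reference computation such a check implies, using only the Python standard library; the helper name, sample value, and printed labels are illustrative assumptions and are not taken from the test suite in this diff.

```python
# Sketch only: a stdlib-based reference for interval truncation on an extended
# range value (1930-01-01 is before the 1970-01-01 start of the normal range).
from datetime import datetime, timedelta


def to_start_of_n_minutes(dt: datetime, minutes: int) -> datetime:
    """Truncate dt down to the start of its N-minute interval."""
    truncated = dt.replace(second=0, microsecond=0)
    return truncated - timedelta(minutes=truncated.minute % minutes)


value = datetime(1930, 1, 1, 12, 34, 56, 789000)
for n in (5, 10, 15):
    # The n=5 case corresponds to a query such as
    # SELECT toStartOfFiveMinute(toDateTime64('1930-01-01 12:34:56.789', 3, 'UTC'))
    print(n, to_start_of_n_minutes(value, n))
```

A scenario along the lines of those elsewhere in this diff would compare the server output of the corresponding SQL call against a reference value computed this way.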
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFifteenMinutes = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFifteenMinutes', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFifteenMinutes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfFifteenMinutes](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoffifteenminutes)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toStartOfFifteenMinutes](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoffifteenminutes)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.24') + num="4.2.0.4.24", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfInterval = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfInterval', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfInterval", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toStartOfInterval](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofintervaltime-or-data-interval-x-unit-time-zone)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - 'More detailed description can be found [here](https://github.com/ClickHouse/ClickHouse/issues/1201).\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toStartOfInterval](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofintervaltime-or-data-interval-x-unit-time-zone)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "More detailed description can be found [here](https://github.com/ClickHouse/ClickHouse/issues/1201).\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.25') + num="4.2.0.4.25", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toTime = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTime', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTime", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toTime](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#totime)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toTime](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#totime)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.26') + num="4.2.0.4.26", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeYearNum = Requirement( - 
name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeYearNum', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeYearNum", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toRelativeYearNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativeyearnum)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toRelativeYearNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativeyearnum)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.27') + num="4.2.0.4.27", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeQuarterNum = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeQuarterNum', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeQuarterNum", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toRelativeQuarterNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativequarternum)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toRelativeQuarterNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativequarternum)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.28') + num="4.2.0.4.28", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeMonthNum = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMonthNum', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMonthNum", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toRelativeMonthNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativemonthnum)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toRelativeMonthNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativemonthnum)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.29') + num="4.2.0.4.29", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeWeekNum = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeWeekNum', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeWeekNum", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the 
[toRelativeWeekNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativeweeknum)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toRelativeWeekNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativeweeknum)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.30') + num="4.2.0.4.30", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeDayNum = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeDayNum', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeDayNum", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toRelativeDayNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativedaynum)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toRelativeDayNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativedaynum)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.31') + num="4.2.0.4.31", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeHourNum = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeHourNum', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeHourNum", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toRelativeHourNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativehournum)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toRelativeHourNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativehournum)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.32') + num="4.2.0.4.32", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeMinuteNum = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMinuteNum', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMinuteNum", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toRelativeMinuteNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativeminutenum)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the 
[toRelativeMinuteNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativeminutenum)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.33') + num="4.2.0.4.33", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeSecondNum = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeSecondNum', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeSecondNum", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toRelativeSecondNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativesecondnum)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toRelativeSecondNum](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#torelativesecondnum)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.34') + num="4.2.0.4.34", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toISOYear = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOYear', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOYear", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toISOYear](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toisoyear)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toISOYear](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toisoyear)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.35') + num="4.2.0.4.35", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toISOWeek = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOWeek', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOWeek", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toISOWeek](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toisoweek)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toISOWeek](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toisoweek)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.36') + num="4.2.0.4.36", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toWeek = Requirement( - 
name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toWeek', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toWeek", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toWeek](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toweekdatemode)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toWeek](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toweekdatemode)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.37') + num="4.2.0.4.37", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toYearWeek = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYearWeek', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYearWeek", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toYearWeek](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toyearweekdatemode)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toYearWeek](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toyearweekdatemode)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.38') + num="4.2.0.4.38", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_now = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.now', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.now", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support conversion of output from the [now](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#now)\n' - 'function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support conversion of output from the [now](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#now)\n" + "function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.39') + num="4.2.0.4.39", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_today = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.today', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.today", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support conversion of output from the [today](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#today)\n' - 'function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] 
SHALL support conversion of output from the [today](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#today)\n" + "function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.40') + num="4.2.0.4.40", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_yesterday = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.yesterday', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.yesterday", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support conversion of output from the [yesterday](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#yesterday)\n' - 'function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support conversion of output from the [yesterday](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#yesterday)\n" + "function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.41') + num="4.2.0.4.41", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_timeSlot = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlot', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlot", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support conversion of output from the [timeSlot](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#timeslot)\n' - 'function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support conversion of output from the [timeSlot](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#timeslot)\n" + "function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.42') + num="4.2.0.4.42", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toYYYYMM = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMM', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMM", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toYYYYMM](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toyyyymm)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toYYYYMM](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toyyyymm)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.43') + num="4.2.0.4.43", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toYYYYMMDD = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDD', - version='1.0', + 
name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDD", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toYYYYMMDD](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toyyyymmdd)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toYYYYMMDD](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toyyyymmdd)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.44') + num="4.2.0.4.44", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toYYYYMMDDhhmmss = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDDhhmmss', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDDhhmmss", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [toYYYYMMDDhhmmss](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toyyyymmddhhmmss)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [toYYYYMMDDhhmmss](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#toyyyymmddhhmmss)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.45') + num="4.2.0.4.45", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_addYears = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addYears', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addYears", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [addYears](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [addYears](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.46') + num="4.2.0.4.46", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_addMonths = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMonths', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMonths", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the 
[addMonths](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [addMonths](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.47') + num="4.2.0.4.47", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_addWeeks = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addWeeks', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addWeeks", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [addWeeks](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [addWeeks](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.48') + num="4.2.0.4.48", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_addDays = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addDays', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addDays", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [addDays](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [addDays](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.49') + num="4.2.0.4.49", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_addHours = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addHours', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addHours", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the 
[addHours](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [addHours](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.50') + num="4.2.0.4.50", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_addMinutes = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMinutes', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMinutes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [addMinutes](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [addMinutes](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.51') + num="4.2.0.4.51", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_addSeconds = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addSeconds', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addSeconds", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [addSeconds](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [addSeconds](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.52') + num="4.2.0.4.52", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_addQuarters = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addQuarters', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addQuarters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the 
[addQuarters](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [addQuarters](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.53') + num="4.2.0.4.53", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_subtractYears = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractYears', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractYears", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [subtractYears](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [subtractYears](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.54') + num="4.2.0.4.54", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_subtractMonths = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMonths', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMonths", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [subtractMonths](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [subtractMonths](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.55') + num="4.2.0.4.55", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_subtractWeeks = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractWeeks', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractWeeks", + version="1.0", 
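The addYears…addQuarters and subtractYears…subtractQuarters requirements around this point all describe plain date arithmetic over DateTime64 values. As an editorial illustration (not part of the patch), a minimal sketch of the kind of query they cover, assuming a local ClickHouse server on the default port and the third-party clickhouse-driver package:

# Editorial sketch: exercises two of the functions named in the surrounding
# requirements. Assumes ClickHouse is reachable on localhost:9000.
from clickhouse_driver import Client

client = Client("localhost")
rows = client.execute(
    "SELECT addWeeks(toDateTime64('2020-01-01 00:00:00', 3), 2), "
    "       subtractDays(toDateTime64('2020-01-01 00:00:00', 3), 1)"
)
print(rows)  # expected: one row containing 2020-01-15 and 2019-12-31 timestamps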
priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [subtractWeeks](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [subtractWeeks](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.56') + num="4.2.0.4.56", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_subtractDays = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractDays', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractDays", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [subtractDays](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [subtractDays](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.57') + num="4.2.0.4.57", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_subtractHours = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractHours', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractHours", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [subtractHours](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [subtractHours](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.58') + num="4.2.0.4.58", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_subtractMinutes = Requirement( - 
name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMinutes', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMinutes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [subtractMinutes](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [subtractMinutes](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.59') + num="4.2.0.4.59", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_subtractSeconds = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractSeconds', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractSeconds", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [subtractSeconds](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [subtractSeconds](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.60') + num="4.2.0.4.60", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_subtractQuarters = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractQuarters', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractQuarters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [subtractQuarters](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [subtractQuarters](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)\n" + "function used with the [DateTime64] data type when it stores dates within the 
[normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.61') + num="4.2.0.4.61", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_dateDiff = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.dateDiff', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.dateDiff", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [dateDiff](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#datediff)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [dateDiff](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#datediff)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.62') + num="4.2.0.4.62", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_timeSlots = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlots', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlots", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [timeSlots](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#timeslotsstarttime-duration-size)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [timeSlots](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#timeslotsstarttime-duration-size)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.63') + num="4.2.0.4.63", +) RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_formatDateTime = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.formatDateTime', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.formatDateTime", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of the [formatDateTime](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#formatdatetime)\n' - 'function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of the [formatDateTime](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#formatdatetime)\n" + "function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + "\n" + ), link=None, level=5, - num='4.2.0.4.64') + num="4.2.0.4.64", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toInt_8_16_32_64_128_256_ = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toInt(8|16|32|64|128|256)', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toInt(8|16|32|64|128|256)", 
+ version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'to integer types using [toInt(8|16|32|64|128|256)](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#toint8163264128256) functions.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "to integer types using [toInt(8|16|32|64|128|256)](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#toint8163264128256) functions.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.1') + num="4.2.1.4.1", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUInt_8_16_32_64_256_ = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUInt(8|16|32|64|256)', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUInt(8|16|32|64|256)", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'to unsigned integer types using [toUInt(8|16|32|64|256)](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#touint8163264256) functions.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "to unsigned integer types using [toUInt(8|16|32|64|256)](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#touint8163264256) functions.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.2') + num="4.2.1.4.2", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toFloat_32_64_ = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toFloat(32|64)', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toFloat(32|64)", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'to float types using [toFloat(32|64)](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#tofloat3264) functions.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "to float types using [toFloat(32|64)](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#tofloat3264) functions.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.3') + num="4.2.1.4.3", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDate = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDate', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDate", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range]\n' - 'to the 
[Date](https://clickhouse.com/docs/en/sql-reference/data-types/date/) type using the [toDate](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#todate) function.\n' - 'This function is ONLY supposed to work in NORMAL RANGE.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range]\n" + "to the [Date](https://clickhouse.com/docs/en/sql-reference/data-types/date/) type using the [toDate](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#todate) function.\n" + "This function is ONLY supposed to work in NORMAL RANGE.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.4') + num="4.2.1.4.4", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDateTime = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'to the [DateTime](https://clickhouse.com/docs/en/sql-reference/data-types/datetime/) type using the [toDateTime](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#todatetime) function.\n' - 'This function is ONLY supposed to work in NORMAL RANGE.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "to the [DateTime](https://clickhouse.com/docs/en/sql-reference/data-types/datetime/) type using the [toDateTime](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#todatetime) function.\n" + "This function is ONLY supposed to work in NORMAL RANGE.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.5') + num="4.2.1.4.5", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDateTime64 = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion from the data types supported by the [toDateTime64](https://clickhouse.com/docs/en/sql-reference/data-types/datetime64/) function\n' - 'to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion from the data types supported by the [toDateTime64](https://clickhouse.com/docs/en/sql-reference/data-types/datetime64/) function\n" + "to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.6') + num="4.2.1.4.6", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDateTime64_FromString_MissingTime = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64.FromString.MissingTime', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64.FromString.MissingTime", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL 
support correct conversion from the [String](https://clickhouse.com/docs/en/sql-reference/data-types/string/)\n' - 'data type to the [DateTime64](https://clickhouse.com/docs/en/sql-reference/data-types/datetime64/) data type\n' - 'when value of the string is missing the `hh:mm-ss.sss` part.\n' + "[ClickHouse] SHALL support correct conversion from the [String](https://clickhouse.com/docs/en/sql-reference/data-types/string/)\n" + "data type to the [DateTime64](https://clickhouse.com/docs/en/sql-reference/data-types/datetime64/) data type\n" + "when value of the string is missing the `hh:mm-ss.sss` part.\n" "For example, `toDateTime64('2020-01-01', 3)`.\n" - '\n' - ), + "\n" + ), link=None, level=5, - num='4.2.1.4.7') + num="4.2.1.4.7", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDecimal_32_64_128_256_ = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDecimal(32|64|128|256)', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDecimal(32|64|128|256)", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'to [Decimal](https://clickhouse.com/docs/en/sql-reference/data-types/decimal/) types using [toDecimal(32|64|128|256)](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#todecimal3264128256) functions.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "to [Decimal](https://clickhouse.com/docs/en/sql-reference/data-types/decimal/) types using [toDecimal(32|64|128|256)](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#todecimal3264128256) functions.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.8') + num="4.2.1.4.8", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toString = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toString', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toString", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'to the [String](https://clickhouse.com/docs/en/sql-reference/data-types/string/) type using the [toString](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#tostring) function.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "to the [String](https://clickhouse.com/docs/en/sql-reference/data-types/string/) type using the [toString](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#tostring) function.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.9') + num="4.2.1.4.9", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_ = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.CAST(x,T)', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.CAST(x,T)", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 
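The FromString.MissingTime requirement above already names its canonical case, toDateTime64('2020-01-01', 3), where the `hh:mm:ss.sss` part is absent from the input string. A hedged way to observe that behaviour from Python, again assuming a local server and the clickhouse-driver package (editorial example, not part of the diff):

# Editorial sketch: a date-only string should parse with a zero time part.
from clickhouse_driver import Client

client = Client("localhost")
(value,), = client.execute("SELECT toDateTime64('2020-01-01', 3)")
print(value)  # expected: 2020-01-01 00:00:00 as a datetime object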
'[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'to one of the supported data type using the [CAST(x,T)](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) function.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "to one of the supported data type using the [CAST(x,T)](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) function.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.10') + num="4.2.1.4.10", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Milli = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Milli', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Milli", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'to the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type using the [toUnixTimestamp64Milli](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#tounixtimestamp64milli) function.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "to the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type using the [toUnixTimestamp64Milli](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#tounixtimestamp64milli) function.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.11') + num="4.2.1.4.11", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Micro = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Micro', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Micro", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'to the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type using the [toUnixTimestamp64Micro](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#tounixtimestamp64micro) function.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "to the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type using the [toUnixTimestamp64Micro](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#tounixtimestamp64micro) function.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.12') + num="4.2.1.4.12", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Nano = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Nano', - version='1.0', 
+ name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Nano", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'to the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type using the [toUnixTimestamp64Nano](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#tounixtimestamp64nano) function.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "to the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type using the [toUnixTimestamp64Nano](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#tounixtimestamp64nano) function.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.13') + num="4.2.1.4.13", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Milli = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Milli', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Milli", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion from the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type\n' - 'to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'using the [fromUnixTimestamp64Milli](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#fromunixtimestamp64milli) function.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion from the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type\n" + "to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "using the [fromUnixTimestamp64Milli](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#fromunixtimestamp64milli) function.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.14') + num="4.2.1.4.14", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Micro = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Micro', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Micro", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion from the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type\n' - 'to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'using the [fromUnixTimestamp64Micro](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#fromunixtimestamp64micro) function.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion from the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type\n" + "to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "using the 
[fromUnixTimestamp64Micro](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#fromunixtimestamp64micro) function.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.15') + num="4.2.1.4.15", +) RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Nano = Requirement( - name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Nano', - version='1.0', + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Nano", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct conversion from the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type\n' - 'to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n' - 'using the [fromUnixTimestamp64Nano](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#fromunixtimestamp64nano) function.\n' - '\n' - ), + "[ClickHouse] SHALL support correct conversion from the [Int64](https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/) type\n" + "to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]\n" + "using the [fromUnixTimestamp64Nano](https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#fromunixtimestamp64nano) function.\n" + "\n" + ), link=None, level=5, - num='4.2.1.4.16') + num="4.2.1.4.16", +) SRS_010_ClickHouse_DateTime64_Extended_Range = Specification( - name='SRS-010 ClickHouse DateTime64 Extended Range', + name="SRS-010 ClickHouse DateTime64 Extended Range", description=None, author=None, - date=None, - status=None, + date=None, + status=None, approved_by=None, approved_date=None, approved_version=None, @@ -1645,119 +1744,503 @@ SRS_010_ClickHouse_DateTime64_Extended_Range = Specification( parent=None, children=None, headings=( - Heading(name='Revision History', level=1, num='1'), - Heading(name='Introduction', level=1, num='2'), - Heading(name='Terminology', level=1, num='3'), - Heading(name='SRS', level=2, num='3.1'), - Heading(name='Normal Date Range', level=2, num='3.2'), - Heading(name='Extended Date Range', level=2, num='3.3'), - Heading(name='Requirements', level=1, num='4'), - Heading(name='Generic', level=2, num='4.1'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange', level=4, num='4.1.0.1'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start', level=4, num='4.1.0.2'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start.BeforeEpochForTimeZone', level=4, num='4.1.0.3'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End', level=4, num='4.1.0.4'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End.AfterEpochForTimeZone', level=4, num='4.1.0.5'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions', level=4, num='4.1.0.6'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions', level=4, num='4.1.0.7'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TimeZones', level=4, num='4.1.0.8'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime', level=4, num='4.1.0.9'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.Comparison', level=4, num='4.1.0.10'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.SpecificTimestamps', level=4, num='4.1.0.11'), - Heading(name='Specific', level=2, num='4.2'), - 
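The toUnixTimestamp64Milli / fromUnixTimestamp64Milli pair described in the requirements just above is essentially the Int64-milliseconds view of a DateTime64 value. The relationship can be sketched with the standard library alone (editorial example, UTC only; ClickHouse itself also handles time zones and the Micro/Nano variants analogously):

# Millisecond epoch arithmetic analogous to toUnixTimestamp64Milli /
# fromUnixTimestamp64Milli, using exact integer math to avoid float rounding.
from datetime import datetime, timedelta, timezone

epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
dt = datetime(2020, 1, 1, 12, 30, 45, 123000, tzinfo=timezone.utc)

ms = (dt - epoch) // timedelta(milliseconds=1)   # -> 1577881845123
back = epoch + timedelta(milliseconds=ms)        # round-trips exactly
assert back == dt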
Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.Start', level=4, num='4.2.0.1'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.End', level=4, num='4.2.0.2'), - Heading(name='Non-Existent Time', level=4, num='4.2.0.3'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidDate', level=5, num='4.2.0.3.1'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidTime', level=5, num='4.2.0.3.2'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.TimeZoneSwitch', level=5, num='4.2.0.3.3'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime', level=5, num='4.2.0.3.4'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime.Disappeared', level=5, num='4.2.0.3.5'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.LeapSeconds', level=5, num='4.2.0.3.6'), - Heading(name='Dates And Times Functions', level=4, num='4.2.0.4'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTimeZone', level=5, num='4.2.0.4.1'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYear', level=5, num='4.2.0.4.2'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toQuarter', level=5, num='4.2.0.4.3'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonth', level=5, num='4.2.0.4.4'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfYear', level=5, num='4.2.0.4.5'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfMonth', level=5, num='4.2.0.4.6'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfWeek', level=5, num='4.2.0.4.7'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toHour', level=5, num='4.2.0.4.8'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMinute', level=5, num='4.2.0.4.9'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toSecond', level=5, num='4.2.0.4.10'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toUnixTimestamp', level=5, num='4.2.0.4.11'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfYear', level=5, num='4.2.0.4.12'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfISOYear', level=5, num='4.2.0.4.13'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfQuarter', level=5, num='4.2.0.4.14'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMonth', level=5, num='4.2.0.4.15'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonday', level=5, num='4.2.0.4.16'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfWeek', level=5, num='4.2.0.4.17'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfDay', level=5, num='4.2.0.4.18'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfHour', level=5, num='4.2.0.4.19'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMinute', level=5, num='4.2.0.4.20'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfSecond', level=5, num='4.2.0.4.21'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute', level=5, 
num='4.2.0.4.22'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfTenMinutes', level=5, num='4.2.0.4.23'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFifteenMinutes', level=5, num='4.2.0.4.24'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfInterval', level=5, num='4.2.0.4.25'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTime', level=5, num='4.2.0.4.26'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeYearNum', level=5, num='4.2.0.4.27'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeQuarterNum', level=5, num='4.2.0.4.28'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMonthNum', level=5, num='4.2.0.4.29'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeWeekNum', level=5, num='4.2.0.4.30'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeDayNum', level=5, num='4.2.0.4.31'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeHourNum', level=5, num='4.2.0.4.32'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMinuteNum', level=5, num='4.2.0.4.33'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeSecondNum', level=5, num='4.2.0.4.34'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOYear', level=5, num='4.2.0.4.35'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOWeek', level=5, num='4.2.0.4.36'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toWeek', level=5, num='4.2.0.4.37'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYearWeek', level=5, num='4.2.0.4.38'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.now', level=5, num='4.2.0.4.39'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.today', level=5, num='4.2.0.4.40'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.yesterday', level=5, num='4.2.0.4.41'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlot', level=5, num='4.2.0.4.42'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMM', level=5, num='4.2.0.4.43'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDD', level=5, num='4.2.0.4.44'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDDhhmmss', level=5, num='4.2.0.4.45'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addYears', level=5, num='4.2.0.4.46'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMonths', level=5, num='4.2.0.4.47'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addWeeks', level=5, num='4.2.0.4.48'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addDays', level=5, num='4.2.0.4.49'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addHours', level=5, num='4.2.0.4.50'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMinutes', level=5, num='4.2.0.4.51'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addSeconds', level=5, 
num='4.2.0.4.52'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addQuarters', level=5, num='4.2.0.4.53'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractYears', level=5, num='4.2.0.4.54'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMonths', level=5, num='4.2.0.4.55'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractWeeks', level=5, num='4.2.0.4.56'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractDays', level=5, num='4.2.0.4.57'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractHours', level=5, num='4.2.0.4.58'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMinutes', level=5, num='4.2.0.4.59'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractSeconds', level=5, num='4.2.0.4.60'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractQuarters', level=5, num='4.2.0.4.61'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.dateDiff', level=5, num='4.2.0.4.62'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlots', level=5, num='4.2.0.4.63'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.formatDateTime', level=5, num='4.2.0.4.64'), - Heading(name='Type Conversion Functions', level=3, num='4.2.1'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toInt(8|16|32|64|128|256)', level=5, num='4.2.1.4.1'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUInt(8|16|32|64|256)', level=5, num='4.2.1.4.2'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toFloat(32|64)', level=5, num='4.2.1.4.3'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDate', level=5, num='4.2.1.4.4'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime', level=5, num='4.2.1.4.5'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64', level=5, num='4.2.1.4.6'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64.FromString.MissingTime', level=5, num='4.2.1.4.7'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDecimal(32|64|128|256)', level=5, num='4.2.1.4.8'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toString', level=5, num='4.2.1.4.9'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.CAST(x,T)', level=5, num='4.2.1.4.10'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Milli', level=5, num='4.2.1.4.11'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Micro', level=5, num='4.2.1.4.12'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Nano', level=5, num='4.2.1.4.13'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Milli', level=5, num='4.2.1.4.14'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Micro', level=5, num='4.2.1.4.15'), - Heading(name='RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Nano', level=5, num='4.2.1.4.16'), - Heading(name='References', level=1, 
num='5'), + Heading(name="Revision History", level=1, num="1"), + Heading(name="Introduction", level=1, num="2"), + Heading(name="Terminology", level=1, num="3"), + Heading(name="SRS", level=2, num="3.1"), + Heading(name="Normal Date Range", level=2, num="3.2"), + Heading(name="Extended Date Range", level=2, num="3.3"), + Heading(name="Requirements", level=1, num="4"), + Heading(name="Generic", level=2, num="4.1"), + Heading(name="RQ.SRS-010.DateTime64.ExtendedRange", level=4, num="4.1.0.1"), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start", + level=4, + num="4.1.0.2", ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start.BeforeEpochForTimeZone", + level=4, + num="4.1.0.3", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End", + level=4, + num="4.1.0.4", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End.AfterEpochForTimeZone", + level=4, + num="4.1.0.5", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions", + level=4, + num="4.1.0.6", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions", + level=4, + num="4.1.0.7", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TimeZones", level=4, num="4.1.0.8" + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime", + level=4, + num="4.1.0.9", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.Comparison", + level=4, + num="4.1.0.10", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.SpecificTimestamps", + level=4, + num="4.1.0.11", + ), + Heading(name="Specific", level=2, num="4.2"), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.Start", level=4, num="4.2.0.1" + ), + Heading(name="RQ.SRS-010.DateTime64.ExtendedRange.End", level=4, num="4.2.0.2"), + Heading(name="Non-Existent Time", level=4, num="4.2.0.3"), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidDate", + level=5, + num="4.2.0.3.1", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidTime", + level=5, + num="4.2.0.3.2", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.TimeZoneSwitch", + level=5, + num="4.2.0.3.3", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime", + level=5, + num="4.2.0.3.4", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime.Disappeared", + level=5, + num="4.2.0.3.5", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.LeapSeconds", + level=5, + num="4.2.0.3.6", + ), + Heading(name="Dates And Times Functions", level=4, num="4.2.0.4"), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTimeZone", + level=5, + num="4.2.0.4.1", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYear", + level=5, + num="4.2.0.4.2", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toQuarter", + level=5, + num="4.2.0.4.3", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonth", + level=5, + num="4.2.0.4.4", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfYear", + level=5, + num="4.2.0.4.5", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfMonth", + level=5, + num="4.2.0.4.6", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfWeek", + 
level=5, + num="4.2.0.4.7", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toHour", + level=5, + num="4.2.0.4.8", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMinute", + level=5, + num="4.2.0.4.9", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toSecond", + level=5, + num="4.2.0.4.10", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toUnixTimestamp", + level=5, + num="4.2.0.4.11", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfYear", + level=5, + num="4.2.0.4.12", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfISOYear", + level=5, + num="4.2.0.4.13", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfQuarter", + level=5, + num="4.2.0.4.14", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMonth", + level=5, + num="4.2.0.4.15", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonday", + level=5, + num="4.2.0.4.16", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfWeek", + level=5, + num="4.2.0.4.17", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfDay", + level=5, + num="4.2.0.4.18", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfHour", + level=5, + num="4.2.0.4.19", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMinute", + level=5, + num="4.2.0.4.20", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfSecond", + level=5, + num="4.2.0.4.21", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute", + level=5, + num="4.2.0.4.22", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfTenMinutes", + level=5, + num="4.2.0.4.23", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFifteenMinutes", + level=5, + num="4.2.0.4.24", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfInterval", + level=5, + num="4.2.0.4.25", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTime", + level=5, + num="4.2.0.4.26", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeYearNum", + level=5, + num="4.2.0.4.27", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeQuarterNum", + level=5, + num="4.2.0.4.28", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMonthNum", + level=5, + num="4.2.0.4.29", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeWeekNum", + level=5, + num="4.2.0.4.30", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeDayNum", + level=5, + num="4.2.0.4.31", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeHourNum", + level=5, + num="4.2.0.4.32", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMinuteNum", + level=5, + num="4.2.0.4.33", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeSecondNum", + 
level=5, + num="4.2.0.4.34", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOYear", + level=5, + num="4.2.0.4.35", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOWeek", + level=5, + num="4.2.0.4.36", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toWeek", + level=5, + num="4.2.0.4.37", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYearWeek", + level=5, + num="4.2.0.4.38", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.now", + level=5, + num="4.2.0.4.39", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.today", + level=5, + num="4.2.0.4.40", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.yesterday", + level=5, + num="4.2.0.4.41", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlot", + level=5, + num="4.2.0.4.42", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMM", + level=5, + num="4.2.0.4.43", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDD", + level=5, + num="4.2.0.4.44", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDDhhmmss", + level=5, + num="4.2.0.4.45", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addYears", + level=5, + num="4.2.0.4.46", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMonths", + level=5, + num="4.2.0.4.47", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addWeeks", + level=5, + num="4.2.0.4.48", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addDays", + level=5, + num="4.2.0.4.49", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addHours", + level=5, + num="4.2.0.4.50", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMinutes", + level=5, + num="4.2.0.4.51", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addSeconds", + level=5, + num="4.2.0.4.52", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addQuarters", + level=5, + num="4.2.0.4.53", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractYears", + level=5, + num="4.2.0.4.54", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMonths", + level=5, + num="4.2.0.4.55", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractWeeks", + level=5, + num="4.2.0.4.56", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractDays", + level=5, + num="4.2.0.4.57", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractHours", + level=5, + num="4.2.0.4.58", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMinutes", + level=5, + num="4.2.0.4.59", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractSeconds", + level=5, + num="4.2.0.4.60", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractQuarters", + level=5, + num="4.2.0.4.61", + ), + Heading( + 
name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.dateDiff", + level=5, + num="4.2.0.4.62", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlots", + level=5, + num="4.2.0.4.63", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.formatDateTime", + level=5, + num="4.2.0.4.64", + ), + Heading(name="Type Conversion Functions", level=3, num="4.2.1"), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toInt(8|16|32|64|128|256)", + level=5, + num="4.2.1.4.1", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUInt(8|16|32|64|256)", + level=5, + num="4.2.1.4.2", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toFloat(32|64)", + level=5, + num="4.2.1.4.3", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDate", + level=5, + num="4.2.1.4.4", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime", + level=5, + num="4.2.1.4.5", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64", + level=5, + num="4.2.1.4.6", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64.FromString.MissingTime", + level=5, + num="4.2.1.4.7", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDecimal(32|64|128|256)", + level=5, + num="4.2.1.4.8", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toString", + level=5, + num="4.2.1.4.9", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.CAST(x,T)", + level=5, + num="4.2.1.4.10", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Milli", + level=5, + num="4.2.1.4.11", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Micro", + level=5, + num="4.2.1.4.12", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Nano", + level=5, + num="4.2.1.4.13", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Milli", + level=5, + num="4.2.1.4.14", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Micro", + level=5, + num="4.2.1.4.15", + ), + Heading( + name="RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Nano", + level=5, + num="4.2.1.4.16", + ), + Heading(name="References", level=1, num="5"), + ), requirements=( RQ_SRS_010_DateTime64_ExtendedRange, RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_Start, @@ -1858,8 +2341,8 @@ SRS_010_ClickHouse_DateTime64_Extended_Range = Specification( RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Milli, RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Micro, RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Nano, - ), - content=''' + ), + content=""" # SRS-010 ClickHouse DateTime64 Extended Range # Software Requirements Specification @@ -2665,4 +3148,5 @@ using the [fromUnixTimestamp64Nano](https://clickhouse.com/docs/en/sql-reference [Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/datetime64_extended_range/requirements/requirements.md [Git]: https://git-scm.com/ [GitHub]: https://github.com -''') 
+""", +) diff --git a/tests/testflows/datetime64_extended_range/tests/common.py b/tests/testflows/datetime64_extended_range/tests/common.py index c3bee076bf4..1154cf21b92 100644 --- a/tests/testflows/datetime64_extended_range/tests/common.py +++ b/tests/testflows/datetime64_extended_range/tests/common.py @@ -8,30 +8,31 @@ from datetime64_extended_range.common import * def in_normal_range(dt: datetime.datetime): - """Check if DateTime is in normal range - """ - return dt <= datetime.datetime(2105, 12, 31, 23, 59, 59, 999999) and dt >= datetime.datetime(1970, 1, 1, 0, 0, 0) + """Check if DateTime is in normal range""" + return dt <= datetime.datetime( + 2105, 12, 31, 23, 59, 59, 999999 + ) and dt >= datetime.datetime(1970, 1, 1, 0, 0, 0) def years_range(stress=False, padding=(0, 0)): - """Returns a set of year values used for testing. - """ - return range(1925+padding[0], 2283-padding[1]) if stress else (1927, 2000, 2281) + """Returns a set of year values used for testing.""" + return range(1925 + padding[0], 2283 - padding[1]) if stress else (1927, 2000, 2281) def timezones_range(stress=False): - """Returns a set of timezone values used for testing. - """ + """Returns a set of timezone values used for testing.""" if stress: return pytz.all_timezones else: - return ['UTC', 'Asia/Novosibirsk', 'America/Denver'] + return ["UTC", "Asia/Novosibirsk", "America/Denver"] @contextmanager def create_table(timezone, node): try: - node.query(f"CREATE TABLE dt(timestamp DateTime64(3, {timezone})) Engine = TinyLog") + node.query( + f"CREATE TABLE dt(timestamp DateTime64(3, {timezone})) Engine = TinyLog" + ) yield finally: node.query("DROP TABLE dt") @@ -49,15 +50,16 @@ def insert_check_datetime(self, datetime, expected, precision=0, timezone="UTC") """ with create_table(timezone, self.context.node): with When("I use toDateTime64"): - r = self.context.node.query(f"SELECT toDateTime64('{datetime}', {precision}, '{timezone}')") + r = self.context.node.query( + f"SELECT toDateTime64('{datetime}', {precision}, '{timezone}')" + ) with Then(f"I expect {expected}"): assert r.output == expected, error() def datetime_generator(year, microseconds=False): - """Helper generator - """ + """Helper generator""" date = datetime.datetime(year, 1, 1, 0, 0, 0) if microseconds: date = datetime.datetime(year, 1, 1, 0, 0, 0, 123000) @@ -67,12 +69,17 @@ def datetime_generator(year, microseconds=False): def select_dates_in_year(year, stress=False, microseconds=False): - """Returns various datetimes in a year that are to be checked - """ + """Returns various datetimes in a year that are to be checked""" if not stress: - dates = [datetime.datetime(year, 1, 1, 0, 0, 0), datetime.datetime(year, 12, 31, 23, 59, 59)] + dates = [ + datetime.datetime(year, 1, 1, 0, 0, 0), + datetime.datetime(year, 12, 31, 23, 59, 59), + ] if microseconds: - dates = [datetime.datetime(year, 1, 1, 0, 0, 0, 123000), datetime.datetime(year, 12, 31, 23, 59, 59, 123000)] + dates = [ + datetime.datetime(year, 1, 1, 0, 0, 0, 123000), + datetime.datetime(year, 12, 31, 23, 59, 59, 123000), + ] if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0): dates.append(datetime.datetime(year, 2, 29, 11, 11, 11, 123000)) return dates @@ -91,7 +98,9 @@ def select_check_datetime(self, datetime, expected, precision=0, timezone="UTC") :param timezone: timezone, default: UTC """ with When("I use toDateTime64"): - r = self.context.node.query(f"SELECT toDateTime64('{datetime}', {precision}, '{timezone}')") + r = self.context.node.query( + f"SELECT toDateTime64('{datetime}', 
{precision}, '{timezone}')" + ) with Then(f"I expect {expected}"): assert r.output == expected, error() @@ -116,7 +125,9 @@ def exec_query(self, request, expected=None, exitcode=None): @TestStep -def walk_datetime_in_incrementing_steps(self, date, hrs_range=(0, 24), step=1, timezone="UTC", precision=0): +def walk_datetime_in_incrementing_steps( + self, date, hrs_range=(0, 24), step=1, timezone="UTC", precision=0 +): """Sweep time starting from some start date. The time is incremented in steps specified by the `step` parameter (default: 1 min). @@ -130,22 +141,38 @@ def walk_datetime_in_incrementing_steps(self, date, hrs_range=(0, 24), step=1, t with Pool(2) as pool: try: - with When(f"I loop through datetime range {hrs_range} starting from {date} in {step}min increments"): - for hrs in range(*hrs_range) if stress else (hrs_range[0], hrs_range[1]-1): + with When( + f"I loop through datetime range {hrs_range} starting from {date} in {step}min increments" + ): + for hrs in ( + range(*hrs_range) if stress else (hrs_range[0], hrs_range[1] - 1) + ): for mins in range(0, 60, step) if stress else (0, 59): - datetime = f"{date} {str(hrs).zfill(2)}:{str(mins).zfill(2)}:{secs}" + datetime = ( + f"{date} {str(hrs).zfill(2)}:{str(mins).zfill(2)}:{secs}" + ) expected = datetime with When(f"time is {datetime}"): - Test(name=f"{hrs}:{mins}:{secs}", test=select_check_datetime, parallel=True, executor=pool)( - datetime=datetime, precision=precision, timezone=timezone, - expected=expected) + Test( + name=f"{hrs}:{mins}:{secs}", + test=select_check_datetime, + parallel=True, + executor=pool, + )( + datetime=datetime, + precision=precision, + timezone=timezone, + expected=expected, + ) finally: join() @TestStep -def walk_datetime_in_decrementing_steps(self, date, hrs_range=(23, 0), step=1, timezone="UTC", precision=0): +def walk_datetime_in_decrementing_steps( + self, date, hrs_range=(23, 0), step=1, timezone="UTC", precision=0 +): """Sweep time starting from some start date. The time is decremented in steps specified by the `step` parameter (default: 1 min). 
@@ -160,15 +187,29 @@ def walk_datetime_in_decrementing_steps(self, date, hrs_range=(23, 0), step=1, t with Pool(2) as pool: try: - with When(f"I loop through datetime range {hrs_range} starting from {date} in {step}min decrements"): - for hrs in range(*hrs_range, -1) if stress else (hrs_range[1], hrs_range[0]): + with When( + f"I loop through datetime range {hrs_range} starting from {date} in {step}min decrements" + ): + for hrs in ( + range(*hrs_range, -1) if stress else (hrs_range[1], hrs_range[0]) + ): for mins in range(59, 0, -step) if stress else (59, 0): - datetime = f"{date} {str(hrs).zfill(2)}:{str(mins).zfill(2)}:{secs}" + datetime = ( + f"{date} {str(hrs).zfill(2)}:{str(mins).zfill(2)}:{secs}" + ) expected = datetime with When(f"time is {datetime}"): - Test(name=f"{hrs}:{mins}:{secs}", test=select_check_datetime, parallel=True, executor=pool)( - datetime=datetime, precision=precision, timezone=timezone, - expected=expected) + Test( + name=f"{hrs}:{mins}:{secs}", + test=select_check_datetime, + parallel=True, + executor=pool, + )( + datetime=datetime, + precision=precision, + timezone=timezone, + expected=expected, + ) finally: join() diff --git a/tests/testflows/datetime64_extended_range/tests/date_time_functions.py b/tests/testflows/datetime64_extended_range/tests/date_time_functions.py index f972caac95b..53add63e8f2 100644 --- a/tests/testflows/datetime64_extended_range/tests/date_time_functions.py +++ b/tests/testflows/datetime64_extended_range/tests/date_time_functions.py @@ -15,8 +15,7 @@ from datetime64_extended_range.tests.common import * RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toTimeZone("1.0") ) def to_time_zone(self): - """Check the toTimeZone() function with DateTime64 extended range. - """ + """Check the toTimeZone() function with DateTime64 extended range.""" stress = self.context.stress timezones = timezones_range(stress) @@ -40,8 +39,7 @@ def to_time_zone(self): @TestOutline def to_date_part(self, py_func, ch_func): - """Check the toYear/toMonth/toQuarter functions with DateTime64 extended range. 
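The expected values used by `to_time_zone` and the `to_*` outlines are computed purely with pytz: the naive datetime is localized into the first timezone and then converted into the second. A small, self-contained sketch of that reference computation follows; the timezone names are arbitrary examples.

```python
import datetime

import pytz  # same reference library the tests rely on


def expected_in_timezone(dt: datetime.datetime, tz_from: str, tz_to: str) -> str:
    """Localize a naive datetime in tz_from, convert it to tz_to, and render
    it the way ClickHouse prints DateTime64 with precision 0."""
    localized = pytz.timezone(tz_from).localize(dt)
    converted = localized.astimezone(pytz.timezone(tz_to))
    return converted.strftime("%Y-%m-%d %H:%M:%S")


print(expected_in_timezone(datetime.datetime(2250, 6, 1, 12, 0, 0),
                           "UTC", "Asia/Novosibirsk"))
```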
- """ + """Check the toYear/toMonth/toQuarter functions with DateTime64 extended range.""" stress = self.context.stress for year in years_range(stress): @@ -56,9 +54,13 @@ def to_date_part(self, py_func, ch_func): with Given("I compute expected output using pytz"): with By(f"localizing {dt} using {tz1} timezone"): time_tz1 = pytz.timezone(tz1).localize(dt) - with And(f"converting {tz1} local datetime {dt} to {tz2} timezone"): + with And( + f"converting {tz1} local datetime {dt} to {tz2} timezone" + ): time_tz2 = time_tz1.astimezone(pytz.timezone(tz2)) - with And(f"calling the '{py_func}' method of the datetime object to get expected result"): + with And( + f"calling the '{py_func}' method of the datetime object to get expected result" + ): result = eval(f"(time_tz2.{py_func}") expected = f"{result}" with And(f"Forming a {ch_func} ClickHouse query"): @@ -71,7 +73,7 @@ def to_date_part(self, py_func, ch_func): @TestScenario @Requirements( RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toYear("1.0"), - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeYearNum("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeYearNum("1.0"), ) def to_year(self): """Check the toYear() and toRelativeYearNum() [which is just an alias for toYear] @@ -81,12 +83,9 @@ def to_year(self): @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toMonth("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toMonth("1.0")) def to_month(self): - """Check the toMonth() function with DateTime64 extended range. - """ + """Check the toMonth() function with DateTime64 extended range.""" to_date_part(py_func="month)", ch_func="toMonth") @@ -106,8 +105,7 @@ def to_quarter(self): @TestOutline def to_day_of(self, py_func, ch_func): - """Check the toDayOf....() functions with DateTime64 extended range. - """ + """Check the toDayOf....() functions with DateTime64 extended range.""" stress = self.context.stress for year in years_range(stress): @@ -138,8 +136,7 @@ def to_day_of(self, py_func, ch_func): RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toDayOfYear("1.0") ) def to_day_of_year(self): - """Check toDayOfYear() function with DateTime64 extended range date time. - """ + """Check toDayOfYear() function with DateTime64 extended range date time.""" to_day_of(py_func="tm_yday", ch_func="toDayOfYear") @@ -148,8 +145,7 @@ def to_day_of_year(self): RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toDayOfMonth("1.0") ) def to_day_of_month(self): - """Check toDayOfMonth() function with DateTime64 extended range date time. - """ + """Check toDayOfMonth() function with DateTime64 extended range date time.""" to_day_of(py_func="tm_mday", ch_func="toDayOfMonth") @@ -158,15 +154,13 @@ def to_day_of_month(self): RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toDayOfWeek("1.0") ) def to_day_of_week(self): - """Check toDayOfWeek() function with DateTime64 extended range date time. - """ + """Check toDayOfWeek() function with DateTime64 extended range date time.""" to_day_of(py_func="tm_wday", ch_func="toDayOfWeek") @TestOutline def to_time_part(self, py_func, ch_func): - """Check the functions like toHour/toMinute/toSecond with DateTime64 extended range. 
- """ + """Check the functions like toHour/toMinute/toSecond with DateTime64 extended range.""" stress = self.context.stress for year in years_range(stress): @@ -191,12 +185,9 @@ def to_time_part(self, py_func, ch_func): @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toHour("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toHour("1.0")) def to_hour(self): - """Check toHour() function with DateTime64 extended range date time. - """ + """Check toHour() function with DateTime64 extended range date time.""" to_time_part(py_func="hour", ch_func="toHour") @@ -205,8 +196,7 @@ def to_hour(self): RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toMinute("1.0") ) def to_minute(self): - """Check toMinute() function with DateTime64 extended range date time. - """ + """Check toMinute() function with DateTime64 extended range date time.""" to_time_part(py_func="minute", ch_func="toMinute") @@ -215,8 +205,7 @@ def to_minute(self): RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toSecond("1.0") ) def to_second(self): - """Check toSecond() function with DateTime64 extended range date time. - """ + """Check toSecond() function with DateTime64 extended range date time.""" to_time_part(py_func="second", ch_func="toSecond") @@ -225,8 +214,7 @@ def to_second(self): RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toUnixTimestamp("1.0") ) def to_unix_timestamp(self): - """Check the toUnixTimestamp() function with DateTime64 extended range - """ + """Check the toUnixTimestamp() function with DateTime64 extended range""" stress = self.context.stress for year in years_range(stress): @@ -277,7 +265,7 @@ def to_start_of_year(self): def iso_year_start(dt): """Helper to find the beginning of iso year.""" - dt_s = datetime.datetime(dt.year-1, 12, 23, 0, 0, 0) + dt_s = datetime.datetime(dt.year - 1, 12, 23, 0, 0, 0) while dt_s.isocalendar()[0] != dt.year: dt_s += datetime.timedelta(days=1) return dt_s @@ -306,7 +294,10 @@ def to_start_of_iso_year(self): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") query = f"SELECT toStartOfISOYear(toDateTime64('{dt_str}', 0, '{tz}'))" with Then("I execute toStartOfISOYear query"): - exec_query(request=query, expected=f"{expected.strftime('%Y-%m-%d')}") + exec_query( + request=query, + expected=f"{expected.strftime('%Y-%m-%d')}", + ) @TestScenario @@ -331,7 +322,9 @@ def to_start_of_quarter(self): with By("computing expected result with python"): time_tz1 = pytz.timezone(tz1).localize(dt) time_tz2 = time_tz1.astimezone(pytz.timezone(tz2)) - expected = f"{year}-{str(time_tz2.month//3 * 3 + 1).zfill(2)}-01" + expected = ( + f"{year}-{str(time_tz2.month//3 * 3 + 1).zfill(2)}-01" + ) with And("forming ClickHouse query"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") query = f"SELECT toStartOfQuarter(toDateTime64('{dt_str}', 0, '{tz1}'), '{tz2}')" @@ -391,7 +384,13 @@ def to_monday(self): with By("computing expected result with python"): time_tz1 = pytz.timezone(tz1).localize(dt) time_tz2 = time_tz1.astimezone(pytz.timezone(tz2)) - expected_date = time_tz2 + datetime.timedelta(days=(-dt.weekday() if dt.weekday() <= 3 else 7 - dt.weekday())) + expected_date = time_tz2 + datetime.timedelta( + days=( + -dt.weekday() + if dt.weekday() <= 3 + else 7 - dt.weekday() + ) + ) expected = f"{expected_date.strftime('%Y-%m-%d')}" with And("forming ClickHouse query"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") @@ -416,13 +415,21 @@ def to_start_of_week(self): with When(f"I check each of the datetimes in 
{year}"): for dt in datetimes: for tz1, tz2 in itertools.product(timezones, timezones): - for mode in (0, 1): # mode - week beginning, either 0 (Sunday) or 1 (Monday) + for mode in ( + 0, + 1, + ): # mode - week beginning, either 0 (Sunday) or 1 (Monday) with Step(f"{dt} {tz1} -> {tz2}"): with By("computing expected result with python"): time_tz1 = pytz.timezone(tz1).localize(dt) time_tz2 = time_tz1.astimezone(pytz.timezone(tz2)) expected_date = time_tz2 + datetime.timedelta( - days=(mode - dt.weekday() if dt.weekday() <= (3+mode) else (mode + 7) - dt.weekday())) + days=( + mode - dt.weekday() + if dt.weekday() <= (3 + mode) + else (mode + 7) - dt.weekday() + ) + ) expected = f"{expected_date.strftime('%Y-%m-%d')}" with And("forming ClickHouse query"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") @@ -520,7 +527,9 @@ def to_start_of_second(self): for year in years_range(stress=stress): with Given(f"I choose datetimes in {year}"): - datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True) + datetimes = select_dates_in_year( + year=year, stress=stress, microseconds=True + ) with When(f"I check each of the datetimes in {year}"): for dt in datetimes: @@ -543,7 +552,9 @@ def to_start_of_minutes_interval(self, interval, func): for year in years_range(stress=stress): with Given(f"I choose datetimes in {year}"): - datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True) + datetimes = select_dates_in_year( + year=year, stress=stress, microseconds=True + ) with When(f"I check each of the datetimes in {year}"): for dt in datetimes: @@ -551,17 +562,23 @@ def to_start_of_minutes_interval(self, interval, func): with Step(f"{dt} {tz}"): with By("Computing expected result using python"): mins = dt.minute // interval * interval - expected = f"{dt.strftime('%Y-%m-%d %H:')}{str(mins).zfill(2)}:00" + expected = ( + f"{dt.strftime('%Y-%m-%d %H:')}{str(mins).zfill(2)}:00" + ) with And(f"Forming a {func} query to ClickHouse"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") - query = f"SELECT {func}(toDateTime64('{dt_str}', 0, '{tz}'))" + query = ( + f"SELECT {func}(toDateTime64('{dt_str}', 0, '{tz}'))" + ) with Then(f"I execute {func} query"): exec_query(request=query, expected=f"{expected}") @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFiveMinute("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFiveMinute( + "1.0" + ) ) def to_start_of_five_minute(self): """Check the toStartOfFiveMinute with DateTime64 extended range.""" @@ -570,7 +587,9 @@ def to_start_of_five_minute(self): @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfTenMinutes("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfTenMinutes( + "1.0" + ) ) def to_start_of_ten_minutes(self): """Check the toStartOfTenMinutes with DateTime64 extended range.""" @@ -579,7 +598,9 @@ def to_start_of_ten_minutes(self): @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFifteenMinutes("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFifteenMinutes( + "1.0" + ) ) def to_start_of_fifteen_minutes(self): """Check the toStartOfFifteenMinutes with DateTime64 extended range.""" @@ -602,13 +623,21 @@ def to_start_of_interval_helper(dt: datetime.datetime, interval_type, interval_v :param interval_type: interval type selector, String :param interval_value: interval size, int """ - intervals_in_seconds = {"SECOND": 
1, "MINUTE": 60, "HOUR": 3600, "DAY": 68400, "WEEK": 604800} + intervals_in_seconds = { + "SECOND": 1, + "MINUTE": 60, + "HOUR": 3600, + "DAY": 68400, + "WEEK": 604800, + } zero_datetime = datetime.datetime(1970, 1, 1, 0, 0, 0) delta = dt - zero_datetime if interval_type in intervals_in_seconds.keys(): divisor = interval_value * intervals_in_seconds[interval_type] - retval = (zero_datetime + datetime.timedelta(seconds=(delta.seconds // divisor * divisor))) + retval = zero_datetime + datetime.timedelta( + seconds=(delta.seconds // divisor * divisor) + ) if interval_type == "WEEK": return retval.strftime("%Y-%m-%d") return retval.strftime("%Y-%m-%d %H:%M:%S") @@ -616,16 +645,24 @@ def to_start_of_interval_helper(dt: datetime.datetime, interval_type, interval_v elif interval_type == "MONTH": diff = (dt.year - zero_datetime.year) * 12 + (dt.month - zero_datetime.month) result_diff = diff // interval_value * interval_value - return (zero_datetime + rd.relativedelta(months=result_diff)).strftime("%Y-%m-%d") + return (zero_datetime + rd.relativedelta(months=result_diff)).strftime( + "%Y-%m-%d" + ) elif interval_type == "QUARTER": - diff = (dt.year - zero_datetime.year) * 4 + (dt.month // 4 - zero_datetime.month // 4) + diff = (dt.year - zero_datetime.year) * 4 + ( + dt.month // 4 - zero_datetime.month // 4 + ) result_diff = diff // interval_value * interval_value - return (zero_datetime + rd.relativedelta(months=result_diff*4)).strftime("%Y-%m-%d") + return (zero_datetime + rd.relativedelta(months=result_diff * 4)).strftime( + "%Y-%m-%d" + ) elif interval_type == "YEAR": result_diff = (dt.year - zero_datetime.year) // interval_value * interval_value - return (zero_datetime + rd.relativedelta(years=result_diff)).strftime("%Y-%m-%d") + return (zero_datetime + rd.relativedelta(years=result_diff)).strftime( + "%Y-%m-%d" + ) @TestScenario @@ -633,13 +670,20 @@ def to_start_of_interval_helper(dt: datetime.datetime, interval_type, interval_v RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfInterval("1.0") ) def to_start_of_interval(self): - """Check the toStartOfInterval with DateTime64 extended range. 
- """ + """Check the toStartOfInterval with DateTime64 extended range.""" stress = self.context.stress timezones = timezones_range(stress) - intervals_testing_ranges = {"SECOND": range(1, 15), "MINUTE": range(1, 15), "HOUR": range(1, 10), "DAY": (1, 5, 10), - "WEEK": range(1, 5), "MONTH": range(1, 6), "QUARTER": range(1, 4), "YEAR": range(1, 5)} + intervals_testing_ranges = { + "SECOND": range(1, 15), + "MINUTE": range(1, 15), + "HOUR": range(1, 10), + "DAY": (1, 5, 10), + "WEEK": range(1, 5), + "MONTH": range(1, 6), + "QUARTER": range(1, 4), + "YEAR": range(1, 5), + } for year in years_range(stress=stress): with Given(f"I choose datetimes in {year}"): @@ -652,8 +696,12 @@ def to_start_of_interval(self): for value in intervals_testing_ranges[interval]: with Step(f"{dt} {tz} {interval}: {value}"): with By("Computing expected result using python"): - expected = to_start_of_interval_helper(dt, interval, value) - with And(f"Forming a toStartOfInterval() query to ClickHouse"): + expected = to_start_of_interval_helper( + dt, interval, value + ) + with And( + f"Forming a toStartOfInterval() query to ClickHouse" + ): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") query = f"SELECT toStartOfInterval(toDateTime64('{dt_str}', 0, '{tz}'), INTERVAL {value} {interval})" with Then(f"I execute toStartOfInterval() query"): @@ -668,7 +716,9 @@ def to_iso(self, func, isocalendar_pos): for year in years_range(stress=stress): with Given(f"I choose datetimes in {year}"): - datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True) + datetimes = select_dates_in_year( + year=year, stress=stress, microseconds=True + ) with When(f"I check each of the datetimes in {year}"): for dt in datetimes: @@ -678,7 +728,9 @@ def to_iso(self, func, isocalendar_pos): expected = f"{dt.isocalendar()[isocalendar_pos]}" with And("forming ClickHouse query"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") - query = f"SELECT {func}(toDateTime64('{dt_str}', 0, '{tz}'))" + query = ( + f"SELECT {func}(toDateTime64('{dt_str}', 0, '{tz}'))" + ) with Then("I execute query"): exec_query(request=query, expected=f"{expected}") @@ -702,9 +754,7 @@ def to_iso_week(self): @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toTime("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toTime("1.0")) def to_time(self): """Check the toTime function with DateTime64 extended range.""" stress = self.context.stress @@ -712,7 +762,9 @@ def to_time(self): for year in years_range(stress=stress): with Given(f"I choose datetimes in {year}"): - datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True) + datetimes = select_dates_in_year( + year=year, stress=stress, microseconds=True + ) with When(f"I check each of the datetimes in {year}"): for dt in datetimes: @@ -722,14 +774,18 @@ def to_time(self): expected = f"1970-01-02 {dt.strftime('%H:%M:%S')}" with And("forming ClickHouse query"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") - query = f"SELECT toTime(toDateTime64('{dt_str}', 0, '{tz}'))" + query = ( + f"SELECT toTime(toDateTime64('{dt_str}', 0, '{tz}'))" + ) with Then("I execute query"): exec_query(request=query, expected=f"{expected}") @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeQuarterNum("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeQuarterNum( + "1.0" + ) ) def to_relative_quarter_num(self): """Check the toRelativeQuarterNum function with DateTime64 extended range.""" @@ 
-738,7 +794,9 @@ def to_relative_quarter_num(self): for year in years_range(stress=stress): with Given(f"I choose datetimes in {year}"): - datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True) + datetimes = select_dates_in_year( + year=year, stress=stress, microseconds=True + ) with When(f"I check each of the datetimes in {year}"): for dt in datetimes: @@ -764,14 +822,23 @@ def to_relative_week_num(self): for year in years_range(stress=stress): with Given(f"I choose datetimes in {year}"): - datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True) + datetimes = select_dates_in_year( + year=year, stress=stress, microseconds=True + ) with When(f"I check each of the datetimes in {year}"): for dt in datetimes: for tz in timezones: with When(f"{dt} {tz}"): with By("computing expected result using python"): - week_num = ((dt + datetime.timedelta(days=8) - datetime.timedelta(days=dt.weekday())) - datetime.datetime(1970, 1, 1, 0, 0, 0)).days // 7 + week_num = ( + ( + dt + + datetime.timedelta(days=8) + - datetime.timedelta(days=dt.weekday()) + ) + - datetime.datetime(1970, 1, 1, 0, 0, 0) + ).days // 7 expected = f"{week_num}" with And("forming ClickHouse query"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") @@ -791,7 +858,9 @@ def to_relative_month_num(self): for year in years_range(stress=stress): with Given(f"I choose datetimes in {year}"): - datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True) + datetimes = select_dates_in_year( + year=year, stress=stress, microseconds=True + ) with When(f"I check each of the datetimes in {year}"): for dt in datetimes: @@ -818,7 +887,9 @@ def to_relative_day_num(self): for year in years_range(stress=stress): with Given(f"I choose datetimes in {year}"): - datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True) + datetimes = select_dates_in_year( + year=year, stress=stress, microseconds=True + ) with When(f"I check each of the datetimes in {year}"): for dt in datetimes: @@ -844,18 +915,24 @@ def to_relative_time(self, divisor, func): for year in years_range(stress=stress): with Given(f"I choose datetimes in {year}"): - datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True) + datetimes = select_dates_in_year( + year=year, stress=stress, microseconds=True + ) with When(f"I check each of the datetimes in {year}"): for dt in datetimes: for tz in timezones: with When(f"{dt} {tz}"): with By("Computing the expected result using python"): - result = (dt - datetime.datetime(1970, 1, 1, 0, 0, 0)).total_seconds() // divisor + result = ( + dt - datetime.datetime(1970, 1, 1, 0, 0, 0) + ).total_seconds() // divisor expected = f"{result}" with And(f"Forming a {func} query to ClickHouse"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") - query = f"SELECT {func}(toDateTime64('{dt_str}', 0, '{tz}'))" + query = ( + f"SELECT {func}(toDateTime64('{dt_str}', 0, '{tz}'))" + ) with Then(f"I execute {func} query"): exec_query(request=query, expected=f"{expected}") @@ -873,7 +950,9 @@ def to_relative_hour_num(self): @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeMinuteNum("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeMinuteNum( + "1.0" + ) ) def to_relative_minute_num(self): """Check the toRelativeMinuteNum function @@ -884,7 +963,9 @@ def to_relative_minute_num(self): @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeSecondNum("1.0") + 
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toRelativeSecondNum( + "1.0" + ) ) def to_relative_second_num(self): """Check the toRelativeSecondNum function @@ -904,72 +985,114 @@ def to_week_compute_expected(dt: datetime.datetime, mode: int, ret_year=False): while ex.weekday() != 0: ex += datetime.timedelta(days=1) - first_monday = ex.day-1 + first_monday = ex.day - 1 ex = datetime.datetime(year, 1, 1) while ex.weekday() != 6: ex += datetime.timedelta(days=1) - first_sunday = ex.day-1 + first_sunday = ex.day - 1 if mode == 0: # First day of week: Sunday, Week 1 is the first week with Sunday, range 0-53 - expected = (dt - datetime.datetime(year, 1, 1) - datetime.timedelta(days=first_sunday)).days // 7 + 1 + expected = ( + dt - datetime.datetime(year, 1, 1) - datetime.timedelta(days=first_sunday) + ).days // 7 + 1 elif mode == 1: # First day of week: Monday, Week 1 is the first week containing 4 or more days, range 0-53 if j1_weekday <= 3: - expected = (dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=7+j1_weekday)).days // 7 + expected = ( + dt + - datetime.datetime(year, 1, 1) + + datetime.timedelta(days=7 + j1_weekday) + ).days // 7 else: - expected = (dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=j1_weekday)).days // 7 + expected = ( + dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=j1_weekday) + ).days // 7 elif mode == 2: # First day of week: Sunday, Week 1 is the first week with Sunday, range 1-53 - expected = (dt - datetime.datetime(year, 1, 1) - datetime.timedelta(days=first_sunday)).days // 7 + 1 + expected = ( + dt - datetime.datetime(year, 1, 1) - datetime.timedelta(days=first_sunday) + ).days // 7 + 1 if expected == 0: - return to_week_compute_expected(datetime.datetime(dt.year-1, 12, 31), 2) + return to_week_compute_expected(datetime.datetime(dt.year - 1, 12, 31), 2) elif mode == 3: # First day of week: Monday, Week 1 is the first week containing 4 or more days, range 1-53 if j1_weekday <= 3: - expected = (dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=7+j1_weekday)).days // 7 + expected = ( + dt + - datetime.datetime(year, 1, 1) + + datetime.timedelta(days=7 + j1_weekday) + ).days // 7 else: - expected = (dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=j1_weekday)).days // 7 + expected = ( + dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=j1_weekday) + ).days // 7 if expected == 0: - return to_week_compute_expected(datetime.datetime(dt.year-1, 12, 31), 3) + return to_week_compute_expected(datetime.datetime(dt.year - 1, 12, 31), 3) elif mode == 4: # First day of week: Sunday, Week 1 is the first week containing 4 or more days, range 0-53 if j1_weekday <= 3: - expected = (dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=8+j1_weekday)).days // 7 + expected = ( + dt + - datetime.datetime(year, 1, 1) + + datetime.timedelta(days=8 + j1_weekday) + ).days // 7 else: - expected = (dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=j1_weekday+1)).days // 7 + expected = ( + dt + - datetime.datetime(year, 1, 1) + + datetime.timedelta(days=j1_weekday + 1) + ).days // 7 elif mode == 5: # First day of week: Monday, Week 1 is the first week with a Monday, range 0-53 - expected = (dt - datetime.datetime(year, 1, 1) - datetime.timedelta(days=first_monday)).days // 7 + 1 + expected = ( + dt - datetime.datetime(year, 1, 1) - datetime.timedelta(days=first_monday) + ).days // 7 + 1 elif mode == 6: # First day of week: Sunday, Week 1 is the first week containing 4 or more days, range 
1-53 if j1_weekday <= 3: - expected = (dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=8+j1_weekday)).days // 7 + expected = ( + dt + - datetime.datetime(year, 1, 1) + + datetime.timedelta(days=8 + j1_weekday) + ).days // 7 else: - expected = (dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=j1_weekday+1)).days // 7 + expected = ( + dt + - datetime.datetime(year, 1, 1) + + datetime.timedelta(days=j1_weekday + 1) + ).days // 7 if expected == 0: - return to_week_compute_expected(datetime.datetime(dt.year-1, 12, 31), 6) + return to_week_compute_expected(datetime.datetime(dt.year - 1, 12, 31), 6) elif mode == 7: # First day of week: Monday, Week 1 is the first week with a Monday, range 1-53 - expected = (dt - datetime.datetime(year, 1, 1) - datetime.timedelta(days=first_monday)).days // 7 + 1 + expected = ( + dt - datetime.datetime(year, 1, 1) - datetime.timedelta(days=first_monday) + ).days // 7 + 1 if expected == 0: - return to_week_compute_expected(datetime.datetime(dt.year-1, 12, 31), 7) + return to_week_compute_expected(datetime.datetime(dt.year - 1, 12, 31), 7) elif mode == 8: # First day of week: Sunday, Week 1 is the week containing January 1, range 1-53 - expected = (dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=(j1_weekday+1)%7)).days // 7 + 1 + expected = ( + dt + - datetime.datetime(year, 1, 1) + + datetime.timedelta(days=(j1_weekday + 1) % 7) + ).days // 7 + 1 elif mode == 9: # First day of week: Monday, Week 1 is the week containing January 1, range 1-53 - expected = (dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=j1_weekday%7)).days // 7 + 1 + expected = ( + dt - datetime.datetime(year, 1, 1) + datetime.timedelta(days=j1_weekday % 7) + ).days // 7 + 1 return f"{dt.year}{str(expected).zfill(2)}" if ret_year else f"{expected}" @@ -996,7 +1119,9 @@ def to_week_year_week(self, clh_func, ret_year): for mode in range(0, 10): with When(f"{dt} {tz}, mode={mode}"): with By("Computing expected output using python"): - expected = to_week_compute_expected(dt=dt, mode=mode, ret_year=ret_year) + expected = to_week_compute_expected( + dt=dt, mode=mode, ret_year=ret_year + ) with And(f"Forming a {clh_func} query"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") query = f"SELECT {clh_func}(toDateTime64('{dt_str}', 0, '{tz}'), {mode})" @@ -1005,9 +1130,7 @@ def to_week_year_week(self, clh_func, ret_year): @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toWeek("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toWeek("1.0")) def to_week(self): """Check the toWeek function with DateTime64 extended range.""" to_week_year_week(clh_func="toWeek", ret_year=False) @@ -1045,7 +1168,9 @@ def to_yyyymm(self): expected = f"{dt.strftime('%Y%m')}" with And("forming ClickHouse query"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") - query = f"SELECT toYYYYMM(toDateTime64('{dt_str}', 0, '{tz}'))" + query = ( + f"SELECT toYYYYMM(toDateTime64('{dt_str}', 0, '{tz}'))" + ) with Then("I execute query"): exec_query(request=query, expected=f"{expected}") @@ -1074,7 +1199,11 @@ def to_yyyymmdd(self): with And("forming ClickHouse query"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") query = f"SELECT toYYYYMMDD(toDateTime64('{dt_str}', 0, '{tz}'))" - with Then("I execute query", description=f"expected {expected}", flags=TE): + with Then( + "I execute query", + description=f"expected {expected}", + flags=TE, + ): exec_query(request=query, expected=f"{expected}") @@ -1107,9 +1236,7 @@ def 
to_yyyymmddhhmmss(self): @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_now("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_now("1.0")) def now(self): """Check the now() conversion to DateTime64 extended range. In this test, we cannot assure that pytz now() and ClickHouse now() will be executed at the same time, so we need @@ -1127,9 +1254,13 @@ def now(self): with When("I execute query and format its result to string"): r = self.context.node.query(f"SELECT toDateTime64(now(), 0, '{tz}')") query_result = r.output - received_dt = datetime.datetime.strptime(query_result, '%Y-%m-%d %H:%M:%S') + received_dt = datetime.datetime.strptime( + query_result, "%Y-%m-%d %H:%M:%S" + ) - with Then("I compute the difference between ClickHouse query result and pytz result"): + with Then( + "I compute the difference between ClickHouse query result and pytz result" + ): dt = dt.replace(tzinfo=None) if dt < received_dt: diff = (received_dt - dt).total_seconds() @@ -1141,12 +1272,9 @@ def now(self): @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_today("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_today("1.0")) def today(self): - """Check the today() conversion to DateTime64 extended range. - """ + """Check the today() conversion to DateTime64 extended range.""" stress = self.context.stress timezones = timezones_range(stress) @@ -1159,9 +1287,13 @@ def today(self): with When("I execute query and format its result to string"): r = self.context.node.query(f"SELECT toDateTime64(today(), 0, '{tz}')") query_result = r.output - received_dt = datetime.datetime.strptime(query_result, '%Y-%m-%d %H:%M:%S') + received_dt = datetime.datetime.strptime( + query_result, "%Y-%m-%d %H:%M:%S" + ) - with Then("I compute the difference between ClickHouse query result and pytz result"): + with Then( + "I compute the difference between ClickHouse query result and pytz result" + ): dt = dt.replace(tzinfo=None) if dt < received_dt: diff = (received_dt - dt).total_seconds() @@ -1177,8 +1309,7 @@ def today(self): RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_yesterday("1.0") ) def yesterday(self): - """Check the yesterday() conversion to DateTime64 extended range. 
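Because the Python-side and ClickHouse-side `now()` cannot execute at exactly the same instant, the now/today/yesterday scenarios accept a small difference between the two values rather than exact equality. A sketch of that tolerance check is shown below; `query_clickhouse_now` is a hypothetical stand-in for running the query against the node.

```python
import datetime

import pytz


def close_enough(received: str, tz: str, tolerance_s: int = 60) -> bool:
    """Compare a 'YYYY-MM-DD HH:MM:SS' string returned by ClickHouse with the
    current wall-clock time in the same timezone, allowing a small skew."""
    received_dt = datetime.datetime.strptime(received, "%Y-%m-%d %H:%M:%S")
    local_now = datetime.datetime.now(pytz.timezone(tz)).replace(tzinfo=None)
    diff = abs((received_dt - local_now).total_seconds())
    return diff <= tolerance_s


# query_clickhouse_now() is hypothetical; it would run
# SELECT toDateTime64(now(), 0, 'UTC') against the node.
# assert close_enough(query_clickhouse_now(), "UTC")
```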
- """ + """Check the yesterday() conversion to DateTime64 extended range.""" stress = self.context.stress timezones = timezones_range(stress) @@ -1190,11 +1321,17 @@ def yesterday(self): with Step(f"{dt} {tz}"): with When("I execute query and format its result to string"): - r = self.context.node.query(f"SELECT toDateTime64(yesterday(), 0, '{tz}')") + r = self.context.node.query( + f"SELECT toDateTime64(yesterday(), 0, '{tz}')" + ) query_result = r.output - received_dt = datetime.datetime.strptime(query_result, '%Y-%m-%d %H:%M:%S') + received_dt = datetime.datetime.strptime( + query_result, "%Y-%m-%d %H:%M:%S" + ) - with Then("I compute the difference between ClickHouse query result and pytz result"): + with Then( + "I compute the difference between ClickHouse query result and pytz result" + ): dt = dt.replace(tzinfo=None) dt -= datetime.timedelta(days=1) if dt < received_dt: @@ -1207,7 +1344,9 @@ def yesterday(self): @TestOutline -def add_subtract_functions(self, clh_func, py_key, test_range, years_padding=(1, 1), modifier=1, mult=1): +def add_subtract_functions( + self, clh_func, py_key, test_range, years_padding=(1, 1), modifier=1, mult=1 +): """Check the addYears/addMonths/addWeeks/addDays/addHours/addMinutes/addSeconds with DateTime64 extended range. Calculating expected result using eval() to avoid writing 9000+ comparisons and just parse string as object field name. :param self: self @@ -1233,7 +1372,9 @@ def add_subtract_functions(self, clh_func, py_key, test_range, years_padding=(1, with By("converting datetime to string"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") with And("computing the expected result using pytz"): - dt_new = dt + rd.relativedelta(**{py_key: mult*incr*modifier}) + dt_new = dt + rd.relativedelta( + **{py_key: mult * incr * modifier} + ) tzone = pytz.timezone(tz) dt_norm = tzone.normalize(tzone.localize(dt_new)) expected = f"{dt_norm.strftime('%Y-%m-%d %H:%M:%S')}" @@ -1249,7 +1390,9 @@ def add_subtract_functions(self, clh_func, py_key, test_range, years_padding=(1, ) def add_years(self): """Check the addYears function with DateTime64 extended range.""" - add_subtract_functions(clh_func="addYears", py_key="years", test_range=(0, 1), years_padding=(0, 1)) + add_subtract_functions( + clh_func="addYears", py_key="years", test_range=(0, 1), years_padding=(0, 1) + ) @TestScenario @@ -1258,7 +1401,13 @@ def add_years(self): ) def subtract_years(self): """Check the subtractYears function with DateTime64 extended range.""" - add_subtract_functions(clh_func="subtractYears", py_key="years", test_range=(0, 1), years_padding=(1, 0), mult=-1) + add_subtract_functions( + clh_func="subtractYears", + py_key="years", + test_range=(0, 1), + years_padding=(1, 0), + mult=-1, + ) @TestScenario @@ -1267,7 +1416,13 @@ def subtract_years(self): ) def add_quarters(self): """Check the addQuarters function with DateTime64 extended range.""" - add_subtract_functions(clh_func="addQuarters", py_key="months", test_range=range(1, 5), years_padding=(0, 1), modifier=3) + add_subtract_functions( + clh_func="addQuarters", + py_key="months", + test_range=range(1, 5), + years_padding=(0, 1), + modifier=3, + ) @TestScenario @@ -1276,7 +1431,14 @@ def add_quarters(self): ) def subtract_quarters(self): """Check the subtractQuarters function with DateTime64 extended range.""" - add_subtract_functions(clh_func="subtractQuarters", py_key="months", test_range=range(1, 5), years_padding=(1, 0), modifier=3, mult=-1) + add_subtract_functions( + clh_func="subtractQuarters", + py_key="months", + 
test_range=range(1, 5), + years_padding=(1, 0), + modifier=3, + mult=-1, + ) @TestScenario @@ -1285,7 +1447,12 @@ def subtract_quarters(self): ) def add_months(self): """Check the addMonths function with DateTime64 extended range.""" - add_subtract_functions(clh_func="addMonths", py_key="months", test_range=range(1, 13), years_padding=(0, 1)) + add_subtract_functions( + clh_func="addMonths", + py_key="months", + test_range=range(1, 13), + years_padding=(0, 1), + ) @TestScenario @@ -1294,7 +1461,13 @@ def add_months(self): ) def subtract_months(self): """Check the subtractMonths function with DateTime64 extended range.""" - add_subtract_functions(clh_func="subtractMonths", py_key="months", test_range=range(1, 13), years_padding=(1, 0), mult=-1) + add_subtract_functions( + clh_func="subtractMonths", + py_key="months", + test_range=range(1, 13), + years_padding=(1, 0), + mult=-1, + ) @TestScenario @@ -1303,7 +1476,13 @@ def subtract_months(self): ) def add_weeks(self): """Check the addWeeks function with DateTime64 extended range.""" - add_subtract_functions(clh_func="addWeeks", py_key="days", test_range=range(6), years_padding=(0, 1), modifier=7) + add_subtract_functions( + clh_func="addWeeks", + py_key="days", + test_range=range(6), + years_padding=(0, 1), + modifier=7, + ) @TestScenario @@ -1312,14 +1491,18 @@ def add_weeks(self): ) def subtract_weeks(self): """Check the subtractWeeks function with DateTime64 extended range.""" - add_subtract_functions(clh_func="subtractWeeks", py_key="days", test_range=range(6), years_padding=(1, 0), modifier=7, mult=-1) - + add_subtract_functions( + clh_func="subtractWeeks", + py_key="days", + test_range=range(6), + years_padding=(1, 0), + modifier=7, + mult=-1, + ) @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_addDays("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_addDays("1.0")) def add_days(self): """Check the addDays function work with DateTime64 extended range""" add_subtract_functions(clh_func="addDays", py_key="days", test_range=range(50)) @@ -1331,7 +1514,9 @@ def add_days(self): ) def subtract_days(self): """Check the subtractDays function work with DateTime64 extended range""" - add_subtract_functions(clh_func="subtractDays", py_key="days", test_range=range(50), mult=-1) + add_subtract_functions( + clh_func="subtractDays", py_key="days", test_range=range(50), mult=-1 + ) @TestScenario @@ -1349,7 +1534,9 @@ def add_hours(self): ) def subtract_hours(self): """Check the subtractHours function work with DateTime64 extended range""" - add_subtract_functions(clh_func="subtractHours", py_key="hours", test_range=range(25), mult=-1) + add_subtract_functions( + clh_func="subtractHours", py_key="hours", test_range=range(25), mult=-1 + ) @TestScenario @@ -1358,7 +1545,9 @@ def subtract_hours(self): ) def add_minutes(self): """Check the addMinutes function work with DateTime64 extended range""" - add_subtract_functions(clh_func="addMinutes", py_key="minutes", test_range=range(60)) + add_subtract_functions( + clh_func="addMinutes", py_key="minutes", test_range=range(60) + ) @TestScenario @@ -1367,7 +1556,9 @@ def add_minutes(self): ) def subtract_minutes(self): """Check the subtractMinutes function work with DateTime64 extended range""" - add_subtract_functions(clh_func="subtractMinutes", py_key="minutes", test_range=range(60), mult=-1) + add_subtract_functions( + clh_func="subtractMinutes", py_key="minutes", test_range=range(60), mult=-1 + ) @TestScenario @@ -1376,7 +1567,9 @@ 
def subtract_minutes(self): ) def add_seconds(self): """Check the addSeconds function work with DateTime64 extended range""" - add_subtract_functions(clh_func="addSeconds", py_key="seconds", test_range=range(60)) + add_subtract_functions( + clh_func="addSeconds", py_key="seconds", test_range=range(60) + ) @TestScenario @@ -1385,12 +1578,13 @@ def add_seconds(self): ) def subtract_seconds(self): """Check the subtractSeconds function work with DateTime64 extended range""" - add_subtract_functions(clh_func="subtractSeconds", py_key="seconds", test_range=range(60), mult=-1) + add_subtract_functions( + clh_func="subtractSeconds", py_key="seconds", test_range=range(60), mult=-1 + ) def date_diff_helper(dt1, dt2: datetime.datetime, unit: str): - """Helper for computing dateDiff expected result using Python. - """ + """Helper for computing dateDiff expected result using Python.""" delta = dt2 - dt1 if unit == "second": return delta.total_seconds() @@ -1415,10 +1609,18 @@ def date_diff_helper(dt1, dt2: datetime.datetime, unit: str): RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_dateDiff("1.0") ) def date_diff(self): - """Check how dateDiff works with DateTime64 extended range. - """ + """Check how dateDiff works with DateTime64 extended range.""" stress = self.context.stress - compare_units = ("second", "minute", "hour", "day", "week", "month", "quarter", "year") + compare_units = ( + "second", + "minute", + "hour", + "day", + "week", + "month", + "quarter", + "year", + ) timezones = timezones_range(stress=stress) with Background("I select a set of datetimes to be compared"): @@ -1448,13 +1650,37 @@ def date_diff(self): ) def format_date_time(self): """Test formatDateTime() when DateTime64 is out of normal range. - This function formats DateTime according to a given Format string. - """ + This function formats DateTime according to a given Format string. + """ stress = self.context.stress timezones = timezones_range(stress) - modes = ('C', 'd', 'D', 'e', 'F', 'G', 'g', 'H', 'I', 'j', 'm', 'M', 'n', - 'p', 'R', 'S', 't', 'T', 'u', 'V', 'w', 'y', 'Y', '%') + modes = ( + "C", + "d", + "D", + "e", + "F", + "G", + "g", + "H", + "I", + "j", + "m", + "M", + "n", + "p", + "R", + "S", + "t", + "T", + "u", + "V", + "w", + "y", + "Y", + "%", + ) for year in years_range(stress=stress): with Given(f"I choose datetimes in {year}"): @@ -1476,11 +1702,17 @@ def format_date_time(self): def time_slots_get_expected(dt: datetime.datetime, duration, size=1800): - """Helper to compute expected array for timeSlots(). 
- """ + """Helper to compute expected array for timeSlots().""" zero_time = datetime.datetime(1970, 1, 1, 0, 0, 0) - result = [(zero_time + datetime.timedelta(seconds=((dt - zero_time).total_seconds() // size * size))).strftime("%Y-%m-%d %H:%M:%S")] + result = [ + ( + zero_time + + datetime.timedelta( + seconds=((dt - zero_time).total_seconds() // size * size) + ) + ).strftime("%Y-%m-%d %H:%M:%S") + ] s = 1 while s <= duration: @@ -1516,24 +1748,26 @@ def time_slots(self): for size in range(1, 50, 3): with Step(f"{dt}, dur={duration}, size={size}"): with By("getting an expected array using python"): - expected = time_slots_get_expected(dt=dt, duration=duration, size=size) + expected = time_slots_get_expected( + dt=dt, duration=duration, size=size + ) with And("forming a ClickHouse query"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") query = f"SELECT timeSlots(toDateTime64('{dt_str}', 0, 'UTC'), toUInt32({duration}), {size})" with Then("I execute query"): try: - assert eval(self.context.node.query(query).output) == expected, error() + assert ( + eval(self.context.node.query(query).output) + == expected + ), error() except SyntaxError: assert False @TestFeature -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions("1.0")) def date_time_funcs(self, node="clickhouse1"): - """Check the basic operations with DateTime64 - """ + """Check the basic operations with DateTime64""" self.context.node = self.context.cluster.node(node) with Pool(4) as pool: diff --git a/tests/testflows/datetime64_extended_range/tests/generic.py b/tests/testflows/datetime64_extended_range/tests/generic.py index 6eb117553e0..9ac2975e5a2 100644 --- a/tests/testflows/datetime64_extended_range/tests/generic.py +++ b/tests/testflows/datetime64_extended_range/tests/generic.py @@ -7,51 +7,59 @@ from datetime64_extended_range.tests.common import * import pytz import itertools + @TestScenario @Requirements( RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_Start("1.0"), ) def normal_range_start(self): - """Check DateTime64 can accept a dates around the start of the normal range that begins at 1970-01-01 00:00:00.000. - """ - with When("I do incrementing time sweep", description="check different time points in the first 24 hours at given date"): - walk_datetime_in_incrementing_steps(date="1970-01-01", precision=3, hrs_range=(0, 24)) + """Check DateTime64 can accept a dates around the start of the normal range that begins at 1970-01-01 00:00:00.000.""" + with When( + "I do incrementing time sweep", + description="check different time points in the first 24 hours at given date", + ): + walk_datetime_in_incrementing_steps( + date="1970-01-01", precision=3, hrs_range=(0, 24) + ) @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_End("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_End("1.0")) def normal_range_end(self): - """Check DateTime64 can accept a dates around the end of the normal range that ends at 2105-12-31 23:59:59.99999. 
- """ - with When("I do decrementing time sweep", - description="check different time points in the last 24 hours at given date"): - walk_datetime_in_decrementing_steps(date="2105-12-31", precision=3, hrs_range=(23, 0)) + """Check DateTime64 can accept a dates around the end of the normal range that ends at 2105-12-31 23:59:59.99999.""" + with When( + "I do decrementing time sweep", + description="check different time points in the last 24 hours at given date", + ): + walk_datetime_in_decrementing_steps( + date="2105-12-31", precision=3, hrs_range=(23, 0) + ) @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_Start("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_Start("1.0")) def extended_range_start(self): - """Check DateTime64 supports dates around the beginning of the extended range that begins at 1698-01-01 00:00:00.000000. - """ - with When("I do incrementing time sweep", - description="check different time points in the first 24 hours at given date"): - walk_datetime_in_incrementing_steps(date="1925-01-01", precision=5, hrs_range=(0, 24)) + """Check DateTime64 supports dates around the beginning of the extended range that begins at 1698-01-01 00:00:00.000000.""" + with When( + "I do incrementing time sweep", + description="check different time points in the first 24 hours at given date", + ): + walk_datetime_in_incrementing_steps( + date="1925-01-01", precision=5, hrs_range=(0, 24) + ) @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_End("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_End("1.0")) def extended_range_end(self, precision=3): - """Check DateTime64 supports dates around the beginning of the extended range that ends at 2377-12-31T23:59:59.999999. - """ - with When("I do decrementing time sweep", - description="check different time points in the last 24 hours at given date"): - walk_datetime_in_decrementing_steps(date="2238-12-31", precision=5, hrs_range=(23, 0)) + """Check DateTime64 supports dates around the beginning of the extended range that ends at 2377-12-31T23:59:59.999999.""" + with When( + "I do decrementing time sweep", + description="check different time points in the last 24 hours at given date", + ): + walk_datetime_in_decrementing_steps( + date="2238-12-31", precision=5, hrs_range=(23, 0) + ) @TestScenario @@ -62,9 +70,13 @@ def timezone_local_below_normal_range(self): """Check how UTC normal range time value treated when current timezone time value is out of normal range. """ - with When("I do incrementing time sweep", - description="check different time points when UTC datetime fits normal range but below it for local datetime"): - walk_datetime_in_incrementing_steps(date="1969-12-31", hrs_range=(17, 24), timezone='America/Phoenix') + with When( + "I do incrementing time sweep", + description="check different time points when UTC datetime fits normal range but below it for local datetime", + ): + walk_datetime_in_incrementing_steps( + date="1969-12-31", hrs_range=(17, 24), timezone="America/Phoenix" + ) @TestScenario @@ -75,18 +87,19 @@ def timezone_local_above_normal_range(self): """Check how UTC normal range time value treated when current timezone time value is out of normal range. 
""" - with When("I do decrementing time sweep", - description="check different time points when UTC datetime fits normal range but above it for local datetime"): - walk_datetime_in_decrementing_steps(date="2106-01-01", hrs_range=(6, 0), timezone='Asia/Novosibirsk') + with When( + "I do decrementing time sweep", + description="check different time points when UTC datetime fits normal range but above it for local datetime", + ): + walk_datetime_in_decrementing_steps( + date="2106-01-01", hrs_range=(6, 0), timezone="Asia/Novosibirsk" + ) @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_Comparison("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_Comparison("1.0")) def comparison_check(self): - """Check how comparison works with DateTime64 extended range. - """ + """Check how comparison works with DateTime64 extended range.""" stress = self.context.stress comparators = (">", "<", "==", "<=", ">=", "!=") timezones = timezones_range(stress=stress) @@ -112,12 +125,9 @@ def comparison_check(self): @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_TimeZones("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TimeZones("1.0")) def timezones_support(self): - """Check how timezones work with DateTime64 extended range. - """ + """Check how timezones work with DateTime64 extended range.""" stress = self.context.stress timezones = timezones_range(stress=stress) @@ -138,8 +148,7 @@ def timezones_support(self): @TestFeature def generic(self, node="clickhouse1"): - """Check the basic operations with DateTime64 - """ + """Check the basic operations with DateTime64""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario, Suite): diff --git a/tests/testflows/datetime64_extended_range/tests/non_existent_time.py b/tests/testflows/datetime64_extended_range/tests/non_existent_time.py index 1036302b61b..0e3e180fe23 100644 --- a/tests/testflows/datetime64_extended_range/tests/non_existent_time.py +++ b/tests/testflows/datetime64_extended_range/tests/non_existent_time.py @@ -6,9 +6,7 @@ from datetime64_extended_range.tests.common import * @TestScenario -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_InvalidDate("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_InvalidDate("1.0")) def invalid_date(self): """Check how non-existent date is treated. For example, check 31st day in month that only has 30 days. 
@@ -29,16 +27,16 @@ def invalid_date(self): @TestOutline(Suite) -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_InvalidTime("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_InvalidTime("1.0")) @Examples( - "datetime expected timezone", [ - ('2002-04-07 02:30:00', '2002-04-07 01:30:00', 'America/New_York'), - ('2020-03-29 02:30:00', '2020-03-29 01:30:00', 'Europe/Zurich'), - ('2017-03-26 02:30:00', '2017-03-26 01:30:00', 'Europe/Berlin') - ]) -def invalid_time(self, datetime, expected, timezone='UTC'): + "datetime expected timezone", + [ + ("2002-04-07 02:30:00", "2002-04-07 01:30:00", "America/New_York"), + ("2020-03-29 02:30:00", "2020-03-29 01:30:00", "Europe/Zurich"), + ("2017-03-26 02:30:00", "2017-03-26 01:30:00", "Europe/Berlin"), + ], +) +def invalid_time(self, datetime, expected, timezone="UTC"): """proper handling of invalid time for a timezone when using DateTime64 extended range data type, for example, 2:30am on 7th April 2002 never happened at all in the US/Eastern timezone, @@ -50,13 +48,26 @@ def invalid_time(self, datetime, expected, timezone='UTC'): @TestOutline(Scenario) @Requirements( RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_DaylightSavingTime("1.0"), - RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_DaylightSavingTime_Disappeared("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_DaylightSavingTime_Disappeared( + "1.0" + ), ) @Examples( - "tz time_dates", [ - ('America/Denver', {'02:30:00': ('2018-03-11', '2020-03-08', '1980-04-27', '1942-02-09')}), - ('Europe/Zurich', {'02:30:00': ('2016-03-27', '2020-03-29', '1981-03-29'), '01:30:00': ('1942-05-04', )}) -]) + "tz time_dates", + [ + ( + "America/Denver", + {"02:30:00": ("2018-03-11", "2020-03-08", "1980-04-27", "1942-02-09")}, + ), + ( + "Europe/Zurich", + { + "02:30:00": ("2016-03-27", "2020-03-29", "1981-03-29"), + "01:30:00": ("1942-05-04",), + }, + ), + ], +) def dst_disappeared(self, tz, time_dates): """Proper handling of switching DST, when an hour is being skipped. Testing in 2 steps: first, try to make a DateTime64 with skipped time value. @@ -72,7 +83,9 @@ def dst_disappeared(self, tz, time_dates): dt -= datetime.timedelta(hours=1) expected = dt.strftime("%Y-%m-%d %H:%M:%S") with Then(f"I check skipped hour"): - select_check_datetime(datetime=dt_str, expected=expected, timezone=tz) + select_check_datetime( + datetime=dt_str, expected=expected, timezone=tz + ) with Step("Addition test"): with When("computing expected result"): dt += datetime.timedelta(hours=2) @@ -83,14 +96,37 @@ def dst_disappeared(self, tz, time_dates): @TestOutline(Scenario) -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_LeapSeconds("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_LeapSeconds("1.0")) @Examples( - "datet years", [ - ("06-30 23:59:55", [1972, 1981, 1982, 1983, 1985, 1992, 1993, 1994, 1997, 2012, 2015]), - ("12-31 23:59:55", [1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1987, 1989, 1990, 1995, 1998, 2005, 2008, 2016]) -]) + "datet years", + [ + ( + "06-30 23:59:55", + [1972, 1981, 1982, 1983, 1985, 1992, 1993, 1994, 1997, 2012, 2015], + ), + ( + "12-31 23:59:55", + [ + 1972, + 1973, + 1974, + 1975, + 1976, + 1977, + 1978, + 1979, + 1987, + 1989, + 1990, + 1995, + 1998, + 2005, + 2008, + 2016, + ], + ), + ], +) def leap_seconds(self, datet, years): """Test proper handling of leap seconds. 
Read more: https://de.wikipedia.org/wiki/Schaltsekunde Being checked by selecting a timestamp prior to leap second and adding seconds so that the result is after it. @@ -99,7 +135,7 @@ def leap_seconds(self, datet, years): with When(f"{datet}, {year}"): with By("forming an expected result using python"): dt_str = f"{year}-{datet}" - dt = datetime.datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S') + dt = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S") dt += datetime.timedelta(seconds=9) expected = dt.strftime("%Y-%m-%d %H:%M:%S") with And(f"forming a query"): @@ -111,7 +147,7 @@ def leap_seconds(self, datet, years): @TestScenario @Requirements( RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_DaylightSavingTime("1.0"), - RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_TimeZoneSwitch("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_TimeZoneSwitch("1.0"), ) def dst_time_zone_switch(self): """Check how ClickHouse supports handling of invalid time when using DateTime64 extended range data type @@ -122,15 +158,19 @@ def dst_time_zone_switch(self): utc = pytz.timezone("UTC") for timezone in timezones: - if timezone == 'UTC': + if timezone == "UTC": continue with Step(f"{timezone}"): tz = pytz.timezone(timezone) transition_times = tz._utc_transition_times transition_info = tz._transition_info - for i in range(len(transition_times)-1, 0, -1): - if (transition_times[i] > datetime.datetime.now()) or (transition_times[i].year < 1925) or (transition_times[i].year > 2238): + for i in range(len(transition_times) - 1, 0, -1): + if ( + (transition_times[i] > datetime.datetime.now()) + or (transition_times[i].year < 1925) + or (transition_times[i].year > 2238) + ): continue with Step(f"{transition_times[i]}"): with By("localize python datetime"): @@ -138,7 +178,9 @@ def dst_time_zone_switch(self): dt0 = dt - datetime.timedelta(hours=4) dt0 = utc.localize(dt0).astimezone(tz).replace(tzinfo=None) with And("compute expected result using Pytz"): - seconds_shift = transition_info[i][0] - transition_info[i-1][0] + seconds_shift = ( + transition_info[i][0] - transition_info[i - 1][0] + ) dt1 = dt0 + datetime.timedelta(hours=8) + seconds_shift dt0_str = dt0.strftime("%Y-%m-%d %H:%M:%S") dt1_str = dt1.strftime("%Y-%m-%d %H:%M:%S") @@ -150,12 +192,9 @@ def dst_time_zone_switch(self): @TestFeature @Name("non existent time") -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime("1.0")) def feature(self, node="clickhouse1"): - """Check how ClickHouse treats non-existent time in DateTime64 data type. 
- """ + """Check how ClickHouse treats non-existent time in DateTime64 data type.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario, Suite): diff --git a/tests/testflows/datetime64_extended_range/tests/reference_times.py b/tests/testflows/datetime64_extended_range/tests/reference_times.py index cdec3eb260c..9cd9fadc35c 100644 --- a/tests/testflows/datetime64_extended_range/tests/reference_times.py +++ b/tests/testflows/datetime64_extended_range/tests/reference_times.py @@ -8,19 +8,49 @@ from datetime64_extended_range.tests.common import * @TestSuite -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_SpecificTimestamps("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_SpecificTimestamps("1.0")) def reference_times(self, node="clickhouse1"): """Check how ClickHouse converts a set of particular timestamps to DateTime64 for all timezones and compare the result to pytz. """ self.context.node = self.context.cluster.node(node) - timestamps = [9961200, 73476000, 325666800, 354675600, 370400400, 386125200, 388566010, 401850000, 417574811, - 496803600, 528253200, 624423614, 636516015, 671011200, 717555600, 752047218, 859683600, 922582800, - 1018173600, 1035705600, 1143334800, 1162105223, 1174784400, 1194156000, 1206838823, 1224982823, - 1236495624, 1319936400, 1319936424, 1425798025, 1459040400, 1509872400, 2090451627, 2140668000] + timestamps = [ + 9961200, + 73476000, + 325666800, + 354675600, + 370400400, + 386125200, + 388566010, + 401850000, + 417574811, + 496803600, + 528253200, + 624423614, + 636516015, + 671011200, + 717555600, + 752047218, + 859683600, + 922582800, + 1018173600, + 1035705600, + 1143334800, + 1162105223, + 1174784400, + 1194156000, + 1206838823, + 1224982823, + 1236495624, + 1319936400, + 1319936424, + 1425798025, + 1459040400, + 1509872400, + 2090451627, + 2140668000, + ] query = "" diff --git a/tests/testflows/datetime64_extended_range/tests/type_conversion.py b/tests/testflows/datetime64_extended_range/tests/type_conversion.py index 85582b82d7b..c52ecdce582 100644 --- a/tests/testflows/datetime64_extended_range/tests/type_conversion.py +++ b/tests/testflows/datetime64_extended_range/tests/type_conversion.py @@ -14,13 +14,29 @@ from datetime64_extended_range.tests.common import * @TestOutline(Scenario) -@Examples("cast", [ - (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toInt_8_16_32_64_128_256_("1.0"))), - (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0"))) -]) +@Examples( + "cast", + [ + ( + False, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toInt_8_16_32_64_128_256_( + "1.0" + ) + ), + ), + ( + True, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_( + "1.0" + ) + ), + ), + ], +) def to_int_8_16_32_64_128_256(self, cast): - """Check the toInt(8|16|32|64|128|256) functions with DateTime64 extended range - """ + """Check the toInt(8|16|32|64|128|256) functions with DateTime64 extended range""" stress = self.context.stress timezones = timezones_range(stress) @@ -45,7 +61,9 @@ def to_int_8_16_32_64_128_256(self, cast): np_res = py_res if np_res == py_res: with Given(f"{py_res} fits int{int_type}"): - with When(f"making a query string for ClickHouse if py_res fits int{int_type}"): + with When( + f"making a query string for ClickHouse if py_res fits int{int_type}" + ): if cast: query = f"SELECT cast(toDateTime64('{dt_str}', 0, '{tz}'), 'Int{int_type}')" 
else: @@ -55,13 +73,29 @@ def to_int_8_16_32_64_128_256(self, cast): @TestOutline(Scenario) -@Examples("cast", [ - (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUInt_8_16_32_64_256_("1.0"))), - (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0"))) -]) +@Examples( + "cast", + [ + ( + False, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUInt_8_16_32_64_256_( + "1.0" + ) + ), + ), + ( + True, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_( + "1.0" + ) + ), + ), + ], +) def to_uint_8_16_32_64_256(self, cast): - """Check the toUInt(8|16|32|64|256) functions with DateTime64 extended range - """ + """Check the toUInt(8|16|32|64|256) functions with DateTime64 extended range""" stress = self.context.stress timezones = timezones_range(stress) @@ -86,7 +120,9 @@ def to_uint_8_16_32_64_256(self, cast): np_res = py_res if np_res == py_res: with Given(f"{py_res} fits int{int_type}"): - with When(f"making a query string for ClickHouse if py_res fits int{int_type}"): + with When( + f"making a query string for ClickHouse if py_res fits int{int_type}" + ): if cast: query = f"SELECT cast(toDateTime64('{dt_str}', 0, '{tz}'), 'UInt{int_type}')" else: @@ -96,13 +132,29 @@ def to_uint_8_16_32_64_256(self, cast): @TestOutline(Scenario) -@Examples("cast", [ - (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toFloat_32_64_("1.0"))), - (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0"))) -]) +@Examples( + "cast", + [ + ( + False, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toFloat_32_64_( + "1.0" + ) + ), + ), + ( + True, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_( + "1.0" + ) + ), + ), + ], +) def to_float_32_64(self, cast): - """Check the toFloat(32|64) functions with DateTime64 extended range - """ + """Check the toFloat(32|64) functions with DateTime64 extended range""" stress = self.context.stress timezones = timezones_range(stress) @@ -133,11 +185,12 @@ def to_float_32_64(self, cast): @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDateTime64_FromString_MissingTime("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDateTime64_FromString_MissingTime( + "1.0" + ) ) def to_datetime64_from_string_missing_time(self): - """Check the toDateTime64() with DateTime64 extended range conversion when string is missing the time part. - """ + """Check the toDateTime64() with DateTime64 extended range conversion when string is missing the time part.""" stress = self.context.stress timezones = timezones_range(stress) @@ -163,8 +216,7 @@ def to_datetime64_from_string_missing_time(self): RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDateTime64("1.0") ) def to_datetime64(self): - """Check the toDateTime64() conversion with DateTime64. This is supposed to work in normal range ONLY. - """ + """Check the toDateTime64() conversion with DateTime64. 
This is supposed to work in normal range ONLY.""" stress = self.context.stress timezones = timezones_range(stress) @@ -185,13 +237,29 @@ def to_datetime64(self): @TestOutline(Scenario) -@Examples("cast", [ - (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDate("1.0"))), - (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0"))) -]) +@Examples( + "cast", + [ + ( + False, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDate( + "1.0" + ) + ), + ), + ( + True, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_( + "1.0" + ) + ), + ), + ], +) def to_date(self, cast): - """Check the toDate() conversion with DateTime64. This is supposed to work in normal range ONLY. - """ + """Check the toDate() conversion with DateTime64. This is supposed to work in normal range ONLY.""" stress = self.context.stress timezones = timezones_range(stress) @@ -202,7 +270,7 @@ def to_date(self, cast): for dt in datetimes: for tz in timezones: with Step(f"{dt} {tz}"): - expected = None # by default - not checked, checking the exitcode + expected = None # by default - not checked, checking the exitcode with By("converting datetime to string"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S") @@ -214,20 +282,38 @@ def to_date(self, cast): if cast: query = f"SELECT CAST(toDateTime64('{dt_str}', 0, '{tz}'), 'Date')" else: - query = f"SELECT toDate(toDateTime64('{dt_str}', 0, '{tz}'))" + query = ( + f"SELECT toDate(toDateTime64('{dt_str}', 0, '{tz}'))" + ) with Then(f"I execute toDate() query and check return/exitcode"): exec_query(request=query, expected=expected, exitcode=0) @TestOutline(Scenario) -@Examples("cast", [ - (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDateTime("1.0"))), - (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0"))) -]) +@Examples( + "cast", + [ + ( + False, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDateTime( + "1.0" + ) + ), + ), + ( + True, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_( + "1.0" + ) + ), + ), + ], +) def to_datetime(self, cast): - """Check the toDateTime() conversion with DateTime64. This is supposed to work in normal range ONLY. - """ + """Check the toDateTime() conversion with DateTime64. 
This is supposed to work in normal range ONLY.""" stress = self.context.stress timezones = timezones_range(stress) @@ -247,7 +333,9 @@ def to_datetime(self, cast): dt_transformed = dt_local.astimezone(tzlocal()) expected = f"{dt_transformed.strftime('%Y-%m-%d %H:%M:%S')}" else: - query = f"SELECT toDateTime(toDateTime64('{dt_str}', 0, '{tz}'))" + query = ( + f"SELECT toDateTime(toDateTime64('{dt_str}', 0, '{tz}'))" + ) with When("figure out expected result in python"): expected = f"{dt.strftime('%Y-%m-%d %H:%M:%S')}" @@ -260,13 +348,29 @@ def to_datetime(self, cast): @TestOutline(Scenario) -@Examples("cast", [ - (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toString("1.0"))), - (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0"))) -]) +@Examples( + "cast", + [ + ( + False, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toString( + "1.0" + ) + ), + ), + ( + True, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_( + "1.0" + ) + ), + ), + ], +) def to_string(self, cast): - """Check the toString() with DateTime64 extended range. - """ + """Check the toString() with DateTime64 extended range.""" stress = self.context.stress timezones = timezones_range(stress) @@ -283,22 +387,45 @@ def to_string(self, cast): if cast: query = f"SELECT cast(toDateTime64('{dt_str}', 0, '{tz}'), 'String')" else: - query = f"SELECT toString(toDateTime64('{dt_str}', 0, '{tz}'))" + query = ( + f"SELECT toString(toDateTime64('{dt_str}', 0, '{tz}'))" + ) with Then(f"I execute toDateTime64() query"): exec_query(request=query, expected=f"{dt_str}") def valid_decimal_range(bit_depth, S): """A helper to find valid range for Decimal(32|64|128|256) with given scale (S)""" - return {32: -1 * 10 ** (9 - S), 64: -1 * 10 ** (18 - S), 128: -1 * 10 ** (38 - S), 256: -1 * 10 ** (76 - S)}[ - bit_depth] + return { + 32: -1 * 10 ** (9 - S), + 64: -1 * 10 ** (18 - S), + 128: -1 * 10 ** (38 - S), + 256: -1 * 10 ** (76 - S), + }[bit_depth] @TestOutline(Scenario) -@Examples("cast", [ - (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDecimal_32_64_128_256_("1.0"))), - (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0"))) -]) +@Examples( + "cast", + [ + ( + False, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDecimal_32_64_128_256_( + "1.0" + ) + ), + ), + ( + True, + Requirements( + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_( + "1.0" + ) + ), + ), + ], +) def to_decimal_32_64_128_256(self, cast): """Check the toDecimal(32|64|128|256) functions with DateTime64 extended range. 
Decimal32(S) - ( -1 * 10^(9 - S), 1 * 10^(9 - S) ) @@ -320,7 +447,9 @@ def to_decimal_32_64_128_256(self, cast): for decimal_type in (32, 64, 128, 256): for scale in range(scales[decimal_type]): with When(f"{dt} {tz}, Decimal{decimal_type}({scale})"): - valid_range = valid_decimal_range(bit_depth=decimal_type, S=scale) + valid_range = valid_decimal_range( + bit_depth=decimal_type, S=scale + ) with By("computing the expected result using python"): expected = decimal.Decimal(time.mktime(dt.timetuple())) if -valid_range < expected < valid_range: @@ -342,11 +471,13 @@ def to_unix_timestamp64_milli_micro_nano(self, scale): """ stress = self.context.stress timezones = timezones_range(stress) - func = {3: 'Milli', 6: 'Micro', 9: 'Nano'} + func = {3: "Milli", 6: "Micro", 9: "Nano"} for year in years_range(stress): with Given(f"I select datetimes in {year}"): - datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True) + datetimes = select_dates_in_year( + year=year, stress=stress, microseconds=True + ) for d in datetimes: for tz in timezones: @@ -355,7 +486,7 @@ def to_unix_timestamp64_milli_micro_nano(self, scale): with By("converting datetime to string"): dt_str = dt.strftime("%Y-%m-%d %H:%M:%S.%f") with And("converting DateTime to UTC"): - dt = dt.astimezone(pytz.timezone('UTC')) + dt = dt.astimezone(pytz.timezone("UTC")) with And("computing the expected result using python"): expected = int(dt.timestamp() * (10**scale)) if expected >= 0: @@ -370,31 +501,34 @@ def to_unix_timestamp64_milli_micro_nano(self, scale): @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Milli("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Milli( + "1.0" + ) ) def to_unix_timestamp64_milli(self): - """Check the toUnixTimestamp64Milli functions with DateTime64 extended range. - """ + """Check the toUnixTimestamp64Milli functions with DateTime64 extended range.""" to_unix_timestamp64_milli_micro_nano(scale=3) @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Micro("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Micro( + "1.0" + ) ) def to_unix_timestamp64_micro(self): - """Check the toUnixTimestamp64Micro functions with DateTime64 extended range. - """ + """Check the toUnixTimestamp64Micro functions with DateTime64 extended range.""" to_unix_timestamp64_milli_micro_nano(scale=6) @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Nano("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Nano( + "1.0" + ) ) def to_unix_timestamp64_nano(self): - """Check the toUnixTimestamp64Nano functions with DateTime64 extended range. 
- """ + """Check the toUnixTimestamp64Nano functions with DateTime64 extended range.""" to_unix_timestamp64_milli_micro_nano(scale=9) @@ -405,11 +539,13 @@ def from_unix_timestamp64_milli_micro_nano(self, scale): """ stress = self.context.stress timezones = timezones_range(stress) - func = {3: 'Milli', 6: 'Micro', 9: 'Nano'} + func = {3: "Milli", 6: "Micro", 9: "Nano"} for year in years_range(stress): with Given(f"I select datetimes in {year}"): - datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True) + datetimes = select_dates_in_year( + year=year, stress=stress, microseconds=True + ) for d in datetimes: for tz in timezones: @@ -417,9 +553,9 @@ def from_unix_timestamp64_milli_micro_nano(self, scale): with When(f"{dt} {tz}"): with By("converting datetime to string"): d_str = d.strftime("%Y-%m-%d %H:%M:%S.%f") - d_str += "0" * (scale-3) + d_str += "0" * (scale - 3) with And("converting DateTime64 to UTC"): - dt = dt.astimezone(pytz.timezone('UTC')) + dt = dt.astimezone(pytz.timezone("UTC")) with And("computing the expected result using python"): ts = int(dt.timestamp() * (10**scale)) if ts >= 0: @@ -434,38 +570,39 @@ def from_unix_timestamp64_milli_micro_nano(self, scale): @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Milli("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Milli( + "1.0" + ) ) def from_unix_timestamp64_milli(self): - """Check the fromUnixTimestamp64Milli functions with DateTime64 extended range. - """ + """Check the fromUnixTimestamp64Milli functions with DateTime64 extended range.""" from_unix_timestamp64_milli_micro_nano(scale=3) @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Micro("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Micro( + "1.0" + ) ) def from_unix_timestamp64_micro(self): - """Check the fromUnixTimestamp64Micro functions with DateTime64 extended range. - """ + """Check the fromUnixTimestamp64Micro functions with DateTime64 extended range.""" from_unix_timestamp64_milli_micro_nano(scale=6) @TestScenario @Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Nano("1.0") + RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Nano( + "1.0" + ) ) def from_unix_timestamp64_nano(self): - """Check the fromUnixTimestamp64Nano functions with DateTime64 extended range. - """ + """Check the fromUnixTimestamp64Nano functions with DateTime64 extended range.""" from_unix_timestamp64_milli_micro_nano(scale=9) @TestFeature -@Requirements( - RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions("1.0") -) +@Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions("1.0")) def type_conversion(self, node="clickhouse1"): """Check the type conversion operations with DateTime64. 
Cast can be set as Requirement thereby as the module diff --git a/tests/testflows/example/regression.py b/tests/testflows/example/regression.py index c601ebafb78..8c6cb4f29b9 100755 --- a/tests/testflows/example/regression.py +++ b/tests/testflows/example/regression.py @@ -9,12 +9,12 @@ from helpers.cluster import Cluster from helpers.argparser import argparser from platform import processor as current_cpu + @TestFeature @Name("example") @ArgumentParser(argparser) def regression(self, local, clickhouse_binary_path, clickhouse_version, stress=None): - """Simple example of how you can use TestFlows to test ClickHouse. - """ + """Simple example of how you can use TestFlows to test ClickHouse.""" nodes = { "clickhouse": ("clickhouse1",), } @@ -25,16 +25,21 @@ def regression(self, local, clickhouse_binary_path, clickhouse_version, stress=N self.context.stress = stress folder_name = os.path.basename(current_dir()) - if current_cpu() == 'aarch64': + if current_cpu() == "aarch64": env = f"{folder_name}_env_arm64" else: env = f"{folder_name}_env" - with Cluster(local, clickhouse_binary_path, nodes=nodes, - docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster: + with Cluster( + local, + clickhouse_binary_path, + nodes=nodes, + docker_compose_project_dir=os.path.join(current_dir(), env), + ) as cluster: self.context.cluster = cluster Scenario(run=load("example.tests.example", "scenario")) + if main(): regression() diff --git a/tests/testflows/example/requirements/requirements.py b/tests/testflows/example/requirements/requirements.py index 5b4765eb90e..92b6d912335 100644 --- a/tests/testflows/example/requirements/requirements.py +++ b/tests/testflows/example/requirements/requirements.py @@ -9,76 +9,79 @@ from testflows.core import Requirement Heading = Specification.Heading RQ_SRS_001_Example = Requirement( - name='RQ.SRS-001.Example', - version='1.0', + name="RQ.SRS-001.Example", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 'This is a long description of the requirement that can include any\n' - 'relevant information. \n' - '\n' - 'The one-line block that follows the requirement defines the `version` \n' - 'of the requirement. The version is controlled manually and is used\n' - 'to indicate material changes to the requirement that would \n' - 'require tests that cover this requirement to be updated.\n' - '\n' - 'It is a good practice to use requirement names that are broken\n' - 'up into groups. It is not recommended to use only numbers\n' - 'because if the requirement must be moved the numbering will not match.\n' - 'Therefore, the requirement name should start with the group\n' - 'name which is then followed by a number if any. For example,\n' - '\n' - ' RQ.SRS-001.Group.Subgroup.1\n' - '\n' + "This is a long description of the requirement that can include any\n" + "relevant information. \n" + "\n" + "The one-line block that follows the requirement defines the `version` \n" + "of the requirement. The version is controlled manually and is used\n" + "to indicate material changes to the requirement that would \n" + "require tests that cover this requirement to be updated.\n" + "\n" + "It is a good practice to use requirement names that are broken\n" + "up into groups. It is not recommended to use only numbers\n" + "because if the requirement must be moved the numbering will not match.\n" + "Therefore, the requirement name should start with the group\n" + "name which is then followed by a number if any. 
For example,\n" + "\n" + " RQ.SRS-001.Group.Subgroup.1\n" + "\n" "To keep names short, try to use abbreviations for the requirement's group name.\n" - '\n' - ), + "\n" + ), link=None, level=2, - num='4.1') + num="4.1", +) RQ_SRS_001_Example_Subgroup = Requirement( - name='RQ.SRS-001.Example.Subgroup', - version='1.0', + name="RQ.SRS-001.Example.Subgroup", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 'This an example of a sub-requirement of the [RQ.SRS-001.Example](#rqsrs-001example).\n' - '\n' - ), + "This an example of a sub-requirement of the [RQ.SRS-001.Example](#rqsrs-001example).\n" + "\n" + ), link=None, level=2, - num='4.2') + num="4.2", +) RQ_SRS_001_Example_Select_1 = Requirement( - name='RQ.SRS-001.Example.Select.1', - version='1.0', + name="RQ.SRS-001.Example.Select.1", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return `1` when user executes query\n' - '\n' - '```sql\n' - 'SELECT 1\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return `1` when user executes query\n" + "\n" + "```sql\n" + "SELECT 1\n" + "```\n" + "\n" + ), link=None, level=2, - num='4.3') + num="4.3", +) SRS_001_ClickHouse_Software_Requirements_Specification_Template = Specification( - name='SRS-001 ClickHouse Software Requirements Specification Template', + name="SRS-001 ClickHouse Software Requirements Specification Template", description=None, - author='[name of the author]', - date='[date]', - status=None, + author="[name of the author]", + date="[date]", + status=None, approved_by=None, approved_date=None, approved_version=None, @@ -90,26 +93,26 @@ SRS_001_ClickHouse_Software_Requirements_Specification_Template = Specification( parent=None, children=None, headings=( - Heading(name='Revision History', level=1, num='1'), - Heading(name='Introduction', level=1, num='2'), - Heading(name='Table of Contents', level=2, num='2.1'), - Heading(name='Generating HTML version', level=2, num='2.2'), - Heading(name='Generating Python Requirements', level=2, num='2.3'), - Heading(name='Terminology', level=1, num='3'), - Heading(name='SRS', level=2, num='3.1'), - Heading(name='Some term that you will use', level=2, num='3.2'), - Heading(name='Requirements', level=1, num='4'), - Heading(name='RQ.SRS-001.Example', level=2, num='4.1'), - Heading(name='RQ.SRS-001.Example.Subgroup', level=2, num='4.2'), - Heading(name='RQ.SRS-001.Example.Select.1', level=2, num='4.3'), - Heading(name='References', level=1, num='5'), - ), + Heading(name="Revision History", level=1, num="1"), + Heading(name="Introduction", level=1, num="2"), + Heading(name="Table of Contents", level=2, num="2.1"), + Heading(name="Generating HTML version", level=2, num="2.2"), + Heading(name="Generating Python Requirements", level=2, num="2.3"), + Heading(name="Terminology", level=1, num="3"), + Heading(name="SRS", level=2, num="3.1"), + Heading(name="Some term that you will use", level=2, num="3.2"), + Heading(name="Requirements", level=1, num="4"), + Heading(name="RQ.SRS-001.Example", level=2, num="4.1"), + Heading(name="RQ.SRS-001.Example.Subgroup", level=2, num="4.2"), + Heading(name="RQ.SRS-001.Example.Select.1", level=2, num="4.3"), + Heading(name="References", level=1, num="5"), + ), requirements=( RQ_SRS_001_Example, RQ_SRS_001_Example_Subgroup, RQ_SRS_001_Example_Select_1, - ), - content=''' + ), + content=""" # SRS-001 ClickHouse Software Requirements Specification Template **Author:** [name of the author] @@ -245,4 +248,5 @@ SELECT 1 [Some term that you will 
use]: #Sometermthatyouwilluse [ClickHouse]: https://clickhouse.com [Git]: https://git-scm.com/ -''') +""", +) diff --git a/tests/testflows/example/tests/example.py b/tests/testflows/example/tests/example.py index ea77f8b0235..9977e973ede 100644 --- a/tests/testflows/example/tests/example.py +++ b/tests/testflows/example/tests/example.py @@ -3,14 +3,12 @@ from testflows.asserts import error from example.requirements import * + @TestScenario @Name("select 1") -@Requirements( - RQ_SRS_001_Example_Select_1("1.0") -) +@Requirements(RQ_SRS_001_Example_Select_1("1.0")) def scenario(self, node="clickhouse1"): - """Check that ClickHouse returns 1 when user executes `SELECT 1` query. - """ + """Check that ClickHouse returns 1 when user executes `SELECT 1` query.""" node = self.context.cluster.node(node) with When("I execute query select 1"): diff --git a/tests/testflows/extended_precision_data_types/common.py b/tests/testflows/extended_precision_data_types/common.py index ebd0a6cac45..959ff96a536 100644 --- a/tests/testflows/extended_precision_data_types/common.py +++ b/tests/testflows/extended_precision_data_types/common.py @@ -10,49 +10,59 @@ from helpers.common import * rounding_precision = 7 + @contextmanager def allow_experimental_bigint(node): - """Enable experimental big int setting in Clickhouse. - """ + """Enable experimental big int setting in Clickhouse.""" setting = ("allow_experimental_bigint_types", 1) default_query_settings = None try: with Given("I add allow_experimental_bigint to the default query settings"): - default_query_settings = getsattr(current().context, "default_query_settings", []) + default_query_settings = getsattr( + current().context, "default_query_settings", [] + ) default_query_settings.append(setting) yield finally: - with Finally("I remove allow_experimental_bigint from the default query settings"): + with Finally( + "I remove allow_experimental_bigint from the default query settings" + ): if default_query_settings: try: default_query_settings.pop(default_query_settings.index(setting)) except ValueError: pass + @TestStep(Given) def allow_experimental_map_type(self): - """Set allow_experimental_map_type = 1 - """ + """Set allow_experimental_map_type = 1""" setting = ("allow_experimental_map_type", 1) default_query_settings = None try: with By("adding allow_experimental_map_type to the default query settings"): - default_query_settings = getsattr(current().context, "default_query_settings", []) + default_query_settings = getsattr( + current().context, "default_query_settings", [] + ) default_query_settings.append(setting) yield finally: - with Finally("I remove allow_experimental_map_type from the default query settings"): + with Finally( + "I remove allow_experimental_map_type from the default query settings" + ): if default_query_settings: try: default_query_settings.pop(default_query_settings.index(setting)) except ValueError: pass -def execute_query(sql, expected=None, format="TabSeparatedWithNames", compare_func=None): - """Execute SQL query and compare the output to the snapshot. 
- """ + +def execute_query( + sql, expected=None, format="TabSeparatedWithNames", compare_func=None +): + """Execute SQL query and compare the output to the snapshot.""" name = basename(current().name) with When("I execute query", description=sql): @@ -70,12 +80,16 @@ def execute_query(sql, expected=None, format="TabSeparatedWithNames", compare_fu else: with Then("I check output against snapshot"): with values() as that: - assert that(snapshot("\n" + r.output.strip() + "\n", "tests", name=name, encoder=str)), error() + assert that( + snapshot( + "\n" + r.output.strip() + "\n", "tests", name=name, encoder=str + ) + ), error() + @TestStep(Given) def table(self, data_type, name="table0"): - """Create a table. - """ + """Create a table.""" node = current().context.node try: @@ -87,34 +101,51 @@ def table(self, data_type, name="table0"): with Finally("drop the table"): node.query(f"DROP TABLE IF EXISTS {name}") + def getuid(): - """Create a unique variable name based on the test it is called from. - """ + """Create a unique variable name based on the test it is called from.""" if current().subtype == TestSubType.Example: - testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}" + testname = ( + f"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}" + ) else: testname = f"{basename(current().name).replace(' ', '_').replace(',','')}" - for char in ['(', ')', '[', ']','\'']: - testname = testname.replace(f'{char}', '') + for char in ["(", ")", "[", "]", "'"]: + testname = testname.replace(f"{char}", "") + + return testname + "_" + str(uuid.uuid1()).replace("-", "_") - return testname + "_" + str(uuid.uuid1()).replace('-', '_') def to_data_type(data_type, value): - """Return a conversion statement based on the data type provided - """ - if data_type in ['Decimal256(0)']: - return f'toDecimal256(\'{value}\',0)' + """Return a conversion statement based on the data type provided""" + if data_type in ["Decimal256(0)"]: + return f"toDecimal256('{value}',0)" else: - return f'to{data_type}(\'{value}\')' + return f"to{data_type}('{value}')" data_types = [ - ('Int128', '-170141183460469231731687303715884105728', '170141183460469231731687303715884105727'), - ('Int256', '-57896044618658097711785492504343953926634992332820282019728792003956564819968', '57896044618658097711785492504343953926634992332820282019728792003956564819967'), - ('UInt128','0','340282366920938463463374607431768211455'), - ('UInt256', '0', '115792089237316195423570985008687907853269984665640564039457584007913129639935'), + ( + "Int128", + "-170141183460469231731687303715884105728", + "170141183460469231731687303715884105727", + ), + ( + "Int256", + "-57896044618658097711785492504343953926634992332820282019728792003956564819968", + "57896044618658097711785492504343953926634992332820282019728792003956564819967", + ), + ("UInt128", "0", "340282366920938463463374607431768211455"), + ( + "UInt256", + "0", + "115792089237316195423570985008687907853269984665640564039457584007913129639935", + ), ] -Decimal256_min_max = -1000000000000000000000000000000000000000000000000000000000000000000000000000,1000000000000000000000000000000000000000000000000000000000000000000000000000 +Decimal256_min_max = ( + -1000000000000000000000000000000000000000000000000000000000000000000000000000, + 1000000000000000000000000000000000000000000000000000000000000000000000000000, +) diff --git a/tests/testflows/extended_precision_data_types/errors.py b/tests/testflows/extended_precision_data_types/errors.py index 
63b82f3368d..a38b3ce571d 100644 --- a/tests/testflows/extended_precision_data_types/errors.py +++ b/tests/testflows/extended_precision_data_types/errors.py @@ -1,11 +1,14 @@ def not_implemented_bigints(name): - return(48, f"Exception: {name} is not implemented for big integers") + return (48, f"Exception: {name} is not implemented for big integers") + def bigints_not_implements(name): - return(48, f'Exception: {name} for big integers is not implemented') + return (48, f"Exception: {name} for big integers is not implemented") + def illegal_type(): - return(43, 'Exception: Illegal type') + return (43, "Exception: Illegal type") + def illegal_column(): - return(44, 'Exception: Illegal column') \ No newline at end of file + return (44, "Exception: Illegal column") diff --git a/tests/testflows/extended_precision_data_types/regression.py b/tests/testflows/extended_precision_data_types/regression.py index a0b7c2576bc..f185a5e4ecb 100755 --- a/tests/testflows/extended_precision_data_types/regression.py +++ b/tests/testflows/extended_precision_data_types/regression.py @@ -10,40 +10,42 @@ from helpers.cluster import Cluster from helpers.argparser import argparser from extended_precision_data_types.requirements import * -xfails = { -} +xfails = {} + +xflags = {} -xflags = { -} @TestModule @ArgumentParser(argparser) @XFails(xfails) @XFlags(xflags) @Name("extended precision data types") -@Specifications( - SRS020_ClickHouse_Extended_Precision_Data_Types -) +@Specifications(SRS020_ClickHouse_Extended_Precision_Data_Types) @Requirements( RQ_SRS_020_ClickHouse_Extended_Precision("1.0"), ) -def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None): - """Extended precision data type regression. - """ - nodes = { - "clickhouse": - ("clickhouse1",) - } +def regression( + self, local, clickhouse_binary_path, clickhouse_version=None, stress=None +): + """Extended precision data type regression.""" + nodes = {"clickhouse": ("clickhouse1",)} if stress is not None: self.context.stress = stress self.context.clickhouse_version = clickhouse_version - with Cluster(local, clickhouse_binary_path, nodes=nodes, - docker_compose_project_dir=os.path.join(current_dir(), "extended-precision-data-type_env")) as cluster: + with Cluster( + local, + clickhouse_binary_path, + nodes=nodes, + docker_compose_project_dir=os.path.join( + current_dir(), "extended-precision-data-type_env" + ), + ) as cluster: self.context.cluster = cluster Feature(run=load("extended_precision_data_types.tests.feature", "feature")) + if main(): regression() diff --git a/tests/testflows/extended_precision_data_types/requirements/__init__.py b/tests/testflows/extended_precision_data_types/requirements/__init__.py index 75e9d5b4bb8..02f7d430154 100644 --- a/tests/testflows/extended_precision_data_types/requirements/__init__.py +++ b/tests/testflows/extended_precision_data_types/requirements/__init__.py @@ -1 +1 @@ -from .requirements import * \ No newline at end of file +from .requirements import * diff --git a/tests/testflows/extended_precision_data_types/requirements/requirements.py b/tests/testflows/extended_precision_data_types/requirements/requirements.py index fa828897f66..3b1aa89d0e2 100644 --- a/tests/testflows/extended_precision_data_types/requirements/requirements.py +++ b/tests/testflows/extended_precision_data_types/requirements/requirements.py @@ -9,757 +9,787 @@ from testflows.core import Requirement Heading = Specification.Heading RQ_SRS_020_ClickHouse_Extended_Precision = Requirement( - 
name='RQ.SRS-020.ClickHouse.Extended.Precision', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using [Extended Precision Data Types].\n' - '\n' - ), + "[ClickHouse] SHALL support using [Extended Precision Data Types].\n" "\n" + ), link=None, level=2, - num='4.1') + num="4.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toInt128 = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toInt128', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toInt128", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support converting values to `Int128` using the `toInt128` function.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT toInt128(1)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support converting values to `Int128` using the `toInt128` function.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT toInt128(1)\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.2.1') + num="4.2.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toUInt128 = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toUInt128', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toUInt128", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support converting values to `UInt128` format using `toUInt128` function.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT toUInt128(1)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support converting values to `UInt128` format using `toUInt128` function.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT toUInt128(1)\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.2.2') + num="4.2.2", +) RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toInt256 = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toInt256', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toInt256", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support converting values to `Int256` using `toInt256` function.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT toInt256(1)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support converting values to `Int256` using `toInt256` function.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT toInt256(1)\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.2.3') + num="4.2.3", +) RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toUInt256 = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toUInt256', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toUInt256", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support converting values to `UInt256` format using `toUInt256` function.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT toUInt256(1)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support converting values to `UInt256` format using `toUInt256` function.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT toUInt256(1)\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.2.4') + num="4.2.4", +) RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toDecimal256 = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toDecimal256', - 
version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toDecimal256", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support converting values to `Decimal256` format using `toDecimal256` function.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT toDecimal256(1,2)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support converting values to `Decimal256` format using `toDecimal256` function.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT toDecimal256(1,2)\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.2.5') + num="4.2.5", +) RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_FromMySQL = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.FromMySQL', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.FromMySQL", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support converting to [Extended Precision Data Types] from MySQL.\n' - '\n' - '\n' - ), + "[ClickHouse] SHALL support converting to [Extended Precision Data Types] from MySQL.\n" + "\n" + "\n" + ), link=None, level=3, - num='4.2.6') + num="4.2.6", +) RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_ToMySQL = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.ToMySQL', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.ToMySQL", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support converting from [Extended Precision Data Types] to MySQL.\n' - '\n' - ), + "[ClickHouse] MAY not support converting from [Extended Precision Data Types] to MySQL.\n" + "\n" + ), link=None, level=3, - num='4.2.7') + num="4.2.7", +) RQ_SRS_020_ClickHouse_Extended_Precision_Arithmetic_Int_Supported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Int.Supported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Int.Supported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using [Arithmetic functions] with Int128, UInt128, Int256, and UInt256.\n' - '\n' - 'Arithmetic functions:\n' - '* plus\n' - '* minus\n' - '* multiply\n' - '* divide\n' - '* intDiv\n' - '* intDivOrZero\n' - '* modulo\n' - '* moduloOrZero\n' - '* negate\n' - '* abs\n' - '* gcd\n' - '* lcm\n' - '\n' - ), + "[ClickHouse] SHALL support using [Arithmetic functions] with Int128, UInt128, Int256, and UInt256.\n" + "\n" + "Arithmetic functions:\n" + "* plus\n" + "* minus\n" + "* multiply\n" + "* divide\n" + "* intDiv\n" + "* intDivOrZero\n" + "* modulo\n" + "* moduloOrZero\n" + "* negate\n" + "* abs\n" + "* gcd\n" + "* lcm\n" + "\n" + ), link=None, level=3, - num='4.3.1') + num="4.3.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Arithmetic_Dec_Supported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Dec.Supported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Dec.Supported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using the following [Arithmetic functions] with Decimal256:\n' - '\n' - '* plus\n' - '* minus\n' - '* multiply\n' - '* divide\n' - '* intDiv\n' - '* intDivOrZero\n' - '* negate\n' - '* abs\n' - '\n' - ), + "[ClickHouse] SHALL support using the following [Arithmetic functions] with Decimal256:\n" + "\n" + "* plus\n" + "* minus\n" + "* multiply\n" + 
"* divide\n" + "* intDiv\n" + "* intDivOrZero\n" + "* negate\n" + "* abs\n" + "\n" + ), link=None, level=3, - num='4.3.2') + num="4.3.2", +) RQ_SRS_020_ClickHouse_Extended_Precision_Arithmetic_Dec_NotSupported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Dec.NotSupported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Dec.NotSupported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using the following [Arithmetic functions] with Decimal256:\n' - '\n' - '* modulo\n' - '* moduloOrZero\n' - '* gcd\n' - '* lcm\n' - '\n' - ), + "[ClickHouse] MAY not support using the following [Arithmetic functions] with Decimal256:\n" + "\n" + "* modulo\n" + "* moduloOrZero\n" + "* gcd\n" + "* lcm\n" + "\n" + ), link=None, level=3, - num='4.3.3') + num="4.3.3", +) RQ_SRS_020_ClickHouse_Extended_Precision_Arrays_Int_Supported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Int.Supported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Int.Supported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using the following [Array functions] with Int128, UInt128, Int256, and UInt256.\n' - '\n' - '* empty\n' - '* notEmpty\n' - '* length\n' - '* arrayCount\n' - '* arrayPopBack\n' - '* arrayPopFront\n' - '* arraySort\n' - '* arrayReverseSort\n' - '* arrayUniq\n' - '* arrayJoin\n' - '* arrayDistinct\n' - '* arrayEnumerate\n' - '* arrayEnumerateDense\n' - '* arrayEnumerateUniq\n' - '* arrayReverse\n' - '* reverse\n' - '* arrayFlatten\n' - '* arrayCompact\n' - '* arrayExists\n' - '* arrayAll\n' - '* arrayMin\n' - '* arrayMax\n' - '* arraySum\n' - '* arrayAvg\n' - '* arrayReduce\n' - '* arrayReduceInRanges\n' - '* arrayZip\n' - '* arrayMap\n' - '* arrayFilter\n' - '* arrayFill\n' - '* arrayReverseFill\n' - '* arraySplit\n' - '* arrayFirst\n' - '* arrayFirstIndex\n' - '* arrayConcat\n' - '* hasAll\n' - '* hasAny\n' - '* hasSubstr\n' - '* arrayElement\n' - '* has\n' - '* indexOf\n' - '* countEqual\n' - '* arrayPushBack\n' - '* arrayPushFront\n' - '* arrayResize\n' - '* arraySlice\n' - '\n' - ), + "[ClickHouse] SHALL support using the following [Array functions] with Int128, UInt128, Int256, and UInt256.\n" + "\n" + "* empty\n" + "* notEmpty\n" + "* length\n" + "* arrayCount\n" + "* arrayPopBack\n" + "* arrayPopFront\n" + "* arraySort\n" + "* arrayReverseSort\n" + "* arrayUniq\n" + "* arrayJoin\n" + "* arrayDistinct\n" + "* arrayEnumerate\n" + "* arrayEnumerateDense\n" + "* arrayEnumerateUniq\n" + "* arrayReverse\n" + "* reverse\n" + "* arrayFlatten\n" + "* arrayCompact\n" + "* arrayExists\n" + "* arrayAll\n" + "* arrayMin\n" + "* arrayMax\n" + "* arraySum\n" + "* arrayAvg\n" + "* arrayReduce\n" + "* arrayReduceInRanges\n" + "* arrayZip\n" + "* arrayMap\n" + "* arrayFilter\n" + "* arrayFill\n" + "* arrayReverseFill\n" + "* arraySplit\n" + "* arrayFirst\n" + "* arrayFirstIndex\n" + "* arrayConcat\n" + "* hasAll\n" + "* hasAny\n" + "* hasSubstr\n" + "* arrayElement\n" + "* has\n" + "* indexOf\n" + "* countEqual\n" + "* arrayPushBack\n" + "* arrayPushFront\n" + "* arrayResize\n" + "* arraySlice\n" + "\n" + ), link=None, level=3, - num='4.4.1') + num="4.4.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Arrays_Int_NotSupported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Int.NotSupported', - version='1.0', + 
name="RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Int.NotSupported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using the following [Array functions] with Int128, UInt128, Int256, and UInt256:\n' - '\n' - '* arrayDifference\n' - '* arrayCumSum\n' - '* arrayCumSumNonNegative\n' - '\n' - ), + "[ClickHouse] MAY not support using the following [Array functions] with Int128, UInt128, Int256, and UInt256:\n" + "\n" + "* arrayDifference\n" + "* arrayCumSum\n" + "* arrayCumSumNonNegative\n" + "\n" + ), link=None, level=3, - num='4.4.2') + num="4.4.2", +) RQ_SRS_020_ClickHouse_Extended_Precision_Arrays_Dec_Supported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Dec.Supported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Dec.Supported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using the following [Array functions] with Decimal256:\n' - '\n' - '* empty\n' - '* notEmpty\n' - '* length\n' - '* arrayCount\n' - '* arrayPopBack\n' - '* arrayPopFront\n' - '* arraySort\n' - '* arrayReverseSort\n' - '* arrayUniq\n' - '* arrayJoin\n' - '* arrayDistinct\n' - '* arrayEnumerate\n' - '* arrayEnumerateDense\n' - '* arrayEnumerateUniq\n' - '* arrayReverse\n' - '* reverse\n' - '* arrayFlatten\n' - '* arrayCompact\n' - '* arrayExists\n' - '* arrayAll\n' - '* arrayReduce\n' - '* arrayReduceInRanges\n' - '* arrayZip\n' - '* arrayMap\n' - '* arrayFilter\n' - '* arrayFill\n' - '* arrayReverseFill\n' - '* arraySplit\n' - '* arrayFirst\n' - '* arrayFirstIndex\n' - '* arrayConcat\n' - '* hasAll\n' - '* hasAny\n' - '* hasSubstr\n' - '* arrayElement\n' - '* has\n' - '* indexOf\n' - '* countEqual\n' - '* arrayPushBack\n' - '* arrayPushFront\n' - '* arrayResize\n' - '* arraySlice\n' - '\n' - ), + "[ClickHouse] SHALL support using the following [Array functions] with Decimal256:\n" + "\n" + "* empty\n" + "* notEmpty\n" + "* length\n" + "* arrayCount\n" + "* arrayPopBack\n" + "* arrayPopFront\n" + "* arraySort\n" + "* arrayReverseSort\n" + "* arrayUniq\n" + "* arrayJoin\n" + "* arrayDistinct\n" + "* arrayEnumerate\n" + "* arrayEnumerateDense\n" + "* arrayEnumerateUniq\n" + "* arrayReverse\n" + "* reverse\n" + "* arrayFlatten\n" + "* arrayCompact\n" + "* arrayExists\n" + "* arrayAll\n" + "* arrayReduce\n" + "* arrayReduceInRanges\n" + "* arrayZip\n" + "* arrayMap\n" + "* arrayFilter\n" + "* arrayFill\n" + "* arrayReverseFill\n" + "* arraySplit\n" + "* arrayFirst\n" + "* arrayFirstIndex\n" + "* arrayConcat\n" + "* hasAll\n" + "* hasAny\n" + "* hasSubstr\n" + "* arrayElement\n" + "* has\n" + "* indexOf\n" + "* countEqual\n" + "* arrayPushBack\n" + "* arrayPushFront\n" + "* arrayResize\n" + "* arraySlice\n" + "\n" + ), link=None, level=3, - num='4.4.3') + num="4.4.3", +) RQ_SRS_020_ClickHouse_Extended_Precision_Arrays_Dec_NotSupported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Dec.NotSupported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Dec.NotSupported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using the following [Array functions] with Decimal256:\n' - '\n' - '* arrayMin\n' - '* arrayMax\n' - '* arraaySum\n' - '* arrayAvg\n' - '* arrayDifference\n' - '* arrayCumSum\n' - '* arrayCumSumNonNegative\n' - '\n' - ), + "[ClickHouse] MAY not support using the following [Array functions] with Decimal256:\n" + "\n" + "* 
arrayMin\n" + "* arrayMax\n" + "* arraaySum\n" + "* arrayAvg\n" + "* arrayDifference\n" + "* arrayCumSum\n" + "* arrayCumSumNonNegative\n" + "\n" + ), link=None, level=3, - num='4.4.4') + num="4.4.4", +) RQ_SRS_020_ClickHouse_Extended_Precision_Comparison = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Comparison', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Comparison", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using [Comparison functions] with [Extended Precision Data Types].\n' - '\n' - 'Comparison functions:\n' - '* equals\n' - '* notEquals\n' - '* less\n' - '* greater\n' - '* lessOrEquals\n' - '* greaterOrEquals\n' - '\n' - ), + "[ClickHouse] SHALL support using [Comparison functions] with [Extended Precision Data Types].\n" + "\n" + "Comparison functions:\n" + "* equals\n" + "* notEquals\n" + "* less\n" + "* greater\n" + "* lessOrEquals\n" + "* greaterOrEquals\n" + "\n" + ), link=None, level=3, - num='4.5.1') + num="4.5.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Logical = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Logical', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Logical", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using [Logical functions] with [Extended Precision Data Types].\n' - '\n' - 'Logical functions:\n' - '* and\n' - '* or\n' - '* not\n' - '* xor\n' - '\n' - ), + "[ClickHouse] MAY not support using [Logical functions] with [Extended Precision Data Types].\n" + "\n" + "Logical functions:\n" + "* and\n" + "* or\n" + "* not\n" + "* xor\n" + "\n" + ), link=None, level=3, - num='4.6.1') + num="4.6.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Mathematical_Supported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Mathematical.Supported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Mathematical.Supported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using the following [Mathematical functions] with [Extended Precision Data Types]:\n' - '\n' - '* exp\n' - '* log, ln\n' - '* exp2\n' - '* log2\n' - '* exp10\n' - '* log10\n' - '* sqrt\n' - '* cbrt\n' - '* erf\n' - '* erfc\n' - '* lgamma\n' - '* tgamma\n' - '* sin\n' - '* cos\n' - '* tan\n' - '* asin\n' - '* acos\n' - '* atan\n' - '* cosh\n' - '* acosh\n' - '* sinh\n' - '* asinh\n' - '* tanh\n' - '* atanh\n' - '* log1p\n' - '* sign\n' - '\n' - ), + "[ClickHouse] SHALL support using the following [Mathematical functions] with [Extended Precision Data Types]:\n" + "\n" + "* exp\n" + "* log, ln\n" + "* exp2\n" + "* log2\n" + "* exp10\n" + "* log10\n" + "* sqrt\n" + "* cbrt\n" + "* erf\n" + "* erfc\n" + "* lgamma\n" + "* tgamma\n" + "* sin\n" + "* cos\n" + "* tan\n" + "* asin\n" + "* acos\n" + "* atan\n" + "* cosh\n" + "* acosh\n" + "* sinh\n" + "* asinh\n" + "* tanh\n" + "* atanh\n" + "* log1p\n" + "* sign\n" + "\n" + ), link=None, level=3, - num='4.7.1') + num="4.7.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Mathematical_NotSupported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Mathematical.NotSupported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Mathematical.NotSupported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using the following [Mathematical functions] with [Extended Precision Data Types]:\n' - 
'\n' - '* pow, power\n' - '* intExp2\n' - '* intExp10\n' - '* atan2\n' - '* hypot\n' - '\n' - ), + "[ClickHouse] MAY not support using the following [Mathematical functions] with [Extended Precision Data Types]:\n" + "\n" + "* pow, power\n" + "* intExp2\n" + "* intExp10\n" + "* atan2\n" + "* hypot\n" + "\n" + ), link=None, level=3, - num='4.7.2') + num="4.7.2", +) RQ_SRS_020_ClickHouse_Extended_Precision_Rounding_Int_Supported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Int.Supported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Int.Supported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using the following [Rounding functions] with Int128, UInt128, Int256, and UInt256:\n' - '\n' - '* floor\n' - '* ceil\n' - '* trunc\n' - '* round\n' - '* roundBankers\n' - '* roundDuration\n' - '* roundAge\n' - '\n' - ), + "[ClickHouse] SHALL support using the following [Rounding functions] with Int128, UInt128, Int256, and UInt256:\n" + "\n" + "* floor\n" + "* ceil\n" + "* trunc\n" + "* round\n" + "* roundBankers\n" + "* roundDuration\n" + "* roundAge\n" + "\n" + ), link=None, level=3, - num='4.8.1') + num="4.8.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Rounding_Int_NotSupported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Int.NotSupported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Int.NotSupported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using the following [Rounding functions] with Int128, UInt128, Int256, and UInt256:\n' - '\n' - '* roundDown\n' - '* roundToExp2\n' - '\n' - ), + "[ClickHouse] MAY not support using the following [Rounding functions] with Int128, UInt128, Int256, and UInt256:\n" + "\n" + "* roundDown\n" + "* roundToExp2\n" + "\n" + ), link=None, level=3, - num='4.8.2') + num="4.8.2", +) RQ_SRS_020_ClickHouse_Extended_Precision_Rounding_Dec_Supported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Dec.Supported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Dec.Supported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using the following [Rounding functions] with Decimal256:\n' - '\n' - '* floor\n' - '* ceil\n' - '* trunc\n' - '* round\n' - '* roundBankers\n' - '\n' - ), + "[ClickHouse] SHALL support using the following [Rounding functions] with Decimal256:\n" + "\n" + "* floor\n" + "* ceil\n" + "* trunc\n" + "* round\n" + "* roundBankers\n" + "\n" + ), link=None, level=3, - num='4.8.3') + num="4.8.3", +) RQ_SRS_020_ClickHouse_Extended_Precision_Rounding_Dec_NotSupported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Dec.NotSupported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Dec.NotSupported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using the following [Rounding functions] with Decimal256:\n' - '\n' - '* roundDuration\n' - '* roundAge\n' - '* roundDown\n' - '* roundToExp2\n' - '\n' - ), + "[ClickHouse] MAY not support using the following [Rounding functions] with Decimal256:\n" + "\n" + "* roundDuration\n" + "* roundAge\n" + "* roundDown\n" + "* roundToExp2\n" + "\n" + ), link=None, level=3, - num='4.8.4') + num="4.8.4", +) RQ_SRS_020_ClickHouse_Extended_Precision_Bit_Int_Supported = 
Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Int.Supported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Int.Supported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using the following [Bit functions] with Int128, UInt128, Int256, and UInt256:\n' - '\n' - '* bitAnd\n' - '* bitOr\n' - '* bitXor\n' - '* bitNot\n' - '* bitShiftLeft\n' - '* bitShiftRight\n' - '* bitCount\n' - '\n' - ), + "[ClickHouse] SHALL support using the following [Bit functions] with Int128, UInt128, Int256, and UInt256:\n" + "\n" + "* bitAnd\n" + "* bitOr\n" + "* bitXor\n" + "* bitNot\n" + "* bitShiftLeft\n" + "* bitShiftRight\n" + "* bitCount\n" + "\n" + ), link=None, level=3, - num='4.9.1') + num="4.9.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Bit_Int_NotSupported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Int.NotSupported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Int.NotSupported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using the following [Bit functions] with Int128, UInt128, Int256, and UInt256:\n' - '\n' - '* bitRotateLeft\n' - '* bitRotateRight\n' - '* bitTest\n' - '* bitTestAll\n' - '* bitTestAny\n' - '\n' - ), + "[ClickHouse] MAY not support using the following [Bit functions] with Int128, UInt128, Int256, and UInt256:\n" + "\n" + "* bitRotateLeft\n" + "* bitRotateRight\n" + "* bitTest\n" + "* bitTestAll\n" + "* bitTestAny\n" + "\n" + ), link=None, level=3, - num='4.9.2') + num="4.9.2", +) RQ_SRS_020_ClickHouse_Extended_Precision_Bit_Dec_NotSupported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Dec.NotSupported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Dec.NotSupported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using [Bit functions] with Decimal256.\n' - '\n' - 'Bit functions:\n' - '* bitAnd\n' - '* bitOr\n' - '* bitXor\n' - '* bitNot\n' - '* bitShiftLeft\n' - '* bitShiftRight\n' - '* bitCount\n' - '* bitRotateLeft\n' - '* bitRotateRight\n' - '* bitTest\n' - '* bitTestAll\n' - '* bitTestAny\n' - '\n' - ), + "[ClickHouse] MAY not support using [Bit functions] with Decimal256.\n" + "\n" + "Bit functions:\n" + "* bitAnd\n" + "* bitOr\n" + "* bitXor\n" + "* bitNot\n" + "* bitShiftLeft\n" + "* bitShiftRight\n" + "* bitCount\n" + "* bitRotateLeft\n" + "* bitRotateRight\n" + "* bitTest\n" + "* bitTestAll\n" + "* bitTestAny\n" + "\n" + ), link=None, level=3, - num='4.9.3') + num="4.9.3", +) RQ_SRS_020_ClickHouse_Extended_Precision_Null = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Null', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Null", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using [Null functions] with [Extended Precision Data Types].\n' - '\n' - 'Null functions:\n' - '* isNull\n' - '* isNotNull\n' - '* coalesce\n' - '* ifNull\n' - '* nullIf\n' - '* assumeNotNull\n' - '* toNullable\n' - '\n' - ), + "[ClickHouse] SHALL support using [Null functions] with [Extended Precision Data Types].\n" + "\n" + "Null functions:\n" + "* isNull\n" + "* isNotNull\n" + "* coalesce\n" + "* ifNull\n" + "* nullIf\n" + "* assumeNotNull\n" + "* toNullable\n" + "\n" + ), link=None, level=3, - num='4.10.1') + num="4.10.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Tuple = 
Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Tuple', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Tuple", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using [Tuple functions] with [Extended Precision Data Types].\n' - '\n' - 'Tuple functions:\n' - '* tuple\n' - '* tupleElement\n' - '* untuple\n' - '\n' - ), + "[ClickHouse] SHALL support using [Tuple functions] with [Extended Precision Data Types].\n" + "\n" + "Tuple functions:\n" + "* tuple\n" + "* tupleElement\n" + "* untuple\n" + "\n" + ), link=None, level=3, - num='4.11.1') + num="4.11.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Map_Supported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Map.Supported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Map.Supported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using the following [Map functions] with [Extended Precision Data Types]:\n' - '\n' - '* map\n' - '* mapContains\n' - '* mapKeys\n' - '* mapValues\n' - '\n' - ), + "[ClickHouse] SHALL support using the following [Map functions] with [Extended Precision Data Types]:\n" + "\n" + "* map\n" + "* mapContains\n" + "* mapKeys\n" + "* mapValues\n" + "\n" + ), link=None, level=3, - num='4.12.1') + num="4.12.1", +) RQ_SRS_020_ClickHouse_Extended_Precision_Map_NotSupported = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Map.NotSupported', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Map.NotSupported", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using the following [Map functions] with [Extended Precision Data Types]:\n' - '\n' - '* mapAdd\n' - '* mapSubtract\n' - '* mapPopulateSeries\n' - '\n' - ), + "[ClickHouse] MAY not support using the following [Map functions] with [Extended Precision Data Types]:\n" + "\n" + "* mapAdd\n" + "* mapSubtract\n" + "* mapPopulateSeries\n" + "\n" + ), link=None, level=3, - num='4.12.2') + num="4.12.2", +) RQ_SRS_020_ClickHouse_Extended_Precision_Create_Table = Requirement( - name='RQ.SRS-020.ClickHouse.Extended.Precision.Create.Table', - version='1.0', + name="RQ.SRS-020.ClickHouse.Extended.Precision.Create.Table", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creating table with columns that use [Extended Precision Data Types].\n' - '\n' - ), + "[ClickHouse] SHALL support creating table with columns that use [Extended Precision Data Types].\n" + "\n" + ), link=None, level=3, - num='4.13.1') + num="4.13.1", +) SRS020_ClickHouse_Extended_Precision_Data_Types = Specification( - name='SRS020 ClickHouse Extended Precision Data Types', + name="SRS020 ClickHouse Extended Precision Data Types", description=None, author=None, - date=None, - status=None, + date=None, + status=None, approved_by=None, approved_date=None, approved_version=None, @@ -771,56 +801,172 @@ SRS020_ClickHouse_Extended_Precision_Data_Types = Specification( parent=None, children=None, headings=( - Heading(name='Revision History', level=1, num='1'), - Heading(name='Introduction', level=1, num='2'), - Heading(name='Terminology', level=1, num='3'), - Heading(name='Extended Precision Data Types', level=2, num='3.1'), - Heading(name='Requirements', level=1, num='4'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision', level=2, num='4.1'), - Heading(name='Conversion', level=2, 
num='4.2'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toInt128', level=3, num='4.2.1'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toUInt128', level=3, num='4.2.2'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toInt256', level=3, num='4.2.3'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toUInt256', level=3, num='4.2.4'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toDecimal256', level=3, num='4.2.5'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.FromMySQL', level=3, num='4.2.6'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.ToMySQL', level=3, num='4.2.7'), - Heading(name='Arithmetic', level=2, num='4.3'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Int.Supported', level=3, num='4.3.1'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Dec.Supported', level=3, num='4.3.2'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Dec.NotSupported', level=3, num='4.3.3'), - Heading(name='Arrays', level=2, num='4.4'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Int.Supported', level=3, num='4.4.1'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Int.NotSupported', level=3, num='4.4.2'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Dec.Supported', level=3, num='4.4.3'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Dec.NotSupported', level=3, num='4.4.4'), - Heading(name='Comparison', level=2, num='4.5'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Comparison', level=3, num='4.5.1'), - Heading(name='Logical Functions', level=2, num='4.6'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Logical', level=3, num='4.6.1'), - Heading(name='Mathematical Functions', level=2, num='4.7'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Mathematical.Supported', level=3, num='4.7.1'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Mathematical.NotSupported', level=3, num='4.7.2'), - Heading(name='Rounding Functions', level=2, num='4.8'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Int.Supported', level=3, num='4.8.1'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Int.NotSupported', level=3, num='4.8.2'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Dec.Supported', level=3, num='4.8.3'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Dec.NotSupported', level=3, num='4.8.4'), - Heading(name='Bit Functions', level=2, num='4.9'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Int.Supported', level=3, num='4.9.1'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Int.NotSupported', level=3, num='4.9.2'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Dec.NotSupported', level=3, num='4.9.3'), - Heading(name='Null Functions', level=2, num='4.10'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Null', level=3, num='4.10.1'), - Heading(name='Tuple Functions', level=2, num='4.11'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Tuple', level=3, num='4.11.1'), - Heading(name='Map Functions', level=2, num='4.12'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Map.Supported', level=3, num='4.12.1'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Map.NotSupported', level=3, num='4.12.2'), - Heading(name='Create', level=2, 
num='4.13'), - Heading(name='RQ.SRS-020.ClickHouse.Extended.Precision.Create.Table', level=3, num='4.13.1'), - Heading(name='References', level=1, num='5'), + Heading(name="Revision History", level=1, num="1"), + Heading(name="Introduction", level=1, num="2"), + Heading(name="Terminology", level=1, num="3"), + Heading(name="Extended Precision Data Types", level=2, num="3.1"), + Heading(name="Requirements", level=1, num="4"), + Heading(name="RQ.SRS-020.ClickHouse.Extended.Precision", level=2, num="4.1"), + Heading(name="Conversion", level=2, num="4.2"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toInt128", + level=3, + num="4.2.1", ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toUInt128", + level=3, + num="4.2.2", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toInt256", + level=3, + num="4.2.3", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toUInt256", + level=3, + num="4.2.4", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.toDecimal256", + level=3, + num="4.2.5", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.FromMySQL", + level=3, + num="4.2.6", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Conversion.ToMySQL", + level=3, + num="4.2.7", + ), + Heading(name="Arithmetic", level=2, num="4.3"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Int.Supported", + level=3, + num="4.3.1", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Dec.Supported", + level=3, + num="4.3.2", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arithmetic.Dec.NotSupported", + level=3, + num="4.3.3", + ), + Heading(name="Arrays", level=2, num="4.4"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Int.Supported", + level=3, + num="4.4.1", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Int.NotSupported", + level=3, + num="4.4.2", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Dec.Supported", + level=3, + num="4.4.3", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Arrays.Dec.NotSupported", + level=3, + num="4.4.4", + ), + Heading(name="Comparison", level=2, num="4.5"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Comparison", + level=3, + num="4.5.1", + ), + Heading(name="Logical Functions", level=2, num="4.6"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Logical", + level=3, + num="4.6.1", + ), + Heading(name="Mathematical Functions", level=2, num="4.7"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Mathematical.Supported", + level=3, + num="4.7.1", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Mathematical.NotSupported", + level=3, + num="4.7.2", + ), + Heading(name="Rounding Functions", level=2, num="4.8"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Int.Supported", + level=3, + num="4.8.1", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Int.NotSupported", + level=3, + num="4.8.2", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Dec.Supported", + level=3, + num="4.8.3", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Rounding.Dec.NotSupported", + level=3, + num="4.8.4", + ), + Heading(name="Bit Functions", level=2, num="4.9"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Int.Supported", + level=3, + 
num="4.9.1", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Int.NotSupported", + level=3, + num="4.9.2", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Bit.Dec.NotSupported", + level=3, + num="4.9.3", + ), + Heading(name="Null Functions", level=2, num="4.10"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Null", level=3, num="4.10.1" + ), + Heading(name="Tuple Functions", level=2, num="4.11"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Tuple", level=3, num="4.11.1" + ), + Heading(name="Map Functions", level=2, num="4.12"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Map.Supported", + level=3, + num="4.12.1", + ), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Map.NotSupported", + level=3, + num="4.12.2", + ), + Heading(name="Create", level=2, num="4.13"), + Heading( + name="RQ.SRS-020.ClickHouse.Extended.Precision.Create.Table", + level=3, + num="4.13.1", + ), + Heading(name="References", level=1, num="5"), + ), requirements=( RQ_SRS_020_ClickHouse_Extended_Precision, RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toInt128, @@ -853,8 +999,8 @@ SRS020_ClickHouse_Extended_Precision_Data_Types = Specification( RQ_SRS_020_ClickHouse_Extended_Precision_Map_Supported, RQ_SRS_020_ClickHouse_Extended_Precision_Map_NotSupported, RQ_SRS_020_ClickHouse_Extended_Precision_Create_Table, - ), - content=''' + ), + content=""" # SRS020 ClickHouse Extended Precision Data Types # Software Requirements Specification @@ -1420,4 +1566,5 @@ version: 1.0 [Revision History]: https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/extended_precision_data_types/requirements/requirements.md [Git]: https://git-scm.com/ [GitHub]: https://github.com -''') +""", +) diff --git a/tests/testflows/extended_precision_data_types/tests/arithmetic.py b/tests/testflows/extended_precision_data_types/tests/arithmetic.py index c57f3d7d8e1..e949ef65f53 100644 --- a/tests/testflows/extended_precision_data_types/tests/arithmetic.py +++ b/tests/testflows/extended_precision_data_types/tests/arithmetic.py @@ -5,198 +5,256 @@ from extended_precision_data_types.requirements import * from extended_precision_data_types.common import * funcs = [ - ('plus', '2'), - ('minus', '0'), - ('multiply', '1'), - ('divide', '1'), - ('intDiv', '1'), - ('intDivOrZero', '1'), - ('modulo', '0'), - ('moduloOrZero', '0'), - ('negate', '-1'), - ('abs', '1'), - ('gcd', '1'), - ('lcm', '1'), + ("plus", "2"), + ("minus", "0"), + ("multiply", "1"), + ("divide", "1"), + ("intDiv", "1"), + ("intDivOrZero", "1"), + ("modulo", "0"), + ("moduloOrZero", "0"), + ("negate", "-1"), + ("abs", "1"), + ("gcd", "1"), + ("lcm", "1"), +] + +Examples_list = [ + tuple(list(func) + list(data_type) + [Name(f"{func[0]} - {data_type[0]}")]) + for func in funcs + for data_type in data_types +] +Examples_dec_list = [ + tuple(list(func) + [Name(f"{func[0]} - Decimal256")]) for func in funcs ] -Examples_list = [tuple(list(func)+list(data_type)+[Name(f'{func[0]} - {data_type[0]}')]) for func in funcs for data_type in data_types] -Examples_dec_list = [tuple(list(func)+[Name(f'{func[0]} - Decimal256')]) for func in funcs] @TestOutline -@Examples('arithmetic_func expected_result int_type min max', Examples_list) +@Examples("arithmetic_func expected_result int_type min max", Examples_list) def inline_check(self, arithmetic_func, expected_result, int_type, min, max, node=None): - """Check that arithmetic functions work using inline tests with Int128, UInt128, Int256, and 
UInt256. - """ + """Check that arithmetic functions work using inline tests with Int128, UInt128, Int256, and UInt256.""" if node is None: node = self.context.node - if arithmetic_func in ['negate','abs']: + if arithmetic_func in ["negate", "abs"]: with When(f"I check {arithmetic_func} with {int_type}"): output = node.query(f"SELECT {arithmetic_func}(to{int_type}(1))").output assert output == expected_result, error() with When(f"I check {arithmetic_func} with {int_type} max and min value"): - execute_query(f""" + execute_query( + f""" SELECT {arithmetic_func}(to{int_type}(\'{max}\')), {arithmetic_func}(to{int_type}(\'{min}\')) - """) + """ + ) else: with When(f"I check {arithmetic_func} with {int_type}"): - output = node.query(f"SELECT {arithmetic_func}(to{int_type}(1), to{int_type}(1))").output + output = node.query( + f"SELECT {arithmetic_func}(to{int_type}(1), to{int_type}(1))" + ).output assert output == expected_result, error() - if arithmetic_func in ['gcd','lcm']: + if arithmetic_func in ["gcd", "lcm"]: - if int_type in ['UInt128','UInt256']: - exitcode=153 + if int_type in ["UInt128", "UInt256"]: + exitcode = 153 else: - exitcode=151 + exitcode = 151 with When(f"I check {arithmetic_func} with {int_type} max and min value"): - node.query(f"SELECT {arithmetic_func}(to{int_type}(\'{max}\'), to{int_type}(1)), {arithmetic_func}(to{int_type}(\'{min}\'), to{int_type}(1))", - exitcode = exitcode, message = 'Exception:') + node.query( + f"SELECT {arithmetic_func}(to{int_type}('{max}'), to{int_type}(1)), {arithmetic_func}(to{int_type}('{min}'), to{int_type}(1))", + exitcode=exitcode, + message="Exception:", + ) else: with When(f"I check {arithmetic_func} with {int_type} max and min value"): - execute_query(f""" + execute_query( + f""" SELECT round({arithmetic_func}(to{int_type}(\'{max}\'), to{int_type}(1)), {rounding_precision}), round({arithmetic_func}(to{int_type}(\'{min}\'), to{int_type}(1)), {rounding_precision}) - """) + """ + ) + @TestOutline -@Examples('arithmetic_func expected_result int_type min max', Examples_list) +@Examples("arithmetic_func expected_result int_type min max", Examples_list) def table_check(self, arithmetic_func, expected_result, int_type, min, max, node=None): - """Check that arithmetic functions work using tables with Int128, UInt128, Int256, and UInt256. 
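For orientation, the expectations encoded in `inline_check` above (gcd/lcm at the type bounds are expected to fail, with exit code 153 for UInt128/UInt256 and 151 for Int128/Int256, while plain arithmetic returns a value) can be reproduced outside the testflows harness. A minimal sketch, assuming a local server reachable through `clickhouse-client` on PATH and a release that accepts the extended-precision types (older releases gate them behind an experimental big-integer setting); the `ch` helper below is illustrative and not part of the suite:

```python
# Illustrative only -- not part of the testflows suite. Assumes clickhouse-client
# can reach a local server that accepts Int128/UInt128/Int256/UInt256.
import subprocess

def ch(query: str) -> subprocess.CompletedProcess:
    """Run one query through clickhouse-client and capture its output."""
    return subprocess.run(
        ["clickhouse-client", "--query", query], capture_output=True, text=True
    )

UINT128_MAX = "340282366920938463463374607431768211455"  # 2**128 - 1

# The suite expects this to fail (exit code 153 for the unsigned types).
bad = ch(f"SELECT gcd(toUInt128('{UINT128_MAX}'), toUInt128(1))")
print("gcd at UInt128 max ->", bad.returncode, bad.stderr.strip()[:80])

# Plain arithmetic on the same type is expected to succeed and return 2.
ok = ch("SELECT plus(toUInt128(1), toUInt128(1))")
print("plus(1, 1) as UInt128 ->", ok.stdout.strip())
```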
- """ + """Check that arithmetic functions work using tables with Int128, UInt128, Int256, and UInt256.""" if node is None: node = self.context.node - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given(f"I have a table"): - table(name = table_name, data_type = int_type) + table(name=table_name, data_type=int_type) - if arithmetic_func in ['negate','abs']: + if arithmetic_func in ["negate", "abs"]: for value in [1, min, max]: - with When(f"I insert {arithmetic_func} with {int_type} {value} into the table"): - node.query(f"INSERT INTO {table_name} SELECT {arithmetic_func}(to{int_type}(\'{value}\'))") + with When( + f"I insert {arithmetic_func} with {int_type} {value} into the table" + ): + node.query( + f"INSERT INTO {table_name} SELECT {arithmetic_func}(to{int_type}('{value}'))" + ) with Then(f"I check the table output of {arithmetic_func} with {int_type}"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) else: with When(f"I insert {arithmetic_func} with {int_type} into the table"): - node.query(f"INSERT INTO {table_name} SELECT round({arithmetic_func}(to{int_type}(1), to{int_type}(1)), {rounding_precision})") + node.query( + f"INSERT INTO {table_name} SELECT round({arithmetic_func}(to{int_type}(1), to{int_type}(1)), {rounding_precision})" + ) with Then("I check that the output matches the expected value"): output = node.query(f"SELECT * FROM {table_name}").output assert output == expected_result, error() - if arithmetic_func in ['gcd', 'lcm']: + if arithmetic_func in ["gcd", "lcm"]: - if int_type in ['UInt128', 'UInt256']: + if int_type in ["UInt128", "UInt256"]: - with When(f"I insert {arithmetic_func} with {int_type} {min} into the table"): - node.query(f"INSERT INTO {table_name} SELECT {arithmetic_func}(to{int_type}(\'{min}\'), to{int_type}(1))", - exitcode = 153, message = 'Exception:') + with When( + f"I insert {arithmetic_func} with {int_type} {min} into the table" + ): + node.query( + f"INSERT INTO {table_name} SELECT {arithmetic_func}(to{int_type}('{min}'), to{int_type}(1))", + exitcode=153, + message="Exception:", + ) - with And(f"I insert {arithmetic_func} with {int_type} {max} into the table"): - node.query(f"INSERT INTO {table_name} SELECT {arithmetic_func}(to{int_type}(\'{max}\'), to{int_type}(1))") + with And( + f"I insert {arithmetic_func} with {int_type} {max} into the table" + ): + node.query( + f"INSERT INTO {table_name} SELECT {arithmetic_func}(to{int_type}('{max}'), to{int_type}(1))" + ) else: for value in [min, max]: - with When(f"I insert {arithmetic_func} with {int_type} {value} into the table"): - node.query(f"INSERT INTO {table_name} SELECT {arithmetic_func}(to{int_type}(\'{value}\'), to{int_type}(1))", - exitcode = 151, message = 'Exception:') + with When( + f"I insert {arithmetic_func} with {int_type} {value} into the table" + ): + node.query( + f"INSERT INTO {table_name} SELECT {arithmetic_func}(to{int_type}('{value}'), to{int_type}(1))", + exitcode=151, + message="Exception:", + ) else: for value in [min, max]: - with When(f"I insert {arithmetic_func} with {int_type} {value} into the table"): - node.query(f"INSERT INTO {table_name} SELECT round({arithmetic_func}(to{int_type}(\'{value}\'), to{int_type}(1)), {rounding_precision})") + with When( + f"I insert {arithmetic_func} with {int_type} {value} into the table" + ): + node.query( + f"INSERT INTO {table_name} SELECT round({arithmetic_func}(to{int_type}('{value}'), to{int_type}(1)), {rounding_precision})" + ) with Then(f"I check 
the table output of {arithmetic_func} with {int_type}"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) + @TestOutline -@Examples('arithmetic_func expected_result', Examples_dec_list) +@Examples("arithmetic_func expected_result", Examples_dec_list) def inline_check_dec(self, arithmetic_func, expected_result, node=None): - """Check that arithmetic functions work using inline with Decimal256. - """ + """Check that arithmetic functions work using inline with Decimal256.""" if node is None: node = self.context.node - if arithmetic_func in ['negate','abs']: + if arithmetic_func in ["negate", "abs"]: with When(f"I check {arithmetic_func} with toDecimal256"): output = node.query(f"SELECT {arithmetic_func}(toDecimal256(1,0))").output assert output == expected_result, error() - elif arithmetic_func in ['modulo', 'moduloOrZero', 'gcd', 'lcm']: + elif arithmetic_func in ["modulo", "moduloOrZero", "gcd", "lcm"]: with When(f"I check {arithmetic_func} with toDecimal256"): - node.query(f"SELECT {arithmetic_func}(toDecimal256(1,0), toDecimal256(1,0))", - exitcode=43, message = 'Exception:') + node.query( + f"SELECT {arithmetic_func}(toDecimal256(1,0), toDecimal256(1,0))", + exitcode=43, + message="Exception:", + ) else: with When(f"I check {arithmetic_func} with toDecimal256"): - output = node.query(f"SELECT {arithmetic_func}(toDecimal256(1,0), toDecimal256(1,0))").output + output = node.query( + f"SELECT {arithmetic_func}(toDecimal256(1,0), toDecimal256(1,0))" + ).output assert output == expected_result, error() + @TestOutline -@Examples('arithmetic_func expected_result', Examples_dec_list) +@Examples("arithmetic_func expected_result", Examples_dec_list) def table_check_dec(self, arithmetic_func, expected_result, node=None): - """Check that arithmetic functions work using tables with Decimal256. 
- """ + """Check that arithmetic functions work using tables with Decimal256.""" if node is None: node = self.context.node - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given(f"I have a table"): - table(name = table_name, data_type = 'Decimal256(0)') + table(name=table_name, data_type="Decimal256(0)") - if arithmetic_func in ['negate','abs']: + if arithmetic_func in ["negate", "abs"]: with When(f"I insert {arithmetic_func} with toDecimal256 into the table"): - node.query(f"INSERT INTO {table_name} SELECT {arithmetic_func}(toDecimal256(1,0))") + node.query( + f"INSERT INTO {table_name} SELECT {arithmetic_func}(toDecimal256(1,0))" + ) with Then(f"I check the table for output of {arithmetic_func} with Decimal256"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) - elif arithmetic_func in ['modulo', 'moduloOrZero', 'gcd', 'lcm']: + elif arithmetic_func in ["modulo", "moduloOrZero", "gcd", "lcm"]: with When(f"I check {arithmetic_func} with toDecimal256"): - node.query(f"INSERT INTO {table_name} SELECT {arithmetic_func}(toDecimal256(1,0), toDecimal256(1,0))", - exitcode=43, message = 'Exception:') + node.query( + f"INSERT INTO {table_name} SELECT {arithmetic_func}(toDecimal256(1,0), toDecimal256(1,0))", + exitcode=43, + message="Exception:", + ) else: with When(f"I insert {arithmetic_func} with toDecimal256 into the table"): - node.query(f"INSERT INTO {table_name} SELECT round({arithmetic_func}(toDecimal256(1,0), toDecimal256(1,0)), {rounding_precision})") + node.query( + f"INSERT INTO {table_name} SELECT round({arithmetic_func}(toDecimal256(1,0), toDecimal256(1,0)), {rounding_precision})" + ) with Then("I check that the output matches the expected value"): output = node.query(f"SELECT * FROM {table_name}").output assert output == expected_result, error() + @TestFeature @Name("arithmetic") @Requirements( @@ -205,13 +263,12 @@ def table_check_dec(self, arithmetic_func, expected_result, node=None): RQ_SRS_020_ClickHouse_Extended_Precision_Arithmetic_Dec_NotSupported("1.0"), ) def feature(self, node="clickhouse1", mysql_node="mysql1", stress=None, parallel=None): - """Check that arithmetic functions work with extended precision data types. 
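The Decimal256 checks above reduce to a fixed split: plus, minus, multiply, divide, intDiv, intDivOrZero, negate, and abs are expected to return a value, while modulo, moduloOrZero, gcd, and lcm are expected to be rejected with server error 43. The sketch below only reconstructs the queries those scenarios issue so the split is visible at a glance; it prints SQL and needs no running server:

```python
# Reconstruction of the SQL issued by inline_check_dec above; printing only.
# A value of None marks functions the suite expects to fail with error 43.
funcs = {
    "plus": "2", "minus": "0", "multiply": "1", "divide": "1",
    "intDiv": "1", "intDivOrZero": "1", "negate": "-1", "abs": "1",
    "modulo": None, "moduloOrZero": None, "gcd": None, "lcm": None,
}

for name, expected in funcs.items():
    if name in ("negate", "abs"):
        # unary functions take a single Decimal256 argument
        sql = f"SELECT {name}(toDecimal256(1,0))"
    else:
        sql = f"SELECT {name}(toDecimal256(1,0), toDecimal256(1,0))"
    outcome = f"expect {expected!r}" if expected is not None else "expect server error 43"
    print(f"{sql:<60} -- {outcome}")
```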
- """ + """Check that arithmetic functions work with extended precision data types.""" self.context.node = self.context.cluster.node(node) self.context.mysql_node = self.context.cluster.node(mysql_node) with allow_experimental_bigint(self.context.node): - Scenario(run = inline_check) - Scenario(run = table_check) - Scenario(run = inline_check_dec) - Scenario(run = table_check_dec) + Scenario(run=inline_check) + Scenario(run=table_check) + Scenario(run=inline_check_dec) + Scenario(run=table_check_dec) diff --git a/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py b/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py index c39574ba75e..106458d58bc 100644 --- a/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py +++ b/tests/testflows/extended_precision_data_types/tests/array_tuple_map.py @@ -3,8 +3,10 @@ import uuid from extended_precision_data_types.requirements import * from extended_precision_data_types.common import * + def get_table_name(): - return "table" + "_" + str(uuid.uuid1()).replace('-', '_') + return "table" + "_" + str(uuid.uuid1()).replace("-", "_") + @TestOutline(Suite) @Requirements( @@ -14,65 +16,74 @@ def get_table_name(): RQ_SRS_020_ClickHouse_Extended_Precision_Arrays_Dec_NotSupported("1.0"), ) def array_func(self, data_type, node=None): - """Check array functions with extended precision data types. - """ + """Check array functions with extended precision data types.""" if node is None: node = self.context.node - for func in ['arrayPopBack(', - 'arrayPopFront(', - 'arraySort(', - 'arrayReverseSort(', - 'arrayDistinct(', - 'arrayEnumerate(', - 'arrayEnumerateDense(', - 'arrayEnumerateUniq(', - 'arrayReverse(', - 'reverse(', - 'arrayFlatten(', - 'arrayCompact(', - 'arrayReduceInRanges(\'sum\', [(1, 5)],', - 'arrayMap(x -> (x + 2),', - 'arrayFill(x -> x=3,', - 'arrayReverseFill(x -> x=3,', - f'arrayConcat([{to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}],', - 'arrayFilter(x -> x == 1, ']: + for func in [ + "arrayPopBack(", + "arrayPopFront(", + "arraySort(", + "arrayReverseSort(", + "arrayDistinct(", + "arrayEnumerate(", + "arrayEnumerateDense(", + "arrayEnumerateUniq(", + "arrayReverse(", + "reverse(", + "arrayFlatten(", + "arrayCompact(", + "arrayReduceInRanges('sum', [(1, 5)],", + "arrayMap(x -> (x + 2),", + "arrayFill(x -> x=3,", + "arrayReverseFill(x -> x=3,", + f"arrayConcat([{to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}],", + "arrayFilter(x -> x == 1, ", + ]: with Scenario(f"Inline - {data_type} - {func})"): - execute_query(f""" + execute_query( + f""" SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)})) - """) + """ + ) with Scenario(f"Table - {data_type} - {func})"): table_name = get_table_name() - table(name = table_name, data_type = f'Array({data_type})') + table(name=table_name, data_type=f"Array({data_type})") with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," - f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))") + node.query( + f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") - for func in ['arraySplit((x, y) -> x=y, [0, 0, 0],']: + for func in ["arraySplit((x, y) -> x=y, [0, 0, 0],"]: with Scenario(f"Inline - {data_type} - 
{func})"): - execute_query(f"SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}," - f"{to_data_type(data_type,1)}))") + execute_query( + f"SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}," + f"{to_data_type(data_type,1)}))" + ) with Scenario(f"Table - {data_type} - {func})"): table_name = get_table_name() - table(name = table_name, data_type = f'Array(Array({data_type}))') + table(name=table_name, data_type=f"Array(Array({data_type}))") with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," - f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))") + node.query( + f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") - for func in [f'arrayZip([{to_data_type(data_type,1)}],']: + for func in [f"arrayZip([{to_data_type(data_type,1)}],"]: with Scenario(f"Inline - {data_type} - {func})"): execute_query(f"SELECT {func}array({to_data_type(data_type,3)}))") @@ -80,47 +91,62 @@ def array_func(self, data_type, node=None): with Scenario(f"Table - {data_type} - {func})"): table_name = get_table_name() - table(name = table_name, data_type = f'Array(Tuple({data_type}, {data_type}))') + table(name=table_name, data_type=f"Array(Tuple({data_type}, {data_type}))") with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,1)}))") + node.query( + f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,1)}))" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") - for func in ['empty(', - 'notEmpty(', - 'length(', - 'arrayCount(x -> x == 1, ', - 'arrayUniq(', - 'arrayJoin(', - 'arrayExists(x -> x==1,', - 'arrayAll(x -> x==1,', - 'arrayMin(', - 'arrayMax(', - 'arraySum(', - 'arrayAvg(', - 'arrayReduce(\'max\', ', - 'arrayFirst(x -> x==3,', - 'arrayFirstIndex(x -> x==3,', - f'hasAll([{to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}], ', - f'hasAny([{to_data_type(data_type,2)}, {to_data_type(data_type,1)}], ', - f'hasSubstr([{to_data_type(data_type,2)}, {to_data_type(data_type,1)}], ']: + for func in [ + "empty(", + "notEmpty(", + "length(", + "arrayCount(x -> x == 1, ", + "arrayUniq(", + "arrayJoin(", + "arrayExists(x -> x==1,", + "arrayAll(x -> x==1,", + "arrayMin(", + "arrayMax(", + "arraySum(", + "arrayAvg(", + "arrayReduce('max', ", + "arrayFirst(x -> x==3,", + "arrayFirstIndex(x -> x==3,", + f"hasAll([{to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}], ", + f"hasAny([{to_data_type(data_type,2)}, {to_data_type(data_type,1)}], ", + f"hasSubstr([{to_data_type(data_type,2)}, {to_data_type(data_type,1)}], ", + ]: - if func in ['arrayMin(','arrayMax(','arraySum(', 'arrayAvg('] and data_type in ['Decimal256(0)']: + if func in [ + "arrayMin(", + "arrayMax(", + "arraySum(", + "arrayAvg(", + ] and data_type in ["Decimal256(0)"]: with Scenario(f"Inline - {data_type} - {func})"): - node.query(f"SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))", - exitcode = 44, message = 'Exception:') + node.query( + f"SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))", + exitcode=44, + message="Exception:", + ) with Scenario(f"Table - {data_type} - {func})"): 
table_name = get_table_name() - table(name = table_name, data_type = data_type) + table(name=table_name, data_type=data_type) with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," + node.query( + f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))", - exitcode = 44, message = 'Exception:') + exitcode=44, + message="Exception:", + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") @@ -128,155 +154,185 @@ def array_func(self, data_type, node=None): with Scenario(f"Inline - {data_type} - {func})"): - execute_query(f"SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))") + execute_query( + f"SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))" + ) with Scenario(f"Table - {data_type} - {func})"): table_name = get_table_name() - table(name = table_name, data_type = data_type) + table(name=table_name, data_type=data_type) with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," - f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))") + node.query( + f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") - for func in ['arrayDifference(', - 'arrayCumSum(', - 'arrayCumSumNonNegative(']: + for func in ["arrayDifference(", "arrayCumSum(", "arrayCumSumNonNegative("]: - if data_type in ['Decimal256(0)']: + if data_type in ["Decimal256(0)"]: exitcode = 44 else: exitcode = 43 with Scenario(f"Inline - {data_type} - {func})"): - node.query(f"SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))", - exitcode = exitcode, message = 'Exception:') + node.query( + f"SELECT {func}array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}))", + exitcode=exitcode, + message="Exception:", + ) with Scenario(f"Table - {data_type} - {func})"): table_name = get_table_name() - table(name = table_name, data_type = data_type) + table(name=table_name, data_type=data_type) with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," + node.query( + f"INSERT INTO {table_name} SELECT {func}array({to_data_type(data_type,3)}," f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}))", - exitcode = exitcode, message = 'Exception:') + exitcode=exitcode, + message="Exception:", + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") - for func in ['arrayElement']: + for func in ["arrayElement"]: with Scenario(f"Inline - {data_type} - {func}"): - execute_query(f""" + execute_query( + f""" SELECT {func}(array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1) - """) + """ + ) with Scenario(f"Table - {data_type} - {func}"): table_name = get_table_name() - table(name = table_name, data_type = data_type) + table(name=table_name, data_type=data_type) with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}," - f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1)") + node.query( + f"INSERT INTO {table_name} SELECT 
{func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1)" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") - for func in ['arrayPushBack', - 'arrayPushFront']: + for func in ["arrayPushBack", "arrayPushFront"]: with Scenario(f"Inline - {data_type} - {func}"): - execute_query(f"SELECT {func}(array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}," - f"{to_data_type(data_type,1)}), {to_data_type(data_type,1)})") + execute_query( + f"SELECT {func}(array({to_data_type(data_type,3)}, {to_data_type(data_type,2)}," + f"{to_data_type(data_type,1)}), {to_data_type(data_type,1)})" + ) with Scenario(f"Table - {data_type} - {func}"): table_name = get_table_name() - table(name = table_name, data_type = f'Array({data_type})') + table(name=table_name, data_type=f"Array({data_type})") with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}," - f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), {to_data_type(data_type,1)})") + node.query( + f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), {to_data_type(data_type,1)})" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") - for func in ['arrayResize', - 'arraySlice']: + for func in ["arrayResize", "arraySlice"]: with Scenario(f"Inline - {data_type} - {func}"): - execute_query(f"SELECT {func}(array({to_data_type(data_type,3)}," - f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1)") + execute_query( + f"SELECT {func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1)" + ) with Scenario(f"Table - {data_type} - {func}"): table_name = get_table_name() - table(name = table_name, data_type = f'Array({data_type})') + table(name=table_name, data_type=f"Array({data_type})") with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}," - f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1)") + node.query( + f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), 1)" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") - for func in ['has', - 'indexOf', - 'countEqual']: + for func in ["has", "indexOf", "countEqual"]: with Scenario(f"Inline - {data_type} - {func}"): - execute_query(f"SELECT {func}(array({to_data_type(data_type,3)}," - f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), NULL)") + execute_query( + f"SELECT {func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), NULL)" + ) with Scenario(f"Table - {data_type} - {func}"): table_name = get_table_name() - table(name = table_name, data_type = data_type) + table(name=table_name, data_type=data_type) with When("I insert the output into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}," - f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), NULL)") + node.query( + f"INSERT INTO {table_name} SELECT {func}(array({to_data_type(data_type,3)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,1)}), NULL)" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") + @TestOutline(Suite) @Requirements( RQ_SRS_020_ClickHouse_Extended_Precision_Tuple("1.0"), ) def tuple_func(self, 
data_type, node=None): - """Check tuple functions with extended precision data types. - """ + """Check tuple functions with extended precision data types.""" if node is None: node = self.context.node with Scenario(f"Creating a tuple with {data_type}"): - node.query(f"SELECT tuple({to_data_type(data_type,1)}, {to_data_type(data_type,1)}, {to_data_type(data_type,1)})") + node.query( + f"SELECT tuple({to_data_type(data_type,1)}, {to_data_type(data_type,1)}, {to_data_type(data_type,1)})" + ) with Scenario(f"Creating a tuple with {data_type} on a table"): table_name = get_table_name() - table(name = table_name, data_type = f'Tuple({data_type}, {data_type}, {data_type})') + table( + name=table_name, data_type=f"Tuple({data_type}, {data_type}, {data_type})" + ) with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT tuple({to_data_type(data_type,1)}," - f"{to_data_type(data_type,1)}, {to_data_type(data_type,1)})") + node.query( + f"INSERT INTO {table_name} SELECT tuple({to_data_type(data_type,1)}," + f"{to_data_type(data_type,1)}, {to_data_type(data_type,1)})" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"tupleElement with {data_type}"): - node.query(f"SELECT tupleElement(({to_data_type(data_type,1)}, {to_data_type(data_type,1)}), 1)") + node.query( + f"SELECT tupleElement(({to_data_type(data_type,1)}, {to_data_type(data_type,1)}), 1)" + ) with Scenario(f"tupleElement with {data_type} on a table"): table_name = get_table_name() - table(name = table_name, data_type = data_type) + table(name=table_name, data_type=data_type) with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT tupleElement(({to_data_type(data_type,1)}, {to_data_type(data_type,1)}), 1)") + node.query( + f"INSERT INTO {table_name} SELECT tupleElement(({to_data_type(data_type,1)}, {to_data_type(data_type,1)}), 1)" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") @@ -286,58 +342,70 @@ def tuple_func(self, data_type, node=None): with Scenario(f"untuple with {data_type} on a table"): table_name = get_table_name() - table(name = table_name, data_type = data_type) + table(name=table_name, data_type=data_type) with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT untuple(({to_data_type(data_type,1)},))") + node.query( + f"INSERT INTO {table_name} SELECT untuple(({to_data_type(data_type,1)},))" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"tupleHammingDistance with {data_type}"): - node.query(f"SELECT tupleHammingDistance(({to_data_type(data_type,1)}, {to_data_type(data_type,1)})," - f"({to_data_type(data_type,2)}, {to_data_type(data_type,2)}))") + node.query( + f"SELECT tupleHammingDistance(({to_data_type(data_type,1)}, {to_data_type(data_type,1)})," + f"({to_data_type(data_type,2)}, {to_data_type(data_type,2)}))" + ) with Scenario(f"tupleHammingDistance with {data_type} on a table"): table_name = get_table_name() - table(name = table_name, data_type = data_type) + table(name=table_name, data_type=data_type) with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT tupleHammingDistance(({to_data_type(data_type,1)}," - f"{to_data_type(data_type,1)}), ({to_data_type(data_type,2)}, {to_data_type(data_type,2)}))") + node.query( + f"INSERT INTO {table_name} SELECT tupleHammingDistance(({to_data_type(data_type,1)}," + f"{to_data_type(data_type,1)}), ({to_data_type(data_type,2)}, {to_data_type(data_type,2)}))" + ) 
execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") + @TestOutline(Suite) @Requirements( RQ_SRS_020_ClickHouse_Extended_Precision_Map_Supported("1.0"), RQ_SRS_020_ClickHouse_Extended_Precision_Map_NotSupported("1.0"), ) def map_func(self, data_type, node=None): - """Check Map functions with extended precision data types. - """ + """Check Map functions with extended precision data types.""" if node is None: node = self.context.node with Scenario(f"Creating a map with {data_type}"): - node.query(f"SELECT map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)})") + node.query( + f"SELECT map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)})" + ) with Scenario(f"Creating a map with {data_type} on a table"): table_name = get_table_name() - table(name = table_name, data_type = f'Map(String, {data_type})') + table(name=table_name, data_type=f"Map(String, {data_type})") with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)})") + node.query( + f"INSERT INTO {table_name} SELECT map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)})" + ) execute_query(f"SELECT * FROM {table_name}") with Scenario(f"mapAdd with {data_type}"): - sql = (f"SELECT mapAdd(([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," + sql = ( + f"SELECT mapAdd(([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}])," f"([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," - f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))") + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))" + ) if data_type.startswith("Decimal"): node.query(sql, exitcode=43, message="Exception:") else: @@ -346,14 +414,18 @@ def map_func(self, data_type, node=None): with Scenario(f"mapAdd with {data_type} on a table"): table_name = get_table_name() - table(name = table_name, data_type = f'Tuple(Array({data_type}), Array({data_type}))') + table( + name=table_name, data_type=f"Tuple(Array({data_type}), Array({data_type}))" + ) with When("I insert the output into a table"): - sql = (f"INSERT INTO {table_name} SELECT mapAdd((" + sql = ( + f"INSERT INTO {table_name} SELECT mapAdd((" f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}])," f"([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," - f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))") + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))" + ) exitcode, message = 0, None if data_type.startswith("Decimal"): @@ -363,11 +435,13 @@ def map_func(self, data_type, node=None): execute_query(f"""SELECT * FROM {table_name} ORDER BY a ASC""") with Scenario(f"mapSubtract with {data_type}"): - sql = (f"SELECT mapSubtract((" + sql = ( + f"SELECT mapSubtract((" f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}])," f"([{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]," - f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))") + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))" + ) if data_type.startswith("Decimal"): node.query(sql, exitcode=43, message="Exception:") @@ -377,13 +451,17 @@ def map_func(self, data_type, node=None): with Scenario(f"mapSubtract with {data_type} on a table"): table_name = get_table_name() - 
table(name = table_name, data_type = f'Tuple(Array({data_type}), Array({data_type}))') + table( + name=table_name, data_type=f"Tuple(Array({data_type}), Array({data_type}))" + ) with When("I insert the output into a table"): - sql = (f"INSERT INTO {table_name} SELECT mapSubtract(([{to_data_type(data_type,1)}," + sql = ( + f"INSERT INTO {table_name} SELECT mapSubtract(([{to_data_type(data_type,1)}," f"{to_data_type(data_type,2)}], [{to_data_type(data_type,1)}," f"{to_data_type(data_type,2)}]), ([{to_data_type(data_type,1)}," - f"{to_data_type(data_type,2)}], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))") + f"{to_data_type(data_type,2)}], [{to_data_type(data_type,1)}, {to_data_type(data_type,2)}]))" + ) exitcode, message = 0, None if data_type.startswith("Decimal"): @@ -393,8 +471,10 @@ def map_func(self, data_type, node=None): execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"mapPopulateSeries with {data_type}"): - sql = (f"SELECT mapPopulateSeries([1,2,3], [{to_data_type(data_type,1)}," - f"{to_data_type(data_type,2)}, {to_data_type(data_type,3)}], 5)") + sql = ( + f"SELECT mapPopulateSeries([1,2,3], [{to_data_type(data_type,1)}," + f"{to_data_type(data_type,2)}, {to_data_type(data_type,3)}], 5)" + ) exitcode, message = 0, None if data_type.startswith("Decimal"): @@ -404,11 +484,15 @@ def map_func(self, data_type, node=None): with Scenario(f"mapPopulateSeries with {data_type} on a table"): table_name = get_table_name() - table(name = table_name, data_type = f'Tuple(Array({data_type}), Array({data_type}))') + table( + name=table_name, data_type=f"Tuple(Array({data_type}), Array({data_type}))" + ) with When("I insert the output into a table"): - sql = (f"INSERT INTO {table_name} SELECT mapPopulateSeries([1,2,3]," - f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}, {to_data_type(data_type,3)}], 5)") + sql = ( + f"INSERT INTO {table_name} SELECT mapPopulateSeries([1,2,3]," + f"[{to_data_type(data_type,1)}, {to_data_type(data_type,2)}, {to_data_type(data_type,3)}], 5)" + ) exitcode, message = 0, None if data_type.startswith("Decimal"): @@ -418,57 +502,73 @@ def map_func(self, data_type, node=None): execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"mapContains with {data_type}"): - node.query(f"SELECT mapContains( map('key1', {to_data_type(data_type,1)}," - f"'key2', {to_data_type(data_type,2)}), 'key1')") + node.query( + f"SELECT mapContains( map('key1', {to_data_type(data_type,1)}," + f"'key2', {to_data_type(data_type,2)}), 'key1')" + ) with Scenario(f"mapContains with {data_type} on a table"): table_name = get_table_name() - table(name = table_name, data_type = data_type) + table(name=table_name, data_type=data_type) with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT mapContains( map('key1', {to_data_type(data_type,1)}," - f"'key2', {to_data_type(data_type,2)}), 'key1')") + node.query( + f"INSERT INTO {table_name} SELECT mapContains( map('key1', {to_data_type(data_type,1)}," + f"'key2', {to_data_type(data_type,2)}), 'key1')" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"mapKeys with {data_type}"): - node.query(f"SELECT mapKeys( map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)}))") + node.query( + f"SELECT mapKeys( map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)}))" + ) with Scenario(f"mapKeys with {data_type} on a table"): table_name = get_table_name() - table(name = table_name, 
data_type = 'Array(String)') + table(name=table_name, data_type="Array(String)") with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT mapKeys( map('key1', {to_data_type(data_type,1)}," - f"'key2', {to_data_type(data_type,2)}))") + node.query( + f"INSERT INTO {table_name} SELECT mapKeys( map('key1', {to_data_type(data_type,1)}," + f"'key2', {to_data_type(data_type,2)}))" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") with Scenario(f"mapValues with {data_type}"): - node.query(f"SELECT mapValues( map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)}))") + node.query( + f"SELECT mapValues( map('key1', {to_data_type(data_type,1)}, 'key2', {to_data_type(data_type,2)}))" + ) with Scenario(f"mapValues with {data_type} on a table"): table_name = get_table_name() - table(name = table_name, data_type = f'Array({data_type})') + table(name=table_name, data_type=f"Array({data_type})") with When("I insert the output into a table"): - node.query(f"INSERT INTO {table_name} SELECT mapValues( map('key1', {to_data_type(data_type,1)}," - f"'key2', {to_data_type(data_type,2)}))") + node.query( + f"INSERT INTO {table_name} SELECT mapValues( map('key1', {to_data_type(data_type,1)}," + f"'key2', {to_data_type(data_type,2)}))" + ) execute_query(f"SELECT * FROM {table_name} ORDER BY a ASC") + @TestFeature @Name("array, tuple, map") -@Examples("data_type",[ - ('Int128',), - ('Int256',), - ('UInt128',), - ('UInt256',), - ('Decimal256(0)',), -]) +@Examples( + "data_type", + [ + ("Int128",), + ("Int256",), + ("UInt128",), + ("UInt256",), + ("Decimal256(0)",), + ], +) def feature(self, node="clickhouse1", stress=None, parallel=None): """Check that array, tuple, and map functions work with extended precision data types. 
@@ -477,7 +577,7 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): with allow_experimental_bigint(self.context.node): for example in self.examples: - data_type, = example + (data_type,) = example with Feature(data_type): diff --git a/tests/testflows/extended_precision_data_types/tests/bit.py b/tests/testflows/extended_precision_data_types/tests/bit.py index 24f63532c74..f32ae093607 100644 --- a/tests/testflows/extended_precision_data_types/tests/bit.py +++ b/tests/testflows/extended_precision_data_types/tests/bit.py @@ -3,31 +3,37 @@ from extended_precision_data_types.common import * from extended_precision_data_types.errors import * funcs = [ - ('bitAnd', True, None), - ('bitOr', True, None), - ('bitXor', True, None), - ('bitShiftLeft', True, None), - ('bitShiftRight', True, None), - ('bitRotateLeft', False, not_implemented_bigints('Bit rotate')), - ('bitRotateRight', False, not_implemented_bigints('Bit rotate')), - ('bitTest', False, not_implemented_bigints('bitTest')), - ('bitTestAll', False, illegal_column()), - ('bitTestAny', False, illegal_column()), - ('bitNot', True, None), - ('bitCount', True, None) + ("bitAnd", True, None), + ("bitOr", True, None), + ("bitXor", True, None), + ("bitShiftLeft", True, None), + ("bitShiftRight", True, None), + ("bitRotateLeft", False, not_implemented_bigints("Bit rotate")), + ("bitRotateRight", False, not_implemented_bigints("Bit rotate")), + ("bitTest", False, not_implemented_bigints("bitTest")), + ("bitTestAll", False, illegal_column()), + ("bitTestAny", False, illegal_column()), + ("bitNot", True, None), + ("bitCount", True, None), +] + +Examples_list = [ + tuple(list(func) + list(data_type) + [Name(f"{func[0]} - {data_type[0]}")]) + for func in funcs + for data_type in data_types +] +Examples_dec_list = [ + tuple(list(func) + [Name(f"{func[0]} - Decimal256")]) for func in funcs ] -Examples_list = [tuple(list(func)+list(data_type)+[Name(f'{func[0]} - {data_type[0]}')]) for func in funcs for data_type in data_types] -Examples_dec_list = [tuple(list(func)+[Name(f'{func[0]} - Decimal256')]) for func in funcs] @TestOutline(Scenario) -@Examples('func supported error int_type min max', Examples_list) +@Examples("func supported error int_type min max", Examples_list) def bit_int_inline(self, func, supported, error, int_type, min, max, node=None): - """ Check bit functions with Int128, UInt128, Int256, and UInt256 using inline tests. 
- """ + """Check bit functions with Int128, UInt128, Int256, and UInt256 using inline tests.""" if error is not None: - exitcode,message = error + exitcode, message = error if node is None: node = self.context.node @@ -35,28 +41,35 @@ def bit_int_inline(self, func, supported, error, int_type, min, max, node=None): if func in ["bitNot", "bitCount"]: with When(f"Check {func} with {int_type}"): - execute_query(f""" + execute_query( + f""" SELECT {func}(to{int_type}(1)), {func}(to{int_type}(\'{max}\')), {func}(to{int_type}(\'{min}\')) - """) + """ + ) elif supported: with When(f"I check {func} with {int_type}"): - execute_query(f""" + execute_query( + f""" SELECT {func}(to{int_type}(1), 1), {func}(to{int_type}(\'{max}\'), 1), {func}(to{int_type}(\'{min}\'), 1) - """) + """ + ) else: with When(f"I check {func} with {int_type}"): - node.query(f"SELECT {func}(to{int_type}(1), 1), {func}(to{int_type}(\'{max}\'), 1), {func}(to{int_type}(\'{min}\'), 1)", - exitcode=exitcode, message = message) + node.query( + f"SELECT {func}(to{int_type}(1), 1), {func}(to{int_type}('{max}'), 1), {func}(to{int_type}('{min}'), 1)", + exitcode=exitcode, + message=message, + ) + @TestOutline(Scenario) -@Examples('func supported error int_type min max', Examples_list) +@Examples("func supported error int_type min max", Examples_list) def bit_int_table(self, func, supported, error, int_type, min, max, node=None): - """ Check bit functions with Int128, UInt128, Int256, and UInt256 using table tests. - """ + """Check bit functions with Int128, UInt128, Int256, and UInt256 using table tests.""" table_name = f"table_{getuid()}" @@ -64,48 +77,59 @@ def bit_int_table(self, func, supported, error, int_type, min, max, node=None): node = self.context.node if error is not None: - exitcode,message = error + exitcode, message = error with Given(f"I have a table"): - table(name = table_name, data_type = int_type) + table(name=table_name, data_type=int_type) if func in ["bitNot", "bitCount"]: for value in [1, min, max]: with When(f"I insert the output of {func} with {int_type} and {value}"): - node.query(f"INSERT INTO {table_name} SELECT {func}(to{int_type}(\'{value}\'))") + node.query( + f"INSERT INTO {table_name} SELECT {func}(to{int_type}('{value}'))" + ) with Then(f"I check the table with values of {func} and {int_type}"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) elif supported: for value in [1, min, max]: with When(f"I insert the output of {func} with {int_type} and {value}"): - node.query(f"INSERT INTO {table_name} SELECT {func}(to{int_type}(\'{value}\'), 1)") + node.query( + f"INSERT INTO {table_name} SELECT {func}(to{int_type}('{value}'), 1)" + ) with Then(f"I check the table with values of {func} and {int_type}"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) else: for value in [1, min, max]: with When(f"I insert the output of {func} with {int_type} and {value}"): - node.query(f"INSERT INTO {table_name} SELECT {func}(to{int_type}(\'{value}\'), 1)", - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} SELECT {func}(to{int_type}('{value}'), 1)", + exitcode=exitcode, + message=message, + ) + @TestOutline(Scenario) -@Examples('func supported error', Examples_dec_list) +@Examples("func supported error", Examples_dec_list) def bit_dec_inline(self, func, supported, error, node=None): - """ Check bit functions with Decimal256 using inline tests. 
- """ + """Check bit functions with Decimal256 using inline tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] @@ -117,20 +141,26 @@ def bit_dec_inline(self, func, supported, error, node=None): if func in ["bitNot", "bitCount"]: with When(f"Check {func} with Decimal256"): - node.query(f"SELECT {func}(toDecimal256(1,0)), {func}(toDecimal256(\'{max}\',0)), {func}(toDecimal256(\'{min}\',0))", - exitcode=exitcode, message = message) + node.query( + f"SELECT {func}(toDecimal256(1,0)), {func}(toDecimal256('{max}',0)), {func}(toDecimal256('{min}',0))", + exitcode=exitcode, + message=message, + ) else: with When(f"I check {func} with Decimal256"): - node.query(f"SELECT {func}(toDecimal256(1,0), 1), {func}(toDecimal256(\'{max}\',0), 1), {func}(toDecimal256(\'{min}\',0), 1)", - exitcode=exitcode, message = message) + node.query( + f"SELECT {func}(toDecimal256(1,0), 1), {func}(toDecimal256('{max}',0), 1), {func}(toDecimal256('{min}',0), 1)", + exitcode=exitcode, + message=message, + ) + @TestOutline(Scenario) -@Examples('func supported error', Examples_dec_list) +@Examples("func supported error", Examples_dec_list) def bit_dec_table(self, func, supported, error, node=None): - """ Check bit functions with Decimal256 using table tests. - """ + """Check bit functions with Decimal256 using table tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] @@ -141,23 +171,30 @@ def bit_dec_table(self, func, supported, error, node=None): node = self.context.node with Given(f"I have a table"): - table(name = table_name, data_type = 'Decimal256(0)') + table(name=table_name, data_type="Decimal256(0)") if func in ["bitNot", "bitCount"]: for value in [1, min, max]: with When(f"I insert the output of {func} with Decimal256 and {value}"): - node.query(f"INSERT INTO {table_name} SELECT {func}(toDecimal256(\'{value}\',0))", - exitcode=exitcode, message = message) + node.query( + f"INSERT INTO {table_name} SELECT {func}(toDecimal256('{value}',0))", + exitcode=exitcode, + message=message, + ) else: for value in [1, min, max]: with When(f"I insert the output of {func} with Decimal256 and {value}"): - node.query(f"INSERT INTO {table_name} SELECT {func}(toDecimal256(\'{value}\',0), 1)", - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} SELECT {func}(toDecimal256('{value}',0), 1)", + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("bit") @@ -167,8 +204,7 @@ def bit_dec_table(self, func, supported, error, node=None): RQ_SRS_020_ClickHouse_Extended_Precision_Bit_Dec_NotSupported("1.0"), ) def feature(self, node="clickhouse1", mysql_node="mysql1", stress=None, parallel=None): - """Check that bit functions work with extended precision data types. 
- """ + """Check that bit functions work with extended precision data types.""" self.context.node = self.context.cluster.node(node) self.context.mysql_node = self.context.cluster.node(mysql_node) diff --git a/tests/testflows/extended_precision_data_types/tests/comparison.py b/tests/testflows/extended_precision_data_types/tests/comparison.py index 6f715e35b91..70d5abdd6a0 100644 --- a/tests/testflows/extended_precision_data_types/tests/comparison.py +++ b/tests/testflows/extended_precision_data_types/tests/comparison.py @@ -2,60 +2,74 @@ from extended_precision_data_types.requirements import * from extended_precision_data_types.common import * funcs = [ - ('equals',), - ('notEquals',), - ('less',), - ('greater',), - ('lessOrEquals',), - ('greaterOrEquals',) + ("equals",), + ("notEquals",), + ("less",), + ("greater",), + ("lessOrEquals",), + ("greaterOrEquals",), +] + +Examples_list = [ + tuple(list(func) + list(data_type) + [Name(f"{func[0]} - {data_type[0]}")]) + for func in funcs + for data_type in data_types +] +Examples_list_dec = [ + tuple(list(func) + [Name(f"{func[0]} - Decimal256")]) for func in funcs ] -Examples_list = [tuple(list(func)+list(data_type)+[Name(f'{func[0]} - {data_type[0]}')]) for func in funcs for data_type in data_types] -Examples_list_dec = [tuple(list(func)+[Name(f'{func[0]} - Decimal256')]) for func in funcs] @TestOutline(Scenario) -@Examples('func int_type min max', Examples_list) +@Examples("func int_type min max", Examples_list) def comp_int_inline(self, func, int_type, min, max, node=None): - """Check comparison functions with Int128, UInt128, Int256, and UInt256 using inline tests. - """ + """Check comparison functions with Int128, UInt128, Int256, and UInt256 using inline tests.""" if node is None: node = self.context.node with When(f"I check {func} with {int_type}"): - execute_query(f""" + execute_query( + f""" SELECT {func}(to{int_type}(1), to{int_type}(1)), {func}(to{int_type}(\'{max}\'), to{int_type}(\'{min}\')) - """) + """ + ) + @TestOutline(Scenario) -@Examples('func int_type min max', Examples_list) +@Examples("func int_type min max", Examples_list) def comp_int_table(self, func, int_type, min, max, node=None): - """Check comparison functions with Int128, UInt128, Int256, and UInt256 using table tests. - """ + """Check comparison functions with Int128, UInt128, Int256, and UInt256 using table tests.""" if node is None: node = self.context.node - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given(f"I have a table"): - table(name = table_name, data_type = int_type) + table(name=table_name, data_type=int_type) for value in [1, max, min]: - with When(f"I insert into a table the output {func} with {int_type} and {value}"): - node.query(f"INSERT INTO {table_name} SELECT {func}(to{int_type}(\'{value}\'), to{int_type}(1))") + with When( + f"I insert into a table the output {func} with {int_type} and {value}" + ): + node.query( + f"INSERT INTO {table_name} SELECT {func}(to{int_type}('{value}'), to{int_type}(1))" + ) with Then(f"I check the table for the output of {func} with {int_type}"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) + @TestOutline(Scenario) -@Examples('func', Examples_list_dec) +@Examples("func", Examples_list_dec) def comp_dec_inline(self, func, node=None): - """Check comparison functions with Decimal256 using inline tests. 
- """ + """Check comparison functions with Decimal256 using inline tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] @@ -63,35 +77,44 @@ def comp_dec_inline(self, func, node=None): node = self.context.node with When(f"I check {func} with Decimal256"): - execute_query(f""" + execute_query( + f""" SELECT {func}(toDecimal256(1,0), toDecimal256(1,0)), {func}(toDecimal256(\'{max}\',0), toDecimal256(\'{min}\',0)) - """) + """ + ) + @TestOutline(Scenario) -@Examples('func', Examples_list_dec) +@Examples("func", Examples_list_dec) def comp_dec_table(self, func, node=None): - """Check comparison functions with Decimal256 using table tests. - """ + """Check comparison functions with Decimal256 using table tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] if node is None: node = self.context.node - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given(f"I have a table"): - table(name = table_name, data_type = 'Decimal256(0)') + table(name=table_name, data_type="Decimal256(0)") for value in [1, max, min]: - with When(f"I insert into a table the output {func} with Decimal256 and {value}"): - node.query(f"INSERT INTO {table_name} SELECT {func}(toDecimal256(\'{value}\',0), toDecimal256(1,0))") + with When( + f"I insert into a table the output {func} with Decimal256 and {value}" + ): + node.query( + f"INSERT INTO {table_name} SELECT {func}(toDecimal256('{value}',0), toDecimal256(1,0))" + ) with Then(f"I check the table for the output of {func} with Decimal256"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) + @TestFeature @Name("comparison") @@ -99,8 +122,7 @@ def comp_dec_table(self, func, node=None): RQ_SRS_020_ClickHouse_Extended_Precision_Comparison("1.0"), ) def feature(self, node="clickhouse1", mysql_node="mysql1", stress=None, parallel=None): - """Check that comparison functions work with extended precision data types. - """ + """Check that comparison functions work with extended precision data types.""" self.context.node = self.context.cluster.node(node) self.context.mysql_node = self.context.cluster.node(mysql_node) diff --git a/tests/testflows/extended_precision_data_types/tests/conversion.py b/tests/testflows/extended_precision_data_types/tests/conversion.py index b98958009a0..942f40c91de 100644 --- a/tests/testflows/extended_precision_data_types/tests/conversion.py +++ b/tests/testflows/extended_precision_data_types/tests/conversion.py @@ -4,10 +4,10 @@ import textwrap from extended_precision_data_types.requirements import * from extended_precision_data_types.common import * + @contextmanager def dictionary(name, node, mysql_node): - """Create a table in MySQL and use it a source for a dictionary. 
- """ + """Create a table in MySQL and use it a source for a dictionary.""" try: with Given("table in MySQL"): sql = f""" @@ -22,9 +22,15 @@ def dictionary(name, node, mysql_node): ); """ with When("I drop the table if exists"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) with And("I create a table"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("dictionary that uses MySQL table as the external source"): with When("I drop the dictionary if exists"): @@ -60,12 +66,15 @@ def dictionary(name, node, mysql_node): node.query(f"DROP DICTIONARY IF EXISTS dict_{name}") with And("I drop a table in MySQL", flags=TE): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) + @contextmanager def table(name, node, mysql_node): - """Create a table in MySQL and use it a source for a table in ClickHouse. - """ + """Create a table in MySQL and use it a source for a table in ClickHouse.""" try: with Given("table in MySQL"): sql = f""" @@ -80,10 +89,16 @@ def table(name, node, mysql_node): ); """ with When("I drop the table if exists"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) with And("I create a table"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("table that uses MySQL table as the external source"): @@ -111,12 +126,15 @@ def table(name, node, mysql_node): node.query(f"DROP TABLE IF EXISTS {name}") with And("I drop a table in MySQL", flags=TE): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) + @contextmanager def table_func(name, node, mysql_node): - """Create a table in MySQL and use it a source for a table using mysql table function. 
- """ + """Create a table in MySQL and use it a source for a table using mysql table function.""" try: with Given("table in MySQL"): sql = f""" @@ -131,9 +149,15 @@ def table_func(name, node, mysql_node): ); """ with When("I drop the table if exists"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) with And("I create a table"): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) yield f"mysql('{mysql_node.name}:3306', 'db', '{name}', 'user', 'password')" @@ -143,33 +167,73 @@ def table_func(name, node, mysql_node): node.query(f"DROP TABLE IF EXISTS {name}") with And("I drop a table in MySQL", flags=TE): - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user -e \"DROP TABLE IF EXISTS {name};\"", exitcode=0) + mysql_node.command( + f'MYSQL_PWD=password mysql -D db -u user -e "DROP TABLE IF EXISTS {name};"', + exitcode=0, + ) + @TestOutline(Scenario) -@Examples('int_type min max',[ - ('Int128', '-170141183460469231731687303715884105728', '170141183460469231731687303715884105727', Requirements(RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toInt128("1.0")), Name('Int128')), - ('Int256', '-57896044618658097711785492504343953926634992332820282019728792003956564819968', '57896044618658097711785492504343953926634992332820282019728792003956564819967', Requirements(RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toInt256("1.0")), Name('Int256')), - ('UInt128','0','340282366920938463463374607431768211455', Requirements(RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toUInt128("1.0")), Name('UInt128')), - ('UInt256', '0', '115792089237316195423570985008687907853269984665640564039457584007913129639935', Requirements(RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toUInt256("1.0")), Name('UInt256')), -]) +@Examples( + "int_type min max", + [ + ( + "Int128", + "-170141183460469231731687303715884105728", + "170141183460469231731687303715884105727", + Requirements( + RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toInt128("1.0") + ), + Name("Int128"), + ), + ( + "Int256", + "-57896044618658097711785492504343953926634992332820282019728792003956564819968", + "57896044618658097711785492504343953926634992332820282019728792003956564819967", + Requirements( + RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toInt256("1.0") + ), + Name("Int256"), + ), + ( + "UInt128", + "0", + "340282366920938463463374607431768211455", + Requirements( + RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toUInt128("1.0") + ), + Name("UInt128"), + ), + ( + "UInt256", + "0", + "115792089237316195423570985008687907853269984665640564039457584007913129639935", + Requirements( + RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toUInt256("1.0") + ), + Name("UInt256"), + ), + ], +) def int_conversion(self, int_type, min, max, node=None): - """Check that ClickHouse converts values to Int128. 
- """ + """Check that ClickHouse converts values to Int128.""" if node is None: node = self.context.node with When(f"I convert {min}, {max}, 1 to {int_type}"): - output = node.query(f"SELECT to{int_type}(\'{min}\'), to{int_type}(\'{max}\'), to{int_type}(1) format TabSeparatedRaw").output - assert output == f'{min}\t{max}\t1', error() + output = node.query( + f"SELECT to{int_type}('{min}'), to{int_type}('{max}'), to{int_type}(1) format TabSeparatedRaw" + ).output + assert output == f"{min}\t{max}\t1", error() + @TestScenario @Requirements( RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_toDecimal256("1.0"), ) def to_decimal256(self, node=None): - """Check that ClickHouse converts values to Int128. - """ + """Check that ClickHouse converts values to Int128.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] @@ -178,28 +242,32 @@ def to_decimal256(self, node=None): with When(f"I check toDecimal256 with 0 scale with 1, {max}, and {min}"): - for value in [1,min,max]: - output = node.query(f"SELECT toDecimal256(\'{value}\',0)").output + for value in [1, min, max]: + output = node.query(f"SELECT toDecimal256('{value}',0)").output assert output == str(value), error() - for scale in range(1,76): + for scale in range(1, 76): with When(f"I check toDecimal256 with {scale} scale with its max"): - output = node.query(f"SELECT toDecimal256(\'{10**(76-scale)-1}\',{scale})").output - assert float(output) == float(10**(76-scale)-1), error() + output = node.query( + f"SELECT toDecimal256('{10**(76-scale)-1}',{scale})" + ).output + assert float(output) == float(10 ** (76 - scale) - 1), error() with And(f"I check toDecimal256 with {scale} scale with its min"): - output = node.query(f"SELECT toDecimal256(\'{-10**(76-scale)+1}\',{scale})").output - assert float(output) == float(-10**(76-scale)+1), error() + output = node.query( + f"SELECT toDecimal256('{-10**(76-scale)+1}',{scale})" + ).output + assert float(output) == float(-(10 ** (76 - scale)) + 1), error() + @TestScenario @Requirements( RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_ToMySQL("1.0"), ) def MySQL_table(self, node=None): - """Check that ClickHouse converts MySQL values from MySQL table into ClickHouse table. - """ - table_name = f'table_{getuid()}' + """Check that ClickHouse converts MySQL values from MySQL table into ClickHouse table.""" + table_name = f"table_{getuid()}" node = self.context.node mysql_node = self.context.mysql_node @@ -210,20 +278,22 @@ def MySQL_table(self, node=None): sql = f""" INSERT INTO {table_name}(int128, uint128, int256, uint256, dec256) VALUES (1,1,1,1,1); """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with Then("I select from the table on top of the mysql table"): - node.query(f"SELECT * FROM {table_name}", - exitcode=50, message='Exception:') + node.query(f"SELECT * FROM {table_name}", exitcode=50, message="Exception:") + @TestScenario @Requirements( RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_FromMySQL("1.0"), ) def MySQL_func(self, node=None): - """Check that ClickHouse converts MySQL values into a ClickHouse table using the MySQL table function. 
- """ - table_name = f'table_{getuid()}' + """Check that ClickHouse converts MySQL values into a ClickHouse table using the MySQL table function.""" + table_name = f"table_{getuid()}" node = self.context.node mysql_node = self.context.mysql_node @@ -234,33 +304,38 @@ def MySQL_func(self, node=None): sql = f""" INSERT INTO {table_name}(int128, uint128, int256, uint256, dec256) VALUES (1,1,1,1,1); """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with And("I make sure the table doesn't exist"): node.query(f"DROP TABLE IF EXISTS {table_name}") with And("I create the table"): - node.query(f"CREATE TABLE {table_name} (id UInt8, int128 Int128, uint128 UInt128, int256 Int256, uint256 UInt256, dec256 Decimal256(0)) Engine = Memory") + node.query( + f"CREATE TABLE {table_name} (id UInt8, int128 Int128, uint128 UInt128, int256 Int256, uint256 UInt256, dec256 Decimal256(0)) Engine = Memory" + ) with And("I insert into the clickhouse table from the mysql table"): node.query(f"INSERT INTO {table_name} SELECT * FROM {table_function}") with Then("I select from the clickhouse table"): output = node.query(f"SELECT * FROM {table_name}").output - assert output == '1\t1\t1\t1\t1\t1', error() + assert output == "1\t1\t1\t1\t1\t1", error() + @TestScenario @Requirements( RQ_SRS_020_ClickHouse_Extended_Precision_Conversion_ToMySQL("1.0"), ) def MySQL_dict(self, node=None): - """Check that ClickHouse converts MySQL values from MySQL table into ClickHouse dictionary. - """ + """Check that ClickHouse converts MySQL values from MySQL table into ClickHouse dictionary.""" node = self.context.node mysql_node = self.context.mysql_node - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with dictionary(table_name, node, mysql_node): @@ -268,17 +343,21 @@ def MySQL_dict(self, node=None): sql = f""" INSERT INTO {table_name}(int128, uint128, int256, uint256, dec256) VALUES (1,1,1,1,1); """ - mysql_node.command(f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", exitcode=0) + mysql_node.command( + f"MYSQL_PWD=password mysql -D db -u user <<'EOF'{textwrap.dedent(sql)}\nEOF", + exitcode=0, + ) with Then("I select from the table on top of the mysql table"): - node.query(f"SELECT * FROM dict_{table_name}", - exitcode=50, message='Exception:') + node.query( + f"SELECT * FROM dict_{table_name}", exitcode=50, message="Exception:" + ) + @TestFeature @Name("conversion") def feature(self, node="clickhouse1", mysql_node="mysql1", stress=None, parallel=None): - """Check the conversion of extended precision data types. - """ + """Check the conversion of extended precision data types.""" self.context.node = self.context.cluster.node(node) self.context.mysql_node = self.context.cluster.node(mysql_node) diff --git a/tests/testflows/extended_precision_data_types/tests/feature.py b/tests/testflows/extended_precision_data_types/tests/feature.py index 83293b61f35..dc08ee4adde 100644 --- a/tests/testflows/extended_precision_data_types/tests/feature.py +++ b/tests/testflows/extended_precision_data_types/tests/feature.py @@ -2,11 +2,11 @@ from testflows.core import * from testflows.core.name import basename, parentname from testflows._core.testtype import TestSubType + @TestFeature @Name("tests") def feature(self): - """Check functions with Int128, Int256, UInt256, and Decimal256. 
- """ + """Check functions with Int128, Int256, UInt256, and Decimal256.""" Feature(run=load("extended_precision_data_types.tests.conversion", "feature")) Feature(run=load("extended_precision_data_types.tests.arithmetic", "feature")) Feature(run=load("extended_precision_data_types.tests.array_tuple_map", "feature")) diff --git a/tests/testflows/extended_precision_data_types/tests/logical.py b/tests/testflows/extended_precision_data_types/tests/logical.py index 18dc33f062e..56ade9c4c3c 100644 --- a/tests/testflows/extended_precision_data_types/tests/logical.py +++ b/tests/testflows/extended_precision_data_types/tests/logical.py @@ -2,54 +2,66 @@ from extended_precision_data_types.requirements import * from extended_precision_data_types.common import * funcs = [ - ('and',), - ('or',), - ('not',), - ('xor',), + ("and",), + ("or",), + ("not",), + ("xor",), +] + +Examples_list = [ + tuple(list(func) + list(data_type) + [Name(f"{func[0]} - {data_type[0]}")]) + for func in funcs + for data_type in data_types +] +Examples_list_dec = [ + tuple(list(func) + [Name(f"{func[0]} - Decimal256")]) for func in funcs ] -Examples_list = [tuple(list(func)+list(data_type)+[Name(f'{func[0]} - {data_type[0]}')]) for func in funcs for data_type in data_types] -Examples_list_dec = [tuple(list(func)+[Name(f'{func[0]} - Decimal256')]) for func in funcs] @TestOutline(Scenario) -@Examples('func int_type min max', Examples_list) +@Examples("func int_type min max", Examples_list) def log_int_inline(self, func, int_type, min, max, node=None): - """Check logical functions with Int128, Int256, and UInt256 using inline tests. - """ - table_name = f'table_{getuid()}' + """Check logical functions with Int128, Int256, and UInt256 using inline tests.""" + table_name = f"table_{getuid()}" if node is None: node = self.context.node with When(f"Check {func} with {int_type}"): - node.query(f"SELECT {func}(to{int_type}(1), to{int_type}(1)), {func}(to{int_type}(\'{max}\'), to{int_type}(1)), {func}(to{int_type}(\'{min}\'), to{int_type}(1))", - exitcode=43, message = 'Exception: Illegal type ') + node.query( + f"SELECT {func}(to{int_type}(1), to{int_type}(1)), {func}(to{int_type}('{max}'), to{int_type}(1)), {func}(to{int_type}('{min}'), to{int_type}(1))", + exitcode=43, + message="Exception: Illegal type ", + ) + @TestOutline(Scenario) -@Examples('func int_type min max', Examples_list) +@Examples("func int_type min max", Examples_list) def log_int_table(self, func, int_type, min, max, node=None): - """Check logical functions with Int128, Int256, and UInt256 using table tests. - """ + """Check logical functions with Int128, Int256, and UInt256 using table tests.""" if node is None: node = self.context.node - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given(f"I have a table"): - table(name = table_name, data_type = int_type) + table(name=table_name, data_type=int_type) for value in [1, min, max]: with When(f"Check {func} with {int_type} and {value}"): - node.query(f"INSERT INTO {table_name} SELECT {func}(to{int_type}(\'{value}\'), to{int_type}(\'{value}\'))", - exitcode=43, message = 'Exception: Illegal type') + node.query( + f"INSERT INTO {table_name} SELECT {func}(to{int_type}('{value}'), to{int_type}('{value}'))", + exitcode=43, + message="Exception: Illegal type", + ) + @TestOutline(Scenario) -@Examples('func', funcs) +@Examples("func", funcs) def log_dec_inline(self, func, node=None): - """Check logical functions with Decimal256 using inline tests. 
- """ + """Check logical functions with Decimal256 using inline tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] @@ -57,30 +69,37 @@ def log_dec_inline(self, func, node=None): node = self.context.node with When(f"Check {func} with Decimal256"): - node.query(f"SELECT {func}(toDecimal256(1,0), toDecimal256(1,0)), {func}(toDecimal256(\'{max}\',0), toDecimal256(1)), {func}(toDecimal256(\'{min}\',0), toDecimal256(1))", - exitcode=43, message = 'Exception: Illegal type ') + node.query( + f"SELECT {func}(toDecimal256(1,0), toDecimal256(1,0)), {func}(toDecimal256('{max}',0), toDecimal256(1)), {func}(toDecimal256('{min}',0), toDecimal256(1))", + exitcode=43, + message="Exception: Illegal type ", + ) + @TestOutline(Scenario) -@Examples('func', funcs) +@Examples("func", funcs) def log_dec_table(self, func, node=None): - """Check logical functions with Decimal256 using table tests. - """ + """Check logical functions with Decimal256 using table tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] if node is None: node = self.context.node - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given(f"I have a table"): - table(name = table_name, data_type = 'Decimal256(0)') + table(name=table_name, data_type="Decimal256(0)") for value in [1, min, max]: with When(f"Check {func} with Decimal256 and {value}"): - node.query(f"INSERT INTO {table_name} SELECT {func}(toDecimal256(\'{value}\',0), toDecimal256(\'{value}\',0))", - exitcode=43, message = 'Exception: Illegal type ') + node.query( + f"INSERT INTO {table_name} SELECT {func}(toDecimal256('{value}',0), toDecimal256('{value}',0))", + exitcode=43, + message="Exception: Illegal type ", + ) + @TestFeature @Name("logical") @@ -88,8 +107,7 @@ def log_dec_table(self, func, node=None): RQ_SRS_020_ClickHouse_Extended_Precision_Logical("1.0"), ) def feature(self, node="clickhouse1", mysql_node="mysql1", stress=None, parallel=None): - """Check that comparison functions work with extended precision data types. 
- """ + """Check that comparison functions work with extended precision data types.""" self.context.node = self.context.cluster.node(node) self.context.mysql_node = self.context.cluster.node(mysql_node) diff --git a/tests/testflows/extended_precision_data_types/tests/mathematical.py b/tests/testflows/extended_precision_data_types/tests/mathematical.py index 65872b766dd..612db532944 100644 --- a/tests/testflows/extended_precision_data_types/tests/mathematical.py +++ b/tests/testflows/extended_precision_data_types/tests/mathematical.py @@ -2,171 +2,214 @@ from extended_precision_data_types.requirements import * from extended_precision_data_types.common import * funcs = [ - ('exp(', 3, 0), - ('log(', 0, 0), - ('ln(', 0, 0), - ('exp2(', 2, 0), - ('log2(', 0, 0), - ('exp10(', 10, 0), - ('log10(', 0, 0), - ('sqrt(', 1, 0), - ('cbrt(', 1, 0), - ('erf(', 1, 0), - ('erfc(', 0, 0), - ('lgamma(', 0, 0), - ('tgamma(', 1, 0), - ('sin(', 1, 0), - ('cos(', 1, 0), - ('tan(', 2, 0), - ('asin(', 2, 0), - ('acos(', 0, 0), - ('atan(', 1, 0), - ('intExp2(', 2, 48), - ('intExp10(', 10, 48), - ('cosh(', 2, 0), - ('acosh(', 0, 0), - ('sinh(', 1, 0), - ('asinh(', 1, 0), - ('tanh(', 1, 0), - ('atanh(', 'inf', 0), - ('log1p(', 1, 0), - ('sign(', 1, 0), - ('pow(1,', 1, 43), - ('power(1,', 1, 43), - ('atan2(1,', 1, 43), - ('hypot(1,', 1, 43), + ("exp(", 3, 0), + ("log(", 0, 0), + ("ln(", 0, 0), + ("exp2(", 2, 0), + ("log2(", 0, 0), + ("exp10(", 10, 0), + ("log10(", 0, 0), + ("sqrt(", 1, 0), + ("cbrt(", 1, 0), + ("erf(", 1, 0), + ("erfc(", 0, 0), + ("lgamma(", 0, 0), + ("tgamma(", 1, 0), + ("sin(", 1, 0), + ("cos(", 1, 0), + ("tan(", 2, 0), + ("asin(", 2, 0), + ("acos(", 0, 0), + ("atan(", 1, 0), + ("intExp2(", 2, 48), + ("intExp10(", 10, 48), + ("cosh(", 2, 0), + ("acosh(", 0, 0), + ("sinh(", 1, 0), + ("asinh(", 1, 0), + ("tanh(", 1, 0), + ("atanh(", "inf", 0), + ("log1p(", 1, 0), + ("sign(", 1, 0), + ("pow(1,", 1, 43), + ("power(1,", 1, 43), + ("atan2(1,", 1, 43), + ("hypot(1,", 1, 43), +] + +Examples_list = [ + tuple(list(func) + list(data_type) + [Name(f"{func[0]}) - {data_type[0]}")]) + for func in funcs + for data_type in data_types +] +Examples_dec_list = [ + tuple(list(func) + [Name(f"{func[0]}) - Decimal256")]) for func in funcs ] -Examples_list = [tuple(list(func)+list(data_type)+[Name(f'{func[0]}) - {data_type[0]}')]) for func in funcs for data_type in data_types] -Examples_dec_list = [tuple(list(func)+[Name(f'{func[0]}) - Decimal256')]) for func in funcs] @TestOutline(Scenario) -@Examples('func expected_result exitcode int_type min max', Examples_list) -def math_int_inline(self, func, expected_result, exitcode, int_type, min, max, node=None): - """Check mathematical functions with Int128, UInt128, Int256, and UInt256 using inline tests. 
- """ +@Examples("func expected_result exitcode int_type min max", Examples_list) +def math_int_inline( + self, func, expected_result, exitcode, int_type, min, max, node=None +): + """Check mathematical functions with Int128, UInt128, Int256, and UInt256 using inline tests.""" if node is None: node = self.context.node - if func in ['intExp2(', 'intExp10(', 'pow(1,', 'power(1,', 'atan2(1,', 'hypot(1,']: + if func in ["intExp2(", "intExp10(", "pow(1,", "power(1,", "atan2(1,", "hypot(1,"]: with When(f"I check {func} with {int_type} using 1, max, and min"): - node.query(f"SELECT {func} to{int_type}(1)), {func} to{int_type}(\'{max}\')), {func} to{int_type}(\'{min}\'))", - exitcode=exitcode, message = 'Exception:') + node.query( + f"SELECT {func} to{int_type}(1)), {func} to{int_type}('{max}')), {func} to{int_type}('{min}'))", + exitcode=exitcode, + message="Exception:", + ) else: with When(f"I check {func} with {int_type} using 1"): output = node.query(f"SELECT {func} to{int_type}(1))").output - if output == 'inf': + if output == "inf": pass else: assert round(float(output)) == expected_result, error() with And(f"I check {func} with {int_type} using max and min"): - execute_query(f""" + execute_query( + f""" SELECT round({func} to{int_type}(\'{max}\')), {rounding_precision}), round({func} to{int_type}(\'{min}\')), {rounding_precision}) - """) + """ + ) + @TestOutline(Scenario) -@Examples('func expected_result exitcode int_type min max', Examples_list) -def math_int_table(self, func, expected_result, exitcode, int_type, min, max, node=None): - """Check mathematical functions with Int128, UInt128, Int256, and UInt256 using table tests. - """ +@Examples("func expected_result exitcode int_type min max", Examples_list) +def math_int_table( + self, func, expected_result, exitcode, int_type, min, max, node=None +): + """Check mathematical functions with Int128, UInt128, Int256, and UInt256 using table tests.""" if node is None: node = self.context.node - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given(f"I have a table"): - table(name = table_name, data_type = f'Nullable({int_type})') + table(name=table_name, data_type=f"Nullable({int_type})") - if func in ['intExp2(', 'intExp10(', 'pow(1,', 'power(1,', 'atan2(1,', 'hypot(1,']: + if func in ["intExp2(", "intExp10(", "pow(1,", "power(1,", "atan2(1,", "hypot(1,"]: for value in [1, max, min]: - with When(f"I insert the output of {func} with {int_type} using {value} into a table"): - node.query(f"INSERT INTO {table_name} SELECT {func} to{int_type}(\'{value}\'))", - exitcode=exitcode, message = 'Exception:') + with When( + f"I insert the output of {func} with {int_type} using {value} into a table" + ): + node.query( + f"INSERT INTO {table_name} SELECT {func} to{int_type}('{value}'))", + exitcode=exitcode, + message="Exception:", + ) else: for value in [1, max, min]: - with And(f"I insert the output of {func} with {int_type} using {value} into a table"): - node.query(f"INSERT INTO {table_name} SELECT round(to{int_type}OrZero( toString({func} to{int_type}(\'{value}\')))), {rounding_precision})") + with And( + f"I insert the output of {func} with {int_type} using {value} into a table" + ): + node.query( + f"INSERT INTO {table_name} SELECT round(to{int_type}OrZero( toString({func} to{int_type}('{value}')))), {rounding_precision})" + ) with Then(f"I check the outputs of {func} with {int_type}"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) + @TestOutline(Scenario) 
-@Examples('func expected_result exitcode', Examples_dec_list) +@Examples("func expected_result exitcode", Examples_dec_list) def math_dec_inline(self, func, expected_result, exitcode, node=None): - """Check mathematical functions with Decimal256 using inline tests. - """ + """Check mathematical functions with Decimal256 using inline tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] if node is None: node = self.context.node - if func in ['intExp2(', 'intExp10(', 'pow(1,', 'power(1,', 'atan2(1,', 'hypot(1,']: + if func in ["intExp2(", "intExp10(", "pow(1,", "power(1,", "atan2(1,", "hypot(1,"]: with When(f"I check {func} with Decimal256 using 1, max, and min"): - node.query(f"SELECT {func} toDecimal256(1,0)), {func} toDecimal256(\'{max}\',0)), {func} toDecimal256(\'{min}\',0))", - exitcode=43, message = 'Exception: Illegal type ') + node.query( + f"SELECT {func} toDecimal256(1,0)), {func} toDecimal256('{max}',0)), {func} toDecimal256('{min}',0))", + exitcode=43, + message="Exception: Illegal type ", + ) else: with When(f"I check {func} with Decimal256 using 1"): output = node.query(f"SELECT {func} toDecimal256(1,0))").output - if output == 'inf': + if output == "inf": pass else: assert round(float(output)) == expected_result, error() with And(f"I check {func} with Decimal256 using max and min"): - execute_query(f""" + execute_query( + f""" SELECT round({func} toDecimal256(\'{max}\',0)),{rounding_precision}), round({func} toDecimal256(\'{min}\',0)),{rounding_precision}) - """) + """ + ) + @TestOutline(Scenario) -@Examples('func expected_result exitcode', Examples_dec_list) +@Examples("func expected_result exitcode", Examples_dec_list) def math_dec_table(self, func, expected_result, exitcode, node=None): - """Check mathematical functions with Decimal256 using table tests. 
- """ + """Check mathematical functions with Decimal256 using table tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] if node is None: node = self.context.node - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given(f"I have a table"): - table(name = table_name, data_type = 'Decimal256(0)') + table(name=table_name, data_type="Decimal256(0)") - if func in ['intExp2(', 'intExp10(', 'pow(1,', 'power(1,', 'atan2(1,', 'hypot(1,']: + if func in ["intExp2(", "intExp10(", "pow(1,", "power(1,", "atan2(1,", "hypot(1,"]: for value in [1, max, min]: - with When(f"I insert the output of {func} with Decimal256 using {value} into a table"): - node.query(f"INSERT INTO {table_name} SELECT {func} toDecimal256(\'{value}\',0))", - exitcode=43, message = 'Exception: Illegal type ') + with When( + f"I insert the output of {func} with Decimal256 using {value} into a table" + ): + node.query( + f"INSERT INTO {table_name} SELECT {func} toDecimal256('{value}',0))", + exitcode=43, + message="Exception: Illegal type ", + ) else: for value in [1, max, min]: - with When(f"I insert the output of {func} with Decimal256 using {value} into a table"): - node.query(f"INSERT INTO {table_name} SELECT round(toDecimal256OrZero( toString({func} toDecimal256(\'{value}\',0))),0), 7)") + with When( + f"I insert the output of {func} with Decimal256 using {value} into a table" + ): + node.query( + f"INSERT INTO {table_name} SELECT round(toDecimal256OrZero( toString({func} toDecimal256('{value}',0))),0), 7)" + ) with Then(f"I check the outputs of {func} with Decimal256"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) + @TestFeature @Name("mathematical") @@ -175,8 +218,7 @@ def math_dec_table(self, func, expected_result, exitcode, node=None): RQ_SRS_020_ClickHouse_Extended_Precision_Mathematical_NotSupported("1.0"), ) def feature(self, node="clickhouse1", mysql_node="mysql1", stress=None, parallel=None): - """Check that mathematical functions work with extended precision data types. 
- """ + """Check that mathematical functions work with extended precision data types.""" self.context.node = self.context.cluster.node(node) self.context.mysql_node = self.context.cluster.node(mysql_node) diff --git a/tests/testflows/extended_precision_data_types/tests/null.py b/tests/testflows/extended_precision_data_types/tests/null.py index f9b93f874bc..2d9f3cedef5 100644 --- a/tests/testflows/extended_precision_data_types/tests/null.py +++ b/tests/testflows/extended_precision_data_types/tests/null.py @@ -2,23 +2,29 @@ from extended_precision_data_types.requirements import * from extended_precision_data_types.common import * funcs = [ - ('isNull(', 0), - ('isNotNull(', 1), - ('coalesce(', 1), - ('assumeNotNull(', 1), - ('toNullable(', 1), - ('ifNull(1,', 1), - ('nullIf(1,', '\\N'), + ("isNull(", 0), + ("isNotNull(", 1), + ("coalesce(", 1), + ("assumeNotNull(", 1), + ("toNullable(", 1), + ("ifNull(1,", 1), + ("nullIf(1,", "\\N"), +] + +Examples_list = [ + tuple(list(func) + list(data_type) + [Name(f"{func[0]}) - {data_type[0]}")]) + for func in funcs + for data_type in data_types +] +Examples_list_dec = [ + tuple(list(func) + [Name(f"{func[0]}) - Decimal256")]) for func in funcs ] -Examples_list = [tuple(list(func)+list(data_type)+[Name(f'{func[0]}) - {data_type[0]}')]) for func in funcs for data_type in data_types] -Examples_list_dec = [tuple(list(func)+[Name(f'{func[0]}) - Decimal256')]) for func in funcs] @TestOutline(Scenario) -@Examples('func expected_result int_type min max', Examples_list) +@Examples("func expected_result int_type min max", Examples_list) def null_int_inline(self, func, expected_result, int_type, min, max, node=None): - """Check null function with Int128, UInt128, Int256, and UInt256 using inline tests. - """ + """Check null function with Int128, UInt128, Int256, and UInt256 using inline tests.""" if node is None: node = self.context.node @@ -28,15 +34,17 @@ def null_int_inline(self, func, expected_result, int_type, min, max, node=None): assert output == str(expected_result), error() with And(f"I check {func} with {int_type} using min and max"): - execute_query(f""" + execute_query( + f""" SELECT {func} to{int_type}(\'{min}\')), {func} to{int_type}(\'{max}\')) - """) + """ + ) + @TestOutline(Scenario) -@Examples('func expected_result int_type min max', Examples_list) +@Examples("func expected_result int_type min max", Examples_list) def null_int_table(self, func, expected_result, int_type, min, max, node=None): - """Check null function with Int128, UInt128, Int256, and UInt256 using table tests. 
- """ + """Check null function with Int128, UInt128, Int256, and UInt256 using table tests.""" table_name = f"table_{getuid()}" @@ -44,23 +52,27 @@ def null_int_table(self, func, expected_result, int_type, min, max, node=None): node = self.context.node with Given("I have a table"): - table(name = table_name, data_type = f'Nullable({int_type})') + table(name=table_name, data_type=f"Nullable({int_type})") for value in [1, min, max]: with When(f"I insert the output of {func} with {int_type} and {value}"): - node.query(f"INSERT INTO {table_name} SELECT {func} to{int_type}(\'{value}\'))") + node.query( + f"INSERT INTO {table_name} SELECT {func} to{int_type}('{value}'))" + ) with Then(f"I check {func} with {int_type} on the table"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) + @TestOutline(Scenario) -@Examples('func expected_result', Examples_list_dec) +@Examples("func expected_result", Examples_list_dec) def null_dec_inline(self, func, expected_result, node=None): - """Check null function with Decimal256 using inline tests. - """ + """Check null function with Decimal256 using inline tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] @@ -72,15 +84,17 @@ def null_dec_inline(self, func, expected_result, node=None): assert output == str(expected_result), error() with And(f"I check {func} with Decimal256 using min and max"): - execute_query(f""" + execute_query( + f""" SELECT {func} toDecimal256(\'{min}\',0)), {func} toDecimal256(\'{max}\',0)) - """) + """ + ) + @TestOutline(Scenario) -@Examples('func expected_result', Examples_list_dec) +@Examples("func expected_result", Examples_list_dec) def null_dec_table(self, func, expected_result, node=None): - """Check null function with Decimal256 using table tests. - """ + """Check null function with Decimal256 using table tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] @@ -90,17 +104,22 @@ def null_dec_table(self, func, expected_result, node=None): node = self.context.node with Given("I have a table"): - table(name = table_name, data_type = 'Nullable(Decimal256(0))') + table(name=table_name, data_type="Nullable(Decimal256(0))") for value in [1, min, max]: with When(f"I insert the output of {func} with Decimal256 and {value}"): - node.query(f"INSERT INTO {table_name} SELECT {func} toDecimal256(\'{value}\',0))") + node.query( + f"INSERT INTO {table_name} SELECT {func} toDecimal256('{value}',0))" + ) with Then(f"I check {func} with Decimal256 on the table"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) + @TestFeature @Name("null") @@ -108,8 +127,7 @@ def null_dec_table(self, func, expected_result, node=None): RQ_SRS_020_ClickHouse_Extended_Precision_Null("1.0"), ) def feature(self, node="clickhouse1", mysql_node="mysql1", stress=None, parallel=None): - """Check that null functions work with extended precision data types. 
- """ + """Check that null functions work with extended precision data types.""" self.context.node = self.context.cluster.node(node) self.context.mysql_node = self.context.cluster.node(mysql_node) diff --git a/tests/testflows/extended_precision_data_types/tests/rounding.py b/tests/testflows/extended_precision_data_types/tests/rounding.py index e32f4e941d3..489c545187c 100644 --- a/tests/testflows/extended_precision_data_types/tests/rounding.py +++ b/tests/testflows/extended_precision_data_types/tests/rounding.py @@ -2,34 +2,45 @@ from extended_precision_data_types.requirements import * from extended_precision_data_types.common import * funcs = [ - ('ceil', 1, True), - ('floor', 1, True), - ('trunc', 1, True), - ('round', 1, True), - ('roundBankers', 1, True), - ('roundToExp2', 1, False), - ('roundDuration', 1, True), - ('roundAge', 17, True), - ('roundDown', 1, False) + ("ceil", 1, True), + ("floor", 1, True), + ("trunc", 1, True), + ("round", 1, True), + ("roundBankers", 1, True), + ("roundToExp2", 1, False), + ("roundDuration", 1, True), + ("roundAge", 17, True), + ("roundDown", 1, False), +] + +Examples_list = [ + tuple(list(func) + list(data_type) + [Name(f"{func[0]} - {data_type[0]}")]) + for func in funcs + for data_type in data_types +] +Examples_dec_list = [ + tuple(list(func) + [Name(f"{func[0]} - Decimal256")]) for func in funcs ] -Examples_list = [tuple(list(func)+list(data_type)+[Name(f'{func[0]} - {data_type[0]}')]) for func in funcs for data_type in data_types] -Examples_dec_list = [tuple(list(func)+[Name(f'{func[0]} - Decimal256')]) for func in funcs] @TestOutline(Scenario) -@Examples('func expected_result supported int_type min max', Examples_list) -def round_int_inline(self, func, expected_result, supported, int_type, min, max, node=None): - """Check rounding functions with Int128, UInt128, Int256, and UInt256 using inline tests. 
- """ +@Examples("func expected_result supported int_type min max", Examples_list) +def round_int_inline( + self, func, expected_result, supported, int_type, min, max, node=None +): + """Check rounding functions with Int128, UInt128, Int256, and UInt256 using inline tests.""" if node is None: node = self.context.node - if func == 'roundDown': + if func == "roundDown": with When(f"I check roundDown with {int_type}"): - node.query(f"SELECT roundDown(to{int_type}(1), [0,2]), roundDown(to{int_type}(\'{max}\'), [0,2]), roundDown(to{int_type}(\'{min}\'), [0,2])", - exitcode=44, message=f'Exception: Illegal column {int_type} of first argument of function roundDown') + node.query( + f"SELECT roundDown(to{int_type}(1), [0,2]), roundDown(to{int_type}('{max}'), [0,2]), roundDown(to{int_type}('{min}'), [0,2])", + exitcode=44, + message=f"Exception: Illegal column {int_type} of first argument of function roundDown", + ) elif supported: @@ -37,22 +48,29 @@ def round_int_inline(self, func, expected_result, supported, int_type, min, max, output = node.query(f"SELECT {func}(to{int_type}(1))").output assert output == str(expected_result), error() - with And(f'I check {func} with {int_type} using min and max values'): - execute_query(f""" + with And(f"I check {func} with {int_type} using min and max values"): + execute_query( + f""" SELECT {func}(to{int_type}(\'{min}\')), {func}(to{int_type}(\'{max}\')) - """) + """ + ) else: with When(f"I check {func} with {int_type}"): - node.query(f"SELECT {func}(to{int_type}(1)), {func}(to{int_type}(\'{max}\')), {func}(to{int_type}(\'{min}\'))", - exitcode=48, message=f'Exception: {func}() for big integers is not implemented:') + node.query( + f"SELECT {func}(to{int_type}(1)), {func}(to{int_type}('{max}')), {func}(to{int_type}('{min}'))", + exitcode=48, + message=f"Exception: {func}() for big integers is not implemented:", + ) + @TestOutline(Scenario) -@Examples('func expected_result supported int_type min max', Examples_list) -def round_int_table(self, func, expected_result, supported, int_type, min, max, node=None): - """Check rounding functions with Int128, UInt128, Int256, and UInt256 using table tests. 
- """ +@Examples("func expected_result supported int_type min max", Examples_list) +def round_int_table( + self, func, expected_result, supported, int_type, min, max, node=None +): + """Check rounding functions with Int128, UInt128, Int256, and UInt256 using table tests.""" table_name = f"table_{getuid()}" @@ -60,77 +78,99 @@ def round_int_table(self, func, expected_result, supported, int_type, min, max, node = self.context.node with Given("I have a table"): - table(name = table_name, data_type = int_type) + table(name=table_name, data_type=int_type) - if func == 'roundDown': + if func == "roundDown": - for value in [1,max,min]: + for value in [1, max, min]: with When(f"I check roundDown with {int_type} and {value}"): - node.query(f"INSERT INTO {table_name} SELECT roundDown(to{int_type}(\'{value}\'), [0,2])", - exitcode=44, message=f'Exception: Illegal column {int_type} of first argument of function roundDown') + node.query( + f"INSERT INTO {table_name} SELECT roundDown(to{int_type}('{value}'), [0,2])", + exitcode=44, + message=f"Exception: Illegal column {int_type} of first argument of function roundDown", + ) elif supported: - for value in [1,max,min]: + for value in [1, max, min]: - with When(f"I insert the output of {func} with {int_type} and {value} into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(to{int_type}(\'{value}\'))") + with When( + f"I insert the output of {func} with {int_type} and {value} into the table" + ): + node.query( + f"INSERT INTO {table_name} SELECT {func}(to{int_type}('{value}'))" + ) with Then(f"I select the output of {func} with {int_type} from the table"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) else: - for value in [1,max,min]: + for value in [1, max, min]: + + with When( + f"I insert the output of {func} with {int_type} and {value} into the table" + ): + node.query( + f"INSERT INTO {table_name} SELECT {func}(to{int_type}(1))", + exitcode=48, + message=f"Exception: {func}() for big integers is not implemented:", + ) - with When(f"I insert the output of {func} with {int_type} and {value} into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(to{int_type}(1))", - exitcode=48, message=f'Exception: {func}() for big integers is not implemented:') @TestOutline(Scenario) -@Examples('func expected_result supported', Examples_dec_list) +@Examples("func expected_result supported", Examples_dec_list) def round_dec_inline(self, func, expected_result, supported, node=None): - """Check rounding functions with Decimal256 using inline tests. 
- """ + """Check rounding functions with Decimal256 using inline tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] if node is None: node = self.context.node - if func == 'roundDown': + if func == "roundDown": with When(f"I check roundDown with Decimal256"): - node.query(f"""SELECT roundDown(toDecimal256(1,0), [toDecimal256(0,0),toDecimal256(2,0)]), + node.query( + f"""SELECT roundDown(toDecimal256(1,0), [toDecimal256(0,0),toDecimal256(2,0)]), roundDown(toDecimal256(\'{max}\',0), [toDecimal256(0,0),toDecimal256(2,0)]), roundDown(toDecimal256(\'{min}\',0), [toDecimal256(0,0),toDecimal256(2,0)])""", - exitcode=44, message=f'Exception: Illegal column Decimal256 of first argument of function roundDown') + exitcode=44, + message=f"Exception: Illegal column Decimal256 of first argument of function roundDown", + ) - elif func not in ['roundDuration', 'roundAge', 'roundToExp2']: + elif func not in ["roundDuration", "roundAge", "roundToExp2"]: with When(f"I check {func} with Decimal256"): output = node.query(f"SELECT {func}(toDecimal256(1,0))").output assert output == str(expected_result), error() - with And(f'I check {func} with Decimal256 using min and max values'): - execute_query(f""" + with And(f"I check {func} with Decimal256 using min and max values"): + execute_query( + f""" SELECT {func}(toDecimal256(\'{min}\',0)), {func}(toDecimal256(\'{max}\',0)) - """) + """ + ) else: with When(f"I check {func} with Decimal256"): - node.query(f"SELECT {func}(toDecimal256(1,0)), {func}(toDecimal256(\'{max}\',0)), {func}(toDecimal256(\'{min}\',0))", - exitcode=43, message=f'Exception: Illegal type Decimal(76, 0)') + node.query( + f"SELECT {func}(toDecimal256(1,0)), {func}(toDecimal256('{max}',0)), {func}(toDecimal256('{min}',0))", + exitcode=43, + message=f"Exception: Illegal type Decimal(76, 0)", + ) + @TestOutline(Scenario) -@Examples('func expected_result supported', Examples_dec_list) +@Examples("func expected_result supported", Examples_dec_list) def round_dec_table(self, func, expected_result, supported, node=None): - """Check rounding functions with Decimal256 using table tests. 
- """ + """Check rounding functions with Decimal256 using table tests.""" min = Decimal256_min_max[0] max = Decimal256_min_max[1] @@ -140,35 +180,50 @@ def round_dec_table(self, func, expected_result, supported, node=None): node = self.context.node with Given("I have a table"): - table(name = table_name, data_type = 'Decimal256(0)') + table(name=table_name, data_type="Decimal256(0)") - if func == 'roundDown': + if func == "roundDown": for value in [1, max, min]: with When(f"I check roundDown with Decimal256 and {value}"): - node.query(f"INSERT INTO {table_name} SELECT roundDown(toDecimal256(\'{value}\',0), [toDecimal256(0,0),toDecimal256(2,0)])", - exitcode=44, message=f'Exception: Illegal column Decimal256 of first argument of function roundDown') + node.query( + f"INSERT INTO {table_name} SELECT roundDown(toDecimal256('{value}',0), [toDecimal256(0,0),toDecimal256(2,0)])", + exitcode=44, + message=f"Exception: Illegal column Decimal256 of first argument of function roundDown", + ) - elif func not in ['roundDuration', 'roundAge', 'roundToExp2']: + elif func not in ["roundDuration", "roundAge", "roundToExp2"]: for value in [1, max, min]: - with When(f"I insert the output of {func} with Decimal256 and {value} into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(toDecimal256(\'{value}\',0))") + with When( + f"I insert the output of {func} with Decimal256 and {value} into the table" + ): + node.query( + f"INSERT INTO {table_name} SELECT {func}(toDecimal256('{value}',0))" + ) with Then(f"I select the output of {func} with Decimal256 from the table"): - execute_query(f""" + execute_query( + f""" SELECT * FROM {table_name} ORDER BY a ASC - """) + """ + ) else: for value in [1, max, min]: - with When(f"I insert the output of {func} with Decimal256 and {value} into the table"): - node.query(f"INSERT INTO {table_name} SELECT {func}(toDecimal256(\'{value}\',0))", - exitcode=43, message=f'Exception: Illegal type Decimal(76, 0)') + with When( + f"I insert the output of {func} with Decimal256 and {value} into the table" + ): + node.query( + f"INSERT INTO {table_name} SELECT {func}(toDecimal256('{value}',0))", + exitcode=43, + message=f"Exception: Illegal type Decimal(76, 0)", + ) + @TestFeature @Name("rounding") @@ -179,8 +234,7 @@ def round_dec_table(self, func, expected_result, supported, node=None): RQ_SRS_020_ClickHouse_Extended_Precision_Rounding_Dec_NotSupported("1.0"), ) def feature(self, node="clickhouse1", mysql_node="mysql1", stress=None, parallel=None): - """Check that rounding functions work with extended precision data types. - """ + """Check that rounding functions work with extended precision data types.""" self.context.node = self.context.cluster.node(node) self.context.mysql_node = self.context.cluster.node(mysql_node) diff --git a/tests/testflows/extended_precision_data_types/tests/table.py b/tests/testflows/extended_precision_data_types/tests/table.py index 1548d6b20c2..58ec41f8e82 100644 --- a/tests/testflows/extended_precision_data_types/tests/table.py +++ b/tests/testflows/extended_precision_data_types/tests/table.py @@ -5,14 +5,14 @@ from contextlib import contextmanager from extended_precision_data_types.requirements import * from extended_precision_data_types.common import * + @TestFeature @Name("table") @Requirements( RQ_SRS_020_ClickHouse_Extended_Precision_Create_Table("1.0"), ) def feature(self, node="clickhouse1", mysql_node="mysql1", stress=None, parallel=None): - """Check that clickhouse is able to create a table with extended precision data types. 
- """ + """Check that clickhouse is able to create a table with extended precision data types.""" node = self.context.cluster.node(node) table_name = f"table_{getuid()}" @@ -20,15 +20,21 @@ def feature(self, node="clickhouse1", mysql_node="mysql1", stress=None, parallel with allow_experimental_bigint(node): try: - with When("I create a table with Int128, UInt128, Int256, UInt256, Decimal256"): - node.query(f"CREATE TABLE {table_name}(a Int128, b UInt128, c Int256, d UInt256, e Decimal256(0)) ENGINE = Memory") + with When( + "I create a table with Int128, UInt128, Int256, UInt256, Decimal256" + ): + node.query( + f"CREATE TABLE {table_name}(a Int128, b UInt128, c Int256, d UInt256, e Decimal256(0)) ENGINE = Memory" + ) with And("I insert values into the table"): - node.query(f"INSERT INTO {table_name} VALUES (toInt128(1), toUInt128(1), toInt256(1), toUInt256(1), toDecimal256(1,0))") + node.query( + f"INSERT INTO {table_name} VALUES (toInt128(1), toUInt128(1), toInt256(1), toUInt256(1), toDecimal256(1,0))" + ) with Then("I select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert output == '1\t1\t1\t1\t1', error() + assert output == "1\t1\t1\t1\t1", error() finally: with Finally("I drop the table"): diff --git a/tests/testflows/helpers/argparser.py b/tests/testflows/helpers/argparser.py index db6f1abb30b..ec26b8f654b 100644 --- a/tests/testflows/helpers/argparser.py +++ b/tests/testflows/helpers/argparser.py @@ -1,20 +1,36 @@ import os + def argparser(parser): - """Default argument parser for regressions. - """ - parser.add_argument("--local", + """Default argument parser for regressions.""" + parser.add_argument( + "--local", action="store_true", - help="run regression in local mode", default=False) + help="run regression in local mode", + default=False, + ) - parser.add_argument("--clickhouse-version", type=str, dest="clickhouse_version", - help="clickhouse server version", metavar="version", - default=os.getenv("CLICKHOUSE_TESTS_SERVER_VERSION", None)) + parser.add_argument( + "--clickhouse-version", + type=str, + dest="clickhouse_version", + help="clickhouse server version", + metavar="version", + default=os.getenv("CLICKHOUSE_TESTS_SERVER_VERSION", None), + ) - parser.add_argument("--clickhouse-binary-path", - type=str, dest="clickhouse_binary_path", - help="path to ClickHouse binary, default: /usr/bin/clickhouse", metavar="path", - default=os.getenv("CLICKHOUSE_TESTS_SERVER_BIN_PATH", "/usr/bin/clickhouse")) + parser.add_argument( + "--clickhouse-binary-path", + type=str, + dest="clickhouse_binary_path", + help="path to ClickHouse binary, default: /usr/bin/clickhouse", + metavar="path", + default=os.getenv("CLICKHOUSE_TESTS_SERVER_BIN_PATH", "/usr/bin/clickhouse"), + ) - parser.add_argument("--stress", action="store_true", default=False, - help="enable stress testing (might take a long time)") + parser.add_argument( + "--stress", + action="store_true", + default=False, + help="enable stress testing (might take a long time)", + ) diff --git a/tests/testflows/helpers/cluster.py b/tests/testflows/helpers/cluster.py index a2a9ec92c18..ae9f9d6623e 100755 --- a/tests/testflows/helpers/cluster.py +++ b/tests/testflows/helpers/cluster.py @@ -26,10 +26,11 @@ MESSAGES_TO_RETRY = [ "ConnectionPoolWithFailover: Connection failed at try", "DB::Exception: New table appeared in database being dropped or detached. 
Try again", "is already started to be removing by another replica right now", - "Shutdown is called for table", # happens in SYSTEM SYNC REPLICA query if session with ZooKeeper is being reinitialized. - "is executing longer than distributed_ddl_task_timeout" # distributed TTL timeout message + "Shutdown is called for table", # happens in SYSTEM SYNC REPLICA query if session with ZooKeeper is being reinitialized. + "is executing longer than distributed_ddl_task_timeout", # distributed TTL timeout message ] + class Shell(ShellBase): def __exit__(self, type, value, traceback): # send exit and Ctrl-D repeatedly @@ -42,20 +43,22 @@ class Shell(ShellBase): for i in range(10): if self.child is not None: try: - self.send('exit\r', eol='') - self.send('\x04\r', eol='') + self.send("exit\r", eol="") + self.send("\x04\r", eol="") except OSError: pass return super(Shell, self).__exit__(type, value, traceback) + class QueryRuntimeException(Exception): - """Exception during query execution on the server. - """ + """Exception during query execution on the server.""" + pass + class Node(object): - """Generic cluster node. - """ + """Generic cluster node.""" + config_d_dir = "/etc/clickhouse-server/config.d/" def __init__(self, cluster, name): @@ -66,8 +69,7 @@ class Node(object): return f"Node(name='{self.name}')" def close_bashes(self): - """Close all active bashes to the node. - """ + """Close all active bashes to the node.""" with self.cluster.lock: for key in list(self.cluster._bash.keys()): if key.endswith(f"-{self.name}"): @@ -80,36 +82,56 @@ class Node(object): with attempt: if self.command("echo 1", no_checks=1, steps=False).exitcode != 0: fail("container is not healthy") - + def restart(self, timeout=300, retry_count=5, safe=True): - """Restart node. - """ + """Restart node.""" self.close_bashes() retry(self.cluster.command, retry_count)( - None, f'{self.cluster.docker_compose} restart {self.name}', - timeout=timeout, exitcode=0, steps=False) + None, + f"{self.cluster.docker_compose} restart {self.name}", + timeout=timeout, + exitcode=0, + steps=False, + ) def start(self, timeout=300, retry_count=5): - """Start node. - """ + """Start node.""" retry(self.cluster.command, retry_count)( - None, f'{self.cluster.docker_compose} start {self.name}', - timeout=timeout, exitcode=0, steps=False) + None, + f"{self.cluster.docker_compose} start {self.name}", + timeout=timeout, + exitcode=0, + steps=False, + ) def stop(self, timeout=300, retry_count=5, safe=True): - """Stop node. - """ + """Stop node.""" self.close_bashes() retry(self.cluster.command, retry_count)( - None, f'{self.cluster.docker_compose} stop {self.name}', - timeout=timeout, exitcode=0, steps=False) + None, + f"{self.cluster.docker_compose} stop {self.name}", + timeout=timeout, + exitcode=0, + steps=False, + ) def command(self, *args, **kwargs): return self.cluster.command(self.name, *args, **kwargs) - def cmd(self, cmd, message=None, exitcode=None, steps=True, shell_command="bash --noediting", no_checks=False, - raise_on_exception=False, step=By, *args, **kwargs): + def cmd( + self, + cmd, + message=None, + exitcode=None, + steps=True, + shell_command="bash --noediting", + no_checks=False, + raise_on_exception=False, + step=By, + *args, + **kwargs, + ): """Execute and check command. 
:param cmd: command :param message: expected message that should be in the output, default: None @@ -117,9 +139,13 @@ class Node(object): """ command = f"{cmd}" - with step("executing command", description=command, format_description=False) if steps else NullStep(): + with step( + "executing command", description=command, format_description=False + ) if steps else NullStep(): try: - r = self.cluster.bash(self.name, command=shell_command)(command, *args, **kwargs) + r = self.cluster.bash(self.name, command=shell_command)( + command, *args, **kwargs + ) except ExpectTimeoutError: self.cluster.close_bash(self.name) raise @@ -132,42 +158,75 @@ class Node(object): assert r.exitcode == exitcode, error(r.output) if message is not None: - with Then(f"output should contain message", description=message) if steps else NullStep(): + with Then( + f"output should contain message", description=message + ) if steps else NullStep(): assert message in r.output, error(r.output) return r class ClickHouseNode(Node): - """Node with ClickHouse server. - """ + """Node with ClickHouse server.""" + def thread_fuzzer(self): with Given("exporting THREAD_FUZZER"): self.command("export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000") self.command("export THREAD_FUZZER_SLEEP_PROBABILITY=0.1") self.command("export THREAD_FUZZER_SLEEP_TIME_US=100000") - self.command("export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1") - self.command("export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1") - self.command("export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1") - self.command("export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1") + self.command( + "export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1" + ) + self.command( + "export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1" + ) + self.command( + "export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1" + ) + self.command( + "export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1" + ) - self.command("export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001") - self.command("export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001") - self.command("export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001") - self.command("export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001") - self.command("export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000") - self.command("export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000") - self.command("export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000") - self.command("export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000") + self.command( + "export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001" + ) + self.command( + "export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001" + ) + self.command( + "export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001" + ) + self.command( + "export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001" + ) + self.command( + "export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000" + ) + self.command( + "export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000" + ) + self.command( + "export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000" + ) + self.command( + "export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000" + ) def wait_clickhouse_healthy(self, timeout=300): 
with By(f"waiting until ClickHouse server on {self.name} is healthy"): for attempt in retries(timeout=timeout, delay=1): with attempt: - if self.query("SELECT version()", no_checks=1, steps=False).exitcode != 0: + if ( + self.query( + "SELECT version()", no_checks=1, steps=False + ).exitcode + != 0 + ): fail("ClickHouse server is not healthy") - node_version = self.query("SELECT version()", no_checks=1, steps=False).output + node_version = self.query( + "SELECT version()", no_checks=1, steps=False + ).output if current().context.clickhouse_version is None: current().context.clickhouse_version = node_version else: @@ -182,8 +241,7 @@ class ClickHouseNode(Node): return None def stop_clickhouse(self, timeout=300, safe=True): - """Stop ClickHouse server. - """ + """Stop ClickHouse server.""" if safe: self.query("SYSTEM STOP MOVES") self.query("SYSTEM STOP MERGES") @@ -202,15 +260,24 @@ class ClickHouseNode(Node): with attempt: if i > 0 and i % 20 == 0: self.command(f"kill -KILL {pid}", steps=False) - if self.command(f"ps {pid}", steps=False, no_checks=True).exitcode != 1: + if ( + self.command(f"ps {pid}", steps=False, no_checks=True).exitcode + != 1 + ): fail("pid still alive") with And("deleting ClickHouse server pid file"): self.command("rm -rf /tmp/clickhouse-server.pid", exitcode=0, steps=False) - def start_clickhouse(self, timeout=300, wait_healthy=True, retry_count=5, user=None, thread_fuzzer=False): - """Start ClickHouse server. - """ + def start_clickhouse( + self, + timeout=300, + wait_healthy=True, + retry_count=5, + user=None, + thread_fuzzer=False, + ): + """Start ClickHouse server.""" pid = self.clickhouse_pid() if pid: raise RuntimeError(f"ClickHouse server already running with pid {pid}") @@ -225,53 +292,81 @@ class ClickHouseNode(Node): " --log-file=/var/log/clickhouse-server/clickhouse-server.log" " --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log" " --pidfile=/tmp/clickhouse-server.pid --daemon", - exitcode=0, steps=False) + exitcode=0, + steps=False, + ) else: with By(f"starting ClickHouse server process from {user}"): - self.command(f'su {user} -c' + self.command( + f"su {user} -c" '"clickhouse server --config-file=/etc/clickhouse-server/config.xml' - ' --log-file=/var/log/clickhouse-server/clickhouse-server.log' - ' --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log' + " --log-file=/var/log/clickhouse-server/clickhouse-server.log" + " --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log" ' --pidfile=/tmp/clickhouse-server.pid --daemon"', - exitcode=0, steps=False) + exitcode=0, + steps=False, + ) with And("checking that ClickHouse server pid file was created"): for attempt in retries(timeout=timeout, delay=1): with attempt: - if self.command("ls /tmp/clickhouse-server.pid", steps=False, no_checks=True).exitcode != 0: + if ( + self.command( + "ls /tmp/clickhouse-server.pid", steps=False, no_checks=True + ).exitcode + != 0 + ): fail("no pid file yet") if wait_healthy: self.wait_clickhouse_healthy(timeout=timeout) - def restart_clickhouse(self, timeout=300, safe=True, wait_healthy=True, retry_count=5, user=None): - """Restart ClickHouse server. - """ + def restart_clickhouse( + self, timeout=300, safe=True, wait_healthy=True, retry_count=5, user=None + ): + """Restart ClickHouse server.""" if self.clickhouse_pid(): self.stop_clickhouse(timeout=timeout, safe=safe) self.start_clickhouse(timeout=timeout, wait_healthy=wait_healthy, user=user) def stop(self, timeout=300, safe=True, retry_count=5): - """Stop node. 
- """ + """Stop node.""" if self.clickhouse_pid(): self.stop_clickhouse(timeout=timeout, safe=safe) - return super(ClickHouseNode, self).stop(timeout=timeout, retry_count=retry_count) + return super(ClickHouseNode, self).stop( + timeout=timeout, retry_count=retry_count + ) - def start(self, timeout=300, start_clickhouse=True, wait_healthy=True, retry_count=5, user=None): - """Start node. - """ + def start( + self, + timeout=300, + start_clickhouse=True, + wait_healthy=True, + retry_count=5, + user=None, + ): + """Start node.""" super(ClickHouseNode, self).start(timeout=timeout, retry_count=retry_count) if start_clickhouse: - self.start_clickhouse(timeout=timeout, wait_healthy=wait_healthy, user=user,) + self.start_clickhouse( + timeout=timeout, + wait_healthy=wait_healthy, + user=user, + ) - def restart(self, timeout=300, safe=True, start_clickhouse=True, - wait_healthy=True, retry_count=5, user=None): - """Restart node. - """ + def restart( + self, + timeout=300, + safe=True, + start_clickhouse=True, + wait_healthy=True, + retry_count=5, + user=None, + ): + """Restart node.""" if self.clickhouse_pid(): self.stop_clickhouse(timeout=timeout, safe=safe) @@ -280,8 +375,17 @@ class ClickHouseNode(Node): if start_clickhouse: self.start_clickhouse(timeout=timeout, wait_healthy=wait_healthy, user=user) - def hash_query(self, sql, hash_utility="sha1sum", steps=True, step=By, - settings=None, secure=False, *args, **kwargs): + def hash_query( + self, + sql, + hash_utility="sha1sum", + steps=True, + step=By, + settings=None, + secure=False, + *args, + **kwargs, + ): """Execute sql query inside the container and return the hash of the output. :param sql: sql query @@ -301,27 +405,31 @@ class ClickHouseNode(Node): with tempfile.NamedTemporaryFile("w", encoding="utf-8") as query: query.write(sql) query.flush() - command = f"set -o pipefail && cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} {client} | {hash_utility}" + command = f'set -o pipefail && cat "{query.name}" | {self.cluster.docker_compose} exec -T {self.name} {client} | {hash_utility}' for setting in query_settings: name, value = setting - command += f" --{name} \"{value}\"" + command += f' --{name} "{value}"' description = f""" echo -e \"{sql[:100]}...\" > {query.name} {command} """ - with step("executing command", description=description, - format_description=False) if steps else NullStep(): + with step( + "executing command", + description=description, + format_description=False, + ) if steps else NullStep(): try: r = self.cluster.bash(None)(command, *args, **kwargs) except ExpectTimeoutError: self.cluster.close_bash(None) else: - command = f"set -o pipefail && echo -e \"{sql}\" | {client} | {hash_utility}" + command = f'set -o pipefail && echo -e "{sql}" | {client} | {hash_utility}' for setting in query_settings: name, value = setting - command += f" --{name} \"{value}\"" - with step("executing command", description=command, - format_description=False) if steps else NullStep(): + command += f' --{name} "{value}"' + with step( + "executing command", description=command, format_description=False + ) if steps else NullStep(): try: r = self.cluster.bash(self.name)(command, *args, **kwargs) except ExpectTimeoutError: @@ -332,8 +440,17 @@ class ClickHouseNode(Node): return r.output - def diff_query(self, sql, expected_output, steps=True, step=By, - settings=None, secure=False, *args, **kwargs): + def diff_query( + self, + sql, + expected_output, + steps=True, + step=By, + settings=None, + secure=False, + *args, + 
**kwargs, + ): """Execute inside the container but from the host and compare its output to file that is located on the host. @@ -357,26 +474,31 @@ class ClickHouseNode(Node): with tempfile.NamedTemporaryFile("w", encoding="utf-8") as query: query.write(sql) query.flush() - command = f"diff <(cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} {client}) {expected_output}" + command = f'diff <(cat "{query.name}" | {self.cluster.docker_compose} exec -T {self.name} {client}) {expected_output}' for setting in query_settings: name, value = setting - command += f" --{name} \"{value}\"" + command += f' --{name} "{value}"' description = f""" echo -e \"{sql[:100]}...\" > {query.name} {command} """ - with step("executing command", description=description, format_description=False) if steps else NullStep(): + with step( + "executing command", + description=description, + format_description=False, + ) if steps else NullStep(): try: r = self.cluster.bash(None)(command, *args, **kwargs) except ExpectTimeoutError: self.cluster.close_bash(None) else: - command = f"diff <(echo -e \"{sql}\" | {self.cluster.docker_compose} exec -T {self.name} {client}) {expected_output}" + command = f'diff <(echo -e "{sql}" | {self.cluster.docker_compose} exec -T {self.name} {client}) {expected_output}' for setting in query_settings: name, value = setting - command += f" --{name} \"{value}\"" - with step("executing command", description=command, - format_description=False) if steps else NullStep(): + command += f' --{name} "{value}"' + with step( + "executing command", description=command, format_description=False + ) if steps else NullStep(): try: r = self.cluster.bash(None)(command, *args, **kwargs) except ExpectTimeoutError: @@ -385,10 +507,23 @@ class ClickHouseNode(Node): with Then(f"exitcode should be 0") if steps else NullStep(): assert r.exitcode == 0, error(r.output) - def query(self, sql, message=None, exitcode=None, steps=True, no_checks=False, - raise_on_exception=False, step=By, settings=None, - retry_count=5, messages_to_retry=None, retry_delay=5, secure=False, - *args, **kwargs): + def query( + self, + sql, + message=None, + exitcode=None, + steps=True, + no_checks=False, + raise_on_exception=False, + step=By, + settings=None, + retry_count=5, + messages_to_retry=None, + retry_delay=5, + secure=False, + *args, + **kwargs, + ): """Execute and check query. 
:param sql: sql query :param message: expected message that should be in the output, default: None @@ -422,26 +557,32 @@ class ClickHouseNode(Node): with tempfile.NamedTemporaryFile("w", encoding="utf-8") as query: query.write(sql) query.flush() - command = f"cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} {client}" + command = f'cat "{query.name}" | {self.cluster.docker_compose} exec -T {self.name} {client}' for setting in query_settings: name, value = setting - command += f" --{name} \"{value}\"" + command += f' --{name} "{value}"' description = f""" echo -e \"{sql[:100]}...\" > {query.name} {command} """ - with step("executing command", description=description, format_description=False) if steps else NullStep(): + with step( + "executing command", + description=description, + format_description=False, + ) if steps else NullStep(): try: r = self.cluster.bash(None)(command, *args, **kwargs) except ExpectTimeoutError: self.cluster.close_bash(None) raise else: - command = f"echo -e \"{sql}\" | {client}" + command = f'echo -e "{sql}" | {client}' for setting in query_settings: name, value = setting - command += f" --{name} \"{value}\"" - with step("executing command", description=command, format_description=False) if steps else NullStep(): + command += f' --{name} "{value}"' + with step( + "executing command", description=command, format_description=False + ) if steps else NullStep(): try: r = self.cluster.bash(self.name)(command, *args, **kwargs) except ExpectTimeoutError: @@ -451,11 +592,20 @@ class ClickHouseNode(Node): if retry_count and retry_count > 0: if any(msg in r.output for msg in messages_to_retry): time.sleep(retry_delay) - return self.query(sql=sql, message=message, exitcode=exitcode, - steps=steps, no_checks=no_checks, - raise_on_exception=raise_on_exception, step=step, settings=settings, - retry_count=retry_count-1, messages_to_retry=messages_to_retry, - *args, **kwargs) + return self.query( + sql=sql, + message=message, + exitcode=exitcode, + steps=steps, + no_checks=no_checks, + raise_on_exception=raise_on_exception, + step=step, + settings=settings, + retry_count=retry_count - 1, + messages_to_retry=messages_to_retry, + *args, + **kwargs, + ) if no_checks: return r @@ -465,7 +615,9 @@ class ClickHouseNode(Node): assert r.exitcode == exitcode, error(r.output) if message is not None: - with Then(f"output should contain message", description=message) if steps else NullStep(): + with Then( + f"output should contain message", description=message + ) if steps else NullStep(): assert message in r.output, error(r.output) if message is None or "Exception:" not in message: @@ -479,16 +631,20 @@ class ClickHouseNode(Node): class Cluster(object): - """Simple object around docker-compose cluster. 
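
Node.query above retries when the output contains one of the known transient messages, sleeping retry_delay between attempts and decrementing retry_count. A stripped-down sketch of that pattern; run_query and fake_runner are illustrative stand-ins for the real docker-compose client invocation, and RETRYABLE is a subset of the MESSAGES_TO_RETRY list shown earlier:

import time

# A subset of the transient messages listed in MESSAGES_TO_RETRY above.
RETRYABLE = [
    "ConnectionPoolWithFailover: Connection failed at try",
    "is executing longer than distributed_ddl_task_timeout",
]

def query_with_retries(run_query, sql, retry_count=5, retry_delay=5):
    exitcode, output = run_query(sql)
    if retry_count > 0 and any(msg in output for msg in RETRYABLE):
        time.sleep(retry_delay)
        return query_with_retries(run_query, sql, retry_count - 1, retry_delay)
    return exitcode, output

# Fake runner that fails once with a retryable message, then succeeds.
attempts = {"n": 0}
def fake_runner(sql):
    attempts["n"] += 1
    if attempts["n"] == 1:
        return 1, "ConnectionPoolWithFailover: Connection failed at try"
    return 0, "1"

print(query_with_retries(fake_runner, "SELECT 1", retry_delay=0))  # -> (0, '1')
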
- """ - def __init__(self, local=False, - clickhouse_binary_path=None, - clickhouse_odbc_bridge_binary_path=None, - configs_dir=None, - nodes=None, - docker_compose="docker-compose", docker_compose_project_dir=None, - docker_compose_file="docker-compose.yml", - environ=None): + """Simple object around docker-compose cluster.""" + + def __init__( + self, + local=False, + clickhouse_binary_path=None, + clickhouse_odbc_bridge_binary_path=None, + configs_dir=None, + nodes=None, + docker_compose="docker-compose", + docker_compose_project_dir=None, + docker_compose_file="docker-compose.yml", + environ=None, + ): self._bash = {} self._control_shell = None @@ -515,29 +671,46 @@ class Cluster(object): if docker_compose_project_dir is None: raise TypeError("docker compose directory must be specified.") - docker_compose_file_path = os.path.join(docker_compose_project_dir or "", docker_compose_file) + docker_compose_file_path = os.path.join( + docker_compose_project_dir or "", docker_compose_file + ) if not os.path.exists(docker_compose_file_path): - raise TypeError(f"docker compose file '{docker_compose_file_path}' does not exist") + raise TypeError( + f"docker compose file '{docker_compose_file_path}' does not exist" + ) - if self.clickhouse_binary_path and self.clickhouse_binary_path.startswith("docker://"): - if current().context.clickhouse_version is None: + if self.clickhouse_binary_path and self.clickhouse_binary_path.startswith( + "docker://" + ): + if current().context.clickhouse_version is None: try: - current().context.clickhouse_version = self.clickhouse_binary_path.split(":")[2] - debug(f"auto setting clickhouse version to {current().context.clickhouse_version}") + current().context.clickhouse_version = ( + self.clickhouse_binary_path.split(":")[2] + ) + debug( + f"auto setting clickhouse version to {current().context.clickhouse_version}" + ) except IndexError: current().context.clickhouse_version = None - self.clickhouse_binary_path, self.clickhouse_odbc_bridge_binary_path = self.get_clickhouse_binary_from_docker_container( - self.clickhouse_binary_path) + ( + self.clickhouse_binary_path, + self.clickhouse_odbc_bridge_binary_path, + ) = self.get_clickhouse_binary_from_docker_container( + self.clickhouse_binary_path + ) - self.docker_compose += f" --ansi never --project-directory \"{docker_compose_project_dir}\" --file \"{docker_compose_file_path}\"" + self.docker_compose += f' --ansi never --project-directory "{docker_compose_project_dir}" --file "{docker_compose_file_path}"' self.lock = threading.Lock() - def get_clickhouse_binary_from_docker_container(self, docker_image, - container_clickhouse_binary_path="/usr/bin/clickhouse", - container_clickhouse_odbc_bridge_binary_path="/usr/bin/clickhouse-odbc-bridge", - host_clickhouse_binary_path=None, - host_clickhouse_odbc_bridge_binary_path=None): + def get_clickhouse_binary_from_docker_container( + self, + docker_image, + container_clickhouse_binary_path="/usr/bin/clickhouse", + container_clickhouse_odbc_bridge_binary_path="/usr/bin/clickhouse-odbc-bridge", + host_clickhouse_binary_path=None, + host_clickhouse_odbc_bridge_binary_path=None, + ): """Get clickhouse-server and clickhouse-odbc-bridge binaries from some Docker container. 
""" @@ -545,25 +718,38 @@ class Cluster(object): docker_container_name = str(uuid.uuid1()) if host_clickhouse_binary_path is None: - host_clickhouse_binary_path = os.path.join(tempfile.gettempdir(), f"{docker_image.rsplit('/',1)[-1].replace(':','_')}") + host_clickhouse_binary_path = os.path.join( + tempfile.gettempdir(), + f"{docker_image.rsplit('/',1)[-1].replace(':','_')}", + ) if host_clickhouse_odbc_bridge_binary_path is None: - host_clickhouse_odbc_bridge_binary_path = host_clickhouse_binary_path + "_odbc_bridge" + host_clickhouse_odbc_bridge_binary_path = ( + host_clickhouse_binary_path + "_odbc_bridge" + ) - with Given("I get ClickHouse server binary from docker container", description=f"{docker_image}"): + with Given( + "I get ClickHouse server binary from docker container", + description=f"{docker_image}", + ): with Shell() as bash: bash.timeout = 300 - bash(f"docker run -d --name \"{docker_container_name}\" {docker_image} | tee") - bash(f"docker cp \"{docker_container_name}:{container_clickhouse_binary_path}\" \"{host_clickhouse_binary_path}\"") - bash(f"docker cp \"{docker_container_name}:{container_clickhouse_odbc_bridge_binary_path}\" \"{host_clickhouse_odbc_bridge_binary_path}\"") - bash(f"docker stop \"{docker_container_name}\"") + bash( + f'docker run -d --name "{docker_container_name}" {docker_image} | tee' + ) + bash( + f'docker cp "{docker_container_name}:{container_clickhouse_binary_path}" "{host_clickhouse_binary_path}"' + ) + bash( + f'docker cp "{docker_container_name}:{container_clickhouse_odbc_bridge_binary_path}" "{host_clickhouse_odbc_bridge_binary_path}"' + ) + bash(f'docker stop "{docker_container_name}"') return host_clickhouse_binary_path, host_clickhouse_odbc_bridge_binary_path @property def control_shell(self, timeout=300): - """Must be called with self.lock.acquired. - """ + """Must be called with self.lock.acquired.""" if self._control_shell is not None: return self._control_shell @@ -584,8 +770,7 @@ class Cluster(object): return self._control_shell def close_control_shell(self): - """Must be called with self.lock.acquired. - """ + """Must be called with self.lock.acquired.""" if self._control_shell is None: return shell = self._control_shell @@ -593,13 +778,14 @@ class Cluster(object): shell.__exit__(None, None, None) def node_container_id(self, node, timeout=300): - """Must be called with self.lock acquired. - """ + """Must be called with self.lock acquired.""" container_id = None time_start = time.time() while True: try: - c = self.control_shell(f"{self.docker_compose} ps -q {node}", timeout=timeout) + c = self.control_shell( + f"{self.docker_compose} ps -q {node}", timeout=timeout + ) container_id = c.output.strip() if c.exitcode == 0 and len(container_id) > 1: break @@ -609,12 +795,13 @@ class Cluster(object): self.close_control_shell() timeout = timeout - (time.time() - time_start) if timeout <= 0: - raise RuntimeError(f"failed to get docker container id for the {node} service") + raise RuntimeError( + f"failed to get docker container id for the {node} service" + ) return container_id def shell(self, node, timeout=300): - """Returns unique shell terminal to be used. 
- """ + """Returns unique shell terminal to be used.""" container_id = None if node is not None: @@ -627,9 +814,15 @@ class Cluster(object): if node is None: shell = Shell() else: - shell = Shell(command=[ - "/bin/bash", "--noediting", "-c", f"docker exec -it {container_id} bash --noediting" - ], name=node) + shell = Shell( + command=[ + "/bin/bash", + "--noediting", + "-c", + f"docker exec -it {container_id} bash --noediting", + ], + name=node, + ) shell.timeout = 30 shell("echo 1") break @@ -664,9 +857,15 @@ class Cluster(object): if node is None: self._bash[id] = Shell() else: - self._bash[id] = Shell(command=[ - "/bin/bash", "--noediting", "-c", f"docker exec -it {container_id} {command}" - ], name=node).__enter__() + self._bash[id] = Shell( + command=[ + "/bin/bash", + "--noediting", + "-c", + f"docker exec -it {container_id} {command}", + ], + name=node, + ).__enter__() self._bash[id].timeout = 30 self._bash[id]("echo 1") break @@ -678,7 +877,7 @@ class Cluster(object): raise RuntimeError(f"failed to open bash to node {node}") if node is None: - for name,value in self.environ.items(): + for name, value in self.environ.items(): self._bash[id](f"export {name}={value}") self._bash[id].timeout = timeout @@ -732,7 +931,10 @@ class Cluster(object): # add message to each clickhouse-server.log if settings.debug: for node in self.nodes["clickhouse"]: - self.command(node=node, command=f"echo -e \"\n-- sending stop to: {node} --\n\" >> /var/log/clickhouse-server/clickhouse-server.log") + self.command( + node=node, + command=f'echo -e "\n-- sending stop to: {node} --\n" >> /var/log/clickhouse-server/clickhouse-server.log', + ) try: bash = self.bash(None) with self.lock: @@ -744,7 +946,12 @@ class Cluster(object): else: self._bash[id] = shell finally: - cmd = self.command(None, f"{self.docker_compose} down -v --remove-orphans --timeout 60", bash=bash, timeout=timeout) + cmd = self.command( + None, + f"{self.docker_compose} down -v --remove-orphans --timeout 60", + bash=bash, + timeout=timeout, + ) with self.lock: if self._control_shell: self._control_shell.__exit__(None, None, None) @@ -752,31 +959,37 @@ class Cluster(object): return cmd def temp_path(self): - """Return temporary folder path. - """ + """Return temporary folder path.""" p = f"{self.environ['CLICKHOUSE_TESTS_DIR']}/_temp" if not os.path.exists(p): os.mkdir(p) return p def temp_file(self, name): - """Return absolute temporary file path. 
- """ + """Return absolute temporary file path.""" return f"{os.path.join(self.temp_path(), name)}" - def up(self, timeout=30*60): + def up(self, timeout=30 * 60): if self.local: with Given("I am running in local mode"): with Then("check --clickhouse-binary-path is specified"): - assert self.clickhouse_binary_path, "when running in local mode then --clickhouse-binary-path must be specified" + assert ( + self.clickhouse_binary_path + ), "when running in local mode then --clickhouse-binary-path must be specified" with And("path should exist"): assert os.path.exists(self.clickhouse_binary_path) with And("I set all the necessary environment variables"): self.environ["COMPOSE_HTTP_TIMEOUT"] = "300" - self.environ["CLICKHOUSE_TESTS_SERVER_BIN_PATH"] = self.clickhouse_binary_path - self.environ["CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH"] = self.clickhouse_odbc_bridge_binary_path or os.path.join( - os.path.dirname(self.clickhouse_binary_path), "clickhouse-odbc-bridge") + self.environ[ + "CLICKHOUSE_TESTS_SERVER_BIN_PATH" + ] = self.clickhouse_binary_path + self.environ[ + "CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH" + ] = self.clickhouse_odbc_bridge_binary_path or os.path.join( + os.path.dirname(self.clickhouse_binary_path), + "clickhouse-odbc-bridge", + ) self.environ["CLICKHOUSE_TESTS_DIR"] = self.configs_dir with And("I list environment variables to show their values"): @@ -789,7 +1002,12 @@ class Cluster(object): for attempt in range(max_attempts): with When(f"attempt {attempt}/{max_attempts}"): with By("pulling images for all the services"): - cmd = self.command(None, f"{self.docker_compose} pull 2>&1 | tee", exitcode=None, timeout=timeout) + cmd = self.command( + None, + f"{self.docker_compose} pull 2>&1 | tee", + exitcode=None, + timeout=timeout, + ) if cmd.exitcode != 0: continue @@ -797,7 +1015,12 @@ class Cluster(object): self.command(None, f"{self.docker_compose} ps | tee") with And("executing docker-compose down just in case it is up"): - cmd = self.command(None, f"{self.docker_compose} down 2>&1 | tee", exitcode=None, timeout=timeout) + cmd = self.command( + None, + f"{self.docker_compose} down 2>&1 | tee", + exitcode=None, + timeout=timeout, + ) if cmd.exitcode != 0: continue @@ -807,20 +1030,34 @@ class Cluster(object): with And("executing docker-compose up"): for up_attempt in range(max_up_attempts): with By(f"attempt {up_attempt}/{max_up_attempts}"): - cmd = self.command(None, f"{self.docker_compose} up --renew-anon-volumes --force-recreate --timeout 300 -d 2>&1 | tee", timeout=timeout) + cmd = self.command( + None, + f"{self.docker_compose} up --renew-anon-volumes --force-recreate --timeout 300 -d 2>&1 | tee", + timeout=timeout, + ) if "is unhealthy" not in cmd.output: break with Then("check there are no unhealthy containers"): - ps_cmd = self.command(None, f"{self.docker_compose} ps | tee | grep -v \"Exit 0\"") + ps_cmd = self.command( + None, f'{self.docker_compose} ps | tee | grep -v "Exit 0"' + ) if "is unhealthy" in cmd.output or "Exit" in ps_cmd.output: self.command(None, f"{self.docker_compose} logs | tee") continue - if cmd.exitcode == 0 and "is unhealthy" not in cmd.output and "Exit" not in ps_cmd.output: + if ( + cmd.exitcode == 0 + and "is unhealthy" not in cmd.output + and "Exit" not in ps_cmd.output + ): break - if cmd.exitcode != 0 or "is unhealthy" in cmd.output or "Exit" in ps_cmd.output: + if ( + cmd.exitcode != 0 + or "is unhealthy" in cmd.output + or "Exit" in ps_cmd.output + ): fail("could not bring up docker-compose cluster") with Then("wait all nodes report 
healthy"): @@ -829,8 +1066,19 @@ class Cluster(object): if name.startswith("clickhouse"): self.node(name).start_clickhouse() - def command(self, node, command, message=None, exitcode=None, steps=True, - bash=None, no_checks=False, use_error=True, *args, **kwargs): + def command( + self, + node, + command, + message=None, + exitcode=None, + steps=True, + bash=None, + no_checks=False, + use_error=True, + *args, + **kwargs, + ): """Execute and check command. :param node: name of the service :param command: command @@ -838,7 +1086,9 @@ class Cluster(object): :param exitcode: expected exitcode, default: None :param steps: don't break command into steps, default: True """ - with By("executing command", description=command, format_description=False) if steps else NullStep(): + with By( + "executing command", description=command, format_description=False + ) if steps else NullStep(): if bash is None: bash = self.bash(node) try: @@ -851,11 +1101,17 @@ class Cluster(object): return r if exitcode is not None: - with Then(f"exitcode should be {exitcode}", format_name=False) if steps else NullStep(): + with Then( + f"exitcode should be {exitcode}", format_name=False + ) if steps else NullStep(): assert r.exitcode == exitcode, error(r.output) if message is not None: - with Then(f"output should contain message", description=message, format_description=False) if steps else NullStep(): + with Then( + f"output should contain message", + description=message, + format_description=False, + ) if steps else NullStep(): assert message in r.output, error(r.output) return r diff --git a/tests/testflows/helpers/common.py b/tests/testflows/helpers/common.py index ee56b1f1661..2ba6aef11ee 100644 --- a/tests/testflows/helpers/common.py +++ b/tests/testflows/helpers/common.py @@ -11,47 +11,68 @@ from testflows.asserts import error from testflows.core.name import basename, parentname from testflows._core.testtype import TestSubType + def check_clickhouse_version(version): - """Compare ClickHouse version. 
- """ + """Compare ClickHouse version.""" + def check(test): if getattr(test.context, "clickhouse_version", None) is None: - return False + return False clickhouse_version = pkg_version.parse(str(test.context.clickhouse_version)) if version.startswith("=="): - return clickhouse_version == pkg_version.parse(str(version.split("==",1)[-1])) + return clickhouse_version == pkg_version.parse( + str(version.split("==", 1)[-1]) + ) elif version.startswith(">="): - return clickhouse_version >= pkg_version.parse(str(version.split(">=",1)[-1])) + return clickhouse_version >= pkg_version.parse( + str(version.split(">=", 1)[-1]) + ) elif version.startswith("<="): - return clickhouse_version <= pkg_version.parse(str(version.split("<=",1)[-1])) + return clickhouse_version <= pkg_version.parse( + str(version.split("<=", 1)[-1]) + ) elif version.startswith("="): - return clickhouse_version == pkg_version.parse(str(version.split("=",1)[-1])) + return clickhouse_version == pkg_version.parse( + str(version.split("=", 1)[-1]) + ) elif version.startswith(">"): - return clickhouse_version > pkg_version.parse(str(version.split(">",1)[-1])) + return clickhouse_version > pkg_version.parse( + str(version.split(">", 1)[-1]) + ) elif version.startswith("<"): - return clickhouse_version < pkg_version.parse(str(version.split("<",1)[-1])) + return clickhouse_version < pkg_version.parse( + str(version.split("<", 1)[-1]) + ) else: return clickhouse_version == pkg_version.parse(str(version)) return check + def getuid(with_test_name=False): if not with_test_name: - return str(uuid.uuid1()).replace('-', '_') + return str(uuid.uuid1()).replace("-", "_") if current().subtype == TestSubType.Example: - testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',', '')}" + testname = ( + f"{basename(parentname(current().name)).replace(' ', '_').replace(',', '')}" + ) else: testname = f"{basename(current().name).replace(' ', '_').replace(',', '')}" - return testname + "_" + str(uuid.uuid1()).replace('-', '_') + return testname + "_" + str(uuid.uuid1()).replace("-", "_") @TestStep(Given) -def instrument_clickhouse_server_log(self, node=None, test=None, - clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log", always_dump=False): +def instrument_clickhouse_server_log( + self, + node=None, + test=None, + clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log", + always_dump=False, +): """Instrument clickhouse-server.log for the current test (default) by adding start and end messages that include test name to log of the specified node. 
If we are in the debug mode and the test @@ -67,22 +88,31 @@ def instrument_clickhouse_server_log(self, node=None, test=None, with By("getting current log size"): cmd = node.command(f"stat --format=%s {clickhouse_server_log}") - if cmd.output == f"stat: cannot stat '{clickhouse_server_log}': No such file or directory": + if ( + cmd.output + == f"stat: cannot stat '{clickhouse_server_log}': No such file or directory" + ): start_logsize = 0 else: start_logsize = cmd.output.split(" ")[0].strip() try: with And("adding test name start message to the clickhouse-server.log"): - node.command(f"echo -e \"\\n-- start: {test.name} --\\n\" >> {clickhouse_server_log}") + node.command( + f'echo -e "\\n-- start: {test.name} --\\n" >> {clickhouse_server_log}' + ) yield finally: if test.terminating is True: return - with Finally("adding test name end message to the clickhouse-server.log", flags=TE): - node.command(f"echo -e \"\\n-- end: {test.name} --\\n\" >> {clickhouse_server_log}") + with Finally( + "adding test name end message to the clickhouse-server.log", flags=TE + ): + node.command( + f'echo -e "\\n-- end: {test.name} --\\n" >> {clickhouse_server_log}' + ) with And("getting current log size at the end of the test"): cmd = node.command(f"stat --format=%s {clickhouse_server_log}") @@ -92,8 +122,10 @@ def instrument_clickhouse_server_log(self, node=None, test=None, if dump_log: with Then("dumping clickhouse-server.log for this test"): - node.command(f"tail -c +{start_logsize} {clickhouse_server_log}" - f" | head -c {int(end_logsize) - int(start_logsize)}") + node.command( + f"tail -c +{start_logsize} {clickhouse_server_log}" + f" | head -c {int(end_logsize) - int(start_logsize)}" + ) xml_with_utf8 = '\n' @@ -142,7 +174,9 @@ class KeyWithAttributes: self.attributes = dict(attributes) -def create_xml_config_content(entries, config_file, config_d_dir="/etc/clickhouse-server/config.d"): +def create_xml_config_content( + entries, config_file, config_d_dir="/etc/clickhouse-server/config.d" +): """Create XML configuration file from a dictionary. :param entries: dictionary that defines xml @@ -184,27 +218,34 @@ def create_xml_config_content(entries, config_file, config_d_dir="/etc/clickhous create_xml_tree(entries, root) xml_indent(root) content = xml_with_utf8 + str( - xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), - "utf-8") + xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8" + ) return Config(content, path, name, uid, "config.xml") -def add_invalid_config(config, message, recover_config=None, tail=30, timeout=300, restart=True, user=None): - """Check that ClickHouse errors when trying to load invalid configuration file. 
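
create_xml_config_content above renders a dictionary into an indented XML document with ElementTree and joins it with the xml_with_utf8 prefix before it is written under config.d. A loose sketch of just the dictionary-to-XML step using only the standard library; it does not reproduce create_xml_tree or xml_indent (KeyWithAttributes-style attributes are ignored), it relies on xml.etree.ElementTree.indent from Python 3.9+, and the root tag is only an example:

import xml.etree.ElementTree as xmltree

def dict_to_xml(entries, root_tag="clickhouse"):
    """Render a nested dict as an indented XML config fragment (illustrative only)."""
    root = xmltree.Element(root_tag)

    def build(parent, value):
        for key, val in value.items():
            child = xmltree.SubElement(parent, key)
            if isinstance(val, dict):
                build(child, val)
            else:
                child.text = str(val)

    build(root, entries)
    xmltree.indent(root)  # Python 3.9+
    declaration = '<?xml version="1.0" encoding="utf-8"?>\n'
    return declaration + xmltree.tostring(root, encoding="unicode")

print(dict_to_xml({"logger": {"level": "trace", "console": 1}}))
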
- """ +def add_invalid_config( + config, message, recover_config=None, tail=30, timeout=300, restart=True, user=None +): + """Check that ClickHouse errors when trying to load invalid configuration file.""" cluster = current().context.cluster node = current().context.node try: with Given("I prepare the error log by writing empty lines into it"): - node.command("echo -e \"%s\" > /var/log/clickhouse-server/clickhouse-server.err.log" % ("-\\n" * tail)) + node.command( + 'echo -e "%s" > /var/log/clickhouse-server/clickhouse-server.err.log' + % ("-\\n" * tail) + ) with When("I add the config", description=config.path): command = f"cat < {config.path}\n{config.content}\nHEREDOC" node.command(command, steps=False, exitcode=0) - with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"): + with Then( + f"{config.preprocessed_name} should be updated", + description=f"timeout {timeout}", + ): started = time.time() command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}" while time.time() - started < timeout: @@ -222,9 +263,19 @@ def add_invalid_config(config, message, recover_config=None, tail=30, timeout=30 if recover_config is None: with Finally(f"I remove {config.name}"): with By("removing invalid configuration file"): - system_config_path = os.path.join(cluster.environ["CLICKHOUSE_TESTS_DIR"], "configs", node.name, - "config.d", config.path.split("config.d/")[-1]) - cluster.command(None, f'rm -rf {system_config_path}', timeout=timeout, exitcode=0) + system_config_path = os.path.join( + cluster.environ["CLICKHOUSE_TESTS_DIR"], + "configs", + node.name, + "config.d", + config.path.split("config.d/")[-1], + ) + cluster.command( + None, + f"rm -rf {system_config_path}", + timeout=timeout, + exitcode=0, + ) if restart: with And("restarting ClickHouse"): @@ -233,9 +284,19 @@ def add_invalid_config(config, message, recover_config=None, tail=30, timeout=30 else: with Finally(f"I change {config.name}"): with By("changing invalid configuration file"): - system_config_path = os.path.join(cluster.environ["CLICKHOUSE_TESTS_DIR"], "configs", node.name, - "config.d", config.path.split("config.d/")[-1]) - cluster.command(None, f'rm -rf {system_config_path}', timeout=timeout, exitcode=0) + system_config_path = os.path.join( + cluster.environ["CLICKHOUSE_TESTS_DIR"], + "configs", + node.name, + "config.d", + config.path.split("config.d/")[-1], + ) + cluster.command( + None, + f"rm -rf {system_config_path}", + timeout=timeout, + exitcode=0, + ) command = f"cat < {system_config_path}\n{recover_config.content}\nHEREDOC" cluster.command(None, command, timeout=timeout, exitcode=0) @@ -245,7 +306,7 @@ def add_invalid_config(config, message, recover_config=None, tail=30, timeout=30 with Then("error log should contain the expected error message"): started = time.time() - command = f"tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep \"{message}\"" + command = f'tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep "{message}"' while time.time() - started < timeout: exitcode = node.command(command, steps=False).exitcode if exitcode == 0: @@ -254,8 +315,16 @@ def add_invalid_config(config, message, recover_config=None, tail=30, timeout=30 assert exitcode == 0, error() -def add_config(config, timeout=300, restart=False, modify=False, node=None, user=None, wait_healthy=True, - check_preprocessed = True): +def add_config( + config, + timeout=300, + 
restart=False, + modify=False, + node=None, + user=None, + wait_healthy=True, + check_preprocessed=True, +): """Add dynamic configuration file to ClickHouse. :param config: configuration file description @@ -268,8 +337,7 @@ def add_config(config, timeout=300, restart=False, modify=False, node=None, user cluster = current().context.cluster def check_preprocessed_config_is_updated(after_removal=False): - """Check that preprocessed config is updated. - """ + """Check that preprocessed config is updated.""" started = time.time() command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}" @@ -284,7 +352,9 @@ def add_config(config, timeout=300, restart=False, modify=False, node=None, user time.sleep(1) if settings.debug: - node.command(f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name}") + node.command( + f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name}" + ) if after_removal: assert exitcode == 1, error() @@ -292,8 +362,7 @@ def add_config(config, timeout=300, restart=False, modify=False, node=None, user assert exitcode == 0, error() def wait_for_config_to_be_loaded(user=None): - """Wait for config to be loaded. - """ + """Wait for config to be loaded.""" if restart: with When("I close terminal to the node to be restarted"): bash.close() @@ -302,8 +371,10 @@ def add_config(config, timeout=300, restart=False, modify=False, node=None, user node.stop_clickhouse(safe=False) with And("I get the current log size"): - cmd = node.cluster.command(None, - f"stat --format=%s {cluster.environ['CLICKHOUSE_TESTS_DIR']}/_instances/{node.name}/logs/clickhouse-server.log") + cmd = node.cluster.command( + None, + f"stat --format=%s {cluster.environ['CLICKHOUSE_TESTS_DIR']}/_instances/{node.name}/logs/clickhouse-server.log", + ) logsize = cmd.output.split(" ")[0].strip() with And("I start ClickHouse back up"): @@ -312,17 +383,21 @@ def add_config(config, timeout=300, restart=False, modify=False, node=None, user with Then("I tail the log file from using previous log size as the offset"): bash.prompt = bash.__class__.prompt bash.open() - bash.send(f"tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log") + bash.send( + f"tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log" + ) with Then("I wait for config reload message in the log file"): if restart: bash.expect( f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration", - timeout=timeout) + timeout=timeout, + ) else: bash.expect( f"ConfigReloader: Loaded config '/etc/clickhouse-server/{config.preprocessed_name}', performed update on configuration", - timeout=timeout) + timeout=timeout, + ) try: with Given(f"{config.name}"): @@ -332,17 +407,24 @@ def add_config(config, timeout=300, restart=False, modify=False, node=None, user with node.cluster.shell(node.name) as bash: bash.expect(bash.prompt) - bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log") + bash.send( + "tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log" + ) # make sure tail process is launched and started to follow the file bash.expect("<==") bash.expect("\n") with When("I add the config", description=config.path): - command = f"cat < {config.path}\n{config.content}\nHEREDOC" + command = ( + f"cat < {config.path}\n{config.content}\nHEREDOC" + ) node.command(command, steps=False, exitcode=0) if check_preprocessed: - with 
Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"): + with Then( + f"{config.preprocessed_name} should be updated", + description=f"timeout {timeout}", + ): check_preprocessed_config_is_updated() with And("I wait for config to be reloaded"): @@ -354,7 +436,9 @@ def add_config(config, timeout=300, restart=False, modify=False, node=None, user with Finally(f"I remove {config.name} on {node.name}"): with node.cluster.shell(node.name) as bash: bash.expect(bash.prompt) - bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log") + bash.send( + "tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log" + ) # make sure tail process is launched and started to follow the file bash.expect("<==") bash.expect("\n") @@ -362,7 +446,10 @@ def add_config(config, timeout=300, restart=False, modify=False, node=None, user with By("removing the config file", description=config.path): node.command(f"rm -rf {config.path}", exitcode=0) - with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"): + with Then( + f"{config.preprocessed_name} should be updated", + description=f"timeout {timeout}", + ): check_preprocessed_config_is_updated(after_removal=True) with And("I wait for config to be reloaded"): @@ -370,9 +457,17 @@ def add_config(config, timeout=300, restart=False, modify=False, node=None, user @TestStep(When) -def copy(self, dest_node, src_path, dest_path, bash=None, binary=False, eof="EOF", src_node=None): - """Copy file from source to destination node. - """ +def copy( + self, + dest_node, + src_path, + dest_path, + bash=None, + binary=False, + eof="EOF", + src_node=None, +): + """Copy file from source to destination node.""" if binary: raise NotImplementedError("not yet implemented; need to use base64 encoding") @@ -387,9 +482,10 @@ def copy(self, dest_node, src_path, dest_path, bash=None, binary=False, eof="EOF @TestStep(Given) -def add_user_to_group_on_node(self, node=None, group="clickhouse", username="clickhouse"): - """Add user {username} into group {group}. - """ +def add_user_to_group_on_node( + self, node=None, group="clickhouse", username="clickhouse" +): + """Add user {username} into group {group}.""" if node is None: node = self.context.node @@ -398,8 +494,7 @@ def add_user_to_group_on_node(self, node=None, group="clickhouse", username="cl @TestStep(Given) def change_user_on_node(self, node=None, username="clickhouse"): - """Change user on node. - """ + """Change user on node.""" if node is None: node = self.context.node try: @@ -411,8 +506,7 @@ def change_user_on_node(self, node=None, username="clickhouse"): @TestStep(Given) def add_user_on_node(self, node=None, groupname=None, username="clickhouse"): - """Create user on node with group specifying. 
- """ + """Create user on node with group specifying.""" if node is None: node = self.context.node try: @@ -427,8 +521,7 @@ def add_user_on_node(self, node=None, groupname=None, username="clickhouse"): @TestStep(Given) def add_group_on_node(self, node=None, groupname="clickhouse"): - """Create group on node - """ + """Create group on node""" if node is None: node = self.context.node try: diff --git a/tests/testflows/kerberos/regression.py b/tests/testflows/kerberos/regression.py index 5d557bd1e38..0206cd3bf00 100755 --- a/tests/testflows/kerberos/regression.py +++ b/tests/testflows/kerberos/regression.py @@ -10,36 +10,42 @@ from helpers.argparser import argparser from kerberos.requirements.requirements import * xfails = { - "config/principal and realm specified/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/26197")], + "config/principal and realm specified/:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/26197") + ], } @TestModule @Name("kerberos") @ArgumentParser(argparser) -@Requirements( - RQ_SRS_016_Kerberos("1.0") -) +@Requirements(RQ_SRS_016_Kerberos("1.0")) @XFails(xfails) -def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None): - """ClickHouse Kerberos authentication test regression module. - """ +def regression( + self, local, clickhouse_binary_path, clickhouse_version=None, stress=None +): + """ClickHouse Kerberos authentication test regression module.""" nodes = { "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"), - "kerberos": ("kerberos", ), + "kerberos": ("kerberos",), } if stress is not None: self.context.stress = stress self.context.clickhouse_version = clickhouse_version - with Cluster(local, clickhouse_binary_path, nodes=nodes, - docker_compose_project_dir=os.path.join(current_dir(), "kerberos_env")) as cluster: + with Cluster( + local, + clickhouse_binary_path, + nodes=nodes, + docker_compose_project_dir=os.path.join(current_dir(), "kerberos_env"), + ) as cluster: self.context.cluster = cluster Feature(run=load("kerberos.tests.generic", "generic"), flags=TE) Feature(run=load("kerberos.tests.config", "config"), flags=TE) Feature(run=load("kerberos.tests.parallel", "parallel"), flags=TE) + if main(): regression() diff --git a/tests/testflows/kerberos/requirements/requirements.py b/tests/testflows/kerberos/requirements/requirements.py index 774f533373a..07f3e1edf42 100644 --- a/tests/testflows/kerberos/requirements/requirements.py +++ b/tests/testflows/kerberos/requirements/requirements.py @@ -9,434 +9,454 @@ from testflows.core import Requirement Heading = Specification.Heading RQ_SRS_016_Kerberos = Requirement( - name='RQ.SRS-016.Kerberos', - version='1.0', + name="RQ.SRS-016.Kerberos", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support user authentication using [Kerberos] server.\n' - '\n' - ), + "[ClickHouse] SHALL support user authentication using [Kerberos] server.\n" "\n" + ), link=None, level=3, - num='4.1.1') + num="4.1.1", +) RQ_SRS_016_Kerberos_Ping = Requirement( - name='RQ.SRS-016.Kerberos.Ping', - version='1.0', + name="RQ.SRS-016.Kerberos.Ping", + version="1.0", priority=None, group=None, type=None, uid=None, - description=( - 'Docker containers SHALL be able to ping each other.\n' - '\n' - ), + description=("Docker containers SHALL be able to ping each other.\n" "\n"), link=None, level=3, - num='4.2.1') + num="4.2.1", +) RQ_SRS_016_Kerberos_Configuration_MultipleAuthMethods = Requirement( - 
name='RQ.SRS-016.Kerberos.Configuration.MultipleAuthMethods', - version='1.0', + name="RQ.SRS-016.Kerberos.Configuration.MultipleAuthMethods", + version="1.0", priority=None, group=None, type=None, uid=None, description=( "[ClickHouse] SHALL generate an exception and TERMINATE in case some user in `users.xml` has a `` section specified alongside with any other authentication method's section, e.g. `ldap`, `password`.\n" - '\n' - ), + "\n" + ), link=None, level=3, - num='4.3.1') + num="4.3.1", +) RQ_SRS_016_Kerberos_Configuration_KerberosNotEnabled = Requirement( - name='RQ.SRS-016.Kerberos.Configuration.KerberosNotEnabled', - version='1.0', + name="RQ.SRS-016.Kerberos.Configuration.KerberosNotEnabled", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL reject [Kerberos] authentication in case user is properly configured for using Kerberos, but Kerberos itself is not enabled in `config.xml`. For example:\n' - '\n' - '```xml\n' - '\n' - ' \n' - ' \n' - '\n' - '```\n' - '```xml\n' - '\n' - ' \n' - ' \n' - ' HTTP/clickhouse.example.com@EXAMPLE.COM\n' - ' \n' - '\n' - '```\n' - '```xml\n' - '\n' - ' \n' - ' \n' - ' EXAMPLE.COM\n' - ' \n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL reject [Kerberos] authentication in case user is properly configured for using Kerberos, but Kerberos itself is not enabled in `config.xml`. For example:\n" + "\n" + "```xml\n" + "\n" + " \n" + " \n" + "\n" + "```\n" + "```xml\n" + "\n" + " \n" + " \n" + " HTTP/clickhouse.example.com@EXAMPLE.COM\n" + " \n" + "\n" + "```\n" + "```xml\n" + "\n" + " \n" + " \n" + " EXAMPLE.COM\n" + " \n" + "\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.3.2') + num="4.3.2", +) RQ_SRS_016_Kerberos_Configuration_MultipleKerberosSections = Requirement( - name='RQ.SRS-016.Kerberos.Configuration.MultipleKerberosSections', - version='1.0', + name="RQ.SRS-016.Kerberos.Configuration.MultipleKerberosSections", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL disable [Kerberos] and reject [Kerberos] authentication in case multiple `kerberos` sections are present in `config.xml`.\n' - '\n' - ), + "[ClickHouse] SHALL disable [Kerberos] and reject [Kerberos] authentication in case multiple `kerberos` sections are present in `config.xml`.\n" + "\n" + ), link=None, level=3, - num='4.3.3') + num="4.3.3", +) RQ_SRS_016_Kerberos_Configuration_WrongUserRealm = Requirement( - name='RQ.SRS-016.Kerberos.Configuration.WrongUserRealm', - version='1.0', + name="RQ.SRS-016.Kerberos.Configuration.WrongUserRealm", + version="1.0", priority=None, group=None, type=None, uid=None, description=( "[ClickHouse] SHALL reject [Kerberos] authentication if user's realm specified in `users.xml` doesn't match the realm of the principal trying to authenticate.\n" - '\n' - ), + "\n" + ), link=None, level=3, - num='4.3.4') + num="4.3.4", +) RQ_SRS_016_Kerberos_Configuration_PrincipalAndRealmSpecified = Requirement( - name='RQ.SRS-016.Kerberos.Configuration.PrincipalAndRealmSpecified', - version='1.0', + name="RQ.SRS-016.Kerberos.Configuration.PrincipalAndRealmSpecified", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL generate an exception and disable [Kerberos] in case both `realm` and `principal` sections are defined in `config.xml`.\n' - '\n' - ), + "[ClickHouse] SHALL generate an exception and disable [Kerberos] in case both `realm` and `principal` sections are defined in `config.xml`.\n" + "\n" + ), 
link=None, level=3, - num='4.3.5') + num="4.3.5", +) RQ_SRS_016_Kerberos_Configuration_MultiplePrincipalSections = Requirement( - name='RQ.SRS-016.Kerberos.Configuration.MultiplePrincipalSections', - version='1.0', + name="RQ.SRS-016.Kerberos.Configuration.MultiplePrincipalSections", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL generate an exception and disable [Kerberos] in case multiple `principal` sections are specified inside `kerberos` section in `config.xml`.\n' - '\n' - ), + "[ClickHouse] SHALL generate an exception and disable [Kerberos] in case multiple `principal` sections are specified inside `kerberos` section in `config.xml`.\n" + "\n" + ), link=None, level=3, - num='4.3.6') + num="4.3.6", +) RQ_SRS_016_Kerberos_Configuration_MultipleRealmSections = Requirement( - name='RQ.SRS-016.Kerberos.Configuration.MultipleRealmSections', - version='1.0', + name="RQ.SRS-016.Kerberos.Configuration.MultipleRealmSections", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL generate an exception and disable [Kerberos] in case multiple `realm` sections are specified inside `kerberos` section in `config.xml`.\n' - '\n' - ), + "[ClickHouse] SHALL generate an exception and disable [Kerberos] in case multiple `realm` sections are specified inside `kerberos` section in `config.xml`.\n" + "\n" + ), link=None, level=3, - num='4.3.7') + num="4.3.7", +) RQ_SRS_016_Kerberos_ValidUser_XMLConfiguredUser = Requirement( - name='RQ.SRS-016.Kerberos.ValidUser.XMLConfiguredUser', - version='1.0', + name="RQ.SRS-016.Kerberos.ValidUser.XMLConfiguredUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL accept [Kerberos] authentication for a user that is configured in `users.xml` and has [Kerberos] enabled, i.e.:\n' - '\n' - '```xml\n' - '\n' - ' \n' - ' \n' - ' \n' - ' \n' - ' \n' - ' \n' - ' EXAMPLE.COM\n' - ' \n' - ' \n' - ' \n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL accept [Kerberos] authentication for a user that is configured in `users.xml` and has [Kerberos] enabled, i.e.:\n" + "\n" + "```xml\n" + "\n" + " \n" + " \n" + " \n" + " \n" + " \n" + " \n" + " EXAMPLE.COM\n" + " \n" + " \n" + " \n" + "\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.4.1') + num="4.4.1", +) RQ_SRS_016_Kerberos_ValidUser_RBACConfiguredUser = Requirement( - name='RQ.SRS-016.Kerberos.ValidUser.RBACConfiguredUser', - version='1.0', + name="RQ.SRS-016.Kerberos.ValidUser.RBACConfiguredUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL accept [Kerberos] authentication if user is configured to authenticate via [Kerberos] using SQL queries\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL accept [Kerberos] authentication if user is configured to authenticate via [Kerberos] using SQL queries\n" + "\n" + "```sql\n" "CREATE USER my_user IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'\n" - '```\n' - '\n' - 'or\n' - '\n' - '```sql\n' - 'CREATE USER my_user IDENTIFIED WITH kerberos\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "or\n" + "\n" + "```sql\n" + "CREATE USER my_user IDENTIFIED WITH kerberos\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.4.2') + num="4.4.2", +) RQ_SRS_016_Kerberos_ValidUser_KerberosNotConfigured = Requirement( - name='RQ.SRS-016.Kerberos.ValidUser.KerberosNotConfigured', - version='1.0', + name="RQ.SRS-016.Kerberos.ValidUser.KerberosNotConfigured", + version="1.0", priority=None, group=None, 
type=None, uid=None, description=( - '[ClickHouse] SHALL reject [Kerberos] authentication if username is valid but [ClickHouse] user is not configured to be authenticated using [Kerberos].\n' - '\n' - ), + "[ClickHouse] SHALL reject [Kerberos] authentication if username is valid but [ClickHouse] user is not configured to be authenticated using [Kerberos].\n" + "\n" + ), link=None, level=3, - num='4.4.3') + num="4.4.3", +) RQ_SRS_016_Kerberos_InvalidUser = Requirement( - name='RQ.SRS-016.Kerberos.InvalidUser', - version='1.0', + name="RQ.SRS-016.Kerberos.InvalidUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL reject [Kerberos] authentication if name of the principal attempting to authenticate does not translate to a valid [ClickHouse] username configured in `users.xml` or via SQL workflow.\n' - '\n' - ), + "[ClickHouse] SHALL reject [Kerberos] authentication if name of the principal attempting to authenticate does not translate to a valid [ClickHouse] username configured in `users.xml` or via SQL workflow.\n" + "\n" + ), link=None, level=3, - num='4.5.1') + num="4.5.1", +) RQ_SRS_016_Kerberos_InvalidUser_UserDeleted = Requirement( - name='RQ.SRS-016.Kerberos.InvalidUser.UserDeleted', - version='1.0', + name="RQ.SRS-016.Kerberos.InvalidUser.UserDeleted", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL reject [Kerberos] authentication if [ClickHouse] user was removed from the database using an SQL query.\n' - '\n' - ), + "[ClickHouse] SHALL reject [Kerberos] authentication if [ClickHouse] user was removed from the database using an SQL query.\n" + "\n" + ), link=None, level=3, - num='4.5.2') + num="4.5.2", +) RQ_SRS_016_Kerberos_KerberosNotAvailable_InvalidServerTicket = Requirement( - name='RQ.SRS-016.Kerberos.KerberosNotAvailable.InvalidServerTicket', - version='1.0', + name="RQ.SRS-016.Kerberos.KerberosNotAvailable.InvalidServerTicket", + version="1.0", priority=None, group=None, type=None, uid=None, description=( "[ClickHouse] SHALL reject [Kerberos] authentication if [ClickHouse] user is configured to be authenticated using [Kerberos] and [Kerberos] server is unavailable, but [ClickHouse] doesn't have a valid Kerberos ticket or the ticket is expired.\n" - '\n' - ), + "\n" + ), link=None, level=3, - num='4.6.1') + num="4.6.1", +) RQ_SRS_016_Kerberos_KerberosNotAvailable_InvalidClientTicket = Requirement( - name='RQ.SRS-016.Kerberos.KerberosNotAvailable.InvalidClientTicket', - version='1.0', + name="RQ.SRS-016.Kerberos.KerberosNotAvailable.InvalidClientTicket", + version="1.0", priority=None, group=None, type=None, uid=None, description=( "[ClickHouse] SHALL reject [Kerberos] authentication if [ClickHouse] user is configured to to be authenticated using [Kerberos] and [Kerberos] server is unavailable, but the client doesn't have a valid Kerberos ticket or the ticket is expired.\n" - '\n' - ), + "\n" + ), link=None, level=3, - num='4.6.2') + num="4.6.2", +) RQ_SRS_016_Kerberos_KerberosNotAvailable_ValidTickets = Requirement( - name='RQ.SRS-016.Kerberos.KerberosNotAvailable.ValidTickets', - version='1.0', + name="RQ.SRS-016.Kerberos.KerberosNotAvailable.ValidTickets", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL accept [Kerberos] authentication if no [Kerberos] server is reachable, but [ClickHouse] is configured to use valid credentials and [ClickHouse] has already processed some valid kerberized request (so it was granted a 
ticket), and the client has a valid ticket as well.\n' - '\n' - ), + "[ClickHouse] SHALL accept [Kerberos] authentication if no [Kerberos] server is reachable, but [ClickHouse] is configured to use valid credentials and [ClickHouse] has already processed some valid kerberized request (so it was granted a ticket), and the client has a valid ticket as well.\n" + "\n" + ), link=None, level=3, - num='4.6.3') + num="4.6.3", +) RQ_SRS_016_Kerberos_KerberosServerRestarted = Requirement( - name='RQ.SRS-016.Kerberos.KerberosServerRestarted', - version='1.0', + name="RQ.SRS-016.Kerberos.KerberosServerRestarted", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL accept [Kerberos] authentication if [Kerberos] server was restarted.\n' - '\n' - ), + "[ClickHouse] SHALL accept [Kerberos] authentication if [Kerberos] server was restarted.\n" + "\n" + ), link=None, level=3, - num='4.7.1') + num="4.7.1", +) RQ_SRS_016_Kerberos_Performance = Requirement( - name='RQ.SRS-016.Kerberos.Performance', - version='1.0', + name="RQ.SRS-016.Kerberos.Performance", + version="1.0", priority=None, group=None, type=None, uid=None, description=( "[ClickHouse]'s performance for [Kerberos] authentication SHALL be comparable to regular authentication.\n" - '\n' - ), + "\n" + ), link=None, level=3, - num='4.8.1') + num="4.8.1", +) RQ_SRS_016_Kerberos_Parallel = Requirement( - name='RQ.SRS-016.Kerberos.Parallel', - version='1.0', + name="RQ.SRS-016.Kerberos.Parallel", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication using [Kerberos].\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication using [Kerberos].\n" "\n" + ), link=None, level=3, - num='4.9.1') + num="4.9.1", +) RQ_SRS_016_Kerberos_Parallel_ValidRequests_KerberosAndNonKerberos = Requirement( - name='RQ.SRS-016.Kerberos.Parallel.ValidRequests.KerberosAndNonKerberos', - version='1.0', + name="RQ.SRS-016.Kerberos.Parallel.ValidRequests.KerberosAndNonKerberos", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support processing of simultaneous kerberized (for users configured to authenticate via [Kerberos]) and non-kerberized (for users configured to authenticate with any other means) requests.\n' - '\n' - ), + "[ClickHouse] SHALL support processing of simultaneous kerberized (for users configured to authenticate via [Kerberos]) and non-kerberized (for users configured to authenticate with any other means) requests.\n" + "\n" + ), link=None, level=3, - num='4.9.2') + num="4.9.2", +) RQ_SRS_016_Kerberos_Parallel_ValidRequests_SameCredentials = Requirement( - name='RQ.SRS-016.Kerberos.Parallel.ValidRequests.SameCredentials', - version='1.0', + name="RQ.SRS-016.Kerberos.Parallel.ValidRequests.SameCredentials", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support processing of simultaneously sent [Kerberos] requests under the same credentials.\n' - '\n' - ), + "[ClickHouse] SHALL support processing of simultaneously sent [Kerberos] requests under the same credentials.\n" + "\n" + ), link=None, level=3, - num='4.9.3') + num="4.9.3", +) RQ_SRS_016_Kerberos_Parallel_ValidRequests_DifferentCredentials = Requirement( - name='RQ.SRS-016.Kerberos.Parallel.ValidRequests.DifferentCredentials', - version='1.0', + name="RQ.SRS-016.Kerberos.Parallel.ValidRequests.DifferentCredentials", + version="1.0", priority=None, group=None, 
type=None, uid=None, description=( - '[ClickHouse] SHALL support processing of simultaneously sent [Kerberos] requests under different credentials.\n' - '\n' - ), + "[ClickHouse] SHALL support processing of simultaneously sent [Kerberos] requests under different credentials.\n" + "\n" + ), link=None, level=3, - num='4.9.4') + num="4.9.4", +) RQ_SRS_016_Kerberos_Parallel_ValidInvalid = Requirement( - name='RQ.SRS-016.Kerberos.Parallel.ValidInvalid', - version='1.0', + name="RQ.SRS-016.Kerberos.Parallel.ValidInvalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( "[ClickHouse] SHALL support parallel authentication of users using [Kerberos] server, some of which are valid and some invalid. Valid users' authentication should not be affected by invalid users' attempts.\n" - '\n' - ), + "\n" + ), link=None, level=3, - num='4.9.5') + num="4.9.5", +) RQ_SRS_016_Kerberos_Parallel_Deletion = Requirement( - name='RQ.SRS-016.Kerberos.Parallel.Deletion', - version='1.0', + name="RQ.SRS-016.Kerberos.Parallel.Deletion", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not crash when two or more [Kerberos] users are simultaneously deleting one another.\n' - '\n' - ), + "[ClickHouse] SHALL not crash when two or more [Kerberos] users are simultaneously deleting one another.\n" + "\n" + ), link=None, level=3, - num='4.9.6') + num="4.9.6", +) QA_SRS016_ClickHouse_Kerberos_Authentication = Specification( - name='QA-SRS016 ClickHouse Kerberos Authentication', + name="QA-SRS016 ClickHouse Kerberos Authentication", description=None, author=None, - date=None, - status=None, + date=None, + status=None, approved_by=None, approved_date=None, approved_version=None, @@ -448,46 +468,112 @@ QA_SRS016_ClickHouse_Kerberos_Authentication = Specification( parent=None, children=None, headings=( - Heading(name='Revision History', level=1, num='1'), - Heading(name='Introduction', level=1, num='2'), - Heading(name='Terminology', level=1, num='3'), - Heading(name='Requirements', level=1, num='4'), - Heading(name='Generic', level=2, num='4.1'), - Heading(name='RQ.SRS-016.Kerberos', level=3, num='4.1.1'), - Heading(name='Ping', level=2, num='4.2'), - Heading(name='RQ.SRS-016.Kerberos.Ping', level=3, num='4.2.1'), - Heading(name='Configuration', level=2, num='4.3'), - Heading(name='RQ.SRS-016.Kerberos.Configuration.MultipleAuthMethods', level=3, num='4.3.1'), - Heading(name='RQ.SRS-016.Kerberos.Configuration.KerberosNotEnabled', level=3, num='4.3.2'), - Heading(name='RQ.SRS-016.Kerberos.Configuration.MultipleKerberosSections', level=3, num='4.3.3'), - Heading(name='RQ.SRS-016.Kerberos.Configuration.WrongUserRealm', level=3, num='4.3.4'), - Heading(name='RQ.SRS-016.Kerberos.Configuration.PrincipalAndRealmSpecified', level=3, num='4.3.5'), - Heading(name='RQ.SRS-016.Kerberos.Configuration.MultiplePrincipalSections', level=3, num='4.3.6'), - Heading(name='RQ.SRS-016.Kerberos.Configuration.MultipleRealmSections', level=3, num='4.3.7'), - Heading(name='Valid User', level=2, num='4.4'), - Heading(name='RQ.SRS-016.Kerberos.ValidUser.XMLConfiguredUser', level=3, num='4.4.1'), - Heading(name='RQ.SRS-016.Kerberos.ValidUser.RBACConfiguredUser', level=3, num='4.4.2'), - Heading(name='RQ.SRS-016.Kerberos.ValidUser.KerberosNotConfigured', level=3, num='4.4.3'), - Heading(name='Invalid User', level=2, num='4.5'), - Heading(name='RQ.SRS-016.Kerberos.InvalidUser', level=3, num='4.5.1'), - Heading(name='RQ.SRS-016.Kerberos.InvalidUser.UserDeleted', level=3, 
num='4.5.2'), - Heading(name='Kerberos Not Available', level=2, num='4.6'), - Heading(name='RQ.SRS-016.Kerberos.KerberosNotAvailable.InvalidServerTicket', level=3, num='4.6.1'), - Heading(name='RQ.SRS-016.Kerberos.KerberosNotAvailable.InvalidClientTicket', level=3, num='4.6.2'), - Heading(name='RQ.SRS-016.Kerberos.KerberosNotAvailable.ValidTickets', level=3, num='4.6.3'), - Heading(name='Kerberos Restarted', level=2, num='4.7'), - Heading(name='RQ.SRS-016.Kerberos.KerberosServerRestarted', level=3, num='4.7.1'), - Heading(name='Performance', level=2, num='4.8'), - Heading(name='RQ.SRS-016.Kerberos.Performance', level=3, num='4.8.1'), - Heading(name='Parallel Requests processing', level=2, num='4.9'), - Heading(name='RQ.SRS-016.Kerberos.Parallel', level=3, num='4.9.1'), - Heading(name='RQ.SRS-016.Kerberos.Parallel.ValidRequests.KerberosAndNonKerberos', level=3, num='4.9.2'), - Heading(name='RQ.SRS-016.Kerberos.Parallel.ValidRequests.SameCredentials', level=3, num='4.9.3'), - Heading(name='RQ.SRS-016.Kerberos.Parallel.ValidRequests.DifferentCredentials', level=3, num='4.9.4'), - Heading(name='RQ.SRS-016.Kerberos.Parallel.ValidInvalid', level=3, num='4.9.5'), - Heading(name='RQ.SRS-016.Kerberos.Parallel.Deletion', level=3, num='4.9.6'), - Heading(name='References', level=1, num='5'), + Heading(name="Revision History", level=1, num="1"), + Heading(name="Introduction", level=1, num="2"), + Heading(name="Terminology", level=1, num="3"), + Heading(name="Requirements", level=1, num="4"), + Heading(name="Generic", level=2, num="4.1"), + Heading(name="RQ.SRS-016.Kerberos", level=3, num="4.1.1"), + Heading(name="Ping", level=2, num="4.2"), + Heading(name="RQ.SRS-016.Kerberos.Ping", level=3, num="4.2.1"), + Heading(name="Configuration", level=2, num="4.3"), + Heading( + name="RQ.SRS-016.Kerberos.Configuration.MultipleAuthMethods", + level=3, + num="4.3.1", ), + Heading( + name="RQ.SRS-016.Kerberos.Configuration.KerberosNotEnabled", + level=3, + num="4.3.2", + ), + Heading( + name="RQ.SRS-016.Kerberos.Configuration.MultipleKerberosSections", + level=3, + num="4.3.3", + ), + Heading( + name="RQ.SRS-016.Kerberos.Configuration.WrongUserRealm", + level=3, + num="4.3.4", + ), + Heading( + name="RQ.SRS-016.Kerberos.Configuration.PrincipalAndRealmSpecified", + level=3, + num="4.3.5", + ), + Heading( + name="RQ.SRS-016.Kerberos.Configuration.MultiplePrincipalSections", + level=3, + num="4.3.6", + ), + Heading( + name="RQ.SRS-016.Kerberos.Configuration.MultipleRealmSections", + level=3, + num="4.3.7", + ), + Heading(name="Valid User", level=2, num="4.4"), + Heading( + name="RQ.SRS-016.Kerberos.ValidUser.XMLConfiguredUser", level=3, num="4.4.1" + ), + Heading( + name="RQ.SRS-016.Kerberos.ValidUser.RBACConfiguredUser", + level=3, + num="4.4.2", + ), + Heading( + name="RQ.SRS-016.Kerberos.ValidUser.KerberosNotConfigured", + level=3, + num="4.4.3", + ), + Heading(name="Invalid User", level=2, num="4.5"), + Heading(name="RQ.SRS-016.Kerberos.InvalidUser", level=3, num="4.5.1"), + Heading( + name="RQ.SRS-016.Kerberos.InvalidUser.UserDeleted", level=3, num="4.5.2" + ), + Heading(name="Kerberos Not Available", level=2, num="4.6"), + Heading( + name="RQ.SRS-016.Kerberos.KerberosNotAvailable.InvalidServerTicket", + level=3, + num="4.6.1", + ), + Heading( + name="RQ.SRS-016.Kerberos.KerberosNotAvailable.InvalidClientTicket", + level=3, + num="4.6.2", + ), + Heading( + name="RQ.SRS-016.Kerberos.KerberosNotAvailable.ValidTickets", + level=3, + num="4.6.3", + ), + Heading(name="Kerberos Restarted", level=2, num="4.7"), + 
Heading( + name="RQ.SRS-016.Kerberos.KerberosServerRestarted", level=3, num="4.7.1" + ), + Heading(name="Performance", level=2, num="4.8"), + Heading(name="RQ.SRS-016.Kerberos.Performance", level=3, num="4.8.1"), + Heading(name="Parallel Requests processing", level=2, num="4.9"), + Heading(name="RQ.SRS-016.Kerberos.Parallel", level=3, num="4.9.1"), + Heading( + name="RQ.SRS-016.Kerberos.Parallel.ValidRequests.KerberosAndNonKerberos", + level=3, + num="4.9.2", + ), + Heading( + name="RQ.SRS-016.Kerberos.Parallel.ValidRequests.SameCredentials", + level=3, + num="4.9.3", + ), + Heading( + name="RQ.SRS-016.Kerberos.Parallel.ValidRequests.DifferentCredentials", + level=3, + num="4.9.4", + ), + Heading(name="RQ.SRS-016.Kerberos.Parallel.ValidInvalid", level=3, num="4.9.5"), + Heading(name="RQ.SRS-016.Kerberos.Parallel.Deletion", level=3, num="4.9.6"), + Heading(name="References", level=1, num="5"), + ), requirements=( RQ_SRS_016_Kerberos, RQ_SRS_016_Kerberos_Ping, @@ -514,8 +600,8 @@ QA_SRS016_ClickHouse_Kerberos_Authentication = Specification( RQ_SRS_016_Kerberos_Parallel_ValidRequests_DifferentCredentials, RQ_SRS_016_Kerberos_Parallel_ValidInvalid, RQ_SRS_016_Kerberos_Parallel_Deletion, - ), - content=''' + ), + content=""" # QA-SRS016 ClickHouse Kerberos Authentication # Software Requirements Specification @@ -806,4 +892,5 @@ version: 1.0 [Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/kerberos/requirements/requirements.md [Git]: https://git-scm.com/ [Kerberos terminology]: https://web.mit.edu/kerberos/kfw-4.1/kfw-4.1/kfw-4.1-help/html/kerberos_terminology.htm -''') +""", +) diff --git a/tests/testflows/kerberos/tests/common.py b/tests/testflows/kerberos/tests/common.py index 5dd0f734d8f..0e0f7f2ebc2 100644 --- a/tests/testflows/kerberos/tests/common.py +++ b/tests/testflows/kerberos/tests/common.py @@ -8,7 +8,7 @@ import uuid def getuid(): - return str(uuid.uuid1()).replace('-', '_') + return str(uuid.uuid1()).replace("-", "_") def xml_append(root, tag, text=Null): @@ -31,18 +31,21 @@ def xml_parse_file(filename): def create_default_config(filename): contents = "" if "kerberos_users.xml" in filename: - contents = "EXAMPLE.COM" \ - "" + contents = ( + "EXAMPLE.COM" + "" + ) elif "kerberos.xml" in filename: - contents = "EXAMPLE.COM" + contents = ( + "EXAMPLE.COM" + ) with open(filename, "w") as f: f.write(contents) def test_select_query(node, krb_auth=True, req="SELECT currentUser()"): - """ Helper forming a HTTP query to ClickHouse server - """ + """Helper forming a HTTP query to ClickHouse server""" if krb_auth: return f"echo '{req}' | curl --negotiate -u : 'http://{node.name}:8123/' --data-binary @-" else: @@ -51,11 +54,10 @@ def test_select_query(node, krb_auth=True, req="SELECT currentUser()"): @TestStep(Given) def kinit_no_keytab(self, node, principal="kerberos_user", lifetime_option="-l 10:00"): - """ Helper for obtaining Kerberos ticket for client - """ + """Helper for obtaining Kerberos ticket for client""" try: node.cmd("echo pwd | kinit admin/admin") - node.cmd(f"kadmin -w pwd -q \"add_principal -pw pwd {principal}\"") + node.cmd(f'kadmin -w pwd -q "add_principal -pw pwd {principal}"') node.cmd(f"echo pwd | kinit {lifetime_option} {principal}") yield finally: @@ -64,12 +66,15 @@ def kinit_no_keytab(self, node, principal="kerberos_user", lifetime_option="-l 1 @TestStep(Given) def create_server_principal(self, node): - """ Helper for obtaining Kerberos ticket for server - """ + """Helper for obtaining Kerberos ticket for server""" try: 
node.cmd("echo pwd | kinit admin/admin") - node.cmd(f"kadmin -w pwd -q \"add_principal -randkey HTTP/kerberos_env_{node.name}_1.krbnet\"") - node.cmd(f"kadmin -w pwd -q \"ktadd -k /etc/krb5.keytab HTTP/kerberos_env_{node.name}_1.krbnet\"") + node.cmd( + f'kadmin -w pwd -q "add_principal -randkey HTTP/kerberos_env_{node.name}_1.krbnet"' + ) + node.cmd( + f'kadmin -w pwd -q "ktadd -k /etc/krb5.keytab HTTP/kerberos_env_{node.name}_1.krbnet"' + ) yield finally: node.cmd("kdestroy") @@ -78,47 +83,48 @@ def create_server_principal(self, node): @TestStep(Given) def save_file_state(self, node, filename): - """ Save current file and then restore it, restarting the node - """ + """Save current file and then restore it, restarting the node""" try: with When("I save file state"): - with open(filename, 'r') as f: + with open(filename, "r") as f: a = f.read() yield finally: with Finally("I restore initial state"): - with open(filename, 'w') as f: + with open(filename, "w") as f: f.write(a) node.restart() @TestStep(Given) def temp_erase(self, node, filename=None): - """ Temporary erasing config file and restarting the node - """ + """Temporary erasing config file and restarting the node""" if filename is None: filename = f"kerberos/configs/{node.name}/config.d/kerberos.xml" with When("I save file state"): - with open(filename, 'r') as f: + with open(filename, "r") as f: a = f.read() try: with Then("I overwrite file to be dummy"): - with open(filename, 'w') as f: + with open(filename, "w") as f: f.write("\n") node.restart() yield finally: with Finally("I restore initial file state"): - with open(filename, 'w') as f: + with open(filename, "w") as f: f.write(a) node.restart() def restart(node, config_path, safe=False, timeout=60): - """Restart ClickHouse server and wait for config to be reloaded. - """ + """Restart ClickHouse server and wait for config to be reloaded.""" - filename = '/etc/clickhouse-server/config.xml' if 'config.d' in config_path else '/etc/clickhouse-server/users.xml' + filename = ( + "/etc/clickhouse-server/config.xml" + if "config.d" in config_path + else "/etc/clickhouse-server/users.xml" + ) with When("I restart ClickHouse server node"): with node.cluster.shell(node.name) as bash: bash.expect(bash.prompt) @@ -127,52 +133,82 @@ def restart(node, config_path, safe=False, timeout=60): bash.close() with And("getting current log size"): - logsize = \ - node.command("stat --format=%s /var/log/clickhouse-server/clickhouse-server.log").output.split(" ")[0].strip() + logsize = ( + node.command( + "stat --format=%s /var/log/clickhouse-server/clickhouse-server.log" + ) + .output.split(" ")[0] + .strip() + ) with And("restarting ClickHouse server"): node.restart(safe=safe) - with Then("tailing the log file from using previous log size as the offset"): + with Then( + "tailing the log file from using previous log size as the offset" + ): bash.prompt = bash.__class__.prompt bash.open() - bash.send(f"tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log") + bash.send( + f"tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log" + ) with And("waiting for config reload message in the log file"): bash.expect( f"ConfigReloader: Loaded config '{filename}', performed update on configuration", - timeout=timeout) + timeout=timeout, + ) @TestStep -def check_wrong_config(self, node, client, config_path, modify_file, log_error="", output="", - tail=120, timeout=60, healthy_on_restart=True): - """Check that ClickHouse errors when trying to load invalid configuration file. 
- """ +def check_wrong_config( + self, + node, + client, + config_path, + modify_file, + log_error="", + output="", + tail=120, + timeout=60, + healthy_on_restart=True, +): + """Check that ClickHouse errors when trying to load invalid configuration file.""" preprocessed_name = "config.xml" if "config.d" in config_path else "users.xml" - full_config_path = "/etc/clickhouse-server/config.d/kerberos.xml" if "config.d" in config_path else "/etc/clickhouse-server/users.d/kerberos-users.xml" + full_config_path = ( + "/etc/clickhouse-server/config.d/kerberos.xml" + if "config.d" in config_path + else "/etc/clickhouse-server/users.d/kerberos-users.xml" + ) uid = getuid() try: with Given("I save config file to restore it later"): - with open(config_path, 'r') as f: + with open(config_path, "r") as f: initial_contents = f.read() with And("I prepare the error log by writing empty lines into it"): - node.command("echo -e \"%s\" > /var/log/clickhouse-server/clickhouse-server.err.log" % ("-\\n" * tail)) + node.command( + 'echo -e "%s" > /var/log/clickhouse-server/clickhouse-server.err.log' + % ("-\\n" * tail) + ) with When("I modify xml file"): root = xml_parse_file(config_path) root = modify_file(root) root.append(xmltree.fromstring(f"{uid}")) - config_contents = xmltree.tostring(root, encoding='utf8', method='xml').decode('utf-8') + config_contents = xmltree.tostring( + root, encoding="utf8", method="xml" + ).decode("utf-8") command = f"cat < {full_config_path}\n{config_contents}\nHEREDOC" node.command(command, steps=False, exitcode=0) time.sleep(1) - with Then(f"{preprocessed_name} should be updated", description=f"timeout {timeout}"): + with Then( + f"{preprocessed_name} should be updated", description=f"timeout {timeout}" + ): started = time.time() command = f"cat /var/lib/clickhouse/preprocessed_configs/{preprocessed_name} | grep {uid} > /dev/null" while time.time() - started < timeout: @@ -190,7 +226,6 @@ def check_wrong_config(self, node, client, config_path, modify_file, log_error=" else: node.restart(safe=False, wait_healthy=False) - if output != "": with Then(f"check {output} is in output"): time.sleep(5) @@ -209,7 +244,7 @@ def check_wrong_config(self, node, client, config_path, modify_file, log_error=" finally: with Finally("I restore original config"): with By("restoring the (correct) config file"): - with open(config_path, 'w') as f: + with open(config_path, "w") as f: f.write(initial_contents) with And("restarting the node"): node.restart(safe=False) @@ -217,7 +252,7 @@ def check_wrong_config(self, node, client, config_path, modify_file, log_error=" if log_error != "": with Then("error log should contain the expected error message"): started = time.time() - command = f"tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep \"{log_error}\"" + command = f'tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep "{log_error}"' while time.time() - started < timeout: exitcode = node.command(command, steps=False).exitcode if exitcode == 0: @@ -227,7 +262,9 @@ def check_wrong_config(self, node, client, config_path, modify_file, log_error=" @TestStep(Given) -def instrument_clickhouse_server_log(self, clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log"): +def instrument_clickhouse_server_log( + self, clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log" +): """Instrument clickhouse-server.log for the current test by adding start and end messages that include current test name to the clickhouse-server.log of the 
specified node and @@ -239,6 +276,10 @@ def instrument_clickhouse_server_log(self, clickhouse_server_log="/var/log/click for node in all_nodes: if node.name != "kerberos": with When(f"output stats for {node.repr()}"): - node.command(f"echo -e \"\\n-- {current().name} -- top --\\n\" && top -bn1") - node.command(f"echo -e \"\\n-- {current().name} -- df --\\n\" && df -h") - node.command(f"echo -e \"\\n-- {current().name} -- free --\\n\" && free -mh") + node.command( + f'echo -e "\\n-- {current().name} -- top --\\n" && top -bn1' + ) + node.command(f'echo -e "\\n-- {current().name} -- df --\\n" && df -h') + node.command( + f'echo -e "\\n-- {current().name} -- free --\\n" && free -mh' + ) diff --git a/tests/testflows/kerberos/tests/config.py b/tests/testflows/kerberos/tests/config.py index 35cec9527d8..e682858d557 100644 --- a/tests/testflows/kerberos/tests/config.py +++ b/tests/testflows/kerberos/tests/config.py @@ -8,9 +8,7 @@ import itertools @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Configuration_KerberosNotEnabled("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Configuration_KerberosNotEnabled("1.0")) def kerberos_not_enabled(self): """ClickHouse SHALL reject Kerberos authentication if user is properly configured for Kerberos, but Kerberos itself is not enabled in config.xml. @@ -21,17 +19,19 @@ def kerberos_not_enabled(self): def modify_file(root): return xmltree.fromstring("") - check_wrong_config(node=ch_nodes[0], client=ch_nodes[2], config_path=config_path, modify_file=modify_file, - output="Kerberos is not enabled") + check_wrong_config( + node=ch_nodes[0], + client=ch_nodes[2], + config_path=config_path, + modify_file=modify_file, + output="Kerberos is not enabled", + ) @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Configuration_MultipleKerberosSections("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Configuration_MultipleKerberosSections("1.0")) def multiple_kerberos(self): - """ClickHouse SHALL disable Kerberos authentication if more than one kerberos sections specified in config.xml. - """ + """ClickHouse SHALL disable Kerberos authentication if more than one kerberos sections specified in config.xml.""" ch_nodes = self.context.ch_nodes config_path = f"kerberos/configs/{ch_nodes[0].name}/config.d/kerberos.xml" @@ -40,14 +40,18 @@ def multiple_kerberos(self): root.append(xmltree.fromstring(second_section)) return root - check_wrong_config(node=ch_nodes[0], client=ch_nodes[2], config_path=config_path, modify_file=modify_file, - log_error="Multiple kerberos sections are not allowed", healthy_on_restart=False) + check_wrong_config( + node=ch_nodes[0], + client=ch_nodes[2], + config_path=config_path, + modify_file=modify_file, + log_error="Multiple kerberos sections are not allowed", + healthy_on_restart=False, + ) @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Configuration_WrongUserRealm("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Configuration_WrongUserRealm("1.0")) def wrong_user_realm(self): """ClickHouse SHALL reject Kerberos authentication if user's realm specified in users.xml doesn't match the realm of the principal trying to authenticate. 
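The config scenarios in this part of the patch differ only in the `modify_file` callback they hand to `check_wrong_config`: the helper parses the XML config, passes the root element to the callback, writes the mutated tree back, and then watches the server output or error log for the expected message. Below is a minimal, framework-free sketch of that callback contract, not part of the patch itself; the `<clickhouse>` root element and the in-memory fragment are illustrative assumptions, since the hunks above do not reproduce the original kerberos.xml contents.

```python
import xml.etree.ElementTree as xmltree


def modify_file(root):
    # Inject a second realm element, which the server is expected to reject
    # ("Multiple realm sections are not allowed" in the multiple_realm scenario).
    second = xmltree.SubElement(root.find("kerberos"), "realm")
    second.text = "EXAM.COM"
    return root


# Standalone demonstration on an in-memory fragment; inside the suite,
# check_wrong_config performs the parse/serialize steps around the callback.
root = xmltree.fromstring(
    "<clickhouse><kerberos><realm>EXAMPLE.COM</realm></kerberos></clickhouse>"
)
print(xmltree.tostring(modify_file(root), encoding="unicode"))
```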
@@ -57,18 +61,21 @@ def wrong_user_realm(self): config_path = f"kerberos/configs/{ch_nodes[0].name}/users.d/kerberos-users.xml" def modify_file(root): - krb = root.find('users').find('kerberos_user') - krb.find('kerberos').find('realm').text = "OTHER.COM" + krb = root.find("users").find("kerberos_user") + krb.find("kerberos").find("realm").text = "OTHER.COM" return root - check_wrong_config(node=ch_nodes[0], client=ch_nodes[2], config_path=config_path, modify_file=modify_file, - output="Authentication failed") + check_wrong_config( + node=ch_nodes[0], + client=ch_nodes[2], + config_path=config_path, + modify_file=modify_file, + output="Authentication failed", + ) @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Configuration_MultipleAuthMethods("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Configuration_MultipleAuthMethods("1.0")) def multiple_auth_methods(self): """ClickHouse SHALL reject Kerberos authentication if other auth method is specified for user alongside with Kerberos. @@ -77,83 +84,98 @@ def multiple_auth_methods(self): config_path = f"kerberos/configs/{ch_nodes[0].name}/users.d/kerberos-users.xml" def modify_file(root): - krb = root.find('users').find('kerberos_user') - xml_append(krb, 'password', 'qwerty') + krb = root.find("users").find("kerberos_user") + xml_append(krb, "password", "qwerty") return root - check_wrong_config(node=ch_nodes[0], client=ch_nodes[2], config_path=config_path, modify_file=modify_file, - log_error="More than one field of", healthy_on_restart=False) + check_wrong_config( + node=ch_nodes[0], + client=ch_nodes[2], + config_path=config_path, + modify_file=modify_file, + log_error="More than one field of", + healthy_on_restart=False, + ) @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Configuration_PrincipalAndRealmSpecified("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Configuration_PrincipalAndRealmSpecified("1.0")) def principal_and_realm_specified(self): - """ClickHouse SHALL drop an exception if both realm and principal fields are specified in config.xml. - """ + """ClickHouse SHALL drop an exception if both realm and principal fields are specified in config.xml.""" ch_nodes = self.context.ch_nodes config_path = f"kerberos/configs/{ch_nodes[0].name}/config.d/kerberos.xml" def modify_file(root): - krb = root.find('kerberos') - xml_append(krb, 'principal', 'HTTP/srv1@EXAMPLE.COM') + krb = root.find("kerberos") + xml_append(krb, "principal", "HTTP/srv1@EXAMPLE.COM") return root - check_wrong_config(node=ch_nodes[0], client=ch_nodes[2], config_path=config_path, modify_file=modify_file, - log_error="Realm and principal name cannot be specified simultaneously", - output="Kerberos is not enabled") + check_wrong_config( + node=ch_nodes[0], + client=ch_nodes[2], + config_path=config_path, + modify_file=modify_file, + log_error="Realm and principal name cannot be specified simultaneously", + output="Kerberos is not enabled", + ) @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Configuration_MultipleRealmSections("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Configuration_MultipleRealmSections("1.0")) def multiple_realm(self): - """ClickHouse SHALL throw an exception and disable Kerberos if more than one realm is specified in config.xml. 
- """ + """ClickHouse SHALL throw an exception and disable Kerberos if more than one realm is specified in config.xml.""" ch_nodes = self.context.ch_nodes config_path = f"kerberos/configs/{ch_nodes[0].name}/config.d/kerberos.xml" def modify_file(root): - krb = root.find('kerberos') - xml_append(krb, 'realm', 'EXAM.COM') + krb = root.find("kerberos") + xml_append(krb, "realm", "EXAM.COM") return root - check_wrong_config(node=ch_nodes[0], client=ch_nodes[2], config_path=config_path, modify_file=modify_file, - log_error="Multiple realm sections are not allowed") + check_wrong_config( + node=ch_nodes[0], + client=ch_nodes[2], + config_path=config_path, + modify_file=modify_file, + log_error="Multiple realm sections are not allowed", + ) @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Configuration_MultiplePrincipalSections("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Configuration_MultiplePrincipalSections("1.0")) def multiple_principal(self): - """ClickHouse SHALL throw an exception and disable Kerberos if more than one principal is specified in config.xml. - """ + """ClickHouse SHALL throw an exception and disable Kerberos if more than one principal is specified in config.xml.""" ch_nodes = self.context.ch_nodes config_path = f"kerberos/configs/{ch_nodes[0].name}/config.d/kerberos.xml" def modify_file(root): - krb = root.find('kerberos') - krb.remove(krb.find('realm')) - xml_append(krb, 'principal', 'HTTP/s1@EXAMPLE.COM') - xml_append(krb, 'principal', 'HTTP/s2@EXAMPLE.COM') + krb = root.find("kerberos") + krb.remove(krb.find("realm")) + xml_append(krb, "principal", "HTTP/s1@EXAMPLE.COM") + xml_append(krb, "principal", "HTTP/s2@EXAMPLE.COM") return root - check_wrong_config(node=ch_nodes[0], client=ch_nodes[2], config_path=config_path, modify_file=modify_file, - log_error="Multiple principal sections are not allowed") + check_wrong_config( + node=ch_nodes[0], + client=ch_nodes[2], + config_path=config_path, + modify_file=modify_file, + log_error="Multiple principal sections are not allowed", + ) @TestFeature @Name("config") def config(self): - """Perform ClickHouse Kerberos authentication testing for incorrect configuration files - """ + """Perform ClickHouse Kerberos authentication testing for incorrect configuration files""" - self.context.ch_nodes = [self.context.cluster.node(f"clickhouse{i}") for i in range(1, 4)] + self.context.ch_nodes = [ + self.context.cluster.node(f"clickhouse{i}") for i in range(1, 4) + ] self.context.krb_server = self.context.cluster.node("kerberos") - self.context.clients = [self.context.cluster.node(f"krb-client{i}") for i in range(1, 6)] + self.context.clients = [ + self.context.cluster.node(f"krb-client{i}") for i in range(1, 6) + ] for scenario in loads(current_module(), Scenario, Suite): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/kerberos/tests/generic.py b/tests/testflows/kerberos/tests/generic.py index 642b99b4fc3..03629a7bdd7 100644 --- a/tests/testflows/kerberos/tests/generic.py +++ b/tests/testflows/kerberos/tests/generic.py @@ -6,12 +6,9 @@ import time @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Ping("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Ping("1.0")) def ping(self): - """Containers should be reachable - """ + """Containers should be reachable""" ch_nodes = self.context.ch_nodes for i in range(3): @@ -22,12 +19,9 @@ def ping(self): @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_ValidUser_XMLConfiguredUser("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_ValidUser_XMLConfiguredUser("1.0")) def 
xml_configured_user(self): - """ClickHouse SHALL accept Kerberos authentication for valid XML-configured user - """ + """ClickHouse SHALL accept Kerberos authentication for valid XML-configured user""" ch_nodes = self.context.ch_nodes with Given("kinit for client"): @@ -44,12 +38,9 @@ def xml_configured_user(self): @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_ValidUser_RBACConfiguredUser("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_ValidUser_RBACConfiguredUser("1.0")) def rbac_configured_user(self): - """ClickHouse SHALL accept Kerberos authentication for valid RBAC-configured user - """ + """ClickHouse SHALL accept Kerberos authentication for valid RBAC-configured user""" ch_nodes = self.context.ch_nodes with Given("kinit for client"): @@ -59,7 +50,9 @@ def rbac_configured_user(self): create_server_principal(node=ch_nodes[0]) with When("I create a RBAC user"): - ch_nodes[0].query("CREATE USER krb_rbac IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'") + ch_nodes[0].query( + "CREATE USER krb_rbac IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'" + ) with When("I attempt to authenticate"): r = ch_nodes[2].cmd(test_select_query(node=ch_nodes[0])) @@ -72,9 +65,7 @@ def rbac_configured_user(self): @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_KerberosNotAvailable_InvalidServerTicket("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_KerberosNotAvailable_InvalidServerTicket("1.0")) def invalid_server_ticket(self): """ClickHouse SHALL reject Kerberos authentication no Kerberos server is reachable and CH-server has no valid ticket (or the existing ticket is outdated). @@ -99,7 +90,10 @@ def invalid_server_ticket(self): while True: kinit_no_keytab(node=ch_nodes[2]) create_server_principal(node=ch_nodes[0]) - if ch_nodes[2].cmd(test_select_query(node=ch_nodes[0])).output == "kerberos_user": + if ( + ch_nodes[2].cmd(test_select_query(node=ch_nodes[0])).output + == "kerberos_user" + ): break debug(test_select_query(node=ch_nodes[0])) ch_nodes[2].cmd("kdestroy") @@ -109,12 +103,10 @@ def invalid_server_ticket(self): @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_KerberosNotAvailable_InvalidClientTicket("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_KerberosNotAvailable_InvalidClientTicket("1.0")) def invalid_client_ticket(self): """ClickHouse SHALL reject Kerberos authentication in case client has - no valid ticket (or the existing ticket is outdated). + no valid ticket (or the existing ticket is outdated). """ ch_nodes = self.context.ch_nodes @@ -142,15 +134,16 @@ def invalid_client_ticket(self): ch_nodes[2].cmd(f"echo pwd | kinit -l 10:00 kerberos_user") while True: time.sleep(1) - if ch_nodes[2].cmd(test_select_query(node=ch_nodes[0])).output == "kerberos_user": + if ( + ch_nodes[2].cmd(test_select_query(node=ch_nodes[0])).output + == "kerberos_user" + ): break ch_nodes[2].cmd("kdestroy") @TestCase -@Requirements( - RQ_SRS_016_Kerberos_KerberosNotAvailable_ValidTickets("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_KerberosNotAvailable_ValidTickets("1.0")) def kerberos_unreachable_valid_tickets(self): """ClickHouse SHALL accept Kerberos authentication if no Kerberos server is reachable but both CH-server and client have valid tickets. 
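The generic scenarios around this point all follow the same shape: obtain a client ticket with `kinit_no_keytab`, create the server principal, issue the query built by `test_select_query` (a `curl --negotiate` HTTP request against port 8123), and compare the response with the expected user name. The following standalone sketch of that request path is an illustration only, not part of the patch; the use of `subprocess` and the `clickhouse1` host name are assumptions for the example, and a valid client ticket is presumed to exist already.

```python
import subprocess


def kerberized_select(host, query="SELECT currentUser()"):
    # Assumes a valid client Kerberos ticket is already present
    # (the suite obtains one via kinit inside kinit_no_keytab).
    cmd = (
        f"echo '{query}' | curl --negotiate -u : "
        f"'http://{host}:8123/' --data-binary @-"
    )
    return subprocess.run(
        cmd, shell=True, capture_output=True, text=True
    ).stdout.strip()


if __name__ == "__main__":
    # In the regression environment, a request authenticated as kerberos_user
    # is expected to return that user name.
    print(kerberized_select("clickhouse1"))
```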
@@ -180,28 +173,30 @@ def kerberos_unreachable_valid_tickets(self): ch_nodes[2].cmd("kdestroy") while True: kinit_no_keytab(node=ch_nodes[2]) - if ch_nodes[2].cmd(test_select_query(node=ch_nodes[0])).output == "kerberos_user": + if ( + ch_nodes[2].cmd(test_select_query(node=ch_nodes[0])).output + == "kerberos_user" + ): break ch_nodes[2].cmd("kdestroy") @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_ValidUser_KerberosNotConfigured("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_ValidUser_KerberosNotConfigured("1.0")) def kerberos_not_configured(self): - """ClickHouse SHALL reject Kerberos authentication if user is not a kerberos-auth user. - """ + """ClickHouse SHALL reject Kerberos authentication if user is not a kerberos-auth user.""" ch_nodes = self.context.ch_nodes with Given("kinit for client"): kinit_no_keytab(node=ch_nodes[2], principal="unkerberized") - with And('Kinit for server'): + with And("Kinit for server"): create_server_principal(node=ch_nodes[0]) with By("I add non-Kerberos user to ClickHouse"): - ch_nodes[0].query("CREATE USER unkerberized IDENTIFIED WITH plaintext_password BY 'qwerty'") + ch_nodes[0].query( + "CREATE USER unkerberized IDENTIFIED WITH plaintext_password BY 'qwerty'" + ) with When("I attempt to authenticate"): r = ch_nodes[2].cmd(test_select_query(node=ch_nodes[0]), no_checks=True) @@ -214,12 +209,9 @@ def kerberos_not_configured(self): @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_KerberosServerRestarted("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_KerberosServerRestarted("1.0")) def kerberos_server_restarted(self): - """ClickHouse SHALL accept Kerberos authentication if Kerberos server was restarted. - """ + """ClickHouse SHALL accept Kerberos authentication if Kerberos server was restarted.""" ch_nodes = self.context.ch_nodes krb_server = self.context.krb_server @@ -241,7 +233,10 @@ def kerberos_server_restarted(self): ch_nodes[2].cmd("kdestroy") while True: kinit_no_keytab(node=ch_nodes[2]) - if ch_nodes[2].cmd(test_select_query(node=ch_nodes[0])).output == "kerberos_user": + if ( + ch_nodes[2].cmd(test_select_query(node=ch_nodes[0])).output + == "kerberos_user" + ): break with Then(f"I expect kerberos_user"): @@ -249,12 +244,9 @@ def kerberos_server_restarted(self): @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_InvalidUser("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_InvalidUser("1.0")) def invalid_user(self): - """ClickHouse SHALL reject Kerberos authentication for invalid principal - """ + """ClickHouse SHALL reject Kerberos authentication for invalid principal""" ch_nodes = self.context.ch_nodes with Given("I obtain keytab for invalid user"): @@ -267,16 +259,16 @@ def invalid_user(self): r = ch_nodes[2].cmd(test_select_query(node=ch_nodes[0]), no_checks=True) with Then(f"I expect default"): - assert "Authentication failed: password is incorrect or there is no user with such name" in r.output, error() + assert ( + "Authentication failed: password is incorrect or there is no user with such name" + in r.output + ), error() @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_InvalidUser_UserDeleted("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_InvalidUser_UserDeleted("1.0")) def user_deleted(self): - """ClickHouse SHALL reject Kerberos authentication if Kerberos user was deleted prior to query. 
- """ + """ClickHouse SHALL reject Kerberos authentication if Kerberos user was deleted prior to query.""" ch_nodes = self.context.ch_nodes with Given("I obtain keytab for a user"): @@ -286,23 +278,25 @@ def user_deleted(self): create_server_principal(node=ch_nodes[0]) with And("I create and then delete kerberized user"): - ch_nodes[0].query("CREATE USER krb_rbac IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'") + ch_nodes[0].query( + "CREATE USER krb_rbac IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'" + ) ch_nodes[0].query("DROP USER krb_rbac") with When("I attempt to authenticate"): r = ch_nodes[2].cmd(test_select_query(node=ch_nodes[0]), no_checks=True) with Then(f"I expect error"): - assert "Authentication failed: password is incorrect or there is no user with such name" in r.output, error() + assert ( + "Authentication failed: password is incorrect or there is no user with such name" + in r.output + ), error() @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Performance("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Performance("1.0")) def authentication_performance(self): - """ClickHouse's performance for Kerberos authentication SHALL shall be comparable to regular authentication. - """ + """ClickHouse's performance for Kerberos authentication SHALL shall be comparable to regular authentication.""" ch_nodes = self.context.ch_nodes with Given("I obtain keytab for a user"): @@ -312,7 +306,9 @@ def authentication_performance(self): create_server_principal(node=ch_nodes[0]) with And("I create a password-identified user"): - ch_nodes[0].query("CREATE USER pwd_user IDENTIFIED WITH plaintext_password BY 'pwd'") + ch_nodes[0].query( + "CREATE USER pwd_user IDENTIFIED WITH plaintext_password BY 'pwd'" + ) with When("I measure kerberos auth time"): start_time_krb = time.time() @@ -323,11 +319,17 @@ def authentication_performance(self): with And("I measure password auth time"): start_time_usual = time.time() for i in range(100): - ch_nodes[2].cmd(f"echo 'SELECT 1' | curl 'http://pwd_user:pwd@clickhouse1:8123/' -d @-") + ch_nodes[2].cmd( + f"echo 'SELECT 1' | curl 'http://pwd_user:pwd@clickhouse1:8123/' -d @-" + ) usual_time = (time.time() - start_time_usual) / 100 with Then("measuring the performance compared to password auth"): - metric("percentage_improvement", units="%", value=100*(krb_time - usual_time)/usual_time) + metric( + "percentage_improvement", + units="%", + value=100 * (krb_time - usual_time) / usual_time, + ) with Finally("I drop pwd_user"): ch_nodes[0].query("DROP USER pwd_user") @@ -335,12 +337,15 @@ def authentication_performance(self): @TestFeature def generic(self): - """Perform ClickHouse Kerberos authentication testing - """ + """Perform ClickHouse Kerberos authentication testing""" - self.context.ch_nodes = [self.context.cluster.node(f"clickhouse{i}") for i in range(1, 4)] + self.context.ch_nodes = [ + self.context.cluster.node(f"clickhouse{i}") for i in range(1, 4) + ] self.context.krb_server = self.context.cluster.node("kerberos") - self.context.clients = [self.context.cluster.node(f"krb-client{i}") for i in range(1, 6)] + self.context.clients = [ + self.context.cluster.node(f"krb-client{i}") for i in range(1, 6) + ] for scenario in loads(current_module(), Scenario, Suite): - Scenario(run=scenario, flags=TE) #, setup=instrument_clickhouse_server_log) + Scenario(run=scenario, flags=TE) # , setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/kerberos/tests/parallel.py b/tests/testflows/kerberos/tests/parallel.py index 5d352af7df4..4c1c988baff 100644 --- 
a/tests/testflows/kerberos/tests/parallel.py +++ b/tests/testflows/kerberos/tests/parallel.py @@ -2,20 +2,18 @@ from testflows.core import * from kerberos.tests.common import * from kerberos.requirements.requirements import * + @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Parallel_ValidRequests_SameCredentials("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Parallel_ValidRequests_SameCredentials("1.0")) def valid_requests_same_credentials(self): - """ClickHouse should be able to process parallel requests sent under the same credentials. - """ + """ClickHouse should be able to process parallel requests sent under the same credentials.""" ch_nodes = self.context.ch_nodes with Given("kinit for clients"): kinit_no_keytab(node=ch_nodes[1]) kinit_no_keytab(node=ch_nodes[2]) - with And('create server principal'): + with And("create server principal"): create_server_principal(node=ch_nodes[0]) def helper(cmd): @@ -25,8 +23,8 @@ def valid_requests_same_credentials(self): tasks = [] with Pool(2) as pool: with When("I try simultaneous authentication"): - tasks.append(pool.submit(helper, (ch_nodes[1].cmd, ))) - tasks.append(pool.submit(helper, (ch_nodes[2].cmd, ))) + tasks.append(pool.submit(helper, (ch_nodes[1].cmd,))) + tasks.append(pool.submit(helper, (ch_nodes[2].cmd,))) tasks[0].result(timeout=200) tasks[1].result(timeout=200) @@ -36,12 +34,9 @@ def valid_requests_same_credentials(self): @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Parallel_ValidRequests_DifferentCredentials("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Parallel_ValidRequests_DifferentCredentials("1.0")) def valid_requests_different_credentials(self): - """ClickHouse should be able to process parallel requests by different users. - """ + """ClickHouse should be able to process parallel requests by different users.""" ch_nodes = self.context.ch_nodes with Given("kinit for clients"): @@ -59,12 +54,16 @@ def valid_requests_different_credentials(self): tasks = [] with Pool(2) as pool: with And("add 2 kerberos users via RBAC"): - ch_nodes[0].query("CREATE USER krb1 IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'") - ch_nodes[0].query("CREATE USER krb2 IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'") + ch_nodes[0].query( + "CREATE USER krb1 IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'" + ) + ch_nodes[0].query( + "CREATE USER krb2 IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'" + ) with When("I try simultaneous authentication for valid and invalid"): - tasks.append(pool.submit(helper, (ch_nodes[1].cmd, ))) - tasks.append(pool.submit(helper, (ch_nodes[2].cmd, ))) + tasks.append(pool.submit(helper, (ch_nodes[1].cmd,))) + tasks.append(pool.submit(helper, (ch_nodes[2].cmd,))) tasks[0].result(timeout=200) tasks[1].result(timeout=200) @@ -78,19 +77,16 @@ def valid_requests_different_credentials(self): @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Parallel_ValidInvalid("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Parallel_ValidInvalid("1.0")) def valid_invalid(self): - """Valid users' Kerberos authentication should not be affected by invalid users' attempts. 
- """ + """Valid users' Kerberos authentication should not be affected by invalid users' attempts.""" ch_nodes = self.context.ch_nodes with Given("kinit for clients"): kinit_no_keytab(node=ch_nodes[2]) kinit_no_keytab(node=ch_nodes[1], principal="invalid_user") - with And('create server principal'): + with And("create server principal"): create_server_principal(node=ch_nodes[0]) def helper(cmd): @@ -100,8 +96,8 @@ def valid_invalid(self): tasks = [] with Pool(2) as pool: with When("I try simultaneous authentication for valid and invalid"): - tasks.append(pool.submit(helper, (ch_nodes[1].cmd,))) # invalid - tasks.append(pool.submit(helper, (ch_nodes[2].cmd,))) # valid + tasks.append(pool.submit(helper, (ch_nodes[1].cmd,))) # invalid + tasks.append(pool.submit(helper, (ch_nodes[2].cmd,))) # valid with Then(f"I expect have auth failure"): assert tasks[1].result(timeout=300).output == "kerberos_user", error() @@ -109,12 +105,9 @@ def valid_invalid(self): @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Parallel_Deletion("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Parallel_Deletion("1.0")) def deletion(self): - """ClickHouse SHALL NOT crash when 2 Kerberos users are simultaneously deleting one another. - """ + """ClickHouse SHALL NOT crash when 2 Kerberos users are simultaneously deleting one another.""" ch_nodes = self.context.ch_nodes with Given("kinit for clients"): @@ -125,18 +118,24 @@ def deletion(self): create_server_principal(node=ch_nodes[0]) def helper(cmd, todel): - return cmd(test_select_query(node=ch_nodes[0], req=f"DROP USER {todel}"), no_checks=True) + return cmd( + test_select_query(node=ch_nodes[0], req=f"DROP USER {todel}"), + no_checks=True, + ) for i in range(15): tasks = [] with Pool(2) as pool: with And("add 2 kerberos users via RBAC"): - ch_nodes[0].query("CREATE USER krb1 IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'") - ch_nodes[0].query("CREATE USER krb2 IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'") + ch_nodes[0].query( + "CREATE USER krb1 IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'" + ) + ch_nodes[0].query( + "CREATE USER krb2 IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM'" + ) ch_nodes[0].query("GRANT ACCESS MANAGEMENT ON *.* TO krb1") ch_nodes[0].query("GRANT ACCESS MANAGEMENT ON *.* TO krb2") - with When("I try simultaneous authentication for valid and invalid"): tasks.append(pool.submit(helper, (ch_nodes[1].cmd, "krb2"))) tasks.append(pool.submit(helper, (ch_nodes[2].cmd, "krb1"))) @@ -152,28 +151,29 @@ def deletion(self): @TestScenario -@Requirements( - RQ_SRS_016_Kerberos_Parallel_ValidRequests_KerberosAndNonKerberos("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Parallel_ValidRequests_KerberosAndNonKerberos("1.0")) def kerberos_and_nonkerberos(self): - """ClickHouse SHALL support processing of simultaneous kerberized and non-kerberized requests. 
- """ + """ClickHouse SHALL support processing of simultaneous kerberized and non-kerberized requests.""" ch_nodes = self.context.ch_nodes with Given("kinit for clients"): kinit_no_keytab(node=ch_nodes[2]) - with And('create server principal'): + with And("create server principal"): create_server_principal(node=ch_nodes[0]) def helper(cmd, krb_auth): - return cmd(test_select_query(node=ch_nodes[0], krb_auth=krb_auth), no_checks=True) + return cmd( + test_select_query(node=ch_nodes[0], krb_auth=krb_auth), no_checks=True + ) for i in range(15): tasks = [] with Pool(2) as pool: with When("I try simultaneous authentication for valid and invalid"): - tasks.append(pool.submit(helper, (ch_nodes[1].cmd, False))) # non-kerberos + tasks.append( + pool.submit(helper, (ch_nodes[1].cmd, False)) + ) # non-kerberos tasks.append(pool.submit(helper, (ch_nodes[2].cmd, True))) # kerberos with Then(f"I expect have auth failure"): @@ -182,16 +182,17 @@ def kerberos_and_nonkerberos(self): @TestFeature -@Requirements( - RQ_SRS_016_Kerberos_Parallel("1.0") -) +@Requirements(RQ_SRS_016_Kerberos_Parallel("1.0")) def parallel(self): - """Perform ClickHouse Kerberos authentication testing for incorrect configuration files - """ + """Perform ClickHouse Kerberos authentication testing for incorrect configuration files""" - self.context.ch_nodes = [self.context.cluster.node(f"clickhouse{i}") for i in range(1, 4)] + self.context.ch_nodes = [ + self.context.cluster.node(f"clickhouse{i}") for i in range(1, 4) + ] self.context.krb_server = self.context.cluster.node("kerberos") - self.context.clients = [self.context.cluster.node(f"krb-client{i}") for i in range(1, 6)] + self.context.clients = [ + self.context.cluster.node(f"krb-client{i}") for i in range(1, 6) + ] for scenario in loads(current_module(), Scenario, Suite): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/ldap/authentication/regression.py b/tests/testflows/ldap/authentication/regression.py index 7a6b35a2658..d2e541598ea 100755 --- a/tests/testflows/ldap/authentication/regression.py +++ b/tests/testflows/ldap/authentication/regression.py @@ -11,35 +11,41 @@ from ldap.authentication.requirements import * # Cross-outs of known fails xfails = { - "connection protocols/tls/tls_require_cert='try'": - [(Fail, "can't be tested with self-signed certificates")], - "connection protocols/tls/tls_require_cert='demand'": - [(Fail, "can't be tested with self-signed certificates")], - "connection protocols/starttls/tls_require_cert='try'": - [(Fail, "can't be tested with self-signed certificates")], - "connection protocols/starttls/tls_require_cert='demand'": - [(Fail, "can't be tested with self-signed certificates")], - "connection protocols/tls require cert default demand": - [(Fail, "can't be tested with self-signed certificates")], - "connection protocols/starttls with custom port": - [(Fail, "it seems that starttls is not enabled by default on custom plain-text ports in LDAP server")], - "connection protocols/tls cipher suite": - [(Fail, "can't get it to work")] + "connection protocols/tls/tls_require_cert='try'": [ + (Fail, "can't be tested with self-signed certificates") + ], + "connection protocols/tls/tls_require_cert='demand'": [ + (Fail, "can't be tested with self-signed certificates") + ], + "connection protocols/starttls/tls_require_cert='try'": [ + (Fail, "can't be tested with self-signed certificates") + ], + "connection protocols/starttls/tls_require_cert='demand'": [ + (Fail, "can't be tested with self-signed certificates") + ], + "connection 
protocols/tls require cert default demand": [ + (Fail, "can't be tested with self-signed certificates") + ], + "connection protocols/starttls with custom port": [ + ( + Fail, + "it seems that starttls is not enabled by default on custom plain-text ports in LDAP server", + ) + ], + "connection protocols/tls cipher suite": [(Fail, "can't get it to work")], } + @TestFeature @Name("authentication") @ArgumentParser(argparser) -@Specifications( - SRS_007_ClickHouse_Authentication_of_Users_via_LDAP -) -@Requirements( - RQ_SRS_007_LDAP_Authentication("1.0") -) +@Specifications(SRS_007_ClickHouse_Authentication_of_Users_via_LDAP) +@Requirements(RQ_SRS_007_LDAP_Authentication("1.0")) @XFails(xfails) -def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None): - """ClickHouse integration with LDAP regression module. - """ +def regression( + self, local, clickhouse_binary_path, clickhouse_version=None, stress=None +): + """ClickHouse integration with LDAP regression module.""" nodes = { "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"), } @@ -52,13 +58,17 @@ def regression(self, local, clickhouse_binary_path, clickhouse_version=None, str from platform import processor as current_cpu folder_name = os.path.basename(current_dir()) - if current_cpu() == 'aarch64': + if current_cpu() == "aarch64": env = f"{folder_name}_env_arm64" else: env = f"{folder_name}_env" - with Cluster(local, clickhouse_binary_path, nodes=nodes, - docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster: + with Cluster( + local, + clickhouse_binary_path, + nodes=nodes, + docker_compose_project_dir=os.path.join(current_dir(), env), + ) as cluster: self.context.cluster = cluster Scenario(run=load("ldap.authentication.tests.sanity", "scenario")) @@ -68,5 +78,6 @@ def regression(self, local, clickhouse_binary_path, clickhouse_version=None, str Feature(run=load("ldap.authentication.tests.user_config", "feature")) Feature(run=load("ldap.authentication.tests.authentications", "feature")) + if main(): regression() diff --git a/tests/testflows/ldap/authentication/requirements/requirements.py b/tests/testflows/ldap/authentication/requirements/requirements.py index 97c85d93c86..6ee904bd40e 100644 --- a/tests/testflows/ldap/authentication/requirements/requirements.py +++ b/tests/testflows/ldap/authentication/requirements/requirements.py @@ -9,1204 +9,1269 @@ from testflows.core import Requirement Heading = Specification.Heading RQ_SRS_007_LDAP_Authentication = Requirement( - name='RQ.SRS-007.LDAP.Authentication', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support user authentication via an [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support user authentication via an [LDAP] server.\n" "\n" + ), link=None, level=3, - num='4.1.1') + num="4.1.1", +) RQ_SRS_007_LDAP_Authentication_MultipleServers = Requirement( - name='RQ.SRS-007.LDAP.Authentication.MultipleServers', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.MultipleServers", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying multiple [LDAP] servers that can be used to authenticate\n' - 'users.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying multiple [LDAP] servers that can be used to authenticate\n" + "users.\n" + "\n" + ), link=None, level=3, - num='4.1.2') + num="4.1.2", +) RQ_SRS_007_LDAP_Authentication_Protocol_PlainText = 
Requirement( - name='RQ.SRS-007.LDAP.Authentication.Protocol.PlainText', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Protocol.PlainText", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support user authentication using plain text `ldap://` non secure protocol.\n' - '\n' - ), + "[ClickHouse] SHALL support user authentication using plain text `ldap://` non secure protocol.\n" + "\n" + ), link=None, level=3, - num='4.1.3') + num="4.1.3", +) RQ_SRS_007_LDAP_Authentication_Protocol_TLS = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Protocol.TLS', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Protocol.TLS", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support user authentication using `SSL/TLS` `ldaps://` secure protocol.\n' - '\n' - ), + "[ClickHouse] SHALL support user authentication using `SSL/TLS` `ldaps://` secure protocol.\n" + "\n" + ), link=None, level=3, - num='4.1.4') + num="4.1.4", +) RQ_SRS_007_LDAP_Authentication_Protocol_StartTLS = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Protocol.StartTLS', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Protocol.StartTLS", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support user authentication using legacy `StartTLS` protocol which is a\n' - 'plain text `ldap://` protocol that is upgraded to [TLS].\n' - '\n' - ), + "[ClickHouse] SHALL support user authentication using legacy `StartTLS` protocol which is a\n" + "plain text `ldap://` protocol that is upgraded to [TLS].\n" + "\n" + ), link=None, level=3, - num='4.1.5') + num="4.1.5", +) RQ_SRS_007_LDAP_Authentication_TLS_Certificate_Validation = Requirement( - name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.Validation', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.TLS.Certificate.Validation", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support certificate validation used for [TLS] connections.\n' - '\n' - ), + "[ClickHouse] SHALL support certificate validation used for [TLS] connections.\n" + "\n" + ), link=None, level=3, - num='4.1.6') + num="4.1.6", +) RQ_SRS_007_LDAP_Authentication_TLS_Certificate_SelfSigned = Requirement( - name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SelfSigned', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SelfSigned", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support self-signed certificates for [TLS] connections.\n' - '\n' - ), + "[ClickHouse] SHALL support self-signed certificates for [TLS] connections.\n" + "\n" + ), link=None, level=3, - num='4.1.7') + num="4.1.7", +) RQ_SRS_007_LDAP_Authentication_TLS_Certificate_SpecificCertificationAuthority = Requirement( - name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SpecificCertificationAuthority', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SpecificCertificationAuthority", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support certificates signed by specific Certification Authority for [TLS] connections.\n' - '\n' - ), + "[ClickHouse] SHALL support certificates signed by specific Certification Authority for [TLS] connections.\n" + "\n" + ), link=None, level=3, - num='4.1.8') + num="4.1.8", +) RQ_SRS_007_LDAP_Server_Configuration_Invalid = Requirement( - 
name='RQ.SRS-007.LDAP.Server.Configuration.Invalid', - version='1.0', + name="RQ.SRS-007.LDAP.Server.Configuration.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server configuration is not valid.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server configuration is not valid.\n" + "\n" + ), link=None, level=3, - num='4.1.9') + num="4.1.9", +) RQ_SRS_007_LDAP_User_Configuration_Invalid = Requirement( - name='RQ.SRS-007.LDAP.User.Configuration.Invalid', - version='1.0', + name="RQ.SRS-007.LDAP.User.Configuration.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit user login if user configuration is not valid.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit user login if user configuration is not valid.\n" + "\n" + ), link=None, level=3, - num='4.1.10') + num="4.1.10", +) RQ_SRS_007_LDAP_Authentication_Mechanism_Anonymous = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Mechanism.Anonymous', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Mechanism.Anonymous", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit authentication using [Anonymous Authentication Mechanism of Simple Bind]\n' - 'authentication mechanism.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit authentication using [Anonymous Authentication Mechanism of Simple Bind]\n" + "authentication mechanism.\n" + "\n" + ), link=None, level=3, - num='4.1.11') + num="4.1.11", +) RQ_SRS_007_LDAP_Authentication_Mechanism_Unauthenticated = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Mechanism.Unauthenticated', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Mechanism.Unauthenticated", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit authentication using [Unauthenticated Authentication Mechanism of Simple Bind]\n' - 'authentication mechanism.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit authentication using [Unauthenticated Authentication Mechanism of Simple Bind]\n" + "authentication mechanism.\n" + "\n" + ), link=None, level=3, - num='4.1.12') + num="4.1.12", +) RQ_SRS_007_LDAP_Authentication_Mechanism_NamePassword = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Mechanism.NamePassword', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Mechanism.NamePassword", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL allow authentication using only [Name/Password Authentication Mechanism of Simple Bind]\n' - 'authentication mechanism.\n' - '\n' - ), + "[ClickHouse] SHALL allow authentication using only [Name/Password Authentication Mechanism of Simple Bind]\n" + "authentication mechanism.\n" + "\n" + ), link=None, level=3, - num='4.1.13') + num="4.1.13", +) RQ_SRS_007_LDAP_Authentication_Valid = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Valid', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Valid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only allow user authentication using [LDAP] server if and only if\n' - 'user name and password match [LDAP] server records for the user.\n' - '\n' - ), + "[ClickHouse] SHALL only allow user 
authentication using [LDAP] server if and only if\n" + "user name and password match [LDAP] server records for the user.\n" + "\n" + ), link=None, level=3, - num='4.1.14') + num="4.1.14", +) RQ_SRS_007_LDAP_Authentication_Invalid = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Invalid', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit authentication if either user name or password\n' - 'do not match [LDAP] server records for the user.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit authentication if either user name or password\n" + "do not match [LDAP] server records for the user.\n" + "\n" + ), link=None, level=3, - num='4.1.15') + num="4.1.15", +) RQ_SRS_007_LDAP_Authentication_Invalid_DeletedUser = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Invalid.DeletedUser', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Invalid.DeletedUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit authentication if the user\n' - 'has been deleted from the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit authentication if the user\n" + "has been deleted from the [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.1.16') + num="4.1.16", +) RQ_SRS_007_LDAP_Authentication_UsernameChanged = Requirement( - name='RQ.SRS-007.LDAP.Authentication.UsernameChanged', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.UsernameChanged", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit authentication if the username is changed\n' - 'on the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit authentication if the username is changed\n" + "on the [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.1.17') + num="4.1.17", +) RQ_SRS_007_LDAP_Authentication_PasswordChanged = Requirement( - name='RQ.SRS-007.LDAP.Authentication.PasswordChanged', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.PasswordChanged", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit authentication if the password\n' - 'for the user is changed on the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit authentication if the password\n" + "for the user is changed on the [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.1.18') + num="4.1.18", +) RQ_SRS_007_LDAP_Authentication_LDAPServerRestart = Requirement( - name='RQ.SRS-007.LDAP.Authentication.LDAPServerRestart', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.LDAPServerRestart", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users after [LDAP] server is restarted.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users after [LDAP] server is restarted.\n" + "\n" + ), link=None, level=3, - num='4.1.19') + num="4.1.19", +) RQ_SRS_007_LDAP_Authentication_ClickHouseServerRestart = Requirement( - name='RQ.SRS-007.LDAP.Authentication.ClickHouseServerRestart', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.ClickHouseServerRestart", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users 
after server is restarted.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users after server is restarted.\n" + "\n" + ), link=None, level=3, - num='4.1.20') + num="4.1.20", +) RQ_SRS_007_LDAP_Authentication_Parallel = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Parallel', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Parallel", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of users using [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of users using [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.1.21') + num="4.1.21", +) RQ_SRS_007_LDAP_Authentication_Parallel_ValidAndInvalid = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Parallel.ValidAndInvalid', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Parallel.ValidAndInvalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authentication of valid users and\n' - 'prohibit authentication of invalid users using [LDAP] server\n' - 'in parallel without having invalid attempts affecting valid authentications.\n' - '\n' - ), + "[ClickHouse] SHALL support authentication of valid users and\n" + "prohibit authentication of invalid users using [LDAP] server\n" + "in parallel without having invalid attempts affecting valid authentications.\n" + "\n" + ), link=None, level=3, - num='4.1.22') + num="4.1.22", +) RQ_SRS_007_LDAP_UnreachableServer = Requirement( - name='RQ.SRS-007.LDAP.UnreachableServer', - version='1.0', + name="RQ.SRS-007.LDAP.UnreachableServer", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server is unreachable.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server is unreachable.\n" + "\n" + ), link=None, level=3, - num='4.2.1') + num="4.2.1", +) RQ_SRS_007_LDAP_Configuration_Server_Name = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.Name', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.Name", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not support empty string as a server name.\n' - '\n' - ), + "[ClickHouse] SHALL not support empty string as a server name.\n" "\n" + ), link=None, level=3, - num='4.2.2') + num="4.2.2", +) RQ_SRS_007_LDAP_Configuration_Server_Host = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.Host', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.Host", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify [LDAP]\n' - 'server hostname or IP, this parameter SHALL be mandatory and SHALL not be empty.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify [LDAP]\n" + "server hostname or IP, this parameter SHALL be mandatory and SHALL not be empty.\n" + "\n" + ), link=None, level=3, - num='4.2.3') + num="4.2.3", +) RQ_SRS_007_LDAP_Configuration_Server_Port = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.Port', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.Port", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify [LDAP] server port.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify [LDAP] server port.\n" + "\n" + ), 
link=None, level=3, - num='4.2.4') + num="4.2.4", +) RQ_SRS_007_LDAP_Configuration_Server_Port_Default = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.Port.Default', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.Port.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use default port number `636` if `enable_tls` is set to `yes` or `389` otherwise.\n' - '\n' - ), + "[ClickHouse] SHALL use default port number `636` if `enable_tls` is set to `yes` or `389` otherwise.\n" + "\n" + ), link=None, level=3, - num='4.2.5') + num="4.2.5", +) RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Prefix = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Prefix', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Prefix", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify the prefix\n' - 'of value used to construct the DN to bound to during authentication via [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify the prefix\n" + "of value used to construct the DN to bound to during authentication via [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.2.6') + num="4.2.6", +) RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Suffix = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Suffix', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Suffix", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify the suffix\n' - 'of value used to construct the DN to bound to during authentication via [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify the suffix\n" + "of value used to construct the DN to bound to during authentication via [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.2.7') + num="4.2.7", +) RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Value = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Value', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Value", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL construct DN as `auth_dn_prefix + escape(user_name) + auth_dn_suffix` string.\n' - '\n' + "[ClickHouse] SHALL construct DN as `auth_dn_prefix + escape(user_name) + auth_dn_suffix` string.\n" + "\n" "> This implies that auth_dn_suffix should usually have comma ',' as its first non-space character.\n" - '\n' - ), + "\n" + ), link=None, level=3, - num='4.2.8') + num="4.2.8", +) RQ_SRS_007_LDAP_Configuration_Server_EnableTLS = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.EnableTLS", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to trigger the use of secure connection to the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to trigger the use of secure connection to the [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.2.9') + num="4.2.9", +) RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Default = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Default', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Default", + version="1.0", priority=None, group=None, type=None, uid=None, 
description=( - '[ClickHouse] SHALL use `yes` value as the default for `` parameter\n' - 'to enable SSL/TLS `ldaps://` protocol.\n' - '\n' - ), + "[ClickHouse] SHALL use `yes` value as the default for `` parameter\n" + "to enable SSL/TLS `ldaps://` protocol.\n" + "\n" + ), link=None, level=3, - num='4.2.10') + num="4.2.10", +) RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_No = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.No', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.No", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `no` as the value of `` parameter to enable\n' - 'plain text `ldap://` protocol.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `no` as the value of `` parameter to enable\n" + "plain text `ldap://` protocol.\n" + "\n" + ), link=None, level=3, - num='4.2.11') + num="4.2.11", +) RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Yes = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Yes', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Yes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `yes` as the value of `` parameter to enable\n' - 'SSL/TLS `ldaps://` protocol.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `yes` as the value of `` parameter to enable\n" + "SSL/TLS `ldaps://` protocol.\n" + "\n" + ), link=None, level=3, - num='4.2.12') + num="4.2.12", +) RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_StartTLS = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.StartTLS', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.StartTLS", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `starttls` as the value of `` parameter to enable\n' - 'legacy `StartTLS` protocol that used plain text `ldap://` protocol, upgraded to [TLS].\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `starttls` as the value of `` parameter to enable\n" + "legacy `StartTLS` protocol that used plain text `ldap://` protocol, upgraded to [TLS].\n" + "\n" + ), link=None, level=3, - num='4.2.13') + num="4.2.13", +) RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify\n' - 'the minimum protocol version of SSL/TLS.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify\n" + "the minimum protocol version of SSL/TLS.\n" + "\n" + ), link=None, level=3, - num='4.2.14') + num="4.2.14", +) RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Values = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Values', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Values", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, and `tls1.2`\n' - 'as a value of the `` parameter.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, and 
`tls1.2`\n" + "as a value of the `` parameter.\n" + "\n" + ), link=None, level=3, - num='4.2.15') + num="4.2.15", +) RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Default = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Default', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL set `tls1.2` as the default value of the `` parameter.\n' - '\n' - ), + "[ClickHouse] SHALL set `tls1.2` as the default value of the `` parameter.\n" + "\n" + ), link=None, level=3, - num='4.2.16') + num="4.2.16", +) RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify [TLS] peer\n' - 'certificate verification behavior.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify [TLS] peer\n" + "certificate verification behavior.\n" + "\n" + ), link=None, level=3, - num='4.2.17') + num="4.2.17", +) RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Default = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Default', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use `demand` value as the default for the `` parameter.\n' - '\n' - ), + "[ClickHouse] SHALL use `demand` value as the default for the `` parameter.\n" + "\n" + ), link=None, level=3, - num='4.2.18') + num="4.2.18", +) RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Demand = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Demand', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Demand", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `demand` as the value of `` parameter to\n' - 'enable requesting of client certificate. If no certificate is provided, or a bad certificate is\n' - 'provided, the session SHALL be immediately terminated.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `demand` as the value of `` parameter to\n" + "enable requesting of client certificate. If no certificate is provided, or a bad certificate is\n" + "provided, the session SHALL be immediately terminated.\n" + "\n" + ), link=None, level=3, - num='4.2.19') + num="4.2.19", +) RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Allow = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Allow', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Allow", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `allow` as the value of `` parameter to\n' - 'enable requesting of client certificate. If no\n' - 'certificate is provided, the session SHALL proceed normally.\n' - 'If a bad certificate is provided, it SHALL be ignored and the session SHALL proceed normally.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `allow` as the value of `` parameter to\n" + "enable requesting of client certificate. 
If no\n" + "certificate is provided, the session SHALL proceed normally.\n" + "If a bad certificate is provided, it SHALL be ignored and the session SHALL proceed normally.\n" + "\n" + ), link=None, level=3, - num='4.2.20') + num="4.2.20", +) RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Try = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Try', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Try", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `try` as the value of `` parameter to\n' - 'enable requesting of client certificate. If no certificate is provided, the session\n' - 'SHALL proceed normally. If a bad certificate is provided, the session SHALL be\n' - 'immediately terminated.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `try` as the value of `` parameter to\n" + "enable requesting of client certificate. If no certificate is provided, the session\n" + "SHALL proceed normally. If a bad certificate is provided, the session SHALL be\n" + "immediately terminated.\n" + "\n" + ), link=None, level=3, - num='4.2.21') + num="4.2.21", +) RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Never = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Never', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Never", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `never` as the value of `` parameter to\n' - 'disable requesting of client certificate.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `never` as the value of `` parameter to\n" + "disable requesting of client certificate.\n" + "\n" + ), link=None, level=3, - num='4.2.22') + num="4.2.22", +) RQ_SRS_007_LDAP_Configuration_Server_TLSCertFile = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSCertFile', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSCertFile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` to specify the path to certificate file used by\n' - '[ClickHouse] to establish connection with the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` to specify the path to certificate file used by\n" + "[ClickHouse] to establish connection with the [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.2.23') + num="4.2.23", +) RQ_SRS_007_LDAP_Configuration_Server_TLSKeyFile = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSKeyFile', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSKeyFile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` to specify the path to key file for the certificate\n' - 'specified by the `` parameter.\n' - '\n' - ), + "[ClickHouse] SHALL support `` to specify the path to key file for the certificate\n" + "specified by the `` parameter.\n" + "\n" + ), link=None, level=3, - num='4.2.24') + num="4.2.24", +) RQ_SRS_007_LDAP_Configuration_Server_TLSCACertDir = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSCACertDir', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSCACertDir", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify to a path to\n' - 'the directory containing [CA] certificates 
used to verify certificates provided by the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify to a path to\n" + "the directory containing [CA] certificates used to verify certificates provided by the [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.2.25') + num="4.2.25", +) RQ_SRS_007_LDAP_Configuration_Server_TLSCACertFile = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSCACertFile', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSCACertFile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify a path to a specific\n' - '[CA] certificate file used to verify certificates provided by the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify a path to a specific\n" + "[CA] certificate file used to verify certificates provided by the [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.2.26') + num="4.2.26", +) RQ_SRS_007_LDAP_Configuration_Server_TLSCipherSuite = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.TLSCipherSuite', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.TLSCipherSuite", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `tls_cipher_suite` parameter to specify allowed cipher suites.\n' - 'The value SHALL use the same format as the `ciphersuites` in the [OpenSSL Ciphers].\n' - '\n' - 'For example,\n' - '\n' - '```xml\n' - 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384\n' - '```\n' - '\n' - 'The available suites SHALL depend on the [OpenSSL] library version and variant used to build\n' - '[ClickHouse] and therefore might change.\n' - '\n' - ), + "[ClickHouse] SHALL support `tls_cipher_suite` parameter to specify allowed cipher suites.\n" + "The value SHALL use the same format as the `ciphersuites` in the [OpenSSL Ciphers].\n" + "\n" + "For example,\n" + "\n" + "```xml\n" + "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384\n" + "```\n" + "\n" + "The available suites SHALL depend on the [OpenSSL] library version and variant used to build\n" + "[ClickHouse] and therefore might change.\n" + "\n" + ), link=None, level=3, - num='4.2.27') + num="4.2.27", +) RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `verification_cooldown` parameter in the [LDAP] server configuration section\n' - 'that SHALL define a period of time, in seconds, after a successful bind attempt, during which a user SHALL be assumed\n' - 'to be successfully authenticated for all consecutive requests without contacting the [LDAP] server.\n' - 'After period of time since the last successful attempt expires then on the authentication attempt\n' - 'SHALL result in contacting the [LDAP] server to verify the username and password. 
\n' - '\n' - ), + "[ClickHouse] SHALL support `verification_cooldown` parameter in the [LDAP] server configuration section\n" + "that SHALL define a period of time, in seconds, after a successful bind attempt, during which a user SHALL be assumed\n" + "to be successfully authenticated for all consecutive requests without contacting the [LDAP] server.\n" + "After period of time since the last successful attempt expires then on the authentication attempt\n" + "SHALL result in contacting the [LDAP] server to verify the username and password. \n" + "\n" + ), link=None, level=3, - num='4.2.28') + num="4.2.28", +) RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Default = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Default', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] `verification_cooldown` parameter in the [LDAP] server configuration section\n' - 'SHALL have a default value of `0` that disables caching and forces contacting\n' - 'the [LDAP] server for each authentication request.\n' - '\n' - ), + "[ClickHouse] `verification_cooldown` parameter in the [LDAP] server configuration section\n" + "SHALL have a default value of `0` that disables caching and forces contacting\n" + "the [LDAP] server for each authentication request.\n" + "\n" + ), link=None, level=3, - num='4.2.29') + num="4.2.29", +) RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Invalid = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Invalid', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[Clickhouse] SHALL return an error if the value provided for the `verification_cooldown` parameter is not a valid positive integer.\n' - '\n' - 'For example:\n' - '\n' - '* negative integer\n' - '* string\n' - '* empty value\n' - '* extremely large positive value (overflow)\n' - '* extremely large negative value (overflow)\n' - '\n' - 'The error SHALL appear in the log and SHALL be similar to the following:\n' - '\n' - '```bash\n' - ' Access(user directories): Could not parse LDAP server `openldap1`: Poco::Exception. Code: 1000, e.code() = 0, e.displayText() = Syntax error: Not a valid unsigned integer: *input value*\n' - '```\n' - '\n' - ), + "[Clickhouse] SHALL return an error if the value provided for the `verification_cooldown` parameter is not a valid positive integer.\n" + "\n" + "For example:\n" + "\n" + "* negative integer\n" + "* string\n" + "* empty value\n" + "* extremely large positive value (overflow)\n" + "* extremely large negative value (overflow)\n" + "\n" + "The error SHALL appear in the log and SHALL be similar to the following:\n" + "\n" + "```bash\n" + " Access(user directories): Could not parse LDAP server `openldap1`: Poco::Exception. 
Code: 1000, e.code() = 0, e.displayText() = Syntax error: Not a valid unsigned integer: *input value*\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.2.30') + num="4.2.30", +) RQ_SRS_007_LDAP_Configuration_Server_Syntax = Requirement( - name='RQ.SRS-007.LDAP.Configuration.Server.Syntax', - version='2.0', + name="RQ.SRS-007.LDAP.Configuration.Server.Syntax", + version="2.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following example syntax to create an entry for an [LDAP] server inside the `config.xml`\n' - 'configuration file or of any configuration file inside the `config.d` directory.\n' - '\n' - '```xml\n' - '\n' - ' \n' - ' localhost\n' - ' 636\n' - ' cn=\n' - ' , ou=users, dc=example, dc=com\n' - ' 0\n' - ' yes\n' - ' tls1.2\n' - ' demand\n' - ' /path/to/tls_cert_file\n' - ' /path/to/tls_key_file\n' - ' /path/to/tls_ca_cert_file\n' - ' /path/to/tls_ca_cert_dir\n' - ' ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384\n' - ' \n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following example syntax to create an entry for an [LDAP] server inside the `config.xml`\n" + "configuration file or of any configuration file inside the `config.d` directory.\n" + "\n" + "```xml\n" + "\n" + " \n" + " localhost\n" + " 636\n" + " cn=\n" + " , ou=users, dc=example, dc=com\n" + " 0\n" + " yes\n" + " tls1.2\n" + " demand\n" + " /path/to/tls_cert_file\n" + " /path/to/tls_key_file\n" + " /path/to/tls_ca_cert_file\n" + " /path/to/tls_ca_cert_dir\n" + " ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384\n" + " \n" + "\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.2.31') + num="4.2.31", +) RQ_SRS_007_LDAP_Configuration_User_RBAC = Requirement( - name='RQ.SRS-007.LDAP.Configuration.User.RBAC', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.User.RBAC", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creating users identified using an [LDAP] server using\n' - 'the following RBAC command\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL support creating users identified using an [LDAP] server using\n" + "the following RBAC command\n" + "\n" + "```sql\n" "CREATE USER name IDENTIFIED WITH ldap SERVER 'server_name'\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=3, - num='4.2.32') + num="4.2.32", +) RQ_SRS_007_LDAP_Configuration_User_Syntax = Requirement( - name='RQ.SRS-007.LDAP.Configuration.User.Syntax', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.User.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following example syntax to create a user that is authenticated using\n' - 'an [LDAP] server inside the `users.xml` file or any configuration file inside the `users.d` directory.\n' - '\n' - '```xml\n' - '\n' - ' \n' - ' \n' - ' \n' - ' my_ldap_server\n' - ' \n' - ' \n' - ' \n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following example syntax to create a user that is authenticated using\n" + "an [LDAP] server inside the `users.xml` file or any configuration file inside the `users.d` directory.\n" + "\n" + "```xml\n" + "\n" + " \n" + " \n" + " \n" + " my_ldap_server\n" + " \n" + " \n" + " \n" + "\n" + "```\n" + "\n" + ), link=None, level=3, - num='4.2.33') + num="4.2.33", +) RQ_SRS_007_LDAP_Configuration_User_Name_Empty = Requirement( - name='RQ.SRS-007.LDAP.Configuration.User.Name.Empty', - 
version='1.0', + name="RQ.SRS-007.LDAP.Configuration.User.Name.Empty", + version="1.0", priority=None, group=None, type=None, uid=None, - description=( - '[ClickHouse] SHALL not support empty string as a user name.\n' - '\n' - ), + description=("[ClickHouse] SHALL not support empty string as a user name.\n" "\n"), link=None, level=3, - num='4.2.34') + num="4.2.34", +) RQ_SRS_007_LDAP_Configuration_User_BothPasswordAndLDAP = Requirement( - name='RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL throw an error if `` is specified for the user and at the same\n' - 'time user configuration contains any of the `` entries.\n' - '\n' - ), + "[ClickHouse] SHALL throw an error if `` is specified for the user and at the same\n" + "time user configuration contains any of the `` entries.\n" + "\n" + ), link=None, level=3, - num='4.2.35') + num="4.2.35", +) RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_NotDefined = Requirement( - name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL throw an error during any authentication attempt\n' - 'if the name of the [LDAP] server used inside the `` entry\n' - 'is not defined in the `` section.\n' - '\n' - ), + "[ClickHouse] SHALL throw an error during any authentication attempt\n" + "if the name of the [LDAP] server used inside the `` entry\n" + "is not defined in the `` section.\n" + "\n" + ), link=None, level=3, - num='4.2.36') + num="4.2.36", +) RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_Empty = Requirement( - name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL throw an error during any authentication attempt\n' - 'if the name of the [LDAP] server used inside the `` entry\n' - 'is empty.\n' - '\n' - ), + "[ClickHouse] SHALL throw an error during any authentication attempt\n" + "if the name of the [LDAP] server used inside the `` entry\n" + "is empty.\n" + "\n" + ), link=None, level=3, - num='4.2.37') + num="4.2.37", +) RQ_SRS_007_LDAP_Configuration_User_OnlyOneServer = Requirement( - name='RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying only one [LDAP] server for a given user.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying only one [LDAP] server for a given user.\n" + "\n" + ), link=None, level=3, - num='4.2.38') + num="4.2.38", +) RQ_SRS_007_LDAP_Configuration_User_Name_Long = Requirement( - name='RQ.SRS-007.LDAP.Configuration.User.Name.Long', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.User.Name.Long", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support long user names of at least 256 bytes\n' - 'to specify users that can be authenticated using an [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support long user names of at least 256 bytes\n" + "to 
specify users that can be authenticated using an [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.2.39') + num="4.2.39", +) RQ_SRS_007_LDAP_Configuration_User_Name_UTF8 = Requirement( - name='RQ.SRS-007.LDAP.Configuration.User.Name.UTF8', - version='1.0', + name="RQ.SRS-007.LDAP.Configuration.User.Name.UTF8", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support user names that contain [UTF-8] characters.\n' - '\n' - ), + "[ClickHouse] SHALL support user names that contain [UTF-8] characters.\n" "\n" + ), link=None, level=3, - num='4.2.40') + num="4.2.40", +) RQ_SRS_007_LDAP_Authentication_Username_Empty = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Username.Empty', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Username.Empty", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not support authenticating users with empty username.\n' - '\n' - ), + "[ClickHouse] SHALL not support authenticating users with empty username.\n" + "\n" + ), link=None, level=3, - num='4.2.41') + num="4.2.41", +) RQ_SRS_007_LDAP_Authentication_Username_Long = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Username.Long', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Username.Long", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users with a long username of at least 256 bytes.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users with a long username of at least 256 bytes.\n" + "\n" + ), link=None, level=3, - num='4.2.42') + num="4.2.42", +) RQ_SRS_007_LDAP_Authentication_Username_UTF8 = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Username.UTF8', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Username.UTF8", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authentication users with a username that contains [UTF-8] characters.\n' - '\n' - ), + "[ClickHouse] SHALL support authentication users with a username that contains [UTF-8] characters.\n" + "\n" + ), link=None, level=3, - num='4.2.43') + num="4.2.43", +) RQ_SRS_007_LDAP_Authentication_Password_Empty = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Password.Empty', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Password.Empty", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not support authenticating users with empty passwords\n' - 'even if an empty password is valid for the user and\n' - 'is allowed by the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL not support authenticating users with empty passwords\n" + "even if an empty password is valid for the user and\n" + "is allowed by the [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.2.44') + num="4.2.44", +) RQ_SRS_007_LDAP_Authentication_Password_Long = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Password.Long', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Password.Long", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support long password of at least 256 bytes\n' - 'that can be used to authenticate users using an [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support long password of at least 256 bytes\n" + "that can be used to authenticate users using an [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.2.45') + num="4.2.45", +) 
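
The `RQ.SRS-007.LDAP.Configuration.Server.Syntax` and `RQ.SRS-007.LDAP.Configuration.User.Syntax` requirements reformatted above describe the `config.xml` / `users.d` entries only through their parameter names and example values. Below is a minimal sketch, illustrative only and not part of this patch nor of the testflows helper API, of how such a server entry could be rendered from the same style of `servers` dictionary that `authentications.py` uses later in this diff; every element name (`ldap_servers`, `host`, `port`, `auth_dn_prefix`, `auth_dn_suffix`, `enable_tls`, `verification_cooldown`, and the root tag) is an assumption taken from the requirement headings in section 4.2.

```python
# Illustrative sketch only (not from this patch): build a ClickHouse config fragment
# containing one LDAP server entry of the shape described by
# RQ.SRS-007.LDAP.Configuration.Server.Syntax, starting from the dictionary layout
# seen in tests/testflows/ldap/authentication/tests/authentications.py.
import xml.etree.ElementTree as ET


def render_ldap_server_entry(name, settings):
    """Return an XML string with a single <ldap_servers>/<name> entry.

    `settings` keys (host, port, auth_dn_prefix, auth_dn_suffix, enable_tls,
    verification_cooldown, ...) mirror the parameters named in section 4.2.
    """
    root = ET.Element("clickhouse")  # assumption: older releases use a different root tag
    server = ET.SubElement(ET.SubElement(root, "ldap_servers"), name)
    for key, value in settings.items():
        ET.SubElement(server, key).text = value
    return ET.tostring(root, encoding="unicode")


if __name__ == "__main__":
    print(
        render_ldap_server_entry(
            "openldap1",
            {
                "host": "openldap1",
                "port": "389",
                "enable_tls": "no",
                "auth_dn_prefix": "cn=",
                "auth_dn_suffix": ",ou=users,dc=company,dc=com",
                "verification_cooldown": "600",
            },
        )
    )
    # The RBAC alternative from RQ.SRS-007.LDAP.Configuration.User.RBAC is a plain
    # SQL statement, e.g.: CREATE USER name IDENTIFIED WITH ldap SERVER 'server_name'
```

In the tests themselves this kind of rendering appears to be handled by the `ldap_servers()` and `ldap_authenticated_users()` context managers used throughout the hunks below (which write `ldap_users_*.xml` config files); the sketch is only meant to make the XML structure referenced by the requirements easier to follow.
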
RQ_SRS_007_LDAP_Authentication_Password_UTF8 = Requirement( - name='RQ.SRS-007.LDAP.Authentication.Password.UTF8', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.Password.UTF8", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [UTF-8] characters in passwords\n' - 'used to authenticate users using an [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support [UTF-8] characters in passwords\n" + "used to authenticate users using an [LDAP] server.\n" + "\n" + ), link=None, level=3, - num='4.2.46') + num="4.2.46", +) RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Performance = Requirement( - name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Performance', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Performance", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL provide better login performance of [LDAP] authenticated users\n' - 'when `verification_cooldown` parameter is set to a positive value when comparing\n' - 'to the the case when `verification_cooldown` is turned off either for a single user or multiple users\n' - 'making a large number of repeated requests.\n' - '\n' - ), + "[ClickHouse] SHALL provide better login performance of [LDAP] authenticated users\n" + "when `verification_cooldown` parameter is set to a positive value when comparing\n" + "to the the case when `verification_cooldown` is turned off either for a single user or multiple users\n" + "making a large number of repeated requests.\n" + "\n" + ), link=None, level=3, - num='4.2.47') + num="4.2.47", +) RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters = Requirement( - name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL reset any currently cached [LDAP] authentication bind requests enabled by the\n' - '`verification_cooldown` parameter in the [LDAP] server configuration section\n' - 'if either `host`, `port`, `auth_dn_prefix`, or `auth_dn_suffix` parameter values\n' - 'change in the configuration file. The reset SHALL cause any subsequent authentication attempts for any user\n' + "[ClickHouse] SHALL reset any currently cached [LDAP] authentication bind requests enabled by the\n" + "`verification_cooldown` parameter in the [LDAP] server configuration section\n" + "if either `host`, `port`, `auth_dn_prefix`, or `auth_dn_suffix` parameter values\n" + "change in the configuration file. 
The reset SHALL cause any subsequent authentication attempts for any user\n" "to result in contacting the [LDAP] server to verify user's username and password.\n" - '\n' - ), + "\n" + ), link=None, level=3, - num='4.2.48') + num="4.2.48", +) RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_InvalidPassword = Requirement( - name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.InvalidPassword', - version='1.0', + name="RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.InvalidPassword", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL reset current cached [LDAP] authentication bind request enabled by the\n' - '`verification_cooldown` parameter in the [LDAP] server configuration section\n' - 'for the user if the password provided in the current authentication attempt does not match\n' - 'the valid password provided during the first successful authentication request that was cached\n' - 'for this exact user. The reset SHALL cause the next authentication attempt for this user\n' + "[ClickHouse] SHALL reset current cached [LDAP] authentication bind request enabled by the\n" + "`verification_cooldown` parameter in the [LDAP] server configuration section\n" + "for the user if the password provided in the current authentication attempt does not match\n" + "the valid password provided during the first successful authentication request that was cached\n" + "for this exact user. The reset SHALL cause the next authentication attempt for this user\n" "to result in contacting the [LDAP] server to verify user's username and password.\n" - '\n' - ), + "\n" + ), link=None, level=3, - num='4.2.49') + num="4.2.49", +) SRS_007_ClickHouse_Authentication_of_Users_via_LDAP = Specification( - name='SRS-007 ClickHouse Authentication of Users via LDAP', + name="SRS-007 ClickHouse Authentication of Users via LDAP", description=None, author=None, - date=None, - status=None, + date=None, + status=None, approved_by=None, approved_date=None, approved_version=None, @@ -1218,85 +1283,297 @@ SRS_007_ClickHouse_Authentication_of_Users_via_LDAP = Specification( parent=None, children=None, headings=( - Heading(name='Revision History', level=1, num='1'), - Heading(name='Introduction', level=1, num='2'), - Heading(name='Terminology', level=1, num='3'), - Heading(name='Requirements', level=1, num='4'), - Heading(name='Generic', level=2, num='4.1'), - Heading(name='RQ.SRS-007.LDAP.Authentication', level=3, num='4.1.1'), - Heading(name='RQ.SRS-007.LDAP.Authentication.MultipleServers', level=3, num='4.1.2'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Protocol.PlainText', level=3, num='4.1.3'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Protocol.TLS', level=3, num='4.1.4'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Protocol.StartTLS', level=3, num='4.1.5'), - Heading(name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.Validation', level=3, num='4.1.6'), - Heading(name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SelfSigned', level=3, num='4.1.7'), - Heading(name='RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SpecificCertificationAuthority', level=3, num='4.1.8'), - Heading(name='RQ.SRS-007.LDAP.Server.Configuration.Invalid', level=3, num='4.1.9'), - Heading(name='RQ.SRS-007.LDAP.User.Configuration.Invalid', level=3, num='4.1.10'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Mechanism.Anonymous', level=3, num='4.1.11'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Mechanism.Unauthenticated', level=3, num='4.1.12'), - 
Heading(name='RQ.SRS-007.LDAP.Authentication.Mechanism.NamePassword', level=3, num='4.1.13'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Valid', level=3, num='4.1.14'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Invalid', level=3, num='4.1.15'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Invalid.DeletedUser', level=3, num='4.1.16'), - Heading(name='RQ.SRS-007.LDAP.Authentication.UsernameChanged', level=3, num='4.1.17'), - Heading(name='RQ.SRS-007.LDAP.Authentication.PasswordChanged', level=3, num='4.1.18'), - Heading(name='RQ.SRS-007.LDAP.Authentication.LDAPServerRestart', level=3, num='4.1.19'), - Heading(name='RQ.SRS-007.LDAP.Authentication.ClickHouseServerRestart', level=3, num='4.1.20'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Parallel', level=3, num='4.1.21'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Parallel.ValidAndInvalid', level=3, num='4.1.22'), - Heading(name='Specific', level=2, num='4.2'), - Heading(name='RQ.SRS-007.LDAP.UnreachableServer', level=3, num='4.2.1'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.Name', level=3, num='4.2.2'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.Host', level=3, num='4.2.3'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.Port', level=3, num='4.2.4'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.Port.Default', level=3, num='4.2.5'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Prefix', level=3, num='4.2.6'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Suffix', level=3, num='4.2.7'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Value', level=3, num='4.2.8'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS', level=3, num='4.2.9'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Default', level=3, num='4.2.10'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.No', level=3, num='4.2.11'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Yes', level=3, num='4.2.12'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.StartTLS', level=3, num='4.2.13'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion', level=3, num='4.2.14'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Values', level=3, num='4.2.15'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Default', level=3, num='4.2.16'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert', level=3, num='4.2.17'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Default', level=3, num='4.2.18'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Demand', level=3, num='4.2.19'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Allow', level=3, num='4.2.20'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Try', level=3, num='4.2.21'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Never', level=3, num='4.2.22'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSCertFile', level=3, num='4.2.23'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSKeyFile', level=3, num='4.2.24'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSCACertDir', level=3, num='4.2.25'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSCACertFile', level=3, num='4.2.26'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSCipherSuite', level=3, 
num='4.2.27'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown', level=3, num='4.2.28'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Default', level=3, num='4.2.29'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Invalid', level=3, num='4.2.30'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.Syntax', level=3, num='4.2.31'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.RBAC', level=3, num='4.2.32'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.Syntax', level=3, num='4.2.33'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.Name.Empty', level=3, num='4.2.34'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP', level=3, num='4.2.35'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined', level=3, num='4.2.36'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty', level=3, num='4.2.37'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer', level=3, num='4.2.38'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.Name.Long', level=3, num='4.2.39'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.Name.UTF8', level=3, num='4.2.40'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Username.Empty', level=3, num='4.2.41'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Username.Long', level=3, num='4.2.42'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Username.UTF8', level=3, num='4.2.43'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Password.Empty', level=3, num='4.2.44'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Password.Long', level=3, num='4.2.45'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Password.UTF8', level=3, num='4.2.46'), - Heading(name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Performance', level=3, num='4.2.47'), - Heading(name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters', level=3, num='4.2.48'), - Heading(name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.InvalidPassword', level=3, num='4.2.49'), - Heading(name='References', level=1, num='5'), + Heading(name="Revision History", level=1, num="1"), + Heading(name="Introduction", level=1, num="2"), + Heading(name="Terminology", level=1, num="3"), + Heading(name="Requirements", level=1, num="4"), + Heading(name="Generic", level=2, num="4.1"), + Heading(name="RQ.SRS-007.LDAP.Authentication", level=3, num="4.1.1"), + Heading( + name="RQ.SRS-007.LDAP.Authentication.MultipleServers", level=3, num="4.1.2" ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Protocol.PlainText", + level=3, + num="4.1.3", + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Protocol.TLS", level=3, num="4.1.4" + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Protocol.StartTLS", + level=3, + num="4.1.5", + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.TLS.Certificate.Validation", + level=3, + num="4.1.6", + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SelfSigned", + level=3, + num="4.1.7", + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.TLS.Certificate.SpecificCertificationAuthority", + level=3, + num="4.1.8", + ), + Heading( + name="RQ.SRS-007.LDAP.Server.Configuration.Invalid", level=3, num="4.1.9" + ), + Heading( + name="RQ.SRS-007.LDAP.User.Configuration.Invalid", level=3, num="4.1.10" + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Mechanism.Anonymous", + level=3, + num="4.1.11", + ), + Heading( + 
name="RQ.SRS-007.LDAP.Authentication.Mechanism.Unauthenticated", + level=3, + num="4.1.12", + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Mechanism.NamePassword", + level=3, + num="4.1.13", + ), + Heading(name="RQ.SRS-007.LDAP.Authentication.Valid", level=3, num="4.1.14"), + Heading(name="RQ.SRS-007.LDAP.Authentication.Invalid", level=3, num="4.1.15"), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Invalid.DeletedUser", + level=3, + num="4.1.16", + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.UsernameChanged", level=3, num="4.1.17" + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.PasswordChanged", level=3, num="4.1.18" + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.LDAPServerRestart", + level=3, + num="4.1.19", + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.ClickHouseServerRestart", + level=3, + num="4.1.20", + ), + Heading(name="RQ.SRS-007.LDAP.Authentication.Parallel", level=3, num="4.1.21"), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Parallel.ValidAndInvalid", + level=3, + num="4.1.22", + ), + Heading(name="Specific", level=2, num="4.2"), + Heading(name="RQ.SRS-007.LDAP.UnreachableServer", level=3, num="4.2.1"), + Heading(name="RQ.SRS-007.LDAP.Configuration.Server.Name", level=3, num="4.2.2"), + Heading(name="RQ.SRS-007.LDAP.Configuration.Server.Host", level=3, num="4.2.3"), + Heading(name="RQ.SRS-007.LDAP.Configuration.Server.Port", level=3, num="4.2.4"), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.Port.Default", + level=3, + num="4.2.5", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Prefix", + level=3, + num="4.2.6", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Suffix", + level=3, + num="4.2.7", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.AuthDN.Value", + level=3, + num="4.2.8", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.EnableTLS", level=3, num="4.2.9" + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Default", + level=3, + num="4.2.10", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.No", + level=3, + num="4.2.11", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.Yes", + level=3, + num="4.2.12", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.EnableTLS.Options.StartTLS", + level=3, + num="4.2.13", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion", + level=3, + num="4.2.14", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Values", + level=3, + num="4.2.15", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSMinimumProtocolVersion.Default", + level=3, + num="4.2.16", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert", + level=3, + num="4.2.17", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Default", + level=3, + num="4.2.18", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Demand", + level=3, + num="4.2.19", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Allow", + level=3, + num="4.2.20", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Try", + level=3, + num="4.2.21", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSRequireCert.Options.Never", + level=3, + num="4.2.22", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSCertFile", + level=3, + num="4.2.23", + 
), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSKeyFile", + level=3, + num="4.2.24", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSCACertDir", + level=3, + num="4.2.25", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSCACertFile", + level=3, + num="4.2.26", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.TLSCipherSuite", + level=3, + num="4.2.27", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown", + level=3, + num="4.2.28", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Default", + level=3, + num="4.2.29", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Invalid", + level=3, + num="4.2.30", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.Server.Syntax", level=3, num="4.2.31" + ), + Heading(name="RQ.SRS-007.LDAP.Configuration.User.RBAC", level=3, num="4.2.32"), + Heading( + name="RQ.SRS-007.LDAP.Configuration.User.Syntax", level=3, num="4.2.33" + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.User.Name.Empty", level=3, num="4.2.34" + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP", + level=3, + num="4.2.35", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined", + level=3, + num="4.2.36", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty", + level=3, + num="4.2.37", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer", + level=3, + num="4.2.38", + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.User.Name.Long", level=3, num="4.2.39" + ), + Heading( + name="RQ.SRS-007.LDAP.Configuration.User.Name.UTF8", level=3, num="4.2.40" + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Username.Empty", level=3, num="4.2.41" + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Username.Long", level=3, num="4.2.42" + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Username.UTF8", level=3, num="4.2.43" + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Password.Empty", level=3, num="4.2.44" + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Password.Long", level=3, num="4.2.45" + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.Password.UTF8", level=3, num="4.2.46" + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Performance", + level=3, + num="4.2.47", + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters", + level=3, + num="4.2.48", + ), + Heading( + name="RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.InvalidPassword", + level=3, + num="4.2.49", + ), + Heading(name="References", level=1, num="5"), + ), requirements=( RQ_SRS_007_LDAP_Authentication, RQ_SRS_007_LDAP_Authentication_MultipleServers, @@ -1369,8 +1646,8 @@ SRS_007_ClickHouse_Authentication_of_Users_via_LDAP = Specification( RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Performance, RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters, RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_InvalidPassword, - ), - content=''' + ), + content=""" # SRS-007 ClickHouse Authentication of Users via LDAP # Software Requirements Specification @@ -1982,4 +2259,5 @@ to result in contacting the [LDAP] server to verify user's username and password [GitHub Repository]: https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/ldap/authentication/requirements/requirements.md [Revision 
History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/ldap/authentication/requirements/requirements.md [Git]: https://git-scm.com/ -''') +""", +) diff --git a/tests/testflows/ldap/authentication/tests/authentications.py b/tests/testflows/ldap/authentication/tests/authentications.py index 8e60f94ca8e..8f98adce746 100644 --- a/tests/testflows/ldap/authentication/tests/authentications.py +++ b/tests/testflows/ldap/authentication/tests/authentications.py @@ -13,7 +13,7 @@ servers = { "port": "389", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, "openldap2": { "host": "openldap2", @@ -22,24 +22,38 @@ servers = { "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", "tls_require_cert": "never", - } + }, } + @TestStep(When) @Name("I login as {username} and execute query") @Args(format_name=True) -def login_and_execute_query(self, username, password, exitcode=None, message=None, steps=True): - """Execute query as some user. - """ - self.context.node.query("SELECT 1", +def login_and_execute_query( + self, username, password, exitcode=None, message=None, steps=True +): + """Execute query as some user.""" + self.context.node.query( + "SELECT 1", settings=[("user", username), ("password", password)], exitcode=exitcode or 0, - message=message, steps=steps) + message=message, + steps=steps, + ) + @TestScenario -def add_user_to_ldap_and_login(self, server, user=None, ch_user=None, login=None, exitcode=None, message=None, rbac=False): - """Add user to LDAP and ClickHouse and then try to login. - """ +def add_user_to_ldap_and_login( + self, + server, + user=None, + ch_user=None, + login=None, + exitcode=None, + message=None, + rbac=False, +): + """Add user to LDAP and ClickHouse and then try to login.""" self.context.ldap_node = self.context.cluster.node(server) if ch_user is None: @@ -53,75 +67,123 @@ def add_user_to_ldap_and_login(self, server, user=None, ch_user=None, login=None ch_user["username"] = ch_user.get("username", user["cn"]) ch_user["server"] = ch_user.get("server", user["_server"]) - with ldap_authenticated_users(ch_user, config_file=f"ldap_users_{getuid()}.xml", restart=True, rbac=rbac): + with ldap_authenticated_users( + ch_user, config_file=f"ldap_users_{getuid()}.xml", restart=True, rbac=rbac + ): username = login.get("username", user["cn"]) password = login.get("password", user["userpassword"]) - login_and_execute_query(username=username, password=password, exitcode=exitcode, message=message) + login_and_execute_query( + username=username, password=password, exitcode=exitcode, message=message + ) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Parallel("1.0"), - RQ_SRS_007_LDAP_Authentication_Parallel_ValidAndInvalid("1.0") + RQ_SRS_007_LDAP_Authentication_Parallel_ValidAndInvalid("1.0"), ) def parallel_login(self, server, user_count=10, timeout=300, rbac=False): - """Check that login of valid and invalid LDAP authenticated users works in parallel. 
- """ + """Check that login of valid and invalid LDAP authenticated users works in parallel.""" self.context.ldap_node = self.context.cluster.node(server) user = None - users = [{"cn": f"parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)] + users = [ + {"cn": f"parallel_user{i}", "userpassword": randomword(20)} + for i in range(user_count) + ] with ldap_users(*users): - with ldap_authenticated_users(*[{"username": user["cn"], "server": server} for user in users], rbac=rbac): + with ldap_authenticated_users( + *[{"username": user["cn"], "server": server} for user in users], rbac=rbac + ): def login_with_valid_username_and_password(users, i, iterations=10): with When(f"valid users try to login #{i}"): for i in range(iterations): - random_user = users[random.randint(0, len(users)-1)] - login_and_execute_query(username=random_user["cn"], password=random_user["userpassword"], steps=False) + random_user = users[random.randint(0, len(users) - 1)] + login_and_execute_query( + username=random_user["cn"], + password=random_user["userpassword"], + steps=False, + ) def login_with_valid_username_and_invalid_password(users, i, iterations=10): - with When(f"users try to login with valid username and invalid password #{i}"): + with When( + f"users try to login with valid username and invalid password #{i}" + ): for i in range(iterations): - random_user = users[random.randint(0, len(users)-1)] - login_and_execute_query(username=random_user["cn"], - password=(random_user["userpassword"] + randomword(1)), - exitcode=4, - message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name", - steps=False) + random_user = users[random.randint(0, len(users) - 1)] + login_and_execute_query( + username=random_user["cn"], + password=(random_user["userpassword"] + randomword(1)), + exitcode=4, + message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name", + steps=False, + ) def login_with_invalid_username_and_valid_password(users, i, iterations=10): - with When(f"users try to login with invalid username and valid password #{i}"): + with When( + f"users try to login with invalid username and valid password #{i}" + ): for i in range(iterations): - random_user = dict(users[random.randint(0, len(users)-1)]) + random_user = dict(users[random.randint(0, len(users) - 1)]) random_user["cn"] += randomword(1) - login_and_execute_query(username=random_user["cn"], - password=random_user["userpassword"], - exitcode=4, - message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name", - steps=False) + login_and_execute_query( + username=random_user["cn"], + password=random_user["userpassword"], + exitcode=4, + message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name", + steps=False, + ) with When("I login in parallel"): tasks = [] with Pool(4) as pool: try: for i in range(5): - tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,))) - tasks.append(pool.submit(login_with_valid_username_and_invalid_password, (users, i, 50,))) - tasks.append(pool.submit(login_with_invalid_username_and_valid_password, (users, i, 50,))) + tasks.append( + pool.submit( + login_with_valid_username_and_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_valid_username_and_invalid_password, + ( + users, + i, + 
50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_invalid_username_and_valid_password, + ( + users, + i, + 50, + ), + ) + ) finally: with Then("it should work"): for task in tasks: task.result(timeout=timeout) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Invalid("1.0"), - RQ_SRS_007_LDAP_Authentication_Invalid_DeletedUser("1.0") + RQ_SRS_007_LDAP_Authentication_Invalid_DeletedUser("1.0"), ) def login_after_user_is_deleted_from_ldap(self, server, rbac=False): - """Check that login fails after user is deleted from LDAP. - """ + """Check that login fails after user is deleted from LDAP.""" self.context.ldap_node = self.context.cluster.node(server) user = None @@ -130,31 +192,37 @@ def login_after_user_is_deleted_from_ldap(self, server, rbac=False): user = {"cn": "myuser", "userpassword": "myuser"} user = add_user_to_ldap(**user) - with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml", - restart=True, rbac=rbac): + with ldap_authenticated_users( + {"username": user["cn"], "server": server}, + config_file=f"ldap_users_{getuid()}.xml", + restart=True, + rbac=rbac, + ): login_and_execute_query(username=user["cn"], password=user["userpassword"]) with When("I delete this user from LDAP"): delete_user_from_ldap(user) with Then("when I try to login again it should fail"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], exitcode=4, - message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" + message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name", ) finally: with Finally("I make sure LDAP user is deleted"): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Invalid("1.0"), - RQ_SRS_007_LDAP_Authentication_PasswordChanged("1.0") + RQ_SRS_007_LDAP_Authentication_PasswordChanged("1.0"), ) def login_after_user_password_changed_in_ldap(self, server, rbac=False): - """Check that login fails after user password is changed in LDAP. 
- """ + """Check that login fails after user password is changed in LDAP.""" self.context.ldap_node = self.context.cluster.node(server) user = None @@ -163,17 +231,23 @@ def login_after_user_password_changed_in_ldap(self, server, rbac=False): user = {"cn": "myuser", "userpassword": "myuser"} user = add_user_to_ldap(**user) - with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml", - restart=True, rbac=rbac): + with ldap_authenticated_users( + {"username": user["cn"], "server": server}, + config_file=f"ldap_users_{getuid()}.xml", + restart=True, + rbac=rbac, + ): login_and_execute_query(username=user["cn"], password=user["userpassword"]) with When("I change user password in LDAP"): change_user_password_in_ldap(user, "newpassword") with Then("when I try to login again it should fail"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], exitcode=4, - message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" + message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name", ) with And("when I try to login with the new password it should work"): @@ -184,14 +258,14 @@ def login_after_user_password_changed_in_ldap(self, server, rbac=False): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Invalid("1.0"), - RQ_SRS_007_LDAP_Authentication_UsernameChanged("1.0") + RQ_SRS_007_LDAP_Authentication_UsernameChanged("1.0"), ) def login_after_user_cn_changed_in_ldap(self, server, rbac=False): - """Check that login fails after user cn is changed in LDAP. 
- """ + """Check that login fails after user cn is changed in LDAP.""" self.context.ldap_node = self.context.cluster.node(server) user = None new_user = None @@ -201,31 +275,37 @@ def login_after_user_cn_changed_in_ldap(self, server, rbac=False): user = {"cn": "myuser", "userpassword": "myuser"} user = add_user_to_ldap(**user) - with ldap_authenticated_users({"username": user["cn"], "server": server}, - config_file=f"ldap_users_{getuid()}.xml", restart=True, rbac=rbac): + with ldap_authenticated_users( + {"username": user["cn"], "server": server}, + config_file=f"ldap_users_{getuid()}.xml", + restart=True, + rbac=rbac, + ): login_and_execute_query(username=user["cn"], password=user["userpassword"]) with When("I change user password in LDAP"): new_user = change_user_cn_in_ldap(user, "myuser2") with Then("when I try to login again it should fail"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], exitcode=4, - message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" + message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name", ) finally: with Finally("I make sure LDAP user is deleted"): if new_user is not None: delete_user_from_ldap(new_user, exitcode=None) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Valid("1.0"), - RQ_SRS_007_LDAP_Authentication_LDAPServerRestart("1.0") + RQ_SRS_007_LDAP_Authentication_LDAPServerRestart("1.0"), ) def login_after_ldap_server_is_restarted(self, server, timeout=300, rbac=False): - """Check that login succeeds after LDAP server is restarted. - """ + """Check that login succeeds after LDAP server is restarted.""" self.context.ldap_node = self.context.cluster.node(server) user = None @@ -234,18 +314,27 @@ def login_after_ldap_server_is_restarted(self, server, timeout=300, rbac=False): user = {"cn": "myuser", "userpassword": getuid()} user = add_user_to_ldap(**user) - with ldap_authenticated_users({"username": user["cn"], "server": server}, rbac=rbac): + with ldap_authenticated_users( + {"username": user["cn"], "server": server}, rbac=rbac + ): login_and_execute_query(username=user["cn"], password=user["userpassword"]) with When("I restart LDAP server"): self.context.ldap_node.restart() - with Then("I try to login until it works", description=f"timeout {timeout} sec"): + with Then( + "I try to login until it works", description=f"timeout {timeout} sec" + ): started = time.time() while True: - r = self.context.node.query("SELECT 1", - settings=[("user", user["cn"]), ("password", user["userpassword"])], - no_checks=True) + r = self.context.node.query( + "SELECT 1", + settings=[ + ("user", user["cn"]), + ("password", user["userpassword"]), + ], + no_checks=True, + ) if r.exitcode == 0: break assert time.time() - started < timeout, error(r.output) @@ -254,14 +343,14 @@ def login_after_ldap_server_is_restarted(self, server, timeout=300, rbac=False): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Valid("1.0"), - RQ_SRS_007_LDAP_Authentication_ClickHouseServerRestart("1.0") + RQ_SRS_007_LDAP_Authentication_ClickHouseServerRestart("1.0"), ) def login_after_clickhouse_server_is_restarted(self, server, timeout=300, rbac=False): - """Check that login succeeds after ClickHouse server is restarted. 
- """ + """Check that login succeeds after ClickHouse server is restarted.""" self.context.ldap_node = self.context.cluster.node(server) user = None @@ -270,18 +359,27 @@ def login_after_clickhouse_server_is_restarted(self, server, timeout=300, rbac=F user = {"cn": "myuser", "userpassword": getuid()} user = add_user_to_ldap(**user) - with ldap_authenticated_users({"username": user["cn"], "server": server}, rbac=rbac): + with ldap_authenticated_users( + {"username": user["cn"], "server": server}, rbac=rbac + ): login_and_execute_query(username=user["cn"], password=user["userpassword"]) with When("I restart ClickHouse server"): self.context.node.restart() - with Then("I try to login until it works", description=f"timeout {timeout} sec"): + with Then( + "I try to login until it works", description=f"timeout {timeout} sec" + ): started = time.time() while True: - r = self.context.node.query("SELECT 1", - settings=[("user", user["cn"]), ("password", user["userpassword"])], - no_checks=True) + r = self.context.node.query( + "SELECT 1", + settings=[ + ("user", user["cn"]), + ("password", user["userpassword"]), + ], + no_checks=True, + ) if r.exitcode == 0: break assert time.time() - started < timeout, error(r.output) @@ -290,28 +388,30 @@ def login_after_clickhouse_server_is_restarted(self, server, timeout=300, rbac=F if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Invalid("1.0"), - RQ_SRS_007_LDAP_Authentication_Password_Empty("1.0") + RQ_SRS_007_LDAP_Authentication_Password_Empty("1.0"), ) def valid_username_with_valid_empty_password(self, server, rbac=False): - """Check that we can't login using valid username that has empty password. - """ + """Check that we can't login using valid username that has empty password.""" user = {"cn": "empty_password", "userpassword": ""} exitcode = 4 message = f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" - add_user_to_ldap_and_login(user=user, exitcode=exitcode, message=message, server=server, rbac=rbac) + add_user_to_ldap_and_login( + user=user, exitcode=exitcode, message=message, server=server, rbac=rbac + ) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Invalid("1.0"), - RQ_SRS_007_LDAP_Authentication_Password_Empty("1.0") + RQ_SRS_007_LDAP_Authentication_Password_Empty("1.0"), ) def valid_username_and_invalid_empty_password(self, server, rbac=False): - """Check that we can't login using valid username but invalid empty password. - """ + """Check that we can't login using valid username but invalid empty password.""" username = "user_non_empty_password" user = {"cn": username, "userpassword": username} login = {"password": ""} @@ -319,25 +419,29 @@ def valid_username_and_invalid_empty_password(self, server, rbac=False): exitcode = 4 message = f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server, rbac=rbac) + add_user_to_ldap_and_login( + user=user, + login=login, + exitcode=exitcode, + message=message, + server=server, + rbac=rbac, + ) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Authentication_Valid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Authentication_Valid("1.0")) def valid_username_and_password(self, server, rbac=False): - """Check that we can login using valid username and password. 
- """ + """Check that we can login using valid username and password.""" username = "valid_username_and_password" user = {"cn": username, "userpassword": username} with When(f"I add user {username} to LDAP and try to login"): add_user_to_ldap_and_login(user=user, server=server, rbac=rbac) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Authentication_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Authentication_Invalid("1.0")) def valid_username_and_password_invalid_server(self, server=None, rbac=False): """Check that we can't login using valid username and valid password but for a different server. @@ -349,126 +453,157 @@ def valid_username_and_password_invalid_server(self, server=None, rbac=False): exitcode = 4 message = f"DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name" - with ldap_authenticated_users(user, config_file=f"ldap_users_{getuid()}.xml", restart=True, rbac=rbac): - login_and_execute_query(username="user2", password="user2", exitcode=exitcode, message=message) + with ldap_authenticated_users( + user, config_file=f"ldap_users_{getuid()}.xml", restart=True, rbac=rbac + ): + login_and_execute_query( + username="user2", password="user2", exitcode=exitcode, message=message + ) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Valid("1.0"), RQ_SRS_007_LDAP_Authentication_Username_Long("1.0"), - RQ_SRS_007_LDAP_Configuration_User_Name_Long("1.0") + RQ_SRS_007_LDAP_Configuration_User_Name_Long("1.0"), ) def valid_long_username_and_short_password(self, server, rbac=False): - """Check that we can login using valid very long username and short password. - """ + """Check that we can login using valid very long username and short password.""" username = "long_username_12345678901234567890123456789012345678901234567890123456789012345678901234567890" user = {"cn": username, "userpassword": "long_username"} add_user_to_ldap_and_login(user=user, server=server, rbac=rbac) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Authentication_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Authentication_Invalid("1.0")) def invalid_long_username_and_valid_short_password(self, server, rbac=False): - """Check that we can't login using slightly invalid long username but valid password. - """ + """Check that we can't login using slightly invalid long username but valid password.""" username = "long_username_12345678901234567890123456789012345678901234567890123456789012345678901234567890" user = {"cn": username, "userpassword": "long_username"} login = {"username": f"{username}?"} exitcode = 4 - message=f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" + message = f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login( + user=user, + login=login, + exitcode=exitcode, + message=message, + server=server, + rbac=rbac, + ) - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server, rbac=rbac) @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Valid("1.0"), - RQ_SRS_007_LDAP_Authentication_Password_Long("1.0") + RQ_SRS_007_LDAP_Authentication_Password_Long("1.0"), ) def valid_short_username_and_long_password(self, server, rbac=False): - """Check that we can login using valid short username with very long password. 
- """ + """Check that we can login using valid short username with very long password.""" username = "long_password" - user = {"cn": username, "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890"} + user = { + "cn": username, + "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890", + } add_user_to_ldap_and_login(user=user, server=server, rbac=rbac) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Authentication_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Authentication_Invalid("1.0")) def valid_short_username_and_invalid_long_password(self, server, rbac=False): - """Check that we can't login using valid short username and invalid long password. - """ + """Check that we can't login using valid short username and invalid long password.""" username = "long_password" - user = {"cn": username, "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890"} + user = { + "cn": username, + "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890", + } login = {"password": user["userpassword"] + "1"} exitcode = 4 - message=f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + message = f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login( + user=user, + login=login, + exitcode=exitcode, + message=message, + server=server, + rbac=rbac, + ) - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server, rbac=rbac) @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Authentication_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Authentication_Invalid("1.0")) def valid_username_and_invalid_password(self, server, rbac=False): - """Check that we can't login using valid username and invalid password. - """ + """Check that we can't login using valid username and invalid password.""" username = "valid_username_and_invalid_password" user = {"cn": username, "userpassword": username} login = {"password": user["userpassword"] + "1"} exitcode = 4 - message=f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + message = f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login( + user=user, + login=login, + exitcode=exitcode, + message=message, + server=server, + rbac=rbac, + ) - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server, rbac=rbac) @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Authentication_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Authentication_Invalid("1.0")) def invalid_username_and_valid_password(self, server, rbac=False): - """Check that we can't login using slightly invalid username but valid password. 
- """ + """Check that we can't login using slightly invalid username but valid password.""" username = "invalid_username_and_valid_password" user = {"cn": username, "userpassword": username} login = {"username": user["cn"] + "1"} exitcode = 4 - message=f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" + message = f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login( + user=user, + login=login, + exitcode=exitcode, + message=message, + server=server, + rbac=rbac, + ) - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server, rbac=rbac) @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Valid("1.0"), RQ_SRS_007_LDAP_Authentication_Username_UTF8("1.0"), - RQ_SRS_007_LDAP_Configuration_User_Name_UTF8("1.0") + RQ_SRS_007_LDAP_Configuration_User_Name_UTF8("1.0"), ) def valid_utf8_username_and_ascii_password(self, server, rbac=False): - """Check that we can login using valid utf-8 username with ascii password. - """ + """Check that we can login using valid utf-8 username with ascii password.""" username = "utf8_username_Gãńdåłf_Thê_Gręât" user = {"cn": username, "userpassword": "utf8_username"} add_user_to_ldap_and_login(user=user, server=server, rbac=rbac) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Valid("1.0"), - RQ_SRS_007_LDAP_Authentication_Password_UTF8("1.0") + RQ_SRS_007_LDAP_Authentication_Password_UTF8("1.0"), ) def valid_ascii_username_and_utf8_password(self, server, rbac=False): - """Check that we can login using valid ascii username with utf-8 password. - """ + """Check that we can login using valid ascii username with utf-8 password.""" username = "utf8_password" user = {"cn": username, "userpassword": "utf8_password_Gãńdåłf_Thê_Gręât"} add_user_to_ldap_and_login(user=user, server=server, rbac=rbac) + @TestScenario def empty_username_and_empty_password(self, server=None, rbac=False): """Check that we can login using empty username and empty password as @@ -476,11 +611,10 @@ def empty_username_and_empty_password(self, server=None, rbac=False): """ login_and_execute_query(username="", password="") + @TestScenario @Tags("verification_cooldown") -@Requirements( - RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Default("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Default("1.0")) def default_verification_cooldown_value(self, server, rbac=False): """Check that the default value (0) for the verification cooldown parameter disables caching and forces contacting the LDAP server for each @@ -491,10 +625,18 @@ def default_verification_cooldown_value(self, server, rbac=False): error_exitcode = 4 user = None - with Given("I have an LDAP configuration that uses the default verification_cooldown value (0)"): - servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} + with Given( + "I have an LDAP configuration that uses the default verification_cooldown value (0)" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -504,27 +646,37 @@ def default_verification_cooldown_value(self, server, rbac=False): user = 
add_user_to_ldap(**user) with ldap_servers(servers): - with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"): + with ldap_authenticated_users( + {"username": user["cn"], "server": server}, + config_file=f"ldap_users_{getuid()}.xml", + ): with When("I login and execute a query"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) with And("I change user password in LDAP"): change_user_password_in_ldap(user, "newpassword") - with Then("when I try to login immediately with the old user password it should fail"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], - exitcode=error_exitcode, message=error_message) + with Then( + "when I try to login immediately with the old user password it should fail" + ): + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], + exitcode=error_exitcode, + message=error_message, + ) finally: with Finally("I make sure LDAP user is deleted"): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Tags("verification_cooldown") -@Requirements( - RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0")) def valid_verification_cooldown_value_cn_change(self, server, rbac=False): """Check that we can perform requests without contacting the LDAP server after successful authentication when the verification_cooldown parameter @@ -533,15 +685,19 @@ def valid_verification_cooldown_value_cn_change(self, server, rbac=False): user = None new_user = None - with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": "600" - }} + with Given( + "I have an LDAP configuration that sets verification_cooldown parameter to 2 sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "600", + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -551,25 +707,33 @@ def valid_verification_cooldown_value_cn_change(self, server, rbac=False): user = add_user_to_ldap(**user) with ldap_servers(servers): - with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"): + with ldap_authenticated_users( + {"username": user["cn"], "server": server}, + config_file=f"ldap_users_{getuid()}.xml", + ): with When("I login and execute a query"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) with And("I change user cn in LDAP"): new_user = change_user_cn_in_ldap(user, "testVCD2") - with Then("when I try to login again with the old user cn it should work"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + with Then( + "when I try to login again with the old user cn it should work" + ): + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) finally: with Finally("I make sure LDAP user is deleted"): if new_user is not None: delete_user_from_ldap(new_user, 
exitcode=None) + @TestScenario @Tags("verification_cooldown") -@Requirements( - RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0")) def valid_verification_cooldown_value_password_change(self, server, rbac=False): """Check that we can perform requests without contacting the LDAP server after successful authentication when the verification_cooldown parameter @@ -577,15 +741,19 @@ def valid_verification_cooldown_value_password_change(self, server, rbac=False): """ user = None - with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": "600" - }} + with Given( + "I have an LDAP configuration that sets verification_cooldown parameter to 2 sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "600", + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -595,25 +763,33 @@ def valid_verification_cooldown_value_password_change(self, server, rbac=False): user = add_user_to_ldap(**user) with ldap_servers(servers): - with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"): + with ldap_authenticated_users( + {"username": user["cn"], "server": server}, + config_file=f"ldap_users_{getuid()}.xml", + ): with When("I login and execute a query"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) with And("I change user password in LDAP"): change_user_password_in_ldap(user, "newpassword") - with Then("when I try to login again with the old password it should work"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + with Then( + "when I try to login again with the old password it should work" + ): + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) finally: with Finally("I make sure LDAP user is deleted"): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Tags("verification_cooldown") -@Requirements( - RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0")) def valid_verification_cooldown_value_ldap_unavailable(self, server, rbac=False): """Check that we can perform requests without contacting the LDAP server after successful authentication when the verification_cooldown parameter @@ -621,15 +797,19 @@ def valid_verification_cooldown_value_ldap_unavailable(self, server, rbac=False) """ user = None - with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": "600" - }} + with Given( + "I have an LDAP configuration that sets verification_cooldown parameter to 2 sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + 
"verification_cooldown": "600", + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -639,18 +819,26 @@ def valid_verification_cooldown_value_ldap_unavailable(self, server, rbac=False) user = add_user_to_ldap(**user) with ldap_servers(servers): - with ldap_authenticated_users({"username": user["cn"], "server": server}, - config_file=f"ldap_users_{getuid()}.xml"): + with ldap_authenticated_users( + {"username": user["cn"], "server": server}, + config_file=f"ldap_users_{getuid()}.xml", + ): with When("I login and execute a query"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) try: with And("then I stop the ldap server"): self.context.ldap_node.stop() - with Then("when I try to login again with the server offline it should work"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + with Then( + "when I try to login again with the server offline it should work" + ): + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) finally: with Finally("I start the ldap server back up"): self.context.ldap_node.start() @@ -660,22 +848,26 @@ def valid_verification_cooldown_value_ldap_unavailable(self, server, rbac=False) if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestOutline def repeat_requests(self, server, iterations, vcd_value, rbac=False, timeout=600): - """Run repeated requests from some user to the LDAP server. - """ + """Run repeated requests from some user to the LDAP server.""" user = None - with Given(f"I have an LDAP configuration that sets verification_cooldown parameter to {vcd_value} sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": vcd_value - }} + with Given( + f"I have an LDAP configuration that sets verification_cooldown parameter to {vcd_value} sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": vcd_value, + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -685,10 +877,16 @@ def repeat_requests(self, server, iterations, vcd_value, rbac=False, timeout=600 user = add_user_to_ldap(**user) with ldap_servers(servers): - with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"): + with ldap_authenticated_users( + {"username": user["cn"], "server": server}, + config_file=f"ldap_users_{getuid()}.xml", + ): with When(f"I login and execute some query {iterations} times"): start_time = time.time() - r = self.context.node.command(f"time for i in {{1..{iterations}}}; do clickhouse client -q \"SELECT 1\" --user {user['cn']} --password {user['userpassword']} > /dev/null; done", timeout=timeout) + r = self.context.node.command( + f"time for i in {{1..{iterations}}}; do clickhouse client -q \"SELECT 1\" --user {user['cn']} --password {user['userpassword']} > /dev/null; done", + timeout=timeout, + ) end_time = time.time() return end_time - start_time @@ -698,11 +896,10 @@ def repeat_requests(self, server, iterations, vcd_value, rbac=False, timeout=600 if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Tags("verification_cooldown") -@Requirements( - 
RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Performance("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Performance("1.0")) def verification_cooldown_performance(self, server, rbac=False, iterations=5000): """Check that login performance is better when the verification cooldown parameter is set to a positive value when comparing to the case when @@ -712,49 +909,67 @@ def verification_cooldown_performance(self, server, rbac=False, iterations=5000) vcd_time = 0 no_vcd_time = 0 - with Example(f"Repeated requests with verification cooldown parameter set to 600 seconds, {iterations} iterations"): - vcd_time = repeat_requests(server=server, iterations=iterations, vcd_value="600", rbac=rbac) + with Example( + f"Repeated requests with verification cooldown parameter set to 600 seconds, {iterations} iterations" + ): + vcd_time = repeat_requests( + server=server, iterations=iterations, vcd_value="600", rbac=rbac + ) metric("login_with_vcd_value_600", units="seconds", value=vcd_time) - with Example(f"Repeated requests with verification cooldown parameter set to 0 seconds, {iterations} iterations"): - no_vcd_time = repeat_requests(server=server, iterations=iterations, vcd_value="0", rbac=rbac) + with Example( + f"Repeated requests with verification cooldown parameter set to 0 seconds, {iterations} iterations" + ): + no_vcd_time = repeat_requests( + server=server, iterations=iterations, vcd_value="0", rbac=rbac + ) metric("login_with_vcd_value_0", units="seconds", value=no_vcd_time) with Then("Log the performance improvement as a percentage"): - metric("percentage_improvement", units="%", value=100*(no_vcd_time - vcd_time)/vcd_time) + metric( + "percentage_improvement", + units="%", + value=100 * (no_vcd_time - vcd_time) / vcd_time, + ) + @TestOutline -def check_verification_cooldown_reset_on_core_server_parameter_change(self, server, - parameter_name, parameter_value, rbac=False): +def check_verification_cooldown_reset_on_core_server_parameter_change( + self, server, parameter_name, parameter_value, rbac=False +): """Check that the LDAP login cache is reset for all the LDAP authentication users when verification_cooldown parameter is set after one of the core server parameters is changed in the LDAP server configuration. 
""" - config_d_dir="/etc/clickhouse-server/config.d" - config_file="ldap_servers.xml" + config_d_dir = "/etc/clickhouse-server/config.d" + config_file = "ldap_servers.xml" error_message = "DB::Exception: {user}: Authentication failed: password is incorrect or there is no user with such name" error_exitcode = 4 user = None - config=None - updated_config=None + config = None + updated_config = None - with Given("I have an LDAP configuration that sets verification_cooldown parameter to 600 sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": "600" - }} + with Given( + "I have an LDAP configuration that sets verification_cooldown parameter to 600 sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "600", + } + } self.context.ldap_node = self.context.cluster.node(server) with And("LDAP authenticated user"): users = [ {"cn": f"testVCD_0", "userpassword": "testVCD_0"}, - {"cn": f"testVCD_1", "userpassword": "testVCD_1"} + {"cn": f"testVCD_1", "userpassword": "testVCD_1"}, ] with And("I create LDAP servers configuration file"): @@ -762,86 +977,132 @@ def check_verification_cooldown_reset_on_core_server_parameter_change(self, serv with ldap_users(*users) as users: with ldap_servers(servers, restart=True): - with ldap_authenticated_users(*[{"username": user["cn"], "server": server} for user in users]): + with ldap_authenticated_users( + *[{"username": user["cn"], "server": server} for user in users] + ): with When("I login and execute a query"): for user in users: with By(f"as user {user['cn']}"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) with And("I change user password in LDAP"): for user in users: with By(f"for user {user['cn']}"): change_user_password_in_ldap(user, "newpassword") - with And(f"I change the server {parameter_name} core parameter", description=f"{parameter_value}"): + with And( + f"I change the server {parameter_name} core parameter", + description=f"{parameter_value}", + ): servers["openldap1"][parameter_name] = parameter_value - with And("I create an updated the config file that has a different server host name"): - updated_config = create_ldap_servers_config_content(servers, config_d_dir, config_file) + with And( + "I create an updated the config file that has a different server host name" + ): + updated_config = create_ldap_servers_config_content( + servers, config_d_dir, config_file + ) with modify_config(updated_config, restart=False): - with Then("when I try to log in it should fail as cache should have been reset"): + with Then( + "when I try to log in it should fail as cache should have been reset" + ): for user in users: with By(f"as user {user['cn']}"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], - exitcode=error_exitcode, message=error_message.format(user=user["cn"])) + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], + exitcode=error_exitcode, + message=error_message.format(user=user["cn"]), + ) + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") + 
RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters( + "1.0" + ) ) -def verification_cooldown_reset_on_server_host_parameter_change(self, server, rbac=False): +def verification_cooldown_reset_on_server_host_parameter_change( + self, server, rbac=False +): """Check that the LDAP login cache is reset for all the LDAP authentication users when verification_cooldown parameter is set after server host name is changed in the LDAP server configuration. """ - check_verification_cooldown_reset_on_core_server_parameter_change(server=server, - parameter_name="host", parameter_value="openldap2", rbac=rbac) + check_verification_cooldown_reset_on_core_server_parameter_change( + server=server, parameter_name="host", parameter_value="openldap2", rbac=rbac + ) + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters( + "1.0" + ) ) -def verification_cooldown_reset_on_server_port_parameter_change(self, server, rbac=False): +def verification_cooldown_reset_on_server_port_parameter_change( + self, server, rbac=False +): """Check that the LDAP login cache is reset for all the LDAP authentication users when verification_cooldown parameter is set after server port is changed in the LDAP server configuration. """ - check_verification_cooldown_reset_on_core_server_parameter_change(server=server, - parameter_name="port", parameter_value="9006", rbac=rbac) + check_verification_cooldown_reset_on_core_server_parameter_change( + server=server, parameter_name="port", parameter_value="9006", rbac=rbac + ) + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters( + "1.0" + ) ) -def verification_cooldown_reset_on_server_auth_dn_prefix_parameter_change(self, server, rbac=False): +def verification_cooldown_reset_on_server_auth_dn_prefix_parameter_change( + self, server, rbac=False +): """Check that the LDAP login cache is reset for all the LDAP authentication users when verification_cooldown parameter is set after server auth_dn_prefix is changed in the LDAP server configuration. """ - check_verification_cooldown_reset_on_core_server_parameter_change(server=server, - parameter_name="auth_dn_prefix", parameter_value="cxx=", rbac=rbac) + check_verification_cooldown_reset_on_core_server_parameter_change( + server=server, + parameter_name="auth_dn_prefix", + parameter_value="cxx=", + rbac=rbac, + ) + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters( + "1.0" + ) ) -def verification_cooldown_reset_on_server_auth_dn_suffix_parameter_change(self, server, rbac=False): +def verification_cooldown_reset_on_server_auth_dn_suffix_parameter_change( + self, server, rbac=False +): """Check that the LDAP login cache is reset for all the LDAP authentication users when verification_cooldown parameter is set after server auth_dn_suffix is changed in the LDAP server configuration. 
""" - check_verification_cooldown_reset_on_core_server_parameter_change(server=server, + check_verification_cooldown_reset_on_core_server_parameter_change( + server=server, parameter_name="auth_dn_suffix", - parameter_value=",ou=company,dc=users,dc=com", rbac=rbac) + parameter_value=",ou=company,dc=users,dc=com", + rbac=rbac, + ) @TestScenario @@ -859,15 +1120,19 @@ def scenario(self, server, rbac=False): error_exitcode = 4 error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" - with Given("I have an LDAP configuration that sets verification_cooldown parameter to 600 sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": "600" - }} + with Given( + "I have an LDAP configuration that sets verification_cooldown parameter to 600 sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "600", + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -877,48 +1142,68 @@ def scenario(self, server, rbac=False): user = add_user_to_ldap(**user) with ldap_servers(servers): - with ldap_authenticated_users({"username": user["cn"], "server": server}, - config_file=f"ldap_users_{getuid()}.xml"): + with ldap_authenticated_users( + {"username": user["cn"], "server": server}, + config_file=f"ldap_users_{getuid()}.xml", + ): with When("I login and execute a query"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) with And("I change user password in LDAP"): change_user_password_in_ldap(user, "newpassword") - with Then("When I try to log in with the cached password it should work"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + with Then( + "When I try to log in with the cached password it should work" + ): + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) - with And("When I try to log in with an incorrect password it should fail"): - login_and_execute_query(username=user["cn"], password="incorrect", exitcode=error_exitcode, - message=error_message) + with And( + "When I try to log in with an incorrect password it should fail" + ): + login_and_execute_query( + username=user["cn"], + password="incorrect", + exitcode=error_exitcode, + message=error_message, + ) - with And("When I try to log in with the cached password it should fail"): - login_and_execute_query(username=user["cn"], password="incorrect", exitcode=error_exitcode, - message=error_message) + with And( + "When I try to log in with the cached password it should fail" + ): + login_and_execute_query( + username=user["cn"], + password="incorrect", + exitcode=error_exitcode, + message=error_message, + ) finally: with Finally("I make sure LDAP user is deleted"): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestFeature def verification_cooldown(self, rbac, servers=None, node="clickhouse1"): - """Check verification cooldown parameter functionality. 
- """ - for scenario in loads(current_module(), Scenario, filter=has.tag("verification_cooldown")): + """Check verification cooldown parameter functionality.""" + for scenario in loads( + current_module(), Scenario, filter=has.tag("verification_cooldown") + ): scenario(server="openldap1", rbac=rbac) @TestOutline(Feature) @Name("user authentications") -@Requirements( - RQ_SRS_007_LDAP_Authentication_Mechanism_NamePassword("1.0") +@Requirements(RQ_SRS_007_LDAP_Authentication_Mechanism_NamePassword("1.0")) +@Examples( + "rbac", + [(False,), (True, Requirements(RQ_SRS_007_LDAP_Configuration_User_RBAC("1.0")))], ) -@Examples("rbac", [ - (False,), - (True, Requirements(RQ_SRS_007_LDAP_Configuration_User_RBAC("1.0"))) -]) def feature(self, rbac, servers=None, node="clickhouse1"): """Check that users can be authenticated using an LDAP server when users are configured either using an XML configuration file or RBAC. @@ -929,11 +1214,9 @@ def feature(self, rbac, servers=None, node="clickhouse1"): servers = globals()["servers"] with ldap_servers(servers): - for scenario in loads(current_module(), Scenario, filter=~has.tag("verification_cooldown")): + for scenario in loads( + current_module(), Scenario, filter=~has.tag("verification_cooldown") + ): scenario(server="openldap1", rbac=rbac) Feature(test=verification_cooldown)(rbac=rbac, servers=servers, node=node) - - - - diff --git a/tests/testflows/ldap/authentication/tests/common.py b/tests/testflows/ldap/authentication/tests/common.py index e01b9321e39..17b4fcd3e62 100644 --- a/tests/testflows/ldap/authentication/tests/common.py +++ b/tests/testflows/ldap/authentication/tests/common.py @@ -14,16 +14,24 @@ import testflows.settings as settings from testflows.core import * from testflows.asserts import error -from helpers.common import xml_indent, xml_with_utf8, xml_append, add_config, getuid, Config +from helpers.common import ( + xml_indent, + xml_with_utf8, + xml_append, + add_config, + getuid, + Config, +) + +ASCII_CHARS = string.ascii_lowercase + string.ascii_uppercase + string.digits -ASCII_CHARS = string.ascii_lowercase + string.ascii_uppercase + string.digits def randomword(length, chars=ASCII_CHARS): - return ''.join(random.choice(chars) for i in range(length)) + return "".join(random.choice(chars) for i in range(length)) + def restart(node=None, safe=False, timeout=300): - """Restart ClickHouse server and wait for config to be reloaded. 
- """ + """Restart ClickHouse server and wait for config to be reloaded.""" with When("I restart ClickHouse server node"): if node is None: node = current().context.node @@ -35,26 +43,39 @@ def restart(node=None, safe=False, timeout=300): bash.close() with And("getting current log size"): - logsize = \ - node.command("stat --format=%s /var/log/clickhouse-server/clickhouse-server.log").output.split(" ")[ - 0].strip() + logsize = ( + node.command( + "stat --format=%s /var/log/clickhouse-server/clickhouse-server.log" + ) + .output.split(" ")[0] + .strip() + ) with And("restarting ClickHouse server"): node.restart(safe=safe) - with Then("tailing the log file from using previous log size as the offset"): + with Then( + "tailing the log file from using previous log size as the offset" + ): bash.prompt = bash.__class__.prompt bash.open() - bash.send(f"tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log") + bash.send( + f"tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log" + ) with And("waiting for config reload message in the log file"): bash.expect( f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration", - timeout=timeout) + timeout=timeout, + ) -def create_ldap_servers_config_content(servers, config_d_dir="/etc/clickhouse-server/config.d", config_file="ldap_servers.xml"): - """Create LDAP servers configuration content. - """ + +def create_ldap_servers_config_content( + servers, + config_d_dir="/etc/clickhouse-server/config.d", + config_file="ldap_servers.xml", +): + """Create LDAP servers configuration content.""" uid = getuid() path = os.path.join(config_d_dir, config_file) name = config_file @@ -70,28 +91,39 @@ def create_ldap_servers_config_content(servers, config_d_dir="/etc/clickhouse-se xml_servers.append(xml_server) xml_indent(root) - content = xml_with_utf8 + str(xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8") + content = xml_with_utf8 + str( + xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8" + ) return Config(content, path, name, uid, "config.xml") -@contextmanager -def modify_config(config, restart=False, node=None): - """Apply updated configuration file. - """ - return add_config(config, restart=restart, modify=True, node=node) @contextmanager -def ldap_servers(servers, config_d_dir="/etc/clickhouse-server/config.d", config_file="ldap_servers.xml", - timeout=300, restart=False, config=None, node=None): - """Add LDAP servers configuration. - """ +def modify_config(config, restart=False, node=None): + """Apply updated configuration file.""" + return add_config(config, restart=restart, modify=True, node=node) + + +@contextmanager +def ldap_servers( + servers, + config_d_dir="/etc/clickhouse-server/config.d", + config_file="ldap_servers.xml", + timeout=300, + restart=False, + config=None, + node=None, +): + """Add LDAP servers configuration.""" if config is None: config = create_ldap_servers_config_content(servers, config_d_dir, config_file) return add_config(config, restart=restart, node=node) -def create_ldap_users_config_content(*users, config_d_dir="/etc/clickhouse-server/users.d", config_file="ldap_users.xml"): - """Create LDAP users configuration file content. 
- """ + +def create_ldap_users_config_content( + *users, config_d_dir="/etc/clickhouse-server/users.d", config_file="ldap_users.xml" +): + """Create LDAP users configuration file content.""" uid = getuid() path = os.path.join(config_d_dir, config_file) name = config_file @@ -101,17 +133,20 @@ def create_ldap_users_config_content(*users, config_d_dir="/etc/clickhouse-serve xml_users.append(xmltree.Comment(text=f"LDAP users {uid}")) for user in users: - xml_user = xmltree.Element(user['username']) + xml_user = xmltree.Element(user["username"]) xml_user_server = xmltree.Element("ldap") xml_append(xml_user_server, "server", user["server"]) xml_user.append(xml_user_server) xml_users.append(xml_user) xml_indent(root) - content = xml_with_utf8 + str(xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8") + content = xml_with_utf8 + str( + xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8" + ) return Config(content, path, name, uid, "users.xml") + def add_users_identified_with_ldap(*users, node=None): """Add one or more users that are identified via an ldap server using RBAC. @@ -121,7 +156,9 @@ def add_users_identified_with_ldap(*users, node=None): try: with Given("I create users"): for user in users: - node.query(f"CREATE USER '{user['username']}' IDENTIFIED WITH LDAP SERVER '{user['server']}'") + node.query( + f"CREATE USER '{user['username']}' IDENTIFIED WITH LDAP SERVER '{user['server']}'" + ) yield finally: with Finally("I remove users"): @@ -129,11 +166,19 @@ def add_users_identified_with_ldap(*users, node=None): with By(f"dropping user {user['username']}", flags=TE): node.query(f"DROP USER IF EXISTS '{user['username']}'") + @contextmanager -def ldap_authenticated_users(*users, config_d_dir="/etc/clickhouse-server/users.d", - config_file=None, timeout=300, restart=True, config=None, rbac=False, node=None): - """Add LDAP authenticated users. - """ +def ldap_authenticated_users( + *users, + config_d_dir="/etc/clickhouse-server/users.d", + config_file=None, + timeout=300, + restart=True, + config=None, + rbac=False, + node=None, +): + """Add LDAP authenticated users.""" if node is None: node = current().context.node @@ -143,19 +188,24 @@ def ldap_authenticated_users(*users, config_d_dir="/etc/clickhouse-server/users. if config_file is None: config_file = f"ldap_users_{getuid()}.xml" if config is None: - config = create_ldap_users_config_content(*users, config_d_dir=config_d_dir, config_file=config_file) + config = create_ldap_users_config_content( + *users, config_d_dir=config_d_dir, config_file=config_file + ) return add_config(config, timeout=timeout, restart=restart, node=node) + def invalid_server_config(servers, message=None, tail=30, timeout=300): - """Check that ClickHouse errors when trying to load invalid LDAP servers configuration file. 
- """ + """Check that ClickHouse errors when trying to load invalid LDAP servers configuration file.""" node = current().context.node if message is None: message = "Exception: Failed to merge config with '/etc/clickhouse-server/config.d/ldap_servers.xml'" config = create_ldap_servers_config_content(servers) try: - node.command("echo -e \"%s\" > /var/log/clickhouse-server/clickhouse-server.err.log" % ("-\\n" * tail)) + node.command( + 'echo -e "%s" > /var/log/clickhouse-server/clickhouse-server.err.log' + % ("-\\n" * tail) + ) with When("I add the config", description=config.path): command = f"cat < {config.path}\n{config.content}\nHEREDOC" @@ -163,7 +213,7 @@ def invalid_server_config(servers, message=None, tail=30, timeout=300): with Then("server shall fail to merge the new config"): started = time.time() - command = f"tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep \"{message}\"" + command = f'tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep "{message}"' while time.time() - started < timeout: exitcode = node.command(command, steps=False).exitcode if exitcode == 0: @@ -175,23 +225,26 @@ def invalid_server_config(servers, message=None, tail=30, timeout=300): with By("removing the config file", description=config.path): node.command(f"rm -rf {config.path}", exitcode=0) + def invalid_user_config(servers, config, message=None, tail=30, timeout=300): - """Check that ClickHouse errors when trying to load invalid LDAP users configuration file. - """ + """Check that ClickHouse errors when trying to load invalid LDAP users configuration file.""" node = current().context.node if message is None: message = "Exception: Failed to merge config with '/etc/clickhouse-server/users.d/ldap_users.xml'" with ldap_servers(servers): try: - node.command("echo -e \"%s\" > /var/log/clickhouse-server/clickhouse-server.err.log" % ("\\n" * tail)) + node.command( + 'echo -e "%s" > /var/log/clickhouse-server/clickhouse-server.err.log' + % ("\\n" * tail) + ) with When("I add the config", description=config.path): command = f"cat < {config.path}\n{config.content}\nHEREDOC" node.command(command, steps=False, exitcode=0) with Then("server shall fail to merge the new config"): started = time.time() - command = f"tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep \"{message}\"" + command = f'tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep "{message}"' while time.time() - started < timeout: exitcode = node.command(command, steps=False).exitcode if exitcode == 0: @@ -203,7 +256,17 @@ def invalid_user_config(servers, config, message=None, tail=30, timeout=300): with By("removing the config file", description=config.path): node.command(f"rm -rf {config.path}", exitcode=0) -def add_user_to_ldap(cn, userpassword, givenname=None, homedirectory=None, sn=None, uid=None, uidnumber=None, node=None): + +def add_user_to_ldap( + cn, + userpassword, + givenname=None, + homedirectory=None, + sn=None, + uid=None, + uidnumber=None, + node=None, +): """Add user entry to LDAP.""" if node is None: node = current().context.ldap_node @@ -229,7 +292,7 @@ def add_user_to_ldap(cn, userpassword, givenname=None, homedirectory=None, sn=No "uid": uid, "uidnumber": uidnumber, "userpassword": userpassword, - "_server": node.name + "_server": node.name, } lines = [] @@ -246,73 +309,102 @@ def add_user_to_ldap(cn, userpassword, givenname=None, homedirectory=None, sn=No ldif = "\n".join(lines) r = node.command( - f"echo -e \"{ldif}\" | ldapadd -x -H 
ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin") + f'echo -e "{ldif}" | ldapadd -x -H ldap://localhost -D "cn=admin,dc=company,dc=com" -w admin' + ) assert r.exitcode == 0, error() return user + def delete_user_from_ldap(user, node=None, exitcode=0): """Delete user entry from LDAP.""" if node is None: node = current().context.ldap_node r = node.command( - f"ldapdelete -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin \"{user['dn']}\"") + f"ldapdelete -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin \"{user['dn']}\"" + ) if exitcode is not None: assert r.exitcode == exitcode, error() + def change_user_password_in_ldap(user, new_password, node=None, exitcode=0): """Change user password in LDAP.""" if node is None: node = current().context.ldap_node - ldif = (f"dn: {user['dn']}\n" + ldif = ( + f"dn: {user['dn']}\n" "changetype: modify\n" "replace: userpassword\n" - f"userpassword: {new_password}") + f"userpassword: {new_password}" + ) r = node.command( - f"echo -e \"{ldif}\" | ldapmodify -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin") + f'echo -e "{ldif}" | ldapmodify -x -H ldap://localhost -D "cn=admin,dc=company,dc=com" -w admin' + ) if exitcode is not None: assert r.exitcode == exitcode, error() + def change_user_cn_in_ldap(user, new_cn, node=None, exitcode=0): """Change user cn in LDAP.""" if node is None: node = current().context.ldap_node new_user = dict(user) - new_user['dn'] = f"cn={new_cn},ou=users,dc=company,dc=com" - new_user['cn'] = new_cn + new_user["dn"] = f"cn={new_cn},ou=users,dc=company,dc=com" + new_user["cn"] = new_cn ldif = ( f"dn: {user['dn']}\n" "changetype: modrdn\n" f"newrdn: cn = {new_user['cn']}\n" f"deleteoldrdn: 1\n" - ) + ) r = node.command( - f"echo -e \"{ldif}\" | ldapmodify -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin") + f'echo -e "{ldif}" | ldapmodify -x -H ldap://localhost -D "cn=admin,dc=company,dc=com" -w admin' + ) if exitcode is not None: assert r.exitcode == exitcode, error() return new_user + @contextmanager -def ldap_user(cn, userpassword, givenname=None, homedirectory=None, sn=None, uid=None, uidnumber=None, node=None): +def ldap_user( + cn, + userpassword, + givenname=None, + homedirectory=None, + sn=None, + uid=None, + uidnumber=None, + node=None, +): """Add new user to the LDAP server.""" try: user = None with Given(f"I add user {cn} to LDAP"): - user = add_user_to_ldap(cn, userpassword, givenname, homedirectory, sn, uid, uidnumber, node=node) + user = add_user_to_ldap( + cn, + userpassword, + givenname, + homedirectory, + sn, + uid, + uidnumber, + node=node, + ) yield user finally: with Finally(f"I delete user {cn} from LDAP"): if user is not None: delete_user_from_ldap(user, node=node) + @contextmanager def ldap_users(*users, node=None): """Add multiple new users to the LDAP server.""" @@ -328,6 +420,7 @@ def ldap_users(*users, node=None): for _user in _users: delete_user_from_ldap(_user, node=node) + def login(servers, *users, config=None): """Configure LDAP server and LDAP authenticated users and try to login and execute a query""" @@ -336,7 +429,12 @@ def login(servers, *users, config=None): for user in users: if user.get("login", False): with When(f"I login as {user['username']} and execute query"): - current().context.node.query("SELECT 1", - settings=[("user", user["username"]), ("password", user["password"])], + current().context.node.query( + "SELECT 1", + settings=[ + ("user", user["username"]), + ("password", user["password"]), + ],
exitcode=user.get("exitcode", None), - message=user.get("message", None)) + message=user.get("message", None), + ) diff --git a/tests/testflows/ldap/authentication/tests/connections.py b/tests/testflows/ldap/authentication/tests/connections.py index dfb920181e1..4dbbfb2070a 100644 --- a/tests/testflows/ldap/authentication/tests/connections.py +++ b/tests/testflows/ldap/authentication/tests/connections.py @@ -4,22 +4,22 @@ from testflows.asserts import error from ldap.authentication.tests.common import login from ldap.authentication.requirements import * + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Protocol_PlainText("1.0"), RQ_SRS_007_LDAP_Configuration_Server_EnableTLS("1.0"), RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_No("1.0"), - RQ_SRS_007_LDAP_Configuration_Server_Port_Default("1.0") + RQ_SRS_007_LDAP_Configuration_Server_Port_Default("1.0"), ) def plain_text(self): - """Check that we can perform LDAP user authentication using `plain text` connection protocol. - """ + """Check that we can perform LDAP user authentication using `plain text` connection protocol.""" servers = { "openldap1": { "host": "openldap1", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -27,10 +27,11 @@ def plain_text(self): ] login(servers, *users) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Protocol_PlainText("1.0"), - RQ_SRS_007_LDAP_Configuration_Server_Port("1.0") + RQ_SRS_007_LDAP_Configuration_Server_Port("1.0"), ) def plain_text_with_custom_port(self): """Check that we can perform LDAP user authentication using `plain text` connection protocol @@ -42,7 +43,7 @@ def plain_text_with_custom_port(self): "port": "3089", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -50,10 +51,11 @@ def plain_text_with_custom_port(self): ] login(servers, *users) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Protocol_TLS("1.0"), - RQ_SRS_007_LDAP_Configuration_Server_Port("1.0") + RQ_SRS_007_LDAP_Configuration_Server_Port("1.0"), ) def tls_with_custom_port(self): """Check that we can perform LDAP user authentication using `TLS` connection protocol @@ -65,7 +67,7 @@ def tls_with_custom_port(self): "port": "6036", "tls_require_cert": "never", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -73,10 +75,11 @@ def tls_with_custom_port(self): ] login(servers, *users) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Authentication_Protocol_StartTLS("1.0"), - RQ_SRS_007_LDAP_Configuration_Server_Port("1.0") + RQ_SRS_007_LDAP_Configuration_Server_Port("1.0"), ) def starttls_with_custom_port(self): """Check that we can perform LDAP user authentication using `StartTLS` connection protocol @@ -89,7 +92,7 @@ def starttls_with_custom_port(self): "enable_tls": "starttls", "tls_require_cert": "never", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -97,16 +100,16 @@ def starttls_with_custom_port(self): ] login(servers, *users) + def tls_connection(enable_tls, tls_require_cert): - """Try to login using LDAP user authentication over a TLS connection. 
- """ + """Try to login using LDAP user authentication over a TLS connection.""" servers = { "openldap2": { "host": "openldap2", "enable_tls": enable_tls, "tls_require_cert": tls_require_cert, "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -116,51 +119,57 @@ def tls_connection(enable_tls, tls_require_cert): requirements = [] if tls_require_cert == "never": - requirements = [RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Never("1.0")] + requirements = [ + RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Never("1.0") + ] elif tls_require_cert == "allow": - requirements = [RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Allow("1.0")] + requirements = [ + RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Allow("1.0") + ] elif tls_require_cert == "try": - requirements = [RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Try("1.0")] + requirements = [ + RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Try("1.0") + ] elif tls_require_cert == "demand": - requirements = [RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Demand("1.0")] + requirements = [ + RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Demand("1.0") + ] - with Example(name=f"tls_require_cert='{tls_require_cert}'", requirements=requirements): + with Example( + name=f"tls_require_cert='{tls_require_cert}'", requirements=requirements + ): login(servers, *users) + @TestScenario -@Examples("enable_tls tls_require_cert", [ - ("yes", "never"), - ("yes", "allow"), - ("yes", "try"), - ("yes", "demand") -]) +@Examples( + "enable_tls tls_require_cert", + [("yes", "never"), ("yes", "allow"), ("yes", "try"), ("yes", "demand")], +) @Requirements( RQ_SRS_007_LDAP_Authentication_Protocol_TLS("1.0"), RQ_SRS_007_LDAP_Configuration_Server_EnableTLS("1.0"), RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Yes("1.0"), RQ_SRS_007_LDAP_Configuration_Server_Port_Default("1.0"), RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert("1.0"), - RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Default("1.0") + RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Default("1.0"), ) def tls(self): - """Check that we can perform LDAP user authentication using `TLS` connection protocol. - """ + """Check that we can perform LDAP user authentication using `TLS` connection protocol.""" for example in self.examples: tls_connection(*example) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Default("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_Default("1.0")) def tls_enable_tls_default_yes(self): - """Check that the default value for the `enable_tls` is set to `yes`. - """ + """Check that the default value for the `enable_tls` is set to `yes`.""" servers = { "openldap2": { "host": "openldap2", "tls_require_cert": "never", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -168,20 +177,20 @@ def tls_enable_tls_default_yes(self): ] login(servers, *users) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Configuration_Server_TLSRequireCert_Options_Default("1.0") ) def tls_require_cert_default_demand(self): - """Check that the default value for the `tls_require_cert` is set to `demand`. 
- """ + """Check that the default value for the `tls_require_cert` is set to `demand`.""" servers = { "openldap2": { "host": "openldap2", "enable_tls": "yes", "port": "636", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -189,32 +198,33 @@ def tls_require_cert_default_demand(self): ] login(servers, *users) + @TestScenario -@Examples("enable_tls tls_require_cert", [ - ("starttls", "never"), - ("starttls", "allow"), - ("starttls", "try"), - ("starttls", "demand") -]) +@Examples( + "enable_tls tls_require_cert", + [ + ("starttls", "never"), + ("starttls", "allow"), + ("starttls", "try"), + ("starttls", "demand"), + ], +) @Requirements( RQ_SRS_007_LDAP_Authentication_Protocol_StartTLS("1.0"), RQ_SRS_007_LDAP_Configuration_Server_EnableTLS("1.0"), RQ_SRS_007_LDAP_Configuration_Server_EnableTLS_Options_StartTLS("1.0"), - RQ_SRS_007_LDAP_Configuration_Server_Port_Default("1.0") + RQ_SRS_007_LDAP_Configuration_Server_Port_Default("1.0"), ) def starttls(self): - """Check that we can perform LDAP user authentication using legacy `StartTLS` connection protocol. - """ + """Check that we can perform LDAP user authentication using legacy `StartTLS` connection protocol.""" for example in self.examples: tls_connection(*example) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Configuration_Server_TLSCipherSuite("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Configuration_Server_TLSCipherSuite("1.0")) def tls_cipher_suite(self): - """Check that `tls_cipher_suite` parameter can be used specify allowed cipher suites. - """ + """Check that `tls_cipher_suite` parameter can be used specify allowed cipher suites.""" servers = { "openldap4": { "host": "openldap4", @@ -223,7 +233,7 @@ def tls_cipher_suite(self): "tls_cipher_suite": "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC", "tls_minimum_protocol_version": "tls1.2", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -231,18 +241,22 @@ def tls_cipher_suite(self): ] login(servers, *users) + @TestOutline(Scenario) @Requirements( RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion("1.0"), - RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Values("1.0") + RQ_SRS_007_LDAP_Configuration_Server_TLSMinimumProtocolVersion_Values("1.0"), +) +@Examples( + "version exitcode message", + [ + ("ssl2", None, None), + ("ssl3", None, None), + ("tls1.0", None, None), + ("tls1.1", None, None), + ("tls1.2", None, None), + ], ) -@Examples("version exitcode message", [ - ("ssl2", None, None), - ("ssl3", None, None), - ("tls1.0", None, None), - ("tls1.1", None, None), - ("tls1.2", None, None) -]) def tls_minimum_protocol_version(self, version, exitcode, message): """Check that `tls_minimum_protocol_version` parameter can be used specify to specify the minimum protocol version of SSL/TLS. 
@@ -255,14 +269,20 @@ def tls_minimum_protocol_version(self, version, exitcode, message): "tls_require_cert": "never", "tls_minimum_protocol_version": version, "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } - users = [{ - "server": "openldap4", "username": "user4", "password": "user4", - "login": True, "exitcode": int(exitcode) if exitcode is not None else None, "message": message - }] + users = [ + { + "server": "openldap4", + "username": "user4", + "password": "user4", + "login": True, + "exitcode": int(exitcode) if exitcode is not None else None, + "message": message, + } + ] # Note: this code was an attempt to produce a negative case but did not work # ldap_node = self.context.cluster.node("openldap4") @@ -280,11 +300,11 @@ def tls_minimum_protocol_version(self, version, exitcode, message): login(servers, *users) + @TestFeature @Name("connection protocols") def feature(self, node="clickhouse1"): - """Check different LDAP connection protocols. - """ + """Check different LDAP connection protocols.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario): diff --git a/tests/testflows/ldap/authentication/tests/multiple_servers.py b/tests/testflows/ldap/authentication/tests/multiple_servers.py index c4317187b74..4295c428e1c 100644 --- a/tests/testflows/ldap/authentication/tests/multiple_servers.py +++ b/tests/testflows/ldap/authentication/tests/multiple_servers.py @@ -2,13 +2,14 @@ from testflows.core import * from testflows.asserts import error from ldap.authentication.tests.common import login -from ldap.authentication.requirements import RQ_SRS_007_LDAP_Authentication_MultipleServers +from ldap.authentication.requirements import ( + RQ_SRS_007_LDAP_Authentication_MultipleServers, +) + @TestScenario @Name("multiple servers") -@Requirements( - RQ_SRS_007_LDAP_Authentication_MultipleServers("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Authentication_MultipleServers("1.0")) def scenario(self, node="clickhouse1"): """Check that multiple LDAP servers can be used to authenticate users. 
@@ -21,7 +22,7 @@ def scenario(self, node="clickhouse1"): "port": "389", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, "openldap2": { "host": "openldap2", @@ -33,9 +34,21 @@ def scenario(self, node="clickhouse1"): }, } users = [ - {"server": "openldap1", "username": "user1", "password": "user1", "login": True}, - {"server": "openldap2", "username": "user2", "password": "user2", "login": True} + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + }, + { + "server": "openldap2", + "username": "user2", + "password": "user2", + "login": True, + }, ] - with When("I add multiple LDAP servers and users that use different servers and try to login"): + with When( + "I add multiple LDAP servers and users that use different servers and try to login" + ): login(servers, *users) diff --git a/tests/testflows/ldap/authentication/tests/sanity.py b/tests/testflows/ldap/authentication/tests/sanity.py index 542fa2a48b1..cb23c33f3b5 100644 --- a/tests/testflows/ldap/authentication/tests/sanity.py +++ b/tests/testflows/ldap/authentication/tests/sanity.py @@ -3,6 +3,7 @@ from testflows.asserts import error from ldap.authentication.tests.common import add_user_to_ldap, delete_user_from_ldap + @TestScenario @Name("sanity") def scenario(self, server="openldap1"): @@ -13,7 +14,8 @@ def scenario(self, server="openldap1"): with When("I search LDAP database"): r = self.context.ldap_node.command( - "ldapsearch -x -H ldap://localhost -b \"dc=company,dc=com\" -D \"cn=admin,dc=company,dc=com\" -w admin") + 'ldapsearch -x -H ldap://localhost -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin' + ) assert r.exitcode == 0, error() with Then("I should find an entry for user1"): @@ -24,7 +26,8 @@ def scenario(self, server="openldap1"): with And("I search LDAP database again"): r = self.context.ldap_node.command( - "ldapsearch -x -H ldap://localhost -b \"dc=company,dc=com\" -D \"cn=admin,dc=company,dc=com\" -w admin") + 'ldapsearch -x -H ldap://localhost -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin' + ) assert r.exitcode == 0, error() with Then("I should find an entry for the new user"): @@ -35,7 +38,8 @@ def scenario(self, server="openldap1"): with And("I search LDAP database again"): r = self.context.ldap_node.command( - "ldapsearch -x -H ldap://localhost -b \"dc=company,dc=com\" -D \"cn=admin,dc=company,dc=com\" -w admin") + 'ldapsearch -x -H ldap://localhost -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin' + ) assert r.exitcode == 0, error() with Then("I should not find an entry for the deleted user"): diff --git a/tests/testflows/ldap/authentication/tests/server_config.py b/tests/testflows/ldap/authentication/tests/server_config.py index 5e0e145d035..af15a1495df 100644 --- a/tests/testflows/ldap/authentication/tests/server_config.py +++ b/tests/testflows/ldap/authentication/tests/server_config.py @@ -3,232 +3,331 @@ from testflows.core import * from ldap.authentication.tests.common import * from ldap.authentication.requirements import * -@TestScenario -@Requirements( - RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0"), - RQ_SRS_007_LDAP_Configuration_Server_Name("1.0") -) -def empty_server_name(self, timeout=300): - """Check that empty string as a server name is not allowed. 
- """ - servers = {"": {"host": "foo", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} - invalid_server_config(servers, timeout=timeout) @TestScenario @Requirements( RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0"), - RQ_SRS_007_LDAP_UnreachableServer("1.0") + RQ_SRS_007_LDAP_Configuration_Server_Name("1.0"), +) +def empty_server_name(self, timeout=300): + """Check that empty string as a server name is not allowed.""" + servers = { + "": { + "host": "foo", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } + invalid_server_config(servers, timeout=timeout) + + +@TestScenario +@Requirements( + RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0"), + RQ_SRS_007_LDAP_UnreachableServer("1.0"), ) def invalid_host(self): """Check that server returns an error when LDAP server host name is invalid. """ servers = {"foo": {"host": "foo", "port": "389", "enable_tls": "no"}} - users = [{ - "server": "foo", "username": "user1", "password": "user1", "login": True, - "exitcode": 4, - "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" - }] + users = [ + { + "server": "foo", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, *users) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0"), - RQ_SRS_007_LDAP_Configuration_Server_Host("1.0") + RQ_SRS_007_LDAP_Configuration_Server_Host("1.0"), ) def empty_host(self): """Check that server returns an error when LDAP server host value is empty. """ servers = {"foo": {"host": "", "port": "389", "enable_tls": "no"}} - users = [{ - "server": "foo", "username": "user1", "password": "user1", "login": True, - "exitcode": 4, - "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" - }] + users = [ + { + "server": "foo", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, *users) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0"), - RQ_SRS_007_LDAP_Configuration_Server_Host("1.0") + RQ_SRS_007_LDAP_Configuration_Server_Host("1.0"), ) def missing_host(self): """Check that server returns an error when LDAP server host is missing. """ servers = {"foo": {"port": "389", "enable_tls": "no"}} - users = [{ - "server": "foo", "username": "user1", "password": "user1", "login": True, - "exitcode": 4, - "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" - }] + users = [ + { + "server": "foo", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, *users) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0")) def invalid_port(self): """Check that server returns an error when LDAP server port is not valid. 
""" servers = {"openldap1": {"host": "openldap1", "port": "3890", "enable_tls": "no"}} - users = [{ - "server": "openldap1", "username": "user1", "password": "user1", "login": True, - "exitcode": 4, - "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" - }] + users = [ + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, *users) @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0")) def invalid_auth_dn_prefix(self): """Check that server returns an error when LDAP server port is not valid. """ - servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "foo=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} - users = [{ - "server": "openldap1", "username": "user1", "password": "user1", "login": True, - "exitcode": 4, - "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" - }] + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "foo=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } + users = [ + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, *users) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0")) def invalid_auth_dn_suffix(self): """Check that server returns an error when LDAP server port is not valid. """ - servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",foo=users,dc=company,dc=com" - }} - users = [{ - "server": "openldap1", "username": "user1", "password": "user1", "login": True, - "exitcode": 4, - "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" - }] + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",foo=users,dc=company,dc=com", + } + } + users = [ + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, *users) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0")) def invalid_enable_tls_value(self): """Check that server returns an error when enable_tls option has invalid value. 
""" - servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "foo", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} - users = [{ - "server": "openldap1", "username": "user1", "password": "user1", "login": True, - "exitcode": 4, - "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" - }] + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "foo", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } + users = [ + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, *users) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0")) def invalid_tls_require_cert_value(self): """Check that server returns an error when tls_require_cert option has invalid value. """ - servers = {"openldap2": { - "host": "openldap2", "port": "636", "enable_tls": "yes", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "tls_require_cert": "foo", - "ca_cert_dir": "/container/service/slapd/assets/certs/", - "ca_cert_file": "/container/service/slapd/assets/certs/ca.crt" - }} - users = [{ - "server": "openldap2", "username": "user2", "password": "user2", "login": True, - "exitcode": 4, - "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name" - }] + servers = { + "openldap2": { + "host": "openldap2", + "port": "636", + "enable_tls": "yes", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "foo", + "ca_cert_dir": "/container/service/slapd/assets/certs/", + "ca_cert_file": "/container/service/slapd/assets/certs/ca.crt", + } + } + users = [ + { + "server": "openldap2", + "username": "user2", + "password": "user2", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, *users) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0")) def empty_ca_cert_dir(self): - """Check that server returns an error when ca_cert_dir is empty. 
- """ - servers = {"openldap2": {"host": "openldap2", "port": "636", "enable_tls": "yes", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "tls_require_cert": "demand", - "ca_cert_dir": "", - "ca_cert_file": "/container/service/slapd/assets/certs/ca.crt" - }} - users = [{ - "server": "openldap2", "username": "user2", "password": "user2", "login": True, - "exitcode": 4, - "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name" - }] + """Check that server returns an error when ca_cert_dir is empty.""" + servers = { + "openldap2": { + "host": "openldap2", + "port": "636", + "enable_tls": "yes", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "demand", + "ca_cert_dir": "", + "ca_cert_file": "/container/service/slapd/assets/certs/ca.crt", + } + } + users = [ + { + "server": "openldap2", + "username": "user2", + "password": "user2", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, *users) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Server_Configuration_Invalid("1.0")) def empty_ca_cert_file(self): - """Check that server returns an error when ca_cert_file is empty. - """ - servers = {"openldap2": {"host": "openldap2", "port": "636", "enable_tls": "yes", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "tls_require_cert": "demand", - "ca_cert_dir": "/container/service/slapd/assets/certs/", - "ca_cert_file": "" - }} - users = [{ - "server": "openldap2", "username": "user2", "password": "user2", "login": True, - "exitcode": 4, - "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name" - }] + """Check that server returns an error when ca_cert_file is empty.""" + servers = { + "openldap2": { + "host": "openldap2", + "port": "636", + "enable_tls": "yes", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "demand", + "ca_cert_dir": "/container/service/slapd/assets/certs/", + "ca_cert_file": "", + } + } + users = [ + { + "server": "openldap2", + "username": "user2", + "password": "user2", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, *users) + @TestScenario @Requirements( RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Value("1.0"), RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Prefix("1.0"), - RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Suffix("1.0") + RQ_SRS_007_LDAP_Configuration_Server_AuthDN_Suffix("1.0"), ) def auth_dn_value(self): """Check that server configuration can properly define the `dn` value of the user.""" servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} - user = {"server": "openldap1", "username": "user1", "password": "user1", "login": True} + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } + user = { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + } login(servers, user) + @TestOutline(Scenario) -@Examples("invalid_value", [ - ("-1", 
Name("negative int")), - ("foo", Name("string")), - ("", Name("empty string")), - ("36893488147419103232", Name("overflow with extremely large int value")), - ("-36893488147419103232", Name("overflow with extremely large negative int value")), - ("@#", Name("special characters")) -]) -@Requirements( - RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Invalid("1.0") +@Examples( + "invalid_value", + [ + ("-1", Name("negative int")), + ("foo", Name("string")), + ("", Name("empty string")), + ("36893488147419103232", Name("overflow with extremely large int value")), + ( + "-36893488147419103232", + Name("overflow with extremely large negative int value"), + ), + ("@#", Name("special characters")), + ], ) +@Requirements(RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Invalid("1.0")) def invalid_verification_cooldown_value(self, invalid_value, timeout=300): """Check that server returns an error when LDAP server verification cooldown parameter is invalid. @@ -236,19 +335,26 @@ def invalid_verification_cooldown_value(self, invalid_value, timeout=300): error_message = f" Syntax error: Not a valid unsigned integer{': ' + invalid_value if invalid_value else invalid_value}" - with Given("LDAP server configuration that uses a negative integer for the verification_cooldown parameter"): - servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": f"{invalid_value}" - }} + with Given( + "LDAP server configuration that uses a negative integer for the verification_cooldown parameter" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": f"{invalid_value}", + } + } with When("I try to use this configuration then it should not work"): invalid_server_config(servers, message=error_message, tail=30, timeout=timeout) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Configuration_Server_Syntax("2.0") -) +@Requirements(RQ_SRS_007_LDAP_Configuration_Server_Syntax("2.0")) def syntax(self): """Check that server configuration with valid syntax can be loaded. ```xml @@ -279,23 +385,23 @@ def syntax(self): "auth_dn_suffix": ",ou=users,dc=company,dc=com", "verification_cooldown": "0", "enable_tls": "yes", - "tls_minimum_protocol_version": "tls1.2" , + "tls_minimum_protocol_version": "tls1.2", "tls_require_cert": "demand", "tls_cert_file": "/container/service/slapd/assets/certs/ldap.crt", "tls_key_file": "/container/service/slapd/assets/certs/ldap.key", "tls_ca_cert_file": "/container/service/slapd/assets/certs/ca.crt", "tls_ca_cert_dir": "/container/service/slapd/assets/certs/", - "tls_cipher_suite": "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384" + "tls_cipher_suite": "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384", } } with ldap_servers(servers): pass + @TestFeature @Name("server config") def feature(self, node="clickhouse1"): - """Check that LDAP server configuration. 
- """ + """Check that LDAP server configuration.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario): diff --git a/tests/testflows/ldap/authentication/tests/user_config.py b/tests/testflows/ldap/authentication/tests/user_config.py index ebcfb6899c2..e1e2456e381 100644 --- a/tests/testflows/ldap/authentication/tests/user_config.py +++ b/tests/testflows/ldap/authentication/tests/user_config.py @@ -5,91 +5,136 @@ from testflows.core import * from ldap.authentication.tests.common import * from ldap.authentication.requirements import * + @TestScenario @Requirements( RQ_SRS_007_LDAP_User_Configuration_Invalid("1.0"), - RQ_SRS_007_LDAP_Configuration_User_Name_Empty("1.0") + RQ_SRS_007_LDAP_Configuration_User_Name_Empty("1.0"), ) def empty_user_name(self, timeout=300): - """Check that empty string as a user name is not allowed. - """ - servers = {"openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} - users = [{"server": "openldap1", "username": "", "password": "user1", "login": True}] + """Check that empty string as a user name is not allowed.""" + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } + users = [ + {"server": "openldap1", "username": "", "password": "user1", "login": True} + ] config = create_ldap_users_config_content(*users) invalid_user_config(servers, config, timeout=timeout) + @TestScenario @Requirements( RQ_SRS_007_LDAP_User_Configuration_Invalid("1.0"), - RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_Empty("1.0") + RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_Empty("1.0"), ) def empty_server_name(self, timeout=300): - """Check that if server name is an empty string then login is not allowed. - """ + """Check that if server name is an empty string then login is not allowed.""" message = "Exception: LDAP server name cannot be empty for user" - servers = {"openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} - users = [{"server": "", "username": "user1", "password": "user1", "login": True, - "errorcode": 4, - "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" - }] + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } + users = [ + { + "server": "", + "username": "user1", + "password": "user1", + "login": True, + "errorcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name", + } + ] config = create_ldap_users_config_content(*users) invalid_user_config(servers, config, message=message, tail=30, timeout=timeout) + @TestScenario @Requirements( RQ_SRS_007_LDAP_User_Configuration_Invalid("1.0"), - RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_NotDefined("1.0") + RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_NotDefined("1.0"), ) def empty_server_not_defined(self): - """Check that if server is not defined then login is not allowed. 
- """ - servers = {"openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} - users = [{"server": "foo", "username": "user1", "password": "user1", "login": True, - "errorcode": 4, - "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" - }] + """Check that if server is not defined then login is not allowed.""" + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } + users = [ + { + "server": "foo", + "username": "user1", + "password": "user1", + "login": True, + "errorcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, *users) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Configuration_User_Syntax("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Configuration_User_Syntax("1.0")) def valid_user_config(self): """Check syntax of valid user configuration of LDAP authenticated user.""" - servers = {"openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} - users = [{"server": "openldap1", "username": "user1", "password": "user1", "login": True}] + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } + users = [ + {"server": "openldap1", "username": "user1", "password": "user1", "login": True} + ] login(servers, *users) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Configuration_User_OnlyOneServer("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Configuration_User_OnlyOneServer("1.0")) def multiple_servers(self): """Check that user configuration allows to specify only one LDAP server for a given user and if multiple servers are specified then the first one is used.""" servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, "openldap2": { - "host": "openldap2", "enable_tls": "yes", "tls_require_cert": "never", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap2", + "enable_tls": "yes", + "tls_require_cert": "never", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } - user = {"server": "openldap1", "username": "user1", "password": "user1", "login": True} + user = { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + } with When("I first create regular user configuration file"): config = create_ldap_users_config_content(user) @@ -101,17 +146,21 @@ def multiple_servers(self): xml_user_ldap = xml_users.find(user["username"]).find("ldap") xml_append(xml_user_ldap, "server", "openldap2") xml_indent(root) - content = xml_with_utf8 + str(xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8") + content = xml_with_utf8 + str( + xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), + "utf-8", + ) - new_config = Config(content, config.path, config.name, config.uid, config.preprocessed_name) + new_config = Config( + content, 
config.path, config.name, config.uid, config.preprocessed_name + ) with Then("I login and expect it to work as the first server shall be used"): login(servers, user, config=new_config) + @TestScenario -@Requirements( - RQ_SRS_007_LDAP_Configuration_User_BothPasswordAndLDAP("1.0") -) +@Requirements(RQ_SRS_007_LDAP_Configuration_User_BothPasswordAndLDAP("1.0")) def ldap_and_password(self): """Check that user can't be authenticated if both `ldap` and `password` is specified for the same user. We expect an error message to be present in the log @@ -120,14 +169,20 @@ def ldap_and_password(self): node = self.context.node servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } user = { - "server": "openldap1", "username": "user1", "password": "user1", "login": True, + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, "errorcode": 4, - "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name", } with When("I first create regular user configuration file"): @@ -140,15 +195,24 @@ def ldap_and_password(self): xml_user = xml_users.find(user["username"]) xml_append(xml_user, "password", "hellothere") xml_indent(root) - content = xml_with_utf8 + str(xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8") + content = xml_with_utf8 + str( + xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), + "utf-8", + ) - new_config = Config(content, config.path, config.name, config.uid, config.preprocessed_name) + new_config = Config( + content, config.path, config.name, config.uid, config.preprocessed_name + ) error_message = "DB::Exception: More than one field of 'password'" - with Then("I expect an error when I try to load the configuration file", description=error_message): + with Then( + "I expect an error when I try to load the configuration file", + description=error_message, + ): invalid_user_config(servers, new_config, message=error_message, tail=30) + @TestFeature @Name("user config") def feature(self, node="clickhouse1"): diff --git a/tests/testflows/ldap/external_user_directory/regression.py b/tests/testflows/ldap/external_user_directory/regression.py index 3ae32ba52fb..de53bf9128e 100755 --- a/tests/testflows/ldap/external_user_directory/regression.py +++ b/tests/testflows/ldap/external_user_directory/regression.py @@ -12,41 +12,53 @@ from helpers.common import check_clickhouse_version # Cross-outs of known fails xfails = { - "connection protocols/tls/tls_require_cert='try'": - [(Fail, "can't be tested with self-signed certificates")], - "connection protocols/tls/tls_require_cert='demand'": - [(Fail, "can't be tested with self-signed certificates")], - "connection protocols/starttls/tls_require_cert='try'": - [(Fail, "can't be tested with self-signed certificates")], - "connection protocols/starttls/tls_require_cert='demand'": - [(Fail, "can't be tested with self-signed certificates")], - "connection protocols/tls require cert default demand": - [(Fail, "can't be tested with self-signed certificates")], - "connection protocols/starttls with custom port": - [(Fail, "it seems that starttls is not enabled by default on 
custom plain-text ports in LDAP server")], - "connection protocols/tls cipher suite": - [(Fail, "can't get it to work")] + "connection protocols/tls/tls_require_cert='try'": [ + (Fail, "can't be tested with self-signed certificates") + ], + "connection protocols/tls/tls_require_cert='demand'": [ + (Fail, "can't be tested with self-signed certificates") + ], + "connection protocols/starttls/tls_require_cert='try'": [ + (Fail, "can't be tested with self-signed certificates") + ], + "connection protocols/starttls/tls_require_cert='demand'": [ + (Fail, "can't be tested with self-signed certificates") + ], + "connection protocols/tls require cert default demand": [ + (Fail, "can't be tested with self-signed certificates") + ], + "connection protocols/starttls with custom port": [ + ( + Fail, + "it seems that starttls is not enabled by default on custom plain-text ports in LDAP server", + ) + ], + "connection protocols/tls cipher suite": [(Fail, "can't get it to work")], } -ffails ={ - "user authentications/verification cooldown performance/:": - (Skip, "causes timeout on 21.8", (lambda test: check_clickhouse_version(">=21.8")(test) and check_clickhouse_version("<21.9")(test))) +ffails = { + "user authentications/verification cooldown performance/:": ( + Skip, + "causes timeout on 21.8", + ( + lambda test: check_clickhouse_version(">=21.8")(test) + and check_clickhouse_version("<21.9")(test) + ), + ) } + @TestFeature @Name("external user directory") @ArgumentParser(argparser) -@Specifications( - SRS_009_ClickHouse_LDAP_External_User_Directory -) -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication("1.0") -) +@Specifications(SRS_009_ClickHouse_LDAP_External_User_Directory) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication("1.0")) @XFails(xfails) @FFails(ffails) -def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None): - """ClickHouse LDAP external user directory regression module. 
- """ +def regression( + self, local, clickhouse_binary_path, clickhouse_version=None, stress=None +): + """ClickHouse LDAP external user directory regression module.""" nodes = { "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"), } @@ -59,23 +71,35 @@ def regression(self, local, clickhouse_binary_path, clickhouse_version=None, str from platform import processor as current_cpu folder_name = os.path.basename(current_dir()) - if current_cpu() == 'aarch64': + if current_cpu() == "aarch64": env = f"{folder_name}_env_arm64" else: env = f"{folder_name}_env" - with Cluster(local, clickhouse_binary_path, nodes=nodes, - docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster: + with Cluster( + local, + clickhouse_binary_path, + nodes=nodes, + docker_compose_project_dir=os.path.join(current_dir(), env), + ) as cluster: self.context.cluster = cluster Scenario(run=load("ldap.authentication.tests.sanity", "scenario")) Scenario(run=load("ldap.external_user_directory.tests.simple", "scenario")) Feature(run=load("ldap.external_user_directory.tests.restart", "feature")) Feature(run=load("ldap.external_user_directory.tests.server_config", "feature")) - Feature(run=load("ldap.external_user_directory.tests.external_user_directory_config", "feature")) + Feature( + run=load( + "ldap.external_user_directory.tests.external_user_directory_config", + "feature", + ) + ) Feature(run=load("ldap.external_user_directory.tests.connections", "feature")) - Feature(run=load("ldap.external_user_directory.tests.authentications", "feature")) + Feature( + run=load("ldap.external_user_directory.tests.authentications", "feature") + ) Feature(run=load("ldap.external_user_directory.tests.roles", "feature")) + if main(): regression() diff --git a/tests/testflows/ldap/external_user_directory/requirements/requirements.py b/tests/testflows/ldap/external_user_directory/requirements/requirements.py index 90969725725..e15cc7a034e 100644 --- a/tests/testflows/ldap/external_user_directory/requirements/requirements.py +++ b/tests/testflows/ldap/external_user_directory/requirements/requirements.py @@ -9,1574 +9,1665 @@ from testflows.core import Requirement Heading = Specification.Heading RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users that are defined only on the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users that are defined only on the [LDAP] server.\n" + "\n" + ), link=None, level=4, - num='4.1.1.1') + num="4.1.1.1", +) RQ_SRS_009_LDAP_ExternalUserDirectory_MultipleUserDirectories = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.MultipleUserDirectories', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.MultipleUserDirectories", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users using multiple [LDAP] external user directories.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users using multiple [LDAP] external user directories.\n" + "\n" + ), link=None, level=4, - num='4.1.1.2') + num="4.1.1.2", +) RQ_SRS_009_LDAP_ExternalUserDirectory_MultipleUserDirectories_Lookup = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.MultipleUserDirectories.Lookup', - version='1.0', + 
name="RQ.SRS-009.LDAP.ExternalUserDirectory.MultipleUserDirectories.Lookup", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL attempt to authenticate external [LDAP] user\n' - 'using [LDAP] external user directory in the same order\n' - 'in which user directories are specified in the `config.xml` file.\n' - 'If a user cannot be authenticated using the first [LDAP] external user directory\n' - 'then the next user directory in the list SHALL be used.\n' - '\n' - ), + "[ClickHouse] SHALL attempt to authenticate external [LDAP] user\n" + "using [LDAP] external user directory in the same order\n" + "in which user directories are specified in the `config.xml` file.\n" + "If a user cannot be authenticated using the first [LDAP] external user directory\n" + "then the next user directory in the list SHALL be used.\n" + "\n" + ), link=None, level=4, - num='4.1.1.3') + num="4.1.1.3", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Users_Authentication_NewUsers = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Users.Authentication.NewUsers', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Users.Authentication.NewUsers", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users that are defined only on the [LDAP] server\n' - 'as soon as they are added to the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users that are defined only on the [LDAP] server\n" + "as soon as they are added to the [LDAP] server.\n" + "\n" + ), link=None, level=4, - num='4.1.1.4') + num="4.1.1.4", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_DeletedUsers = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.DeletedUsers', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.DeletedUsers", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not allow authentication of users that\n' - 'were previously defined only on the [LDAP] server but were removed\n' - 'from the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL not allow authentication of users that\n" + "were previously defined only on the [LDAP] server but were removed\n" + "from the [LDAP] server.\n" + "\n" + ), link=None, level=4, - num='4.1.1.5') + num="4.1.1.5", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Valid', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Valid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only allow user authentication using [LDAP] server if and only if\n' - 'user name and password match [LDAP] server records for the user\n' - 'when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL only allow user authentication using [LDAP] server if and only if\n" + "user name and password match [LDAP] server records for the user\n" + "when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.1.6') + num="4.1.1.6", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Invalid', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return 
an error and prohibit authentication if either user name or password\n' - 'do not match [LDAP] server records for the user\n' - 'when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit authentication if either user name or password\n" + "do not match [LDAP] server records for the user\n" + "when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.1.7') + num="4.1.1.7", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_UsernameChanged = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.UsernameChanged', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.UsernameChanged", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit authentication if the username is changed\n' - 'on the [LDAP] server when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit authentication if the username is changed\n" + "on the [LDAP] server when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.1.8') + num="4.1.1.8", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_PasswordChanged = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.PasswordChanged', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.PasswordChanged", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit authentication if the password\n' - 'for the user is changed on the [LDAP] server when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit authentication if the password\n" + "for the user is changed on the [LDAP] server when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.1.9') + num="4.1.1.9", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_LDAPServerRestart = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.LDAPServerRestart', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.LDAPServerRestart", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users after [LDAP] server is restarted\n' - 'when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users after [LDAP] server is restarted\n" + "when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.1.10') + num="4.1.1.10", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_ClickHouseServerRestart = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.ClickHouseServerRestart', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.ClickHouseServerRestart", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users after server is restarted\n' - 'when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users after server is restarted\n" + "when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.1.11') + num="4.1.1.11", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel', - 
version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of users using [LDAP] server\n' - 'when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of users using [LDAP] server\n" + "when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.1.12') + num="4.1.1.12", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.ValidAndInvalid', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.ValidAndInvalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authentication of valid users and\n' - 'prohibit authentication of invalid users using [LDAP] server\n' - 'in parallel without having invalid attempts affecting valid authentications\n' - 'when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support authentication of valid users and\n" + "prohibit authentication of invalid users using [LDAP] server\n" + "in parallel without having invalid attempts affecting valid authentications\n" + "when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.1.13') + num="4.1.1.13", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_MultipleServers = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.MultipleServers', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.MultipleServers", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of external [LDAP] users\n' - 'authenticated using multiple [LDAP] external user directories.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of external [LDAP] users\n" + "authenticated using multiple [LDAP] external user directories.\n" + "\n" + ), link=None, level=4, - num='4.1.1.14') + num="4.1.1.14", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_LocalOnly = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.LocalOnly', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.LocalOnly", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of users defined only locally\n' - 'when one or more [LDAP] external user directories are specified in the configuration file.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of users defined only locally\n" + "when one or more [LDAP] external user directories are specified in the configuration file.\n" + "\n" + ), link=None, level=4, - num='4.1.1.15') + num="4.1.1.15", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_LocalAndMultipleLDAP = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.LocalAndMultipleLDAP', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.LocalAndMultipleLDAP", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of local and external [LDAP] users\n' - 'authenticated using multiple 
[LDAP] external user directories.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of local and external [LDAP] users\n" + "authenticated using multiple [LDAP] external user directories.\n" + "\n" + ), link=None, level=4, - num='4.1.1.16') + num="4.1.1.16", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_SameUser = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.SameUser', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.SameUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of the same external [LDAP] user\n' - 'authenticated using the same [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of the same external [LDAP] user\n" + "authenticated using the same [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.1.17') + num="4.1.1.17", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_DynamicallyAddedAndRemovedUsers = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.DynamicallyAddedAndRemovedUsers', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.DynamicallyAddedAndRemovedUsers", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of users using\n' - '[LDAP] external user directory when [LDAP] users are dynamically added and\n' - 'removed.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of users using\n" + "[LDAP] external user directory when [LDAP] users are dynamically added and\n" + "removed.\n" + "\n" + ), link=None, level=4, - num='4.1.1.18') + num="4.1.1.18", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_PlainText = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.PlainText', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.PlainText", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support user authentication using plain text `ldap://` non secure protocol\n' - 'while connecting to the [LDAP] server when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support user authentication using plain text `ldap://` non secure protocol\n" + "while connecting to the [LDAP] server when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.2.1') + num="4.1.2.1", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_TLS = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support user authentication using `SSL/TLS` `ldaps://` secure protocol\n' - 'while connecting to the [LDAP] server when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support user authentication using `SSL/TLS` `ldaps://` secure protocol\n" + "while connecting to the [LDAP] server when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.2.2') + num="4.1.2.2", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_StartTLS = Requirement( - 
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.StartTLS', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.StartTLS", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support user authentication using legacy `StartTLS` protocol which is a\n' - 'plain text `ldap://` protocol that is upgraded to [TLS] when connecting to the [LDAP] server\n' - 'when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support user authentication using legacy `StartTLS` protocol which is a\n" + "plain text `ldap://` protocol that is upgraded to [TLS] when connecting to the [LDAP] server\n" + "when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.2.3') + num="4.1.2.3", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_TLS_Certificate_Validation = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.Validation', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.Validation", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support certificate validation used for [TLS] connections\n' - 'to the [LDAP] server when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support certificate validation used for [TLS] connections\n" + "to the [LDAP] server when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.2.4') + num="4.1.2.4", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_TLS_Certificate_SelfSigned = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.SelfSigned', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.SelfSigned", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support self-signed certificates for [TLS] connections\n' - 'to the [LDAP] server when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support self-signed certificates for [TLS] connections\n" + "to the [LDAP] server when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.2.5') + num="4.1.2.5", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_TLS_Certificate_SpecificCertificationAuthority = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.SpecificCertificationAuthority', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.SpecificCertificationAuthority", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support certificates signed by specific Certification Authority for [TLS] connections\n' - 'to the [LDAP] server when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support certificates signed by specific Certification Authority for [TLS] connections\n" + "to the [LDAP] server when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.2.6') + num="4.1.2.6", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Authentication_Mechanism_Anonymous = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.Anonymous', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.Anonymous", + 
version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit authentication using [Anonymous Authentication Mechanism of Simple Bind]\n' - 'authentication mechanism when connecting to the [LDAP] server when using [LDAP] external server directory.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit authentication using [Anonymous Authentication Mechanism of Simple Bind]\n" + "authentication mechanism when connecting to the [LDAP] server when using [LDAP] external server directory.\n" + "\n" + ), link=None, level=4, - num='4.1.2.7') + num="4.1.2.7", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Authentication_Mechanism_Unauthenticated = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.Unauthenticated', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.Unauthenticated", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit authentication using [Unauthenticated Authentication Mechanism of Simple Bind]\n' - 'authentication mechanism when connecting to the [LDAP] server when using [LDAP] external server directory.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit authentication using [Unauthenticated Authentication Mechanism of Simple Bind]\n" + "authentication mechanism when connecting to the [LDAP] server when using [LDAP] external server directory.\n" + "\n" + ), link=None, level=4, - num='4.1.2.8') + num="4.1.2.8", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Authentication_Mechanism_NamePassword = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.NamePassword', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.NamePassword", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL allow authentication using only [Name/Password Authentication Mechanism of Simple Bind]\n' - 'authentication mechanism when connecting to the [LDAP] server when using [LDAP] external server directory.\n' - '\n' - ), + "[ClickHouse] SHALL allow authentication using only [Name/Password Authentication Mechanism of Simple Bind]\n" + "authentication mechanism when connecting to the [LDAP] server when using [LDAP] external server directory.\n" + "\n" + ), link=None, level=4, - num='4.1.2.9') + num="4.1.2.9", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Authentication_UnreachableServer = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.UnreachableServer', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.UnreachableServer", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server is unreachable\n' - 'when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server is unreachable\n" + "when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.1.2.10') + num="4.1.2.10", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Users_Lookup_Priority = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Users.Lookup.Priority', - version='2.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Users.Lookup.Priority", + version="2.0", priority=None, 
group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL lookup user presence in the same order\n' - 'as user directories are defined in the `config.xml`.\n' - '\n' - ), + "[ClickHouse] SHALL lookup user presence in the same order\n" + "as user directories are defined in the `config.xml`.\n" + "\n" + ), link=None, level=4, - num='4.2.1.1') + num="4.2.1.1", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Restart_Server = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Restart.Server', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Restart.Server", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support restarting server when one or more LDAP external directories\n' - 'are configured.\n' - '\n' - ), + "[ClickHouse] SHALL support restarting server when one or more LDAP external directories\n" + "are configured.\n" + "\n" + ), link=None, level=4, - num='4.2.1.2') + num="4.2.1.2", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Restart_Server_ParallelLogins = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Restart.Server.ParallelLogins', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Restart.Server.ParallelLogins", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support restarting server when one or more LDAP external directories\n' - 'are configured during parallel [LDAP] user logins.\n' - '\n' - ), + "[ClickHouse] SHALL support restarting server when one or more LDAP external directories\n" + "are configured during parallel [LDAP] user logins.\n" + "\n" + ), link=None, level=4, - num='4.2.1.3') + num="4.2.1.3", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Role_Removed = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Removed', - version='2.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Removed", + version="2.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL allow authentication even if the roles that are specified in the configuration\n' - 'of the external user directory are not defined at the time of the authentication attempt.\n' - '\n' - ), + "[ClickHouse] SHALL allow authentication even if the roles that are specified in the configuration\n" + "of the external user directory are not defined at the time of the authentication attempt.\n" + "\n" + ), link=None, level=4, - num='4.2.2.1') + num="4.2.2.1", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Role_Removed_Privileges = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Removed.Privileges', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Removed.Privileges", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL remove the privileges provided by the role from all the LDAP\n' - 'users authenticated using external user directory if it is removed\n' - 'including currently cached users that are still able to authenticated where the removed\n' - 'role is specified in the configuration of the external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL remove the privileges provided by the role from all the LDAP\n" + "users authenticated using external user directory if it is removed\n" + "including currently cached users that are still able to authenticated where the removed\n" + "role is specified in the configuration of the external user directory.\n" + "\n" + ), link=None, level=4, - num='4.2.2.2') + num="4.2.2.2", +) 
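Aside (not part of the diff): the `Requirement` objects generated in `requirements.py` above are consumed by the test scenarios reformatted earlier in this diff via the `@Requirements` decorator, where calling a requirement with a version string binds the scenario to that version. A minimal sketch of that pattern, assuming the same testflows layout the diff uses; the scenario name and body are hypothetical placeholders:

```python
# Minimal sketch, illustrative only: how a generated Requirement is attached
# to a scenario. Imports assume the package layout visible in this diff.
from testflows.core import *
from ldap.external_user_directory.requirements import (
    RQ_SRS_009_LDAP_ExternalUserDirectory_Role_Removed_Privileges,
)


@TestScenario
@Requirements(
    # Calling the requirement with "1.0" is the pattern used throughout the
    # reformatted test files to tie the scenario to that requirement version.
    RQ_SRS_009_LDAP_ExternalUserDirectory_Role_Removed_Privileges("1.0"),
)
def removed_role_privileges(self):
    """Hypothetical placeholder scenario showing the decorator pattern only."""
    pass
```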
RQ_SRS_009_LDAP_ExternalUserDirectory_Role_Readded_Privileges = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Readded.Privileges', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Readded.Privileges", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL reassign the role and add the privileges provided by the role\n' - 'when it is re-added after removal for all LDAP users authenticated using external user directory\n' - 'including any cached users where the re-added role was specified in the configuration of the external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL reassign the role and add the privileges provided by the role\n" + "when it is re-added after removal for all LDAP users authenticated using external user directory\n" + "including any cached users where the re-added role was specified in the configuration of the external user directory.\n" + "\n" + ), link=None, level=4, - num='4.2.2.3') + num="4.2.2.3", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Role_New = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.New', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.New", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not allow any new roles to be assigned to any LDAP\n' - 'users authenticated using external user directory unless the role is specified\n' - 'in the configuration of the external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL not allow any new roles to be assigned to any LDAP\n" + "users authenticated using external user directory unless the role is specified\n" + "in the configuration of the external user directory.\n" + "\n" + ), link=None, level=4, - num='4.2.2.4') + num="4.2.2.4", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Role_NewPrivilege = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.NewPrivilege', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.NewPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL add new privilege to all the LDAP users authenticated using external user directory\n' - 'including cached users when new privilege is added to one of the roles specified\n' - 'in the configuration of the external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL add new privilege to all the LDAP users authenticated using external user directory\n" + "including cached users when new privilege is added to one of the roles specified\n" + "in the configuration of the external user directory.\n" + "\n" + ), link=None, level=4, - num='4.2.2.5') + num="4.2.2.5", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Role_RemovedPrivilege = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.RemovedPrivilege', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.RemovedPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL remove privilege from all the LDAP users authenticated using external user directory\n' - 'including cached users when privilege is removed from all the roles specified\n' - 'in the configuration of the external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL remove privilege from all the LDAP users authenticated using external user directory\n" + "including cached users when privilege is removed from all the roles specified\n" + "in the configuration of the external user directory.\n" + "\n" + ), 
link=None, level=4, - num='4.2.2.6') + num="4.2.2.6", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Role_NotPresent_Added = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.NotPresent.Added', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.NotPresent.Added", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL add a role to the users authenticated using LDAP external user directory\n' - 'that did not exist during the time of authentication but are defined in the \n' - 'configuration file as soon as the role with that name becomes\n' - 'available.\n' - '\n' - ), + "[ClickHouse] SHALL add a role to the users authenticated using LDAP external user directory\n" + "that did not exist during the time of authentication but are defined in the \n" + "configuration file as soon as the role with that name becomes\n" + "available.\n" + "\n" + ), link=None, level=4, - num='4.2.2.7') + num="4.2.2.7", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Invalid', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server configuration is not valid.\n' - '\n' - ), + "[ClickHouse] SHALL return an error and prohibit user login if [LDAP] server configuration is not valid.\n" + "\n" + ), link=None, level=4, - num='4.2.3.1') + num="4.2.3.1", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Definition = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Definition', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Definition", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using the [LDAP] servers defined in the\n' - '`ldap_servers` section of the `config.xml` as the server to be used\n' - 'for a external user directory that uses an [LDAP] server as a source of user definitions.\n' - '\n' - ), + "[ClickHouse] SHALL support using the [LDAP] servers defined in the\n" + "`ldap_servers` section of the `config.xml` as the server to be used\n" + "for a external user directory that uses an [LDAP] server as a source of user definitions.\n" + "\n" + ), link=None, level=4, - num='4.2.3.2') + num="4.2.3.2", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Name = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Name', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Name", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not support empty string as a server name.\n' - '\n' - ), + "[ClickHouse] SHALL not support empty string as a server name.\n" "\n" + ), link=None, level=4, - num='4.2.3.3') + num="4.2.3.3", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Host = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Host', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Host", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify [LDAP]\n' - 'server hostname or IP, this parameter SHALL be mandatory and SHALL not be empty.\n' - '\n' - ), + 
"[ClickHouse] SHALL support `` parameter to specify [LDAP]\n" + "server hostname or IP, this parameter SHALL be mandatory and SHALL not be empty.\n" + "\n" + ), link=None, level=4, - num='4.2.3.4') + num="4.2.3.4", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Port', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Port", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify [LDAP] server port.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify [LDAP] server port.\n" + "\n" + ), link=None, level=4, - num='4.2.3.5') + num="4.2.3.5", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port_Default = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Port.Default', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Port.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use default port number `636` if `enable_tls` is set to `yes` or `389` otherwise.\n' - '\n' - ), + "[ClickHouse] SHALL use default port number `636` if `enable_tls` is set to `yes` or `389` otherwise.\n" + "\n" + ), link=None, level=4, - num='4.2.3.6') + num="4.2.3.6", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Prefix = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Prefix', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Prefix", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify the prefix\n' - 'of value used to construct the DN to bound to during authentication via [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify the prefix\n" + "of value used to construct the DN to bound to during authentication via [LDAP] server.\n" + "\n" + ), link=None, level=4, - num='4.2.3.7') + num="4.2.3.7", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Suffix = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Suffix', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Suffix", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify the suffix\n' - 'of value used to construct the DN to bound to during authentication via [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify the suffix\n" + "of value used to construct the DN to bound to during authentication via [LDAP] server.\n" + "\n" + ), link=None, level=4, - num='4.2.3.8') + num="4.2.3.8", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Value = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Value', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Value", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL construct DN as `auth_dn_prefix + escape(user_name) + auth_dn_suffix` string.\n' - '\n' + "[ClickHouse] SHALL construct DN as `auth_dn_prefix + escape(user_name) + auth_dn_suffix` string.\n" + "\n" "> This implies that auth_dn_suffix should usually have comma 
',' as its first non-space character.\n" - '\n' - ), + "\n" + ), link=None, level=4, - num='4.2.3.9') + num="4.2.3.9", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to trigger the use of secure connection to the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to trigger the use of secure connection to the [LDAP] server.\n" + "\n" + ), link=None, level=4, - num='4.2.3.10') + num="4.2.3.10", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_Default = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.Default', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use `yes` value as the default for `` parameter\n' - 'to enable SSL/TLS `ldaps://` protocol.\n' - '\n' - ), + "[ClickHouse] SHALL use `yes` value as the default for `` parameter\n" + "to enable SSL/TLS `ldaps://` protocol.\n" + "\n" + ), link=None, level=4, - num='4.2.3.11') + num="4.2.3.11", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_No = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.No', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.No", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `no` as the value of `` parameter to enable\n' - 'plain text `ldap://` protocol.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `no` as the value of `` parameter to enable\n" + "plain text `ldap://` protocol.\n" + "\n" + ), link=None, level=4, - num='4.2.3.12') + num="4.2.3.12", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_Yes = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.Yes', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.Yes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `yes` as the value of `` parameter to enable\n' - 'SSL/TLS `ldaps://` protocol.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `yes` as the value of `` parameter to enable\n" + "SSL/TLS `ldaps://` protocol.\n" + "\n" + ), link=None, level=4, - num='4.2.3.13') + num="4.2.3.13", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_StartTLS = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.StartTLS', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.StartTLS", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `starttls` as the value of `` parameter to enable\n' - 'legacy `StartTLS` protocol that used plain text `ldap://` protocol, upgraded to [TLS].\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `starttls` as the value of `` parameter to enable\n" 
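> Editor's note: `Configuration.Server.AuthDN.Value` above defines the bind DN as `auth_dn_prefix + escape(user_name) + auth_dn_suffix`. With the sketch values above and a hypothetical user `jdoe`, the resulting DN is shown in the comment:

```xml
<!-- For user "jdoe" the bind DN becomes:
     cn=jdoe, ou=users, dc=example, dc=com
     (prefix + escaped user name + suffix) -->
<auth_dn_prefix>cn=</auth_dn_prefix>
<auth_dn_suffix>, ou=users, dc=example, dc=com</auth_dn_suffix>
```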
+ "legacy `StartTLS` protocol that used plain text `ldap://` protocol, upgraded to [TLS].\n" + "\n" + ), link=None, level=4, - num='4.2.3.14') + num="4.2.3.14", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify\n' - 'the minimum protocol version of SSL/TLS.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify\n" + "the minimum protocol version of SSL/TLS.\n" + "\n" + ), link=None, level=4, - num='4.2.3.15') + num="4.2.3.15", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion_Values = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion.Values', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion.Values", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, and `tls1.2`\n' - 'as a value of the `` parameter.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `ssl2`, `ssl3`, `tls1.0`, `tls1.1`, and `tls1.2`\n" + "as a value of the `` parameter.\n" + "\n" + ), link=None, level=4, - num='4.2.3.16') + num="4.2.3.16", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion_Default = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion.Default', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL set `tls1.2` as the default value of the `` parameter.\n' - '\n' - ), + "[ClickHouse] SHALL set `tls1.2` as the default value of the `` parameter.\n" + "\n" + ), link=None, level=4, - num='4.2.3.17') + num="4.2.3.17", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify [TLS] peer\n' - 'certificate verification behavior.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify [TLS] peer\n" + "certificate verification behavior.\n" + "\n" + ), link=None, level=4, - num='4.2.3.18') + num="4.2.3.18", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Default = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Default', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use `demand` value as the default for the `` parameter.\n' - '\n' - ), + "[ClickHouse] SHALL use `demand` value as the default for the `` parameter.\n" + "\n" + ), link=None, level=4, - num='4.2.3.19') + num="4.2.3.19", +) 
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Demand = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Demand', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Demand", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `demand` as the value of `` parameter to\n' - 'enable requesting of client certificate. If no certificate is provided, or a bad certificate is\n' - 'provided, the session SHALL be immediately terminated.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `demand` as the value of `` parameter to\n" + "enable requesting of client certificate. If no certificate is provided, or a bad certificate is\n" + "provided, the session SHALL be immediately terminated.\n" + "\n" + ), link=None, level=4, - num='4.2.3.20') + num="4.2.3.20", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Allow = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Allow', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Allow", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `allow` as the value of `` parameter to\n' - 'enable requesting of client certificate. If no\n' - 'certificate is provided, the session SHALL proceed normally.\n' - 'If a bad certificate is provided, it SHALL be ignored and the session SHALL proceed normally.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `allow` as the value of `` parameter to\n" + "enable requesting of client certificate. If no\n" + "certificate is provided, the session SHALL proceed normally.\n" + "If a bad certificate is provided, it SHALL be ignored and the session SHALL proceed normally.\n" + "\n" + ), link=None, level=4, - num='4.2.3.21') + num="4.2.3.21", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Try = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Try', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Try", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `try` as the value of `` parameter to\n' - 'enable requesting of client certificate. If no certificate is provided, the session\n' - 'SHALL proceed normally. If a bad certificate is provided, the session SHALL be\n' - 'immediately terminated.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `try` as the value of `` parameter to\n" + "enable requesting of client certificate. If no certificate is provided, the session\n" + "SHALL proceed normally. 
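> Editor's note: `tls_require_cert` selects peer certificate verification behavior: `demand` (the default) terminates the session on a missing or bad certificate, `allow` proceeds in both cases, and the `try` and `never` options follow below. A one-line sketch:

```xml
<!-- one of: demand (default), allow, try, never -->
<tls_require_cert>demand</tls_require_cert>
```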
If a bad certificate is provided, the session SHALL be\n" + "immediately terminated.\n" + "\n" + ), link=None, level=4, - num='4.2.3.22') + num="4.2.3.22", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Never = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Never', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Never", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `never` as the value of `` parameter to\n' - 'disable requesting of client certificate.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `never` as the value of `` parameter to\n" + "disable requesting of client certificate.\n" + "\n" + ), link=None, level=4, - num='4.2.3.23') + num="4.2.3.23", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCertFile = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCertFile', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCertFile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` to specify the path to certificate file used by\n' - '[ClickHouse] to establish connection with the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` to specify the path to certificate file used by\n" + "[ClickHouse] to establish connection with the [LDAP] server.\n" + "\n" + ), link=None, level=4, - num='4.2.3.24') + num="4.2.3.24", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSKeyFile = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSKeyFile', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSKeyFile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` to specify the path to key file for the certificate\n' - 'specified by the `` parameter.\n' - '\n' - ), + "[ClickHouse] SHALL support `` to specify the path to key file for the certificate\n" + "specified by the `` parameter.\n" + "\n" + ), link=None, level=4, - num='4.2.3.25') + num="4.2.3.25", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCACertDir = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertDir', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertDir", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify to a path to\n' - 'the directory containing [CA] certificates used to verify certificates provided by the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify to a path to\n" + "the directory containing [CA] certificates used to verify certificates provided by the [LDAP] server.\n" + "\n" + ), link=None, level=4, - num='4.2.3.26') + num="4.2.3.26", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCACertFile = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertFile', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertFile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` parameter to specify a path to a specific\n' - '[CA] certificate file used to 
verify certificates provided by the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `` parameter to specify a path to a specific\n" + "[CA] certificate file used to verify certificates provided by the [LDAP] server.\n" + "\n" + ), link=None, level=4, - num='4.2.3.27') + num="4.2.3.27", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCipherSuite = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCipherSuite', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCipherSuite", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `tls_cipher_suite` parameter to specify allowed cipher suites.\n' - 'The value SHALL use the same format as the `ciphersuites` in the [OpenSSL Ciphers].\n' - '\n' - 'For example,\n' - '\n' - '```xml\n' - 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384\n' - '```\n' - '\n' - 'The available suites SHALL depend on the [OpenSSL] library version and variant used to build\n' - '[ClickHouse] and therefore might change.\n' - '\n' - ), + "[ClickHouse] SHALL support `tls_cipher_suite` parameter to specify allowed cipher suites.\n" + "The value SHALL use the same format as the `ciphersuites` in the [OpenSSL Ciphers].\n" + "\n" + "For example,\n" + "\n" + "```xml\n" + "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384\n" + "```\n" + "\n" + "The available suites SHALL depend on the [OpenSSL] library version and variant used to build\n" + "[ClickHouse] and therefore might change.\n" + "\n" + ), link=None, level=4, - num='4.2.3.28') + num="4.2.3.28", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `verification_cooldown` parameter in the [LDAP] server configuration section\n' - 'that SHALL define a period of time, in seconds, after a successful bind attempt, during which a user SHALL be assumed\n' - 'to be successfully authenticated for all consecutive requests without contacting the [LDAP] server.\n' - 'After period of time since the last successful attempt expires then on the authentication attempt\n' - 'SHALL result in contacting the [LDAP] server to verify the username and password.\n' - '\n' - ), + "[ClickHouse] SHALL support `verification_cooldown` parameter in the [LDAP] server configuration section\n" + "that SHALL define a period of time, in seconds, after a successful bind attempt, during which a user SHALL be assumed\n" + "to be successfully authenticated for all consecutive requests without contacting the [LDAP] server.\n" + "After period of time since the last successful attempt expires then on the authentication attempt\n" + "SHALL result in contacting the [LDAP] server to verify the username and password.\n" + "\n" + ), link=None, level=4, - num='4.2.3.29') + num="4.2.3.29", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Default = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Default', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Default", + version="1.0", 
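> Editor's note: the certificate-related parameters above take file or directory paths, and `tls_cipher_suite` takes a list in [OpenSSL Ciphers] format. A sketch with the placeholder paths and cipher string used by the `Server.Syntax` example later in this file:

```xml
<my_ldap_server>
    <tls_cert_file>/path/to/tls_cert_file</tls_cert_file>
    <tls_key_file>/path/to/tls_key_file</tls_key_file>
    <tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>
    <tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>
    <tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>
</my_ldap_server>
```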
priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] `verification_cooldown` parameter in the [LDAP] server configuration section\n' - 'SHALL have a default value of `0` that disables caching and forces contacting\n' - 'the [LDAP] server for each authentication request.\n' - '\n' - ), + "[ClickHouse] `verification_cooldown` parameter in the [LDAP] server configuration section\n" + "SHALL have a default value of `0` that disables caching and forces contacting\n" + "the [LDAP] server for each authentication request.\n" + "\n" + ), link=None, level=4, - num='4.2.3.30') + num="4.2.3.30", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Invalid = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Invalid', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[Clickhouse] SHALL return an error if the value provided for the `verification_cooldown` parameter is not a valid positive integer.\n' - '\n' - 'For example:\n' - '\n' - '* negative integer\n' - '* string\n' - '* empty value\n' - '* extremely large positive value (overflow)\n' - '* extremely large negative value (overflow)\n' - '\n' - 'The error SHALL appear in the log and SHALL be similar to the following:\n' - '\n' - '```bash\n' - ' Access(user directories): Could not parse LDAP server `openldap1`: Poco::Exception. Code: 1000, e.code() = 0, e.displayText() = Syntax error: Not a valid unsigned integer: *input value*\n' - '```\n' - '\n' - ), + "[Clickhouse] SHALL return an error if the value provided for the `verification_cooldown` parameter is not a valid positive integer.\n" + "\n" + "For example:\n" + "\n" + "* negative integer\n" + "* string\n" + "* empty value\n" + "* extremely large positive value (overflow)\n" + "* extremely large negative value (overflow)\n" + "\n" + "The error SHALL appear in the log and SHALL be similar to the following:\n" + "\n" + "```bash\n" + " Access(user directories): Could not parse LDAP server `openldap1`: Poco::Exception. 
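> Editor's note: `verification_cooldown` above caches a successful bind for the given number of seconds; the default `0` disables caching, so every authentication request contacts the [LDAP] server. A sketch enabling a 5-minute cache (the value `300` is illustrative):

```xml
<my_ldap_server>
    <!-- seconds during which a successful bind is trusted without
         re-contacting the LDAP server; 0 (default) disables caching -->
    <verification_cooldown>300</verification_cooldown>
</my_ldap_server>
```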
Code: 1000, e.code() = 0, e.displayText() = Syntax error: Not a valid unsigned integer: *input value*\n" + "```\n" + "\n" + ), link=None, level=4, - num='4.2.3.31') + num="4.2.3.31", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Syntax = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax', - version='2.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax", + version="2.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following example syntax to create an entry for an [LDAP] server inside the `config.xml`\n' - 'configuration file or of any configuration file inside the `config.d` directory.\n' - '\n' - '```xml\n' - '\n' - ' \n' - ' localhost\n' - ' 636\n' - ' cn=\n' - ' , ou=users, dc=example, dc=com\n' - ' 0\n' - ' yes\n' - ' tls1.2\n' - ' demand\n' - ' /path/to/tls_cert_file\n' - ' /path/to/tls_key_file\n' - ' /path/to/tls_ca_cert_file\n' - ' /path/to/tls_ca_cert_dir\n' - ' ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384\n' - ' \n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following example syntax to create an entry for an [LDAP] server inside the `config.xml`\n" + "configuration file or of any configuration file inside the `config.d` directory.\n" + "\n" + "```xml\n" + "\n" + " \n" + " localhost\n" + " 636\n" + " cn=\n" + " , ou=users, dc=example, dc=com\n" + " 0\n" + " yes\n" + " tls1.2\n" + " demand\n" + " /path/to/tls_cert_file\n" + " /path/to/tls_key_file\n" + " /path/to/tls_ca_cert_file\n" + " /path/to/tls_ca_cert_dir\n" + " ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384\n" + " \n" + "\n" + "```\n" + "\n" + ), link=None, level=4, - num='4.2.3.32') + num="4.2.3.32", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` sub-section in the `` section of the `config.xml`\n' - 'that SHALL define a external user directory that uses an [LDAP] server as a source of user definitions.\n' - '\n' - ), + "[ClickHouse] SHALL support `` sub-section in the `` section of the `config.xml`\n" + "that SHALL define a external user directory that uses an [LDAP] server as a source of user definitions.\n" + "\n" + ), link=None, level=4, - num='4.2.3.33') + num="4.2.3.33", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory_MoreThanOne = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne', - version='2.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne", + version="2.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support more than one `` sub-sections in the `` section of the `config.xml`\n' - 'that SHALL allow to define more than one external user directory that use an [LDAP] server as a source\n' - 'of user definitions.\n' - '\n' - ), + "[ClickHouse] SHALL support more than one `` sub-sections in the `` section of the `config.xml`\n" + "that SHALL allow to define more than one external user directory that use an [LDAP] server as a source\n" + "of user definitions.\n" + "\n" + ), link=None, level=4, 
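> Editor's note: the `Configuration.Server.Syntax` example embedded above shows only the element values because the XML tags were lost in extraction. A reconstruction of the full entry, assuming the parameter names defined by the preceding requirements:

```xml
<ldap_servers>
    <my_ldap_server>
        <host>localhost</host>
        <port>636</port>
        <auth_dn_prefix>cn=</auth_dn_prefix>
        <auth_dn_suffix>, ou=users, dc=example, dc=com</auth_dn_suffix>
        <verification_cooldown>0</verification_cooldown>
        <enable_tls>yes</enable_tls>
        <tls_minimum_protocol_version>tls1.2</tls_minimum_protocol_version>
        <tls_require_cert>demand</tls_require_cert>
        <tls_cert_file>/path/to/tls_cert_file</tls_cert_file>
        <tls_key_file>/path/to/tls_key_file</tls_key_file>
        <tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>
        <tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>
        <tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>
    </my_ldap_server>
</ldap_servers>
```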
- num='4.2.3.34') + num="4.2.3.34", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Syntax = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `` section with the following syntax\n' - '\n' - '```xml\n' - '\n' - ' \n' - ' \n' - ' my_ldap_server\n' - ' \n' - ' \n' - ' \n' - ' \n' - ' \n' - ' \n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `` section with the following syntax\n" + "\n" + "```xml\n" + "\n" + " \n" + " \n" + " my_ldap_server\n" + " \n" + " \n" + " \n" + " \n" + " \n" + " \n" + "\n" + "```\n" + "\n" + ), link=None, level=4, - num='4.2.3.35') + num="4.2.3.35", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `server` parameter in the `` sub-section in the ``\n' - 'section of the `config.xml` that SHALL specify one of LDAP server names\n' - 'defined in `` section.\n' - '\n' - ), + "[ClickHouse] SHALL support `server` parameter in the `` sub-section in the ``\n" + "section of the `config.xml` that SHALL specify one of LDAP server names\n" + "defined in `` section.\n" + "\n" + ), link=None, level=4, - num='4.2.3.36') + num="4.2.3.36", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Empty = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Empty', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Empty", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `server` parameter in the `` sub-section in the ``\n' - 'is empty.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `server` parameter in the `` sub-section in the ``\n" + "is empty.\n" + "\n" + ), link=None, level=4, - num='4.2.3.37') + num="4.2.3.37", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Missing = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Missing', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Missing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `server` parameter in the `` sub-section in the ``\n' - 'is missing.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `server` parameter in the `` sub-section in the ``\n" + "is missing.\n" + "\n" + ), link=None, level=4, - num='4.2.3.38') + num="4.2.3.38", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_MoreThanOne = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.MoreThanOne', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.MoreThanOne", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only use the first definitition of the `server` parameter in the `` 
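> Editor's note: the `Configuration.Users.Syntax` example above likewise lost its tags. Assuming the element names implied by the surrounding requirements (`<user_directories>`, `<ldap>`, `<server>`, `<roles>`; the role names are illustrative), a reconstruction:

```xml
<user_directories>
    <ldap>
        <server>my_ldap_server</server>
        <roles>
            <my_local_role1 />
            <my_local_role2 />
        </roles>
    </ldap>
</user_directories>
```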
sub-section in the ``\n' - 'if more than one `server` parameter is defined in the configuration.\n' - '\n' - ), + "[ClickHouse] SHALL only use the first definitition of the `server` parameter in the `` sub-section in the ``\n" + "if more than one `server` parameter is defined in the configuration.\n" + "\n" + ), link=None, level=4, - num='4.2.3.39') + num="4.2.3.39", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Invalid = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Invalid', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the server specified as the value of the ``\n' - 'parameter is not defined.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the server specified as the value of the ``\n" + "parameter is not defined.\n" + "\n" + ), link=None, level=4, - num='4.2.3.40') + num="4.2.3.40", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `roles` parameter in the `` sub-section in the ``\n' - 'section of the `config.xml` that SHALL specify the names of a locally defined roles that SHALL\n' - 'be assigned to all users retrieved from the [LDAP] server.\n' - '\n' - ), + "[ClickHouse] SHALL support `roles` parameter in the `` sub-section in the ``\n" + "section of the `config.xml` that SHALL specify the names of a locally defined roles that SHALL\n" + "be assigned to all users retrieved from the [LDAP] server.\n" + "\n" + ), link=None, level=4, - num='4.2.3.41') + num="4.2.3.41", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_MoreThanOne = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.MoreThanOne', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.MoreThanOne", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only use the first definitition of the `roles` parameter\n' - 'in the `` sub-section in the ``\n' - 'if more than one `roles` parameter is defined in the configuration.\n' - '\n' - ), + "[ClickHouse] SHALL only use the first definitition of the `roles` parameter\n" + "in the `` sub-section in the ``\n" + "if more than one `roles` parameter is defined in the configuration.\n" + "\n" + ), link=None, level=4, - num='4.2.3.42') + num="4.2.3.42", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Invalid = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Invalid', - version='2.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Invalid", + version="2.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not return an error if the role specified in the ``\n' - 'parameter does not exist locally. \n' - '\n' - ), + "[ClickHouse] SHALL not return an error if the role specified in the ``\n" + "parameter does not exist locally. 
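> Editor's note: the `Server.MoreThanOne` and `Roles.MoreThanOne` requirements above mean duplicate parameters are ignored past the first occurrence. A sketch where only the first `server` and first `roles` definitions take effect (server and role names are illustrative):

```xml
<ldap>
    <server>my_ldap_server</server>      <!-- used -->
    <server>another_ldap_server</server> <!-- ignored: only the first server definition counts -->
    <roles>
        <my_local_role1 />               <!-- used -->
    </roles>
    <roles>
        <my_local_role2 />               <!-- ignored: only the first roles definition counts -->
    </roles>
</ldap>
```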
\n" + "\n" + ), link=None, level=4, - num='4.2.3.43') + num="4.2.3.43", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Empty = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Empty', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Empty", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not allow users authenticated using LDAP external user directory\n' - 'to perform any action if the `roles` parameter in the `` sub-section in the ``\n' - 'section is empty.\n' - '\n' - ), + "[ClickHouse] SHALL not allow users authenticated using LDAP external user directory\n" + "to perform any action if the `roles` parameter in the `` sub-section in the ``\n" + "section is empty.\n" + "\n" + ), link=None, level=4, - num='4.2.3.44') + num="4.2.3.44", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Missing = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Missing', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Missing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not allow users authenticated using LDAP external user directory\n' - 'to perform any action if the `roles` parameter in the `` sub-section in the ``\n' - 'section is missing.\n' - '\n' - ), + "[ClickHouse] SHALL not allow users authenticated using LDAP external user directory\n" + "to perform any action if the `roles` parameter in the `` sub-section in the ``\n" + "section is missing.\n" + "\n" + ), link=None, level=4, - num='4.2.3.45') + num="4.2.3.45", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Username_Empty = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Empty', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Empty", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not support authenticating users with empty username\n' - 'when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL not support authenticating users with empty username\n" + "when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.2.4.1') + num="4.2.4.1", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Username_Long = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Long', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Long", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users with a long username of at least 256 bytes\n' - 'when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users with a long username of at least 256 bytes\n" + "when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.2.4.2') + num="4.2.4.2", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Username_UTF8 = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.UTF8', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.UTF8", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authentication users 
with a username that contains [UTF-8] characters\n' - 'when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support authentication users with a username that contains [UTF-8] characters\n" + "when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.2.4.3') + num="4.2.4.3", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_Empty = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Empty', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Empty", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not support authenticating users with empty passwords\n' - 'even if an empty password is valid for the user and\n' - 'is allowed by the [LDAP] server when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL not support authenticating users with empty passwords\n" + "even if an empty password is valid for the user and\n" + "is allowed by the [LDAP] server when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.2.4.4') + num="4.2.4.4", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_Long = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Long', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Long", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support long password of at least 256 bytes\n' - 'that can be used to authenticate users when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support long password of at least 256 bytes\n" + "that can be used to authenticate users when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.2.4.5') + num="4.2.4.5", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_UTF8 = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.UTF8', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.UTF8", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [UTF-8] characters in passwords\n' - 'used to authenticate users when using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support [UTF-8] characters in passwords\n" + "used to authenticate users when using [LDAP] external user directory.\n" + "\n" + ), link=None, level=4, - num='4.2.4.6') + num="4.2.4.6", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Performance = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Performance', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Performance", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL provide better login performance of users authenticated using [LDAP] external user directory\n' - 'when `verification_cooldown` parameter is set to a positive value when comparing\n' - 'to the the case when `verification_cooldown` is turned off either for a single user or multiple users\n' - 'making a large number of repeated requests.\n' - '\n' - ), + "[ClickHouse] SHALL provide better login performance of users authenticated using [LDAP] external user directory\n" + "when `verification_cooldown` parameter is set to a positive 
value when comparing\n" + "to the the case when `verification_cooldown` is turned off either for a single user or multiple users\n" + "making a large number of repeated requests.\n" + "\n" + ), link=None, level=4, - num='4.2.4.7') + num="4.2.4.7", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL reset any currently cached [LDAP] authentication bind requests enabled by the\n' - '`verification_cooldown` parameter in the [LDAP] server configuration section\n' - 'if either `host`, `port`, `auth_dn_prefix`, or `auth_dn_suffix` parameter values\n' - 'change in the configuration file. The reset SHALL cause any subsequent authentication attempts for any user\n' + "[ClickHouse] SHALL reset any currently cached [LDAP] authentication bind requests enabled by the\n" + "`verification_cooldown` parameter in the [LDAP] server configuration section\n" + "if either `host`, `port`, `auth_dn_prefix`, or `auth_dn_suffix` parameter values\n" + "change in the configuration file. The reset SHALL cause any subsequent authentication attempts for any user\n" "to result in contacting the [LDAP] server to verify user's username and password.\n" - '\n' - ), + "\n" + ), link=None, level=4, - num='4.2.4.8') + num="4.2.4.8", +) RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_InvalidPassword = Requirement( - name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.InvalidPassword', - version='1.0', + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.InvalidPassword", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL reset current cached [LDAP] authentication bind request enabled by the\n' - '`verification_cooldown` parameter in the [LDAP] server configuration section\n' - 'for the user if the password provided in the current authentication attempt does not match\n' - 'the valid password provided during the first successful authentication request that was cached\n' - 'for this exact user. The reset SHALL cause the next authentication attempt for this user\n' + "[ClickHouse] SHALL reset current cached [LDAP] authentication bind request enabled by the\n" + "`verification_cooldown` parameter in the [LDAP] server configuration section\n" + "for the user if the password provided in the current authentication attempt does not match\n" + "the valid password provided during the first successful authentication request that was cached\n" + "for this exact user. 
The reset SHALL cause the next authentication attempt for this user\n" "to result in contacting the [LDAP] server to verify user's username and password.\n" - '\n' - ), + "\n" + ), link=None, level=4, - num='4.2.4.9') + num="4.2.4.9", +) SRS_009_ClickHouse_LDAP_External_User_Directory = Specification( - name='SRS-009 ClickHouse LDAP External User Directory', + name="SRS-009 ClickHouse LDAP External User Directory", description=None, author=None, - date=None, - status=None, + date=None, + status=None, approved_by=None, approved_date=None, approved_version=None, @@ -1588,113 +1679,481 @@ SRS_009_ClickHouse_LDAP_External_User_Directory = Specification( parent=None, children=None, headings=( - Heading(name='Revision History', level=1, num='1'), - Heading(name='Introduction', level=1, num='2'), - Heading(name='Terminology', level=1, num='3'), - Heading(name='LDAP', level=2, num='3.1'), - Heading(name='Requirements', level=1, num='4'), - Heading(name='Generic', level=2, num='4.1'), - Heading(name='User Authentication', level=3, num='4.1.1'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication', level=4, num='4.1.1.1'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.MultipleUserDirectories', level=4, num='4.1.1.2'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.MultipleUserDirectories.Lookup', level=4, num='4.1.1.3'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Users.Authentication.NewUsers', level=4, num='4.1.1.4'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.DeletedUsers', level=4, num='4.1.1.5'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Valid', level=4, num='4.1.1.6'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Invalid', level=4, num='4.1.1.7'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.UsernameChanged', level=4, num='4.1.1.8'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.PasswordChanged', level=4, num='4.1.1.9'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.LDAPServerRestart', level=4, num='4.1.1.10'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.ClickHouseServerRestart', level=4, num='4.1.1.11'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel', level=4, num='4.1.1.12'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.ValidAndInvalid', level=4, num='4.1.1.13'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.MultipleServers', level=4, num='4.1.1.14'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.LocalOnly', level=4, num='4.1.1.15'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.LocalAndMultipleLDAP', level=4, num='4.1.1.16'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.SameUser', level=4, num='4.1.1.17'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.DynamicallyAddedAndRemovedUsers', level=4, num='4.1.1.18'), - Heading(name='Connection', level=3, num='4.1.2'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.PlainText', level=4, num='4.1.2.1'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS', level=4, num='4.1.2.2'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.StartTLS', level=4, num='4.1.2.3'), - 
Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.Validation', level=4, num='4.1.2.4'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.SelfSigned', level=4, num='4.1.2.5'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.SpecificCertificationAuthority', level=4, num='4.1.2.6'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.Anonymous', level=4, num='4.1.2.7'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.Unauthenticated', level=4, num='4.1.2.8'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.NamePassword', level=4, num='4.1.2.9'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.UnreachableServer', level=4, num='4.1.2.10'), - Heading(name='Specific', level=2, num='4.2'), - Heading(name='User Discovery', level=3, num='4.2.1'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Users.Lookup.Priority', level=4, num='4.2.1.1'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Restart.Server', level=4, num='4.2.1.2'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Restart.Server.ParallelLogins', level=4, num='4.2.1.3'), - Heading(name='Roles', level=3, num='4.2.2'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Removed', level=4, num='4.2.2.1'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Removed.Privileges', level=4, num='4.2.2.2'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Readded.Privileges', level=4, num='4.2.2.3'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.New', level=4, num='4.2.2.4'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.NewPrivilege', level=4, num='4.2.2.5'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.RemovedPrivilege', level=4, num='4.2.2.6'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Role.NotPresent.Added', level=4, num='4.2.2.7'), - Heading(name='Configuration', level=3, num='4.2.3'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Invalid', level=4, num='4.2.3.1'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Definition', level=4, num='4.2.3.2'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Name', level=4, num='4.2.3.3'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Host', level=4, num='4.2.3.4'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Port', level=4, num='4.2.3.5'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Port.Default', level=4, num='4.2.3.6'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Prefix', level=4, num='4.2.3.7'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Suffix', level=4, num='4.2.3.8'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Value', level=4, num='4.2.3.9'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS', level=4, num='4.2.3.10'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.Default', level=4, num='4.2.3.11'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.No', level=4, num='4.2.3.12'), - 
Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.Yes', level=4, num='4.2.3.13'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.StartTLS', level=4, num='4.2.3.14'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion', level=4, num='4.2.3.15'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion.Values', level=4, num='4.2.3.16'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion.Default', level=4, num='4.2.3.17'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert', level=4, num='4.2.3.18'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Default', level=4, num='4.2.3.19'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Demand', level=4, num='4.2.3.20'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Allow', level=4, num='4.2.3.21'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Try', level=4, num='4.2.3.22'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Never', level=4, num='4.2.3.23'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCertFile', level=4, num='4.2.3.24'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSKeyFile', level=4, num='4.2.3.25'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertDir', level=4, num='4.2.3.26'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertFile', level=4, num='4.2.3.27'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCipherSuite', level=4, num='4.2.3.28'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown', level=4, num='4.2.3.29'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Default', level=4, num='4.2.3.30'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Invalid', level=4, num='4.2.3.31'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax', level=4, num='4.2.3.32'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory', level=4, num='4.2.3.33'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne', level=4, num='4.2.3.34'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax', level=4, num='4.2.3.35'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server', level=4, num='4.2.3.36'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Empty', level=4, num='4.2.3.37'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Missing', level=4, num='4.2.3.38'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.MoreThanOne', level=4, num='4.2.3.39'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Invalid', level=4, num='4.2.3.40'), - 
Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles', level=4, num='4.2.3.41'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.MoreThanOne', level=4, num='4.2.3.42'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Invalid', level=4, num='4.2.3.43'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Empty', level=4, num='4.2.3.44'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Missing', level=4, num='4.2.3.45'), - Heading(name='Authentication', level=3, num='4.2.4'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Empty', level=4, num='4.2.4.1'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Long', level=4, num='4.2.4.2'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.UTF8', level=4, num='4.2.4.3'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Empty', level=4, num='4.2.4.4'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Long', level=4, num='4.2.4.5'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.UTF8', level=4, num='4.2.4.6'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Performance', level=4, num='4.2.4.7'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters', level=4, num='4.2.4.8'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.InvalidPassword', level=4, num='4.2.4.9'), - Heading(name='References', level=1, num='5'), + Heading(name="Revision History", level=1, num="1"), + Heading(name="Introduction", level=1, num="2"), + Heading(name="Terminology", level=1, num="3"), + Heading(name="LDAP", level=2, num="3.1"), + Heading(name="Requirements", level=1, num="4"), + Heading(name="Generic", level=2, num="4.1"), + Heading(name="User Authentication", level=3, num="4.1.1"), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication", + level=4, + num="4.1.1.1", ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.MultipleUserDirectories", + level=4, + num="4.1.1.2", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.MultipleUserDirectories.Lookup", + level=4, + num="4.1.1.3", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Users.Authentication.NewUsers", + level=4, + num="4.1.1.4", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.DeletedUsers", + level=4, + num="4.1.1.5", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Valid", + level=4, + num="4.1.1.6", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Invalid", + level=4, + num="4.1.1.7", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.UsernameChanged", + level=4, + num="4.1.1.8", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.PasswordChanged", + level=4, + num="4.1.1.9", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.LDAPServerRestart", + level=4, + num="4.1.1.10", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.ClickHouseServerRestart", + level=4, + num="4.1.1.11", + ), + Heading( + 
name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel", + level=4, + num="4.1.1.12", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.ValidAndInvalid", + level=4, + num="4.1.1.13", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.MultipleServers", + level=4, + num="4.1.1.14", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.LocalOnly", + level=4, + num="4.1.1.15", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.LocalAndMultipleLDAP", + level=4, + num="4.1.1.16", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.SameUser", + level=4, + num="4.1.1.17", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Parallel.DynamicallyAddedAndRemovedUsers", + level=4, + num="4.1.1.18", + ), + Heading(name="Connection", level=3, num="4.1.2"), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.PlainText", + level=4, + num="4.1.2.1", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS", + level=4, + num="4.1.2.2", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.StartTLS", + level=4, + num="4.1.2.3", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.Validation", + level=4, + num="4.1.2.4", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.SelfSigned", + level=4, + num="4.1.2.5", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Protocol.TLS.Certificate.SpecificCertificationAuthority", + level=4, + num="4.1.2.6", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.Anonymous", + level=4, + num="4.1.2.7", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.Unauthenticated", + level=4, + num="4.1.2.8", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.Mechanism.NamePassword", + level=4, + num="4.1.2.9", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Connection.Authentication.UnreachableServer", + level=4, + num="4.1.2.10", + ), + Heading(name="Specific", level=2, num="4.2"), + Heading(name="User Discovery", level=3, num="4.2.1"), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Users.Lookup.Priority", + level=4, + num="4.2.1.1", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Restart.Server", + level=4, + num="4.2.1.2", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Restart.Server.ParallelLogins", + level=4, + num="4.2.1.3", + ), + Heading(name="Roles", level=3, num="4.2.2"), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Removed", + level=4, + num="4.2.2.1", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Removed.Privileges", + level=4, + num="4.2.2.2", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.Readded.Privileges", + level=4, + num="4.2.2.3", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.New", + level=4, + num="4.2.2.4", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.NewPrivilege", + level=4, + num="4.2.2.5", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.RemovedPrivilege", + level=4, + num="4.2.2.6", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Role.NotPresent.Added", 
+ level=4, + num="4.2.2.7", + ), + Heading(name="Configuration", level=3, num="4.2.3"), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Invalid", + level=4, + num="4.2.3.1", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Definition", + level=4, + num="4.2.3.2", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Name", + level=4, + num="4.2.3.3", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Host", + level=4, + num="4.2.3.4", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Port", + level=4, + num="4.2.3.5", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Port.Default", + level=4, + num="4.2.3.6", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Prefix", + level=4, + num="4.2.3.7", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Suffix", + level=4, + num="4.2.3.8", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.AuthDN.Value", + level=4, + num="4.2.3.9", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS", + level=4, + num="4.2.3.10", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.Default", + level=4, + num="4.2.3.11", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.No", + level=4, + num="4.2.3.12", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.Yes", + level=4, + num="4.2.3.13", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.EnableTLS.Options.StartTLS", + level=4, + num="4.2.3.14", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion", + level=4, + num="4.2.3.15", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion.Values", + level=4, + num="4.2.3.16", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSMinimumProtocolVersion.Default", + level=4, + num="4.2.3.17", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert", + level=4, + num="4.2.3.18", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Default", + level=4, + num="4.2.3.19", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Demand", + level=4, + num="4.2.3.20", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Allow", + level=4, + num="4.2.3.21", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Try", + level=4, + num="4.2.3.22", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSRequireCert.Options.Never", + level=4, + num="4.2.3.23", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCertFile", + level=4, + num="4.2.3.24", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSKeyFile", + level=4, + num="4.2.3.25", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertDir", + level=4, + num="4.2.3.26", + ), + Heading( + 
name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertFile", + level=4, + num="4.2.3.27", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCipherSuite", + level=4, + num="4.2.3.28", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown", + level=4, + num="4.2.3.29", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Default", + level=4, + num="4.2.3.30", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Invalid", + level=4, + num="4.2.3.31", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax", + level=4, + num="4.2.3.32", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory", + level=4, + num="4.2.3.33", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne", + level=4, + num="4.2.3.34", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax", + level=4, + num="4.2.3.35", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server", + level=4, + num="4.2.3.36", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Empty", + level=4, + num="4.2.3.37", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Missing", + level=4, + num="4.2.3.38", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.MoreThanOne", + level=4, + num="4.2.3.39", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Invalid", + level=4, + num="4.2.3.40", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles", + level=4, + num="4.2.3.41", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.MoreThanOne", + level=4, + num="4.2.3.42", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Invalid", + level=4, + num="4.2.3.43", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Empty", + level=4, + num="4.2.3.44", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Missing", + level=4, + num="4.2.3.45", + ), + Heading(name="Authentication", level=3, num="4.2.4"), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Empty", + level=4, + num="4.2.4.1", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Long", + level=4, + num="4.2.4.2", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.UTF8", + level=4, + num="4.2.4.3", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Empty", + level=4, + num="4.2.4.4", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Long", + level=4, + num="4.2.4.5", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.UTF8", + level=4, + num="4.2.4.6", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Performance", + level=4, + num="4.2.4.7", + ), + Heading( + 
name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters", + level=4, + num="4.2.4.8", + ), + Heading( + name="RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.InvalidPassword", + level=4, + num="4.2.4.9", + ), + Heading(name="References", level=1, num="5"), + ), requirements=( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication, RQ_SRS_009_LDAP_ExternalUserDirectory_MultipleUserDirectories, @@ -1788,8 +2247,8 @@ SRS_009_ClickHouse_LDAP_External_User_Directory = Specification( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Performance, RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters, RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_InvalidPassword, - ), - content=''' + ), + content=""" # SRS-009 ClickHouse LDAP External User Directory # Software Requirements Specification @@ -2600,4 +3059,5 @@ to result in contacting the [LDAP] server to verify user's username and password [Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/ldap/external_user_directory/requirements/requirements.md [Git]: https://git-scm.com/ [GitHub]: https://github.com -''') +""", +) diff --git a/tests/testflows/ldap/external_user_directory/tests/authentications.py b/tests/testflows/ldap/external_user_directory/tests/authentications.py index 4806a75e8b1..83daa175a24 100644 --- a/tests/testflows/ldap/external_user_directory/tests/authentications.py +++ b/tests/testflows/ldap/external_user_directory/tests/authentications.py @@ -13,7 +13,7 @@ servers = { "port": "389", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, "openldap2": { "host": "openldap2", @@ -22,13 +22,15 @@ servers = { "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", "tls_require_cert": "never", - } + }, } + @TestOutline -def add_user_to_ldap_and_login(self, server, user=None, ch_user=None, login=None, exitcode=None, message=None): - """Add user to LDAP and ClickHouse and then try to login. - """ +def add_user_to_ldap_and_login( + self, server, user=None, ch_user=None, login=None, exitcode=None, message=None +): + """Add user to LDAP and ClickHouse and then try to login.""" self.context.ldap_node = self.context.cluster.node(server) if ch_user is None: @@ -42,78 +44,126 @@ def add_user_to_ldap_and_login(self, server, user=None, ch_user=None, login=None username = login.get("username", user["cn"]) password = login.get("password", user["userpassword"]) - login_and_execute_query(username=username, password=password, exitcode=exitcode, message=message) + login_and_execute_query( + username=username, password=password, exitcode=exitcode, message=message + ) + def login_with_valid_username_and_password(users, i, iterations=10): - """Login with valid username and password. 
- """ + """Login with valid username and password.""" with When(f"valid users try to login #{i}"): for i in range(iterations): - random_user = users[random.randint(0, len(users)-1)] - login_and_execute_query(username=random_user["cn"], password=random_user["userpassword"], steps=False) + random_user = users[random.randint(0, len(users) - 1)] + login_and_execute_query( + username=random_user["cn"], + password=random_user["userpassword"], + steps=False, + ) + def login_with_valid_username_and_invalid_password(users, i, iterations=10): - """Login with valid username and invalid password. - """ + """Login with valid username and invalid password.""" with When(f"users try to login with valid username and invalid password #{i}"): for i in range(iterations): - random_user = users[random.randint(0, len(users)-1)] - login_and_execute_query(username=random_user["cn"], + random_user = users[random.randint(0, len(users) - 1)] + login_and_execute_query( + username=random_user["cn"], password=(random_user["userpassword"] + randomword(1)), exitcode=4, message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name", - steps=False) + steps=False, + ) + def login_with_invalid_username_and_valid_password(users, i, iterations=10): - """Login with invalid username and valid password. - """ + """Login with invalid username and valid password.""" with When(f"users try to login with invalid username and valid password #{i}"): for i in range(iterations): - random_user = dict(users[random.randint(0, len(users)-1)]) + random_user = dict(users[random.randint(0, len(users) - 1)]) random_user["cn"] += randomword(1) - login_and_execute_query(username=random_user["cn"], + login_and_execute_query( + username=random_user["cn"], password=random_user["userpassword"], exitcode=4, message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name", - steps=False) + steps=False, + ) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid( + "1.0" + ), ) def parallel_login(self, server, user_count=10, timeout=300): - """Check that login of valid and invalid LDAP authenticated users works in parallel. 
- """ + """Check that login of valid and invalid LDAP authenticated users works in parallel.""" self.context.ldap_node = self.context.cluster.node(server) user = None with Given("a group of LDAP users"): - users = [{"cn": f"parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)] + users = [ + {"cn": f"parallel_user{i}", "userpassword": randomword(20)} + for i in range(user_count) + ] with ldap_users(*users): tasks = [] with Pool(4) as pool: try: - with When("users try to login in parallel", description=""" + with When( + "users try to login in parallel", + description=""" * with valid username and password * with invalid username and valid password * with valid username and invalid password - """): + """, + ): for i in range(10): - tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,))) - tasks.append(pool.submit(login_with_valid_username_and_invalid_password, (users, i, 50,))) - tasks.append(pool.submit(login_with_invalid_username_and_valid_password, (users, i, 50,))) - + tasks.append( + pool.submit( + login_with_valid_username_and_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_valid_username_and_invalid_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_invalid_username_and_valid_password, + ( + users, + i, + 50, + ), + ) + ) + finally: with Then("it should work"): for task in tasks: task.result(timeout=timeout) - + + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_SameUser("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid( + "1.0" + ), ) def parallel_login_with_the_same_user(self, server, timeout=300): """Check that valid and invalid logins of the same @@ -129,20 +179,51 @@ def parallel_login_with_the_same_user(self, server, timeout=300): tasks = [] with Pool(4) as pool: try: - with When("the same user tries to login in parallel", description=""" + with When( + "the same user tries to login in parallel", + description=""" * with valid username and password * with invalid username and valid password * with valid username and invalid password - """): + """, + ): for i in range(10): - tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,))) - tasks.append(pool.submit(login_with_valid_username_and_invalid_password, (users, i, 50,))) - tasks.append(pool.submit(login_with_invalid_username_and_valid_password, (users, i, 50,))) + tasks.append( + pool.submit( + login_with_valid_username_and_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_valid_username_and_invalid_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_invalid_username_and_valid_password, + ( + users, + i, + 50, + ), + ) + ) finally: with Then("it should work"): for task in tasks: task.result(timeout=timeout) + @TestScenario @Tags("custom config") def login_after_ldap_external_user_directory_is_removed(self, server): @@ -156,13 +237,18 @@ def login_after_ldap_external_user_directory_is_removed(self, server): with And("I attempt to login after LDAP external user directory is removed"): exitcode = 4 message = f"DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name" - login_and_execute_query(username="user2", password="user2", exitcode=exitcode, message=message) + 
login_and_execute_query( + username="user2", password="user2", exitcode=exitcode, message=message + ) + @TestScenario @Tags("custom config") @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_SameUser("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid( + "1.0" + ), ) def parallel_login_with_the_same_user_multiple_servers(self, server, timeout=300): """Check that valid and invalid logins of the same @@ -170,144 +256,239 @@ def parallel_login_with_the_same_user_multiple_servers(self, server, timeout=300 works in parallel. """ with Given("I have two LDAP servers"): - entries = [ - (["openldap1"], []), - (["openldap2"], []) - ] + entries = [(["openldap1"], []), (["openldap2"], [])] with Given("I define only one LDAP user"): users = [{"cn": f"parallel_user1", "userpassword": randomword(20)}] - with And("I create config file to define LDAP external user directory for each LDAP server"): + with And( + "I create config file to define LDAP external user directory for each LDAP server" + ): config = create_entries_ldap_external_user_directory_config_content(entries) - with ldap_external_user_directory(server=None, roles=None, restart=True, config=config): + with ldap_external_user_directory( + server=None, roles=None, restart=True, config=config + ): with ldap_users(*users, node=self.context.cluster.node("openldap1")): with ldap_users(*users, node=self.context.cluster.node("openldap2")): tasks = [] - with Pool(4) as pool: + with Pool(4) as pool: try: - with When("the same user tries to login in parallel", description=""" + with When( + "the same user tries to login in parallel", + description=""" * with valid username and password * with invalid username and valid password * with valid username and invalid password - """): + """, + ): for i in range(10): - tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,))) - tasks.append(pool.submit(login_with_valid_username_and_invalid_password, (users, i, 50,))) - tasks.append(pool.submit(login_with_invalid_username_and_valid_password, (users, i, 50,))) + tasks.append( + pool.submit( + login_with_valid_username_and_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_valid_username_and_invalid_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_invalid_username_and_valid_password, + ( + users, + i, + 50, + ), + ) + ) finally: with Then("it should work"): for task in tasks: task.result(timeout=timeout) + @TestScenario @Tags("custom config") @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_MultipleServers("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_MultipleServers( + "1.0" + ), + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid( + "1.0" + ), ) def parallel_login_with_multiple_servers(self, server, user_count=10, timeout=300): """Check that login of valid and invalid LDAP authenticated users works in parallel using multiple LDAP external user directories. 
""" with Given("I have two LDAP servers"): - entries = [ - (["openldap1"], []), - (["openldap2"], []) - ] + entries = [(["openldap1"], []), (["openldap2"], [])] with And("I define a group of users to be created on each LDAP server"): user_groups = { - "openldap1_users": [{"cn": f"openldap1_parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)], - "openldap2_users": [{"cn": f"openldap2_parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)] + "openldap1_users": [ + {"cn": f"openldap1_parallel_user{i}", "userpassword": randomword(20)} + for i in range(user_count) + ], + "openldap2_users": [ + {"cn": f"openldap2_parallel_user{i}", "userpassword": randomword(20)} + for i in range(user_count) + ], } with And("I have a list of checks that I want to run for each user group"): checks = [ login_with_valid_username_and_password, login_with_valid_username_and_invalid_password, - login_with_invalid_username_and_valid_password + login_with_invalid_username_and_valid_password, ] - with And("I create config file to define LDAP external user directory for each LDAP server"): + with And( + "I create config file to define LDAP external user directory for each LDAP server" + ): config = create_entries_ldap_external_user_directory_config_content(entries) - with ldap_external_user_directory(server=None, roles=None, restart=True, config=config): - with ldap_users(*user_groups["openldap1_users"], node=self.context.cluster.node("openldap1")): - with ldap_users(*user_groups["openldap2_users"], node=self.context.cluster.node("openldap2")): + with ldap_external_user_directory( + server=None, roles=None, restart=True, config=config + ): + with ldap_users( + *user_groups["openldap1_users"], node=self.context.cluster.node("openldap1") + ): + with ldap_users( + *user_groups["openldap2_users"], + node=self.context.cluster.node("openldap2"), + ): tasks = [] with Pool(4) as pool: try: - with When("users in each group try to login in parallel", description=""" + with When( + "users in each group try to login in parallel", + description=""" * with valid username and password * with invalid username and valid password * with valid username and invalid password - """): + """, + ): for i in range(10): for users in user_groups.values(): for check in checks: - tasks.append(pool.submit(check, (users, i, 50,))) + tasks.append( + pool.submit( + check, + ( + users, + i, + 50, + ), + ) + ) finally: with Then("it should work"): for task in tasks: task.result(timeout=timeout) + @TestScenario @Tags("custom config") @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_LocalAndMultipleLDAP("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_LocalAndMultipleLDAP( + "1.0" + ), + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_ValidAndInvalid( + "1.0" + ), ) -def parallel_login_with_rbac_and_multiple_servers(self, server, user_count=10, timeout=300): +def parallel_login_with_rbac_and_multiple_servers( + self, server, user_count=10, timeout=300 +): """Check that login of valid and invalid users works in parallel using local users defined using RBAC and LDAP users authenticated using multiple LDAP external user directories. 
""" with Given("I have two LDAP servers"): - entries = [ - (["openldap1"], []), - (["openldap2"], []) - ] + entries = [(["openldap1"], []), (["openldap2"], [])] with And("I define a group of users to be created on each LDAP server"): user_groups = { - "openldap1_users": [{"cn": f"openldap1_parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)], - "openldap2_users": [{"cn": f"openldap2_parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)], - "local_users": [{"cn": f"local_parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)] + "openldap1_users": [ + {"cn": f"openldap1_parallel_user{i}", "userpassword": randomword(20)} + for i in range(user_count) + ], + "openldap2_users": [ + {"cn": f"openldap2_parallel_user{i}", "userpassword": randomword(20)} + for i in range(user_count) + ], + "local_users": [ + {"cn": f"local_parallel_user{i}", "userpassword": randomword(20)} + for i in range(user_count) + ], } with And("I have a list of checks that I want to run for each user group"): checks = [ login_with_valid_username_and_password, login_with_valid_username_and_invalid_password, - login_with_invalid_username_and_valid_password + login_with_invalid_username_and_valid_password, ] - with And("I create config file to define LDAP external user directory for each LDAP server"): + with And( + "I create config file to define LDAP external user directory for each LDAP server" + ): config = create_entries_ldap_external_user_directory_config_content(entries) - with ldap_external_user_directory(server=None, roles=None, restart=True, config=config): - with ldap_users(*user_groups["openldap1_users"], node=self.context.cluster.node("openldap1")): - with ldap_users(*user_groups["openldap2_users"], node=self.context.cluster.node("openldap2")): + with ldap_external_user_directory( + server=None, roles=None, restart=True, config=config + ): + with ldap_users( + *user_groups["openldap1_users"], node=self.context.cluster.node("openldap1") + ): + with ldap_users( + *user_groups["openldap2_users"], + node=self.context.cluster.node("openldap2"), + ): with rbac_users(*user_groups["local_users"]): tasks = [] with Pool(4) as pool: try: - with When("users in each group try to login in parallel", description=""" + with When( + "users in each group try to login in parallel", + description=""" * with valid username and password * with invalid username and valid password * with valid username and invalid password - """): + """, + ): for i in range(10): for users in user_groups.values(): for check in checks: - tasks.append(pool.submit(check, (users, i, 50,))) + tasks.append( + pool.submit( + check, + ( + users, + i, + 50, + ), + ) + ) finally: with Then("it should work"): for task in tasks: task.result(timeout=timeout) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Parallel_LocalOnly("1.0") @@ -319,7 +500,10 @@ def parallel_login_with_rbac_users(self, server, user_count=10, timeout=300): self.context.ldap_node = self.context.cluster.node(server) user = None - users = [{"cn": f"parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)] + users = [ + {"cn": f"parallel_user{i}", "userpassword": randomword(20)} + for i in range(user_count) + ] with rbac_users(*users): tasks = [] @@ -327,34 +511,61 @@ def parallel_login_with_rbac_users(self, server, user_count=10, timeout=300): try: with When("I login in parallel"): for i in range(10): - tasks.append(pool.submit(login_with_valid_username_and_password, 
(users, i, 50,))) - tasks.append(pool.submit(login_with_valid_username_and_invalid_password, (users, i, 50,))) - tasks.append(pool.submit(login_with_invalid_username_and_valid_password, (users, i, 50,))) + tasks.append( + pool.submit( + login_with_valid_username_and_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_valid_username_and_invalid_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_invalid_username_and_valid_password, + ( + users, + i, + 50, + ), + ) + ) finally: with Then("it should work"): for task in tasks: task.result(timeout=timeout) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Users_Authentication_NewUsers("1.0") ) def login_after_user_is_added_to_ldap(self, server): - """Check that user can login as soon as it is added to LDAP. - """ + """Check that user can login as soon as it is added to LDAP.""" user = {"cn": "myuser", "userpassword": "myuser"} with When(f"I add user to LDAP and try to login"): add_user_to_ldap_and_login(user=user, server=server) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_DeletedUsers("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_DeletedUsers("1.0"), ) def login_after_user_is_deleted_from_ldap(self, server): - """Check that login fails after user is deleted from LDAP. - """ + """Check that login fails after user is deleted from LDAP.""" self.context.ldap_node = self.context.cluster.node(server) user = None @@ -369,23 +580,25 @@ def login_after_user_is_deleted_from_ldap(self, server): delete_user_from_ldap(user) with Then("when I try to login again it should fail"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], exitcode=4, - message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" + message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name", ) finally: with Finally("I make sure LDAP user is deleted"): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_PasswordChanged("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_PasswordChanged("1.0"), ) def login_after_user_password_changed_in_ldap(self, server): - """Check that login fails after user password is changed in LDAP. 
- """ + """Check that login fails after user password is changed in LDAP.""" self.context.ldap_node = self.context.cluster.node(server) user = None @@ -400,9 +613,11 @@ def login_after_user_password_changed_in_ldap(self, server): change_user_password_in_ldap(user, "newpassword") with Then("when I try to login again it should fail"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], exitcode=4, - message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" + message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name", ) with And("when I try to login with the new password it should work"): @@ -413,14 +628,14 @@ def login_after_user_password_changed_in_ldap(self, server): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_UsernameChanged("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_UsernameChanged("1.0"), ) def login_after_user_cn_changed_in_ldap(self, server): - """Check that login fails after user cn is changed in LDAP. - """ + """Check that login fails after user cn is changed in LDAP.""" self.context.ldap_node = self.context.cluster.node(server) user = None new_user = None @@ -436,23 +651,25 @@ def login_after_user_cn_changed_in_ldap(self, server): new_user = change_user_cn_in_ldap(user, "myuser2") with Then("when I try to login again it should fail"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], exitcode=4, - message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" + message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name", ) finally: with Finally("I make sure LDAP user is deleted"): if new_user is not None: delete_user_from_ldap(new_user, exitcode=None) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_LDAPServerRestart("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_LDAPServerRestart("1.0"), ) def login_after_ldap_server_is_restarted(self, server, timeout=60): - """Check that login succeeds after LDAP server is restarted. 
- """ + """Check that login succeeds after LDAP server is restarted.""" self.context.ldap_node = self.context.cluster.node(server) user = None @@ -466,12 +683,16 @@ def login_after_ldap_server_is_restarted(self, server, timeout=60): with When("I restart LDAP server"): self.context.ldap_node.restart() - with Then("I try to login until it works", description=f"timeout {timeout} sec"): + with Then( + "I try to login until it works", description=f"timeout {timeout} sec" + ): started = time.time() while True: - r = self.context.node.query("SELECT 1", + r = self.context.node.query( + "SELECT 1", settings=[("user", user["cn"]), ("password", user["userpassword"])], - no_checks=True) + no_checks=True, + ) if r.exitcode == 0: break assert time.time() - started < timeout, error(r.output) @@ -480,14 +701,14 @@ def login_after_ldap_server_is_restarted(self, server, timeout=60): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_ClickHouseServerRestart("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_ClickHouseServerRestart("1.0"), ) def login_after_clickhouse_server_is_restarted(self, server, timeout=60): - """Check that login succeeds after ClickHouse server is restarted. - """ + """Check that login succeeds after ClickHouse server is restarted.""" self.context.ldap_node = self.context.cluster.node(server) user = None @@ -501,12 +722,16 @@ def login_after_clickhouse_server_is_restarted(self, server, timeout=60): with When("I restart ClickHouse server"): self.context.node.restart() - with Then("I try to login until it works", description=f"timeout {timeout} sec"): + with Then( + "I try to login until it works", description=f"timeout {timeout} sec" + ): started = time.time() while True: - r = self.context.node.query("SELECT 1", + r = self.context.node.query( + "SELECT 1", settings=[("user", user["cn"]), ("password", user["userpassword"])], - no_checks=True) + no_checks=True, + ) if r.exitcode == 0: break assert time.time() - started < timeout, error(r.output) @@ -515,28 +740,30 @@ def login_after_clickhouse_server_is_restarted(self, server, timeout=60): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_Empty("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_Empty("1.0"), ) def valid_username_with_valid_empty_password(self, server): - """Check that we can't login using valid username that has empty password. 
- """ + """Check that we can't login using valid username that has empty password.""" user = {"cn": "empty_password", "userpassword": ""} exitcode = 4 message = f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name" - add_user_to_ldap_and_login(user=user, exitcode=exitcode, message=message, server=server) + add_user_to_ldap_and_login( + user=user, exitcode=exitcode, message=message, server=server + ) + @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_Empty("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0"), + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_Empty("1.0"), ) def valid_username_and_invalid_empty_password(self, server): - """Check that we can't login using valid username but invalid empty password. - """ + """Check that we can't login using valid username but invalid empty password.""" username = "user_non_empty_password" user = {"cn": username, "userpassword": username} login = {"password": ""} @@ -544,25 +771,24 @@ def valid_username_and_invalid_empty_password(self, server): exitcode = 4 message = f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) + add_user_to_ldap_and_login( + user=user, login=login, exitcode=exitcode, message=message, server=server + ) + @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid("1.0")) def valid_username_and_password(self, server): - """Check that we can login using valid username and password. - """ + """Check that we can login using valid username and password.""" username = "valid_username_and_password" user = {"cn": username, "userpassword": username} with When(f"I add user {username} to LDAP and try to login"): add_user_to_ldap_and_login(user=user, server=server) + @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0")) def valid_username_and_password_invalid_server(self, server=None): """Check that we can't login using valid username and valid password but for a different server. @@ -572,124 +798,133 @@ def valid_username_and_password_invalid_server(self, server=None): exitcode = 4 message = f"DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name" - login_and_execute_query(username="user2", password="user2", exitcode=exitcode, message=message) + login_and_execute_query( + username="user2", password="user2", exitcode=exitcode, message=message + ) + @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Username_Long("1.0"), + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid("1.0"), + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Username_Long("1.0"), ) def valid_long_username_and_short_password(self, server): - """Check that we can login using valid very long username and short password. 
- """ + """Check that we can login using valid very long username and short password.""" username = "long_username_12345678901234567890123456789012345678901234567890123456789012345678901234567890" user = {"cn": username, "userpassword": "long_username"} add_user_to_ldap_and_login(user=user, server=server) + @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0")) def invalid_long_username_and_valid_short_password(self, server): - """Check that we can't login using slightly invalid long username but valid password. - """ + """Check that we can't login using slightly invalid long username but valid password.""" username = "long_username_12345678901234567890123456789012345678901234567890123456789012345678901234567890" user = {"cn": username, "userpassword": "long_username"} login = {"username": f"{username}?"} exitcode = 4 - message=f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" + message = f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login( + user=user, login=login, exitcode=exitcode, message=message, server=server + ) - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_Long("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid("1.0"), + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_Long("1.0"), ) def valid_short_username_and_long_password(self, server): - """Check that we can login using valid short username with very long password. - """ + """Check that we can login using valid short username with very long password.""" username = "long_password" - user = {"cn": username, "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890"} + user = { + "cn": username, + "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890", + } add_user_to_ldap_and_login(user=user, server=server) + @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0")) def valid_short_username_and_invalid_long_password(self, server): - """Check that we can't login using valid short username and invalid long password. 
- """ + """Check that we can't login using valid short username and invalid long password.""" username = "long_password" - user = {"cn": username, "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890"} + user = { + "cn": username, + "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890", + } login = {"password": user["userpassword"] + "1"} exitcode = 4 - message=f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + message = f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login( + user=user, login=login, exitcode=exitcode, message=message, server=server + ) - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0")) def valid_username_and_invalid_password(self, server): - """Check that we can't login using valid username and invalid password. - """ + """Check that we can't login using valid username and invalid password.""" username = "valid_username_and_invalid_password" user = {"cn": username, "userpassword": username} login = {"password": user["userpassword"] + "1"} exitcode = 4 - message=f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + message = f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login( + user=user, login=login, exitcode=exitcode, message=message, server=server + ) - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Invalid("1.0")) def invalid_username_and_valid_password(self, server): - """Check that we can't login using slightly invalid username but valid password. - """ + """Check that we can't login using slightly invalid username but valid password.""" username = "invalid_username_and_valid_password" user = {"cn": username, "userpassword": username} login = {"username": user["cn"] + "1"} exitcode = 4 - message=f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" + message = f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name" + + add_user_to_ldap_and_login( + user=user, login=login, exitcode=exitcode, message=message, server=server + ) - add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server) @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Username_UTF8("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid("1.0"), + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Username_UTF8("1.0"), ) def valid_utf8_username_and_ascii_password(self, server): - """Check that we can login using valid utf-8 username with ascii password. 
- """ + """Check that we can login using valid utf-8 username with ascii password.""" username = "utf8_username_Gãńdåłf_Thê_Gręât" user = {"cn": username, "userpassword": "utf8_username"} add_user_to_ldap_and_login(user=user, server=server) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Valid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_UTF8("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_UTF8("1.0"), ) def valid_ascii_username_and_utf8_password(self, server): - """Check that we can login using valid ascii username with utf-8 password. - """ + """Check that we can login using valid ascii username with utf-8 password.""" username = "utf8_password" user = {"cn": username, "userpassword": "utf8_password_Gãńdåłf_Thê_Gręât"} add_user_to_ldap_and_login(user=user, server=server) + @TestScenario def empty_username_and_empty_password(self, server=None): """Check that we can login using empty username and empty password as @@ -697,10 +932,13 @@ def empty_username_and_empty_password(self, server=None): """ login_and_execute_query(username="", password="") + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Default("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Default( + "1.0" + ) ) def default_verification_cooldown_value(self, server, rbac=False): """Check that the default value (0) for the verification cooldown parameter @@ -712,10 +950,18 @@ def default_verification_cooldown_value(self, server, rbac=False): error_exitcode = 4 user = None - with Given("I have an LDAP configuration that uses the default verification_cooldown value (0)"): - servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} + with Given( + "I have an LDAP configuration that uses the default verification_cooldown value (0)" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -726,26 +972,39 @@ def default_verification_cooldown_value(self, server, rbac=False): with ldap_servers(servers): with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): + with ldap_external_user_directory( + server=server, roles=roles, restart=True + ): with When("I login and execute a query"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) with And("I change user password in LDAP"): change_user_password_in_ldap(user, "newpassword") - with Then("when I try to login immediately with the old user password it should fail"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], - exitcode=error_exitcode, message=error_message) + with Then( + "when I try to login immediately with the old user password it should fail" + ): + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], + exitcode=error_exitcode, + message=error_message, + ) finally: with Finally("I make sure LDAP user is deleted"): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Tags("verification_cooldown") @Requirements( - 
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown( + "1.0" + ) ) def valid_verification_cooldown_value_cn_change(self, server, rbac=False): """Check that we can perform requests without contacting the LDAP server @@ -758,15 +1017,19 @@ def valid_verification_cooldown_value_cn_change(self, server, rbac=False): user = None new_user = None - with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": "2" - }} + with Given( + "I have an LDAP configuration that sets verification_cooldown parameter to 2 sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "2", + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -777,30 +1040,47 @@ def valid_verification_cooldown_value_cn_change(self, server, rbac=False): with ldap_servers(servers): with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): + with ldap_external_user_directory( + server=server, roles=roles, restart=True + ): with When("I login and execute a query"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) with And("I change user cn in LDAP"): new_user = change_user_cn_in_ldap(user, "testVCD2") - with Then("when I try to login again with the old user cn it should work"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + with Then( + "when I try to login again with the old user cn it should work" + ): + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) - with And("when I sleep for 2 seconds and try to log in, it should fail"): + with And( + "when I sleep for 2 seconds and try to log in, it should fail" + ): time.sleep(2) - login_and_execute_query(username=user["cn"], password=user["userpassword"], - exitcode=error_exitcode, message=error_message) + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], + exitcode=error_exitcode, + message=error_message, + ) finally: with Finally("I make sure LDAP user is deleted"): if new_user is not None: delete_user_from_ldap(new_user, exitcode=None) + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown( + "1.0" + ) ) def valid_verification_cooldown_value_password_change(self, server, rbac=False): """Check that we can perform requests without contacting the LDAP server @@ -812,15 +1092,19 @@ def valid_verification_cooldown_value_password_change(self, server, rbac=False): error_exitcode = 4 user = None - with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": "2" - }} + with Given( + "I have an LDAP configuration that sets 
verification_cooldown parameter to 2 sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "2", + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -831,30 +1115,47 @@ def valid_verification_cooldown_value_password_change(self, server, rbac=False): with ldap_servers(servers): with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): + with ldap_external_user_directory( + server=server, roles=roles, restart=True + ): with When("I login and execute a query"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) with And("I change user password in LDAP"): change_user_password_in_ldap(user, "newpassword") - with Then("when I try to login again with the old password it should work"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + with Then( + "when I try to login again with the old password it should work" + ): + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) - with And("when I sleep for 2 seconds and try to log in, it should fail"): + with And( + "when I sleep for 2 seconds and try to log in, it should fail" + ): time.sleep(2) - login_and_execute_query(username=user["cn"], password=user["userpassword"], - exitcode=error_exitcode, message=error_message) + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], + exitcode=error_exitcode, + message=error_message, + ) finally: with Finally("I make sure LDAP user is deleted"): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown( + "1.0" + ) ) def valid_verification_cooldown_value_ldap_unavailable(self, server, rbac=False): """Check that we can perform requests without contacting the LDAP server @@ -866,15 +1167,19 @@ def valid_verification_cooldown_value_ldap_unavailable(self, server, rbac=False) error_exitcode = 4 user = None - with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": "300" - }} + with Given( + "I have an LDAP configuration that sets verification_cooldown parameter to 2 sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "300", + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -885,17 +1190,25 @@ def valid_verification_cooldown_value_ldap_unavailable(self, server, rbac=False) with ldap_servers(servers): with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): + with ldap_external_user_directory( + server=server, roles=roles, restart=True + ): with When("I login and execute a query"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + 
username=user["cn"], password=user["userpassword"] + ) try: with And("then I stop the ldap server"): self.context.ldap_node.stop() - with Then("when I try to login again with the server offline it should work"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + with Then( + "when I try to login again with the server offline it should work" + ): + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) finally: with Finally("I start the ldap server back up"): @@ -906,22 +1219,26 @@ def valid_verification_cooldown_value_ldap_unavailable(self, server, rbac=False) if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestOutline def repeat_requests(self, server, iterations, vcd_value, rbac=False): - """Run repeated requests from some user to the LDAP server. - """ + """Run repeated requests from some user to the LDAP server.""" user = None - with Given(f"I have an LDAP configuration that sets verification_cooldown parameter to {vcd_value} sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": vcd_value - }} + with Given( + f"I have an LDAP configuration that sets verification_cooldown parameter to {vcd_value} sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": vcd_value, + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -932,10 +1249,14 @@ def repeat_requests(self, server, iterations, vcd_value, rbac=False): with ldap_servers(servers): with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): + with ldap_external_user_directory( + server=server, roles=roles, restart=True + ): with When(f"I login and execute some query {iterations} times"): start_time = time.time() - r = self.context.node.command(f"time for i in {{1..{iterations}}}; do clickhouse client -q \"SELECT 1\" --user {user['cn']} --password {user['userpassword']} > /dev/null; done") + r = self.context.node.command( + f"time for i in {{1..{iterations}}}; do clickhouse client -q \"SELECT 1\" --user {user['cn']} --password {user['userpassword']} > /dev/null; done" + ) end_time = time.time() return end_time - start_time @@ -945,10 +1266,13 @@ def repeat_requests(self, server, iterations, vcd_value, rbac=False): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Performance("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Performance( + "1.0" + ) ) def verification_cooldown_performance(self, server, rbac=False, iterations=5000): """Check login performance when the verification cooldown @@ -959,48 +1283,66 @@ def verification_cooldown_performance(self, server, rbac=False, iterations=5000) vcd_time = 0 no_vcd_time = 0 - with Example(f"Repeated requests with verification cooldown parameter set to 600 seconds, {iterations} iterations"): - vcd_time = repeat_requests(server=server, iterations=iterations, vcd_value="600", rbac=rbac) + with Example( + f"Repeated requests with verification cooldown parameter set to 600 seconds, {iterations} iterations" + ): + vcd_time = repeat_requests( + server=server, 
iterations=iterations, vcd_value="600", rbac=rbac + ) metric("login_with_vcd_value_600", units="seconds", value=vcd_time) - with Example(f"Repeated requests with verification cooldown parameter set to 0 seconds, {iterations} iterations"): - no_vcd_time = repeat_requests(server=server, iterations=iterations, vcd_value="0", rbac=rbac) + with Example( + f"Repeated requests with verification cooldown parameter set to 0 seconds, {iterations} iterations" + ): + no_vcd_time = repeat_requests( + server=server, iterations=iterations, vcd_value="0", rbac=rbac + ) metric("login_with_vcd_value_0", units="seconds", value=no_vcd_time) with Then("Log the performance improvement as a percentage"): - metric("percentage_improvement", units="%", value=100*(no_vcd_time - vcd_time)/vcd_time) + metric( + "percentage_improvement", + units="%", + value=100 * (no_vcd_time - vcd_time) / vcd_time, + ) + @TestOutline -def check_verification_cooldown_reset_on_core_server_parameter_change(self, server, - parameter_name, parameter_value, rbac=False): +def check_verification_cooldown_reset_on_core_server_parameter_change( + self, server, parameter_name, parameter_value, rbac=False +): """Check that the LDAP login cache is reset for all the LDAP authentication users when verification_cooldown parameter is set after one of the core server parameters is changed in the LDAP server configuration. """ - config_d_dir="/etc/clickhouse-server/config.d" - config_file="ldap_servers.xml" + config_d_dir = "/etc/clickhouse-server/config.d" + config_file = "ldap_servers.xml" error_message = "DB::Exception: {user}: Authentication failed: password is incorrect or there is no user with such name" error_exitcode = 4 user = None - config=None - updated_config=None + config = None + updated_config = None - with Given("I have an LDAP configuration that sets verification_cooldown parameter to 600 sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": "600" - }} + with Given( + "I have an LDAP configuration that sets verification_cooldown parameter to 600 sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "600", + } + } self.context.ldap_node = self.context.cluster.node(server) with And("LDAP authenticated user"): users = [ {"cn": f"testVCD_0", "userpassword": "testVCD_0"}, - {"cn": f"testVCD_1", "userpassword": "testVCD_1"} + {"cn": f"testVCD_1", "userpassword": "testVCD_1"}, ] with And("I create LDAP servers configuration file"): @@ -1009,88 +1351,137 @@ def check_verification_cooldown_reset_on_core_server_parameter_change(self, serv with ldap_users(*users) as users: with ldap_servers(servers=None, restart=False, config=config): with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): + with ldap_external_user_directory( + server=server, roles=roles, restart=True + ): with When("I login and execute a query"): for user in users: with By(f"as user {user['cn']}"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) with And("I change user password in LDAP"): for user in users: with By(f"for user {user['cn']}"): change_user_password_in_ldap(user, "newpassword") - with And(f"I 
change the server {parameter_name} core parameter", description=f"{parameter_value}"): + with And( + f"I change the server {parameter_name} core parameter", + description=f"{parameter_value}", + ): servers["openldap1"][parameter_name] = parameter_value - with And("I create an updated the config file that has a different server host name"): - updated_config = create_ldap_servers_config_content(servers, config_d_dir, config_file) + with And( + "I create an updated the config file that has a different server host name" + ): + updated_config = create_ldap_servers_config_content( + servers, config_d_dir, config_file + ) with modify_config(updated_config, restart=False): - with Then("when I try to log in it should fail as cache should have been reset"): + with Then( + "when I try to log in it should fail as cache should have been reset" + ): for user in users: with By(f"as user {user['cn']}"): - login_and_execute_query(username=user["cn"], password=user["userpassword"], - exitcode=error_exitcode, message=error_message.format(user=user["cn"])) + login_and_execute_query( + username=user["cn"], + password=user["userpassword"], + exitcode=error_exitcode, + message=error_message.format(user=user["cn"]), + ) + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters( + "1.0" + ) ) -def verification_cooldown_reset_on_server_host_parameter_change(self, server, rbac=False): +def verification_cooldown_reset_on_server_host_parameter_change( + self, server, rbac=False +): """Check that the LDAP login cache is reset for all the LDAP authentication users when verification_cooldown parameter is set after server host name is changed in the LDAP server configuration. """ - check_verification_cooldown_reset_on_core_server_parameter_change(server=server, - parameter_name="host", parameter_value="openldap2", rbac=rbac) + check_verification_cooldown_reset_on_core_server_parameter_change( + server=server, parameter_name="host", parameter_value="openldap2", rbac=rbac + ) + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters( + "1.0" + ) ) -def verification_cooldown_reset_on_server_port_parameter_change(self, server, rbac=False): +def verification_cooldown_reset_on_server_port_parameter_change( + self, server, rbac=False +): """Check that the LDAP login cache is reset for all the LDAP authentication users when verification_cooldown parameter is set after server port is changed in the LDAP server configuration. 
""" - check_verification_cooldown_reset_on_core_server_parameter_change(server=server, - parameter_name="port", parameter_value="9006", rbac=rbac) + check_verification_cooldown_reset_on_core_server_parameter_change( + server=server, parameter_name="port", parameter_value="9006", rbac=rbac + ) + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters( + "1.0" + ) ) -def verification_cooldown_reset_on_server_auth_dn_prefix_parameter_change(self, server, rbac=False): +def verification_cooldown_reset_on_server_auth_dn_prefix_parameter_change( + self, server, rbac=False +): """Check that the LDAP login cache is reset for all the LDAP authentication users when verification_cooldown parameter is set after server auth_dn_prefix is changed in the LDAP server configuration. """ - check_verification_cooldown_reset_on_core_server_parameter_change(server=server, - parameter_name="auth_dn_prefix", parameter_value="cxx=", rbac=rbac) + check_verification_cooldown_reset_on_core_server_parameter_change( + server=server, + parameter_name="auth_dn_prefix", + parameter_value="cxx=", + rbac=rbac, + ) + @TestScenario @Tags("verification_cooldown") @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters( + "1.0" + ) ) -def verification_cooldown_reset_on_server_auth_dn_suffix_parameter_change(self, server, rbac=False): +def verification_cooldown_reset_on_server_auth_dn_suffix_parameter_change( + self, server, rbac=False +): """Check that the LDAP login cache is reset for all the LDAP authentication users when verification_cooldown parameter is set after server auth_dn_suffix is changed in the LDAP server configuration. 
""" - check_verification_cooldown_reset_on_core_server_parameter_change(server=server, + check_verification_cooldown_reset_on_core_server_parameter_change( + server=server, parameter_name="auth_dn_suffix", - parameter_value=",ou=company,dc=users,dc=com", rbac=rbac) + parameter_value=",ou=company,dc=users,dc=com", + rbac=rbac, + ) + @TestScenario @Name("verification cooldown reset when invalid password is provided") @Tags("verification_cooldown") @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_InvalidPassword("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_InvalidPassword( + "1.0" + ) ) def scenario(self, server, rbac=False): """Check that cached bind requests for the user are discarded when @@ -1100,15 +1491,19 @@ def scenario(self, server, rbac=False): error_exitcode = 4 error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" - with Given("I have an LDAP configuration that sets verification_cooldown parameter to 600 sec"): - servers = { "openldap1": { - "host": "openldap1", - "port": "389", - "enable_tls": "no", - "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": "600" - }} + with Given( + "I have an LDAP configuration that sets verification_cooldown parameter to 600 sec" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "600", + } + } self.context.ldap_node = self.context.cluster.node(server) @@ -1119,34 +1514,53 @@ def scenario(self, server, rbac=False): with ldap_servers(servers): with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): + with ldap_external_user_directory( + server=server, roles=roles, restart=True + ): with When("I login and execute a query"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) with And("I change user password in LDAP"): change_user_password_in_ldap(user, "newpassword") - with Then("When I try to log in with the cached password it should work"): - login_and_execute_query(username=user["cn"], password=user["userpassword"]) + with Then( + "When I try to log in with the cached password it should work" + ): + login_and_execute_query( + username=user["cn"], password=user["userpassword"] + ) - with And("When I try to log in with an incorrect password it should fail"): - login_and_execute_query(username=user["cn"], password="incorrect", exitcode=error_exitcode, - message=error_message) + with And( + "When I try to log in with an incorrect password it should fail" + ): + login_and_execute_query( + username=user["cn"], + password="incorrect", + exitcode=error_exitcode, + message=error_message, + ) - with And("When I try to log in with the cached password it should fail"): - login_and_execute_query(username=user["cn"], password="incorrect", exitcode=error_exitcode, - message=error_message) + with And( + "When I try to log in with the cached password it should fail" + ): + login_and_execute_query( + username=user["cn"], + password="incorrect", + exitcode=error_exitcode, + message=error_message, + ) finally: with Finally("I make sure LDAP user is deleted"): if user is not None: delete_user_from_ldap(user, exitcode=None) + @TestScenario 
-@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Users_Lookup_Priority("2.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Users_Lookup_Priority("2.0")) def user_lookup_priority(self, server): """Check that users are looked up in the same priority as they are defined in the `` section @@ -1159,35 +1573,56 @@ def user_lookup_priority(self, server): """ self.context.ldap_node = self.context.cluster.node(server) - message="DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" + message = "DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name" exitcode = 4 users = { "default": {"username": "default", "password": "userdefault"}, "local": {"username": "local", "password": "userlocal"}, - "ldap": {"username": "ldap", "password": "userldap"} + "ldap": {"username": "ldap", "password": "userldap"}, } - with ldap_users(*[{"cn": user["username"], "userpassword": user["password"]} for user in users.values()]): + with ldap_users( + *[ + {"cn": user["username"], "userpassword": user["password"]} + for user in users.values() + ] + ): with rbac_users({"cn": "local", "userpassword": "local"}): - with When("I try to login as 'default' user which is also defined in users.xml it should fail"): - login_and_execute_query(**users["default"], exitcode=exitcode, message=message.format(username="default")) + with When( + "I try to login as 'default' user which is also defined in users.xml it should fail" + ): + login_and_execute_query( + **users["default"], + exitcode=exitcode, + message=message.format(username="default"), + ) - with When("I try to login as 'local' user which is also defined in local storage it should fail"): - login_and_execute_query(**users["local"], exitcode=exitcode, message=message.format(username="local")) + with When( + "I try to login as 'local' user which is also defined in local storage it should fail" + ): + login_and_execute_query( + **users["local"], + exitcode=exitcode, + message=message.format(username="local"), + ) - with When("I try to login as 'ldap' user defined only in LDAP it should work"): + with When( + "I try to login as 'ldap' user defined only in LDAP it should work" + ): login_and_execute_query(**users["ldap"]) + @TestOutline(Feature) @Name("user authentications") @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Authentication_Mechanism_NamePassword("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Authentication_Mechanism_NamePassword( + "1.0" + ), ) def feature(self, servers=None, server=None, node="clickhouse1"): - """Check that users can be authenticated using an LDAP external user directory. 
- """ + """Check that users can be authenticated using an LDAP external user directory.""" self.context.node = self.context.cluster.node(node) if servers is None: @@ -1199,11 +1634,20 @@ def feature(self, servers=None, server=None, node="clickhouse1"): with ldap_servers(servers): with rbac_roles("ldap_role") as roles: with ldap_external_user_directory(server=server, roles=roles, restart=True): - for scenario in loads(current_module(), Scenario, filter=~has.tag("custom config") & ~has.tag("verification_cooldown")): + for scenario in loads( + current_module(), + Scenario, + filter=~has.tag("custom config") + & ~has.tag("verification_cooldown"), + ): Scenario(test=scenario)(server=server) - for scenario in loads(current_module(), Scenario, filter=has.tag("custom config")): + for scenario in loads( + current_module(), Scenario, filter=has.tag("custom config") + ): Scenario(test=scenario)(server=server) - for scenario in loads(current_module(), Scenario, filter=has.tag("verification_cooldown")): + for scenario in loads( + current_module(), Scenario, filter=has.tag("verification_cooldown") + ): Scenario(test=scenario)(server=server) diff --git a/tests/testflows/ldap/external_user_directory/tests/common.py b/tests/testflows/ldap/external_user_directory/tests/common.py index 052c5f309bc..871be815a35 100644 --- a/tests/testflows/ldap/external_user_directory/tests/common.py +++ b/tests/testflows/ldap/external_user_directory/tests/common.py @@ -5,13 +5,34 @@ from contextlib import contextmanager import testflows.settings as settings from testflows.core import * from testflows.asserts import error -from ldap.authentication.tests.common import getuid, Config, ldap_servers, add_config, modify_config, restart -from ldap.authentication.tests.common import xmltree, xml_indent, xml_append, xml_with_utf8 -from ldap.authentication.tests.common import ldap_user, ldap_users, add_user_to_ldap, delete_user_from_ldap -from ldap.authentication.tests.common import change_user_password_in_ldap, change_user_cn_in_ldap +from ldap.authentication.tests.common import ( + getuid, + Config, + ldap_servers, + add_config, + modify_config, + restart, +) +from ldap.authentication.tests.common import ( + xmltree, + xml_indent, + xml_append, + xml_with_utf8, +) +from ldap.authentication.tests.common import ( + ldap_user, + ldap_users, + add_user_to_ldap, + delete_user_from_ldap, +) +from ldap.authentication.tests.common import ( + change_user_password_in_ldap, + change_user_cn_in_ldap, +) from ldap.authentication.tests.common import create_ldap_servers_config_content from ldap.authentication.tests.common import randomword + @contextmanager def table(name, create_statement, on_cluster=False): node = current().context.node @@ -26,6 +47,7 @@ def table(name, create_statement, on_cluster=False): else: node.query(f"DROP TABLE IF EXISTS {name}") + @contextmanager def rbac_users(*users, node=None): if node is None: @@ -34,7 +56,9 @@ def rbac_users(*users, node=None): with Given("I have local users"): for user in users: with By(f"creating user {user['cn']}", format_name=False): - node.query(f"CREATE USER OR REPLACE {user['cn']} IDENTIFIED WITH PLAINTEXT_PASSWORD BY '{user['userpassword']}'") + node.query( + f"CREATE USER OR REPLACE {user['cn']} IDENTIFIED WITH PLAINTEXT_PASSWORD BY '{user['userpassword']}'" + ) yield users finally: with Finally("I drop local users"): @@ -42,6 +66,7 @@ def rbac_users(*users, node=None): with By(f"dropping user {user['cn']}", flags=TE, format_name=False): node.query(f"DROP USER IF EXISTS {user['cn']}") + 
@contextmanager
def rbac_roles(*roles, node=None):
    if node is None:
@@ -58,22 +83,31 @@ def rbac_roles(*roles, node=None):
                with By(f"dropping role {role}", flags=TE):
                    node.query(f"DROP ROLE IF EXISTS {role}")

+
def verify_ldap_user_exists(server, username, password):
-    """Check that LDAP user is defined on the LDAP server.
-    """
+    """Check that LDAP user is defined on the LDAP server."""
    with By("searching LDAP database"):
        ldap_node = current().context.cluster.node(server)
        r = ldap_node.command(
-            f"ldapwhoami -H ldap://localhost -D 'cn={user_name},ou=users,dc=company,dc=com' -w {password}")
+            f"ldapwhoami -H ldap://localhost -D 'cn={username},ou=users,dc=company,dc=com' -w {password}"
+        )
        assert r.exitcode == 0, error()

-def create_ldap_external_user_directory_config_content(server=None, roles=None, **kwargs):
-    """Create LDAP external user directory configuration file content.
-    """
-    return create_entries_ldap_external_user_directory_config_content(entries=[([server], [roles])], **kwargs)
-def create_entries_ldap_external_user_directory_config_content(entries, config_d_dir="/etc/clickhouse-server/config.d",
-        config_file="ldap_external_user_directories.xml"):
+def create_ldap_external_user_directory_config_content(
+    server=None, roles=None, **kwargs
+):
+    """Create LDAP external user directory configuration file content."""
+    return create_entries_ldap_external_user_directory_config_content(
+        entries=[([server], [roles])], **kwargs
+    )
+
+
+def create_entries_ldap_external_user_directory_config_content(
+    entries,
+    config_d_dir="/etc/clickhouse-server/config.d",
+    config_file="ldap_external_user_directories.xml",
+):
    """Create configuration file content that contains one or more entries
    for the LDAP external user directory.
@@ -95,9 +129,13 @@ def create_entries_ldap_external_user_directory_config_content(entries, config_d
    path = os.path.join(config_d_dir, config_file)
    name = config_file

-    root = xmltree.fromstring("")
+    root = xmltree.fromstring(
+        ""
+    )
    xml_user_directories = root.find("user_directories")
-    xml_user_directories.append(xmltree.Comment(text=f"LDAP external user directories {uid}"))
+    xml_user_directories.append(
+        xmltree.Comment(text=f"LDAP external user directories {uid}")
+    )

    for entry in entries:
        servers, roles_entries = entry
@@ -116,11 +154,16 @@ def create_entries_ldap_external_user_directory_config_d
        xml_user_directories.append(xml_directory)

    xml_indent(root)
-    content = xml_with_utf8 + str(xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8")
+    content = xml_with_utf8 + str(
+        xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8"
+    )

    return Config(content, path, name, uid, "config.xml")

-def invalid_ldap_external_user_directory_config(server, roles, message, tail=30, timeout=60, config=None):
+
+def invalid_ldap_external_user_directory_config(
+    server, roles, message, tail=30, timeout=60, config=None
+):
    """Check that ClickHouse errors when trying to load invalid LDAP
    external user directory configuration file.
""" @@ -128,17 +171,25 @@ def invalid_ldap_external_user_directory_config(server, roles, message, tail=30, node = current().context.node if config is None: - config = create_ldap_external_user_directory_config_content(server=server, roles=roles) + config = create_ldap_external_user_directory_config_content( + server=server, roles=roles + ) try: with Given("I prepare the error log by writting empty lines into it"): - node.command("echo -e \"%s\" > /var/log/clickhouse-server/clickhouse-server.err.log" % ("-\\n" * tail)) + node.command( + 'echo -e "%s" > /var/log/clickhouse-server/clickhouse-server.err.log' + % ("-\\n" * tail) + ) with When("I add the config", description=config.path): command = f"cat < {config.path}\n{config.content}\nHEREDOC" node.command(command, steps=False, exitcode=0) - with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"): + with Then( + f"{config.preprocessed_name} should be updated", + description=f"timeout {timeout}", + ): started = time.time() command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}" while time.time() - started < timeout: @@ -154,15 +205,24 @@ def invalid_ldap_external_user_directory_config(server, roles, message, tail=30, finally: with Finally(f"I remove {config.name}"): with By("removing invalid configuration file"): - system_config_path = os.path.join(current_dir(), "..", "configs", node.name, "config.d", config.path.split("config.d/")[-1]) - cluster.command(None, f'rm -rf {system_config_path}', timeout=timeout, exitcode=0) + system_config_path = os.path.join( + current_dir(), + "..", + "configs", + node.name, + "config.d", + config.path.split("config.d/")[-1], + ) + cluster.command( + None, f"rm -rf {system_config_path}", timeout=timeout, exitcode=0 + ) with And("restarting the node"): node.restart(safe=False) with Then("error log should contain the expected error message"): started = time.time() - command = f"tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep \"{message}\"" + command = f'tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep "{message}"' while time.time() - started < timeout: exitcode = node.command(command, steps=False).exitcode if exitcode == 0: @@ -170,34 +230,67 @@ def invalid_ldap_external_user_directory_config(server, roles, message, tail=30, time.sleep(1) assert exitcode == 0, error() + @contextmanager -def ldap_external_user_directory(server, roles, config_d_dir="/etc/clickhouse-server/config.d", - config_file=None, timeout=60, restart=True, config=None): - """Add LDAP external user directory. 
- """ +def ldap_external_user_directory( + server, + roles, + config_d_dir="/etc/clickhouse-server/config.d", + config_file=None, + timeout=60, + restart=True, + config=None, +): + """Add LDAP external user directory.""" if config_file is None: config_file = f"ldap_external_user_directory_{getuid()}.xml" if config is None: - config = create_ldap_external_user_directory_config_content(server=server, roles=roles, config_d_dir=config_d_dir, config_file=config_file) + config = create_ldap_external_user_directory_config_content( + server=server, + roles=roles, + config_d_dir=config_d_dir, + config_file=config_file, + ) return add_config(config, restart=restart) + def login(servers, directory_server, *users, config=None): """Configure LDAP server and LDAP external user directory and try to login and execute a query""" with ldap_servers(servers): with rbac_roles(f"role_{getuid()}") as roles: - with ldap_external_user_directory(server=servers[directory_server]["host"], roles=roles, restart=True, config=config): + with ldap_external_user_directory( + server=servers[directory_server]["host"], + roles=roles, + restart=True, + config=config, + ): for user in users: if user.get("login", False): with When(f"I login as {user['username']} and execute query"): - current().context.node.query("SELECT 1", - settings=[("user", user["username"]), ("password", user["password"])], + current().context.node.query( + "SELECT 1", + settings=[ + ("user", user["username"]), + ("password", user["password"]), + ], exitcode=user.get("exitcode", None), - message=user.get("message", None)) + message=user.get("message", None), + ) + @TestStep(When) @Name("I login as {username} and execute query") -def login_and_execute_query(self, username, password, exitcode=None, message=None, steps=True, timeout=60, poll=False): +def login_and_execute_query( + self, + username, + password, + exitcode=None, + message=None, + steps=True, + timeout=60, + poll=False, +): if poll: start_time = time.time() attempt = 0 @@ -205,10 +298,17 @@ def login_and_execute_query(self, username, password, exitcode=None, message=Non with By("repeatedly trying to login until successful or timeout"): while True: with When(f"attempt #{attempt}"): - r = self.context.node.query("SELECT 1", settings=[("user", username), ("password", password)], - no_checks=True, steps=False, timeout=timeout) + r = self.context.node.query( + "SELECT 1", + settings=[("user", username), ("password", password)], + no_checks=True, + steps=False, + timeout=timeout, + ) - if r.exitcode == (0 if exitcode is None else exitcode) and (message in r.output if message is not None else True): + if r.exitcode == (0 if exitcode is None else exitcode) and ( + message in r.output if message is not None else True + ): break if time.time() - start_time > timeout: @@ -216,7 +316,11 @@ def login_and_execute_query(self, username, password, exitcode=None, message=Non attempt += 1 else: - self.context.node.query("SELECT 1", + self.context.node.query( + "SELECT 1", settings=[("user", username), ("password", password)], exitcode=(0 if exitcode is None else exitcode), - message=message, steps=steps, timeout=timeout) + message=message, + steps=steps, + timeout=timeout, + ) diff --git a/tests/testflows/ldap/external_user_directory/tests/connections.py b/tests/testflows/ldap/external_user_directory/tests/connections.py index ba734bb6c71..d2c3c15c3d9 100644 --- a/tests/testflows/ldap/external_user_directory/tests/connections.py +++ b/tests/testflows/ldap/external_user_directory/tests/connections.py @@ -4,22 +4,24 
@@ from testflows.asserts import error from ldap.external_user_directory.tests.common import login from ldap.external_user_directory.requirements import * + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_PlainText("1.0"), RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_No("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port_Default("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_No( + "1.0" + ), + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port_Default("1.0"), ) def plain_text(self): - """Check that we can perform LDAP user authentication using `plain text` connection protocol. - """ + """Check that we can perform LDAP user authentication using `plain text` connection protocol.""" servers = { "openldap1": { "host": "openldap1", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -27,10 +29,11 @@ def plain_text(self): ] login(servers, "openldap1", *users) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_PlainText("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port("1.0"), ) def plain_text_with_custom_port(self): """Check that we can perform LDAP user authentication using `plain text` connection protocol @@ -42,7 +45,7 @@ def plain_text_with_custom_port(self): "port": "3089", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -50,10 +53,11 @@ def plain_text_with_custom_port(self): ] login(servers, "openldap3", *users) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_TLS("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port("1.0"), ) def tls_with_custom_port(self): """Check that we can perform LDAP user authentication using `TLS` connection protocol @@ -65,7 +69,7 @@ def tls_with_custom_port(self): "port": "6036", "tls_require_cert": "never", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -73,10 +77,11 @@ def tls_with_custom_port(self): ] login(servers, "openldap4", *users) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_StartTLS("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port("1.0"), ) def starttls_with_custom_port(self): """Check that we can perform LDAP user authentication using `StartTLS` connection protocol @@ -89,7 +94,7 @@ def starttls_with_custom_port(self): "enable_tls": "starttls", "tls_require_cert": "never", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -97,6 +102,7 @@ def starttls_with_custom_port(self): ] login(servers, "openldap4", *users) + def tls_connection(enable_tls, tls_require_cert): """Try to login using LDAP user authentication over a TLS connection.""" servers = { @@ -105,7 +111,7 @@ def tls_connection(enable_tls, tls_require_cert): 
"enable_tls": enable_tls, "tls_require_cert": tls_require_cert, "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -115,41 +121,64 @@ def tls_connection(enable_tls, tls_require_cert): requirements = [] if tls_require_cert == "never": - requirements = [RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Never("1.0")] + requirements = [ + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Never( + "1.0" + ) + ] elif tls_require_cert == "allow": - requirements = [RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Allow("1.0")] + requirements = [ + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Allow( + "1.0" + ) + ] elif tls_require_cert == "try": - requirements = [RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Try("1.0")] + requirements = [ + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Try( + "1.0" + ) + ] elif tls_require_cert == "demand": - requirements = [RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Demand("1.0")] + requirements = [ + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Demand( + "1.0" + ) + ] - with Example(name=f"tls_require_cert='{tls_require_cert}'", requirements=requirements): + with Example( + name=f"tls_require_cert='{tls_require_cert}'", requirements=requirements + ): login(servers, "openldap2", *users) + @TestScenario -@Examples("enable_tls tls_require_cert", [ - ("yes", "never"), - ("yes", "allow"), - ("yes", "try"), - ("yes", "demand") -]) +@Examples( + "enable_tls tls_require_cert", + [("yes", "never"), ("yes", "allow"), ("yes", "try"), ("yes", "demand")], +) @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_TLS("1.0"), RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_Yes("1.0"), + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_Yes( + "1.0" + ), RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port_Default("1.0"), RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion_Default("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion_Default( + "1.0" + ), ) def tls(self): - """Check that we can perform LDAP user authentication using `TLS` connection protocol. 
- """ + """Check that we can perform LDAP user authentication using `TLS` connection protocol.""" for example in self.examples: tls_connection(*example) + @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_Default("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_Default( + "1.0" + ) ) def tls_enable_tls_default_yes(self): """Check that the default value for the `enable_tls` is set to `yes`.""" @@ -158,7 +187,7 @@ def tls_enable_tls_default_yes(self): "host": "openldap2", "tls_require_cert": "never", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -166,9 +195,12 @@ def tls_enable_tls_default_yes(self): ] login(servers, "openldap2", *users) + @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Default("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert_Options_Default( + "1.0" + ) ) def tls_require_cert_default_demand(self): """Check that the default value for the `tls_require_cert` is set to `demand`.""" @@ -178,7 +210,7 @@ def tls_require_cert_default_demand(self): "enable_tls": "yes", "port": "636", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -186,25 +218,31 @@ def tls_require_cert_default_demand(self): ] login(servers, "openldap2", *users) + @TestScenario -@Examples("enable_tls tls_require_cert", [ - ("starttls", "never"), - ("starttls", "allow"), - ("starttls", "try"), - ("starttls", "demand") -]) +@Examples( + "enable_tls tls_require_cert", + [ + ("starttls", "never"), + ("starttls", "allow"), + ("starttls", "try"), + ("starttls", "demand"), + ], +) @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Protocol_StartTLS("1.0"), RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_StartTLS("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port_Default("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS_Options_StartTLS( + "1.0" + ), + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Port_Default("1.0"), ) def starttls(self): - """Check that we can perform LDAP user authentication using legacy `StartTLS` connection protocol. 
- """ + """Check that we can perform LDAP user authentication using legacy `StartTLS` connection protocol.""" for example in self.examples: tls_connection(*example) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCipherSuite("1.0") @@ -219,7 +257,7 @@ def tls_cipher_suite(self): "tls_cipher_suite": "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC", "tls_minimum_protocol_version": "tls1.2", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } users = [ @@ -227,18 +265,26 @@ def tls_cipher_suite(self): ] login(servers, "openldap4", *users) + @TestOutline(Scenario) @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion_Values("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion( + "1.0" + ), + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSMinimumProtocolVersion_Values( + "1.0" + ), +) +@Examples( + "version exitcode message", + [ + ("ssl2", None, None), + ("ssl3", None, None), + ("tls1.0", None, None), + ("tls1.1", None, None), + ("tls1.2", None, None), + ], ) -@Examples("version exitcode message", [ - ("ssl2", None, None), - ("ssl3", None, None), - ("tls1.0", None, None), - ("tls1.1", None, None), - ("tls1.2", None, None) -]) def tls_minimum_protocol_version(self, version, exitcode, message): """Check that `tls_minimum_protocol_version` parameter can be used specify to specify the minimum protocol version of SSL/TLS.""" @@ -250,16 +296,23 @@ def tls_minimum_protocol_version(self, version, exitcode, message): "tls_require_cert": "never", "tls_minimum_protocol_version": version, "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } - users = [{ - "server": "openldap4", "username": "user4", "password": "user4", - "login": True, "exitcode": int(exitcode) if exitcode is not None else None, "message": message - }] + users = [ + { + "server": "openldap4", + "username": "user4", + "password": "user4", + "login": True, + "exitcode": int(exitcode) if exitcode is not None else None, + "message": message, + } + ] + + login(servers, "openldap4", *users) - login(servers,"openldap4", *users) @TestFeature @Name("connection protocols") diff --git a/tests/testflows/ldap/external_user_directory/tests/external_user_directory_config.py b/tests/testflows/ldap/external_user_directory/tests/external_user_directory_config.py index 3f1b8076ffa..f1fd956825e 100644 --- a/tests/testflows/ldap/external_user_directory/tests/external_user_directory_config.py +++ b/tests/testflows/ldap/external_user_directory/tests/external_user_directory_config.py @@ -3,9 +3,12 @@ from testflows.core import * from ldap.external_user_directory.tests.common import * from ldap.external_user_directory.requirements import * + @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory_MoreThanOne("2.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory_MoreThanOne( + "2.0" + ) ) def more_than_one_user_directory(self): """Check when more than one LDAP user directory is @@ -14,81 +17,125 @@ def more_than_one_user_directory(self): message = "DB::Exception: Duplicate storage type 'ldap' at user_directories" servers = { "openldap1": { - 
"host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, "openldap2": { - "host": "openldap2", "port": "636", "enable_tls": "yes", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "tls_require_cert": "never" - } + "host": "openldap2", + "port": "636", + "enable_tls": "yes", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "never", + }, } users = [ - {"server": "openldap1", "username": "user1", "password": "user1", "login": True}, - {"server": "openldap2", "username": "user2", "password": "user2", "login": True} + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + }, + { + "server": "openldap2", + "username": "user2", + "password": "user2", + "login": True, + }, ] role = f"role_{getuid()}" - entries = [ - (["openldap1"], [(role,)]), - (["openldap2"], [(role,)]) - ] + entries = [(["openldap1"], [(role,)]), (["openldap2"], [(role,)])] with ldap_servers(servers): with rbac_roles(role) as roles: config = create_entries_ldap_external_user_directory_config_content(entries) - with ldap_external_user_directory(server=None, roles=None, restart=True, config=config): - with When(f"I login as {users[0]['username']} authenticated using openldap1"): - current().context.node.query(f"SELECT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])]) - - with And(f"I login as {users[1]['username']} authenticated using openldap2"): - current().context.node.query(f"SELECT 1", - settings=[("user", users[1]["username"]), ("password", users[1]["password"])]) + with ldap_external_user_directory( + server=None, roles=None, restart=True, config=config + ): + with When( + f"I login as {users[0]['username']} authenticated using openldap1" + ): + current().context.node.query( + f"SELECT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + ) + with And( + f"I login as {users[1]['username']} authenticated using openldap2" + ): + current().context.node.query( + f"SELECT 1", + settings=[ + ("user", users[1]["username"]), + ("password", users[1]["password"]), + ], + ) @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Empty("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Empty( + "1.0" + ) ) def empty_server(self, timeout=300): - """Check that empty string in a `server` field is not allowed. 
- """ + """Check that empty string in a `server` field is not allowed.""" message = "DB::Exception: Empty 'server' field for LDAP user directory" servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } with ldap_servers(servers): with rbac_roles(f"role_{getuid()}") as roles: - invalid_ldap_external_user_directory_config(server="", roles=roles, message=message, timeout=timeout) + invalid_ldap_external_user_directory_config( + server="", roles=roles, message=message, timeout=timeout + ) + @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Missing("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Missing( + "1.0" + ) ) def missing_server(self, timeout=300): - """Check that missing `server` field is not allowed. - """ + """Check that missing `server` field is not allowed.""" message = "DB::Exception: Missing 'server' field for LDAP user directory" servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } with ldap_servers(servers): with rbac_roles(f"role_{getuid()}") as roles: - invalid_ldap_external_user_directory_config(server=None, roles=roles, message=message, timeout=timeout) + invalid_ldap_external_user_directory_config( + server=None, roles=roles, message=message, timeout=timeout + ) + @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_MoreThanOne("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_MoreThanOne( + "1.0" + ) ) def defined_twice_server(self): """Check that when `server` field is defined twice that only the first @@ -96,131 +143,210 @@ def defined_twice_server(self): """ servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } - user = {"server": "openldap1", "username": "user1", "password": "user1", "login": True} + user = { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + } role = f"role_{getuid()}" - entries = [ - (["openldap1", "openldap2"], [(role,)]) - ] + entries = [(["openldap1", "openldap2"], [(role,)])] with ldap_servers(servers): with rbac_roles(role) as roles: config = create_entries_ldap_external_user_directory_config_content(entries) - with ldap_external_user_directory(server=None, roles=None, restart=True, config=config): + with ldap_external_user_directory( + server=None, roles=None, restart=True, config=config + ): with When(f"I login as {user['username']} and execute query"): - current().context.node.query("SELECT 1", - settings=[("user", user["username"]), ("password", user["password"])]) + current().context.node.query( + "SELECT 1", + settings=[ + ("user", user["username"]), + ("password", user["password"]), + ], + ) + @TestScenario @Requirements( - 
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Invalid("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Invalid( + "1.0" + ) ) def invalid_server(self): - """Check when `server` field value is invalid. - """ + """Check when `server` field value is invalid.""" servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } - user = {"server": "openldap1", "username": "user1", "password": "user1", "login": True} + user = { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + } role = f"role_{getuid()}" - entries = [ - (["openldap2"], [(role,)]) - ] + entries = [(["openldap2"], [(role,)])] with ldap_servers(servers): with rbac_roles(role) as roles: config = create_entries_ldap_external_user_directory_config_content(entries) - with ldap_external_user_directory(server=None, roles=None, restart=True, config=config): + with ldap_external_user_directory( + server=None, roles=None, restart=True, config=config + ): with When(f"I login as {user['username']} and execute query"): - current().context.node.query("SELECT 1", - settings=[("user", user["username"]), ("password", user["password"])], - exitcode=4, message="DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name.") + current().context.node.query( + "SELECT 1", + settings=[ + ("user", user["username"]), + ("password", user["password"]), + ], + exitcode=4, + message="DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name.", + ) + @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Empty("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Empty( + "1.0" + ) ) def empty_roles(self): - """Check when `roles` parameter is empty then user can't read any tables. - """ + """Check when `roles` parameter is empty then user can't read any tables.""" message = "DB::Exception: user1: Not enough privileges." 
exitcode = 241 servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } user = {"server": "openldap1", "username": "user1", "password": "user1"} - entries = [ - (["openldap1"], [[]]) - ] + entries = [(["openldap1"], [[]])] with ldap_servers(servers): - with table(f"table_{getuid()}", "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") as table_name: + with table( + f"table_{getuid()}", + "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()", + ) as table_name: config = create_entries_ldap_external_user_directory_config_content(entries) - with ldap_external_user_directory(server=None, roles=None, restart=True, config=config): + with ldap_external_user_directory( + server=None, roles=None, restart=True, config=config + ): with When(f"I login as {user['username']} and execute query"): - current().context.node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", user["username"]), ("password", user["password"])], - exitcode=exitcode, message=message) + current().context.node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", user["username"]), + ("password", user["password"]), + ], + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_MoreThanOne("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_MoreThanOne( + "1.0" + ) ) def defined_twice_roles(self): - """Check that when `roles` is defined twice then only the first entry is used. 
- """ + """Check that when `roles` is defined twice then only the first entry is used.""" node = self.context.node - create_statement = "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()" + create_statement = ( + "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()" + ) servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } - user = {"server": "openldap1", "username": "user1", "password": "user1", "login": True} + user = { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + } roles = [f"role0_{getuid()}", f"role1_{getuid()}"] - entries = [ - (["openldap1"], [[roles[0]],[roles[1]]]) - ] + entries = [(["openldap1"], [[roles[0]], [roles[1]]])] with ldap_servers(servers): with rbac_roles(*roles): - with table(f"table0_{getuid()}", create_statement) as table0_name, \ - table(f"table1_{getuid()}", create_statement) as table1_name: + with table(f"table0_{getuid()}", create_statement) as table0_name, table( + f"table1_{getuid()}", create_statement + ) as table1_name: - with Given("I grant select privilege for the first table to the first role"): + with Given( + "I grant select privilege for the first table to the first role" + ): node.query(f"GRANT SELECT ON {table0_name} TO {roles[0]}") - with And("I grant select privilege for the second table to the second role"): + with And( + "I grant select privilege for the second table to the second role" + ): node.query(f"GRANT SELECT ON {table1_name} TO {roles[1]}") - config = create_entries_ldap_external_user_directory_config_content(entries) + config = create_entries_ldap_external_user_directory_config_content( + entries + ) - with ldap_external_user_directory(server=None, roles=None, restart=True, config=config): - with When(f"I login as {user['username']} and try to read from the first table"): - current().context.node.query(f"SELECT * FROM {table0_name} LIMIT 1", - settings=[("user", user["username"]), ("password", user["password"])]) + with ldap_external_user_directory( + server=None, roles=None, restart=True, config=config + ): + with When( + f"I login as {user['username']} and try to read from the first table" + ): + current().context.node.query( + f"SELECT * FROM {table0_name} LIMIT 1", + settings=[ + ("user", user["username"]), + ("password", user["password"]), + ], + ) + + with And( + f"I login as {user['username']} again and try to read from the second table" + ): + current().context.node.query( + f"SELECT * FROM {table0_name} LIMIT 1", + settings=[ + ("user", user["username"]), + ("password", user["password"]), + ], + ) - with And(f"I login as {user['username']} again and try to read from the second table"): - current().context.node.query(f"SELECT * FROM {table0_name} LIMIT 1", - settings=[("user", user["username"]), ("password", user["password"])]) @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Invalid("2.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Invalid( + "2.0" + ) ) def invalid_role_in_roles(self): """Check that no error is returned when LDAP users try to authenticate @@ -228,8 +354,11 @@ def invalid_role_in_roles(self): """ servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": 
"cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } user = {"server": "openldap1", "username": "user1", "password": "user1"} @@ -237,12 +366,20 @@ def invalid_role_in_roles(self): with ldap_servers(servers): with ldap_external_user_directory("openldap1", roles=["foo"], restart=True): with When(f"I login as {user['username']} and execute query"): - current().context.node.query("SELECT 1", - settings=[("user", user["username"]), ("password", user["password"])]) + current().context.node.query( + "SELECT 1", + settings=[ + ("user", user["username"]), + ("password", user["password"]), + ], + ) + @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Missing("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Missing( + "1.0" + ) ) def missing_roles(self): """Check that when the `roles` are missing then @@ -252,25 +389,38 @@ def missing_roles(self): exitcode = 241 servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } user = {"server": "openldap1", "username": "user1", "password": "user1"} - entries = [ - (["openldap1"], None) - ] + entries = [(["openldap1"], None)] with ldap_servers(servers): - with table(f"table_{getuid()}", "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") as table_name: + with table( + f"table_{getuid()}", + "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()", + ) as table_name: config = create_entries_ldap_external_user_directory_config_content(entries) - with ldap_external_user_directory(server=None, roles=None, restart=True, config=config): + with ldap_external_user_directory( + server=None, roles=None, restart=True, config=config + ): with When(f"I login as {user['username']} and execute query"): - current().context.node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", user["username"]), ("password", user["password"])], - exitcode=exitcode, message=message) + current().context.node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", user["username"]), + ("password", user["password"]), + ], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("external user directory config") @@ -278,11 +428,10 @@ def missing_roles(self): RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Syntax("1.0"), RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server("1.0"), RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Definition("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Definition("1.0"), ) def feature(self, node="clickhouse1"): - """Check LDAP external user directory configuration. 
- """ + """Check LDAP external user directory configuration.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario): diff --git a/tests/testflows/ldap/external_user_directory/tests/restart.py b/tests/testflows/ldap/external_user_directory/tests/restart.py index 59e714172dc..96290d33e62 100644 --- a/tests/testflows/ldap/external_user_directory/tests/restart.py +++ b/tests/testflows/ldap/external_user_directory/tests/restart.py @@ -6,6 +6,7 @@ from testflows.asserts import error from ldap.external_user_directory.tests.common import * from ldap.external_user_directory.requirements import * + @TestScenario def one_external_user_directory(self, node="clickhouse1"): """Check that we can restart ClickHouse server when one @@ -19,13 +20,15 @@ def one_external_user_directory(self, node="clickhouse1"): "port": "389", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } with ldap_servers(servers): with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server="openldap1", roles=roles, restart=True): + with ldap_external_user_directory( + server="openldap1", roles=roles, restart=True + ): with Given("I login and execute query"): login_and_execute_query(username="user1", password="user1") @@ -35,6 +38,7 @@ def one_external_user_directory(self, node="clickhouse1"): with Then("I should be able to login and execute query after restart"): login_and_execute_query(username="user1", password="user1") + @TestScenario def multiple_external_user_directories(self, node="clickhouse1"): """Check that we can restart ClickHouse server when two @@ -48,7 +52,7 @@ def multiple_external_user_directories(self, node="clickhouse1"): "port": "389", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, "openldap2": { "host": "openldap2", @@ -57,35 +61,45 @@ def multiple_external_user_directories(self, node="clickhouse1"): "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", "tls_require_cert": "never", - } + }, } with Given("I have two LDAP servers"): - entries = [ - (["openldap1"], []), - (["openldap2"], []) - ] + entries = [(["openldap1"], []), (["openldap2"], [])] - with And("I create config file to define LDAP external user directory for each LDAP server"): + with And( + "I create config file to define LDAP external user directory for each LDAP server" + ): config = create_entries_ldap_external_user_directory_config_content(entries) with ldap_servers(servers): - with ldap_external_user_directory(server=None, roles=None, restart=True, config=config): - with Given("I login and execute query using a user defined in the first LDAP server"): + with ldap_external_user_directory( + server=None, roles=None, restart=True, config=config + ): + with Given( + "I login and execute query using a user defined in the first LDAP server" + ): login_and_execute_query(username="user1", password="user1") - with And("I login and execute query using a user defined the second LDAP server"): + with And( + "I login and execute query using a user defined the second LDAP server" + ): login_and_execute_query(username="user2", password="user2") with When("I restart the server"): restart() - with Then("I should be able to login and execute query again using a user defined in the first LDAP server"): + with Then( + "I should be able to login and execute query again using a 
user defined in the first LDAP server" + ): login_and_execute_query(username="user1", password="user1") - with And("I should be able to login and execute query again using a user defined in the second LDAP server"): + with And( + "I should be able to login and execute query again using a user defined in the second LDAP server" + ): login_and_execute_query(username="user2", password="user2") + @TestScenario def dynamically_added_users(self, node="clickhouse1", count=10): """Check that we can restart ClickHouse server when one @@ -100,20 +114,24 @@ def dynamically_added_users(self, node="clickhouse1", count=10): "port": "389", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } with ldap_servers(servers): with rbac_roles("ldap_role") as roles: - with ldap_external_user_directory(server="openldap1", roles=roles, restart=True): + with ldap_external_user_directory( + server="openldap1", roles=roles, restart=True + ): with Given("I login and execute query using existing LDAP user"): login_and_execute_query(username="user1", password="user1") with When("I then restart the server"): restart() - with Then("after restart I should be able to login and execute query using existing LDAP user"): + with Then( + "after restart I should be able to login and execute query using existing LDAP user" + ): login_and_execute_query(username="user1", password="user1") dynamic_users = [] @@ -123,11 +141,21 @@ def dynamically_added_users(self, node="clickhouse1", count=10): {"cn": f"dynamic_user{i}", "userpassword": randomword(20)} ) - with ldap_users(*dynamic_users, node=self.context.cluster.node("openldap1")): - with Then("I should be able to login and execute queries using dynamically added users"): + with ldap_users( + *dynamic_users, node=self.context.cluster.node("openldap1") + ): + with Then( + "I should be able to login and execute queries using dynamically added users" + ): for dynamic_user in dynamic_users: - with When(f"using dynamically added user {dynamic_user['cn']}"): - login_and_execute_query(username=dynamic_user["cn"], password=dynamic_user["userpassword"]) + with When( + f"using dynamically added user {dynamic_user['cn']}" + ): + login_and_execute_query( + username=dynamic_user["cn"], + password=dynamic_user["userpassword"], + ) + @TestScenario @Requirements( @@ -146,7 +174,7 @@ def parallel_login(self, server=None, user_count=10, timeout=300): "port": "389", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, "openldap2": { "host": "openldap2", @@ -155,50 +183,67 @@ def parallel_login(self, server=None, user_count=10, timeout=300): "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", "tls_require_cert": "never", - } + }, } with Given("I have two LDAP servers"): - entries = [ - (["openldap1"], []), - (["openldap2"], []) - ] + entries = [(["openldap1"], []), (["openldap2"], [])] with And("I define a group of users to be created on each LDAP server"): user_groups = { - "openldap1_users": [{"cn": f"openldap1_parallel_user{i}", "userpassword": randomword(20)} for i in - range(user_count)], - "openldap2_users": [{"cn": f"openldap2_parallel_user{i}", "userpassword": randomword(20)} for i in - range(user_count)], - "local_users": [{"cn": f"local_parallel_user{i}", "userpassword": randomword(20)} for i in - range(user_count)] + "openldap1_users": [ + {"cn": f"openldap1_parallel_user{i}", 
"userpassword": randomword(20)} + for i in range(user_count) + ], + "openldap2_users": [ + {"cn": f"openldap2_parallel_user{i}", "userpassword": randomword(20)} + for i in range(user_count) + ], + "local_users": [ + {"cn": f"local_parallel_user{i}", "userpassword": randomword(20)} + for i in range(user_count) + ], } @TestStep(When) @Name("I login as {username} and execute query") - def login_and_execute_query_during_restart(self, username, password, exitcode, message, steps=True, timeout=60): + def login_and_execute_query_during_restart( + self, username, password, exitcode, message, steps=True, timeout=60 + ): """Execute a query and ignore exitcode and message as during restart exit codes and messages vary based on the state of the restarted container and the ClickHouse server and there are too many cases and complete list is not fully known therefore trying to list all possible cases produces random fails. """ - r = self.context.cluster.command(None, f"{self.context.cluster.docker_compose} exec {self.context.node.name} " + - f"clickhouse client -q \"SELECT 1\" --user {username} --password {password}", steps=steps, timeout=timeout) + r = self.context.cluster.command( + None, + f"{self.context.cluster.docker_compose} exec {self.context.node.name} " + + f'clickhouse client -q "SELECT 1" --user {username} --password {password}', + steps=steps, + timeout=timeout, + ) return r @TestStep(When) @Name("I login as {username} and execute query") - def login_and_execute_query(self, username, password, exitcode=None, message=None, steps=True, timeout=60): - self.context.node.query("SELECT 1", + def login_and_execute_query( + self, username, password, exitcode=None, message=None, steps=True, timeout=60 + ): + self.context.node.query( + "SELECT 1", settings=[("user", username), ("password", password)], exitcode=exitcode or 0, - message=message, steps=steps, timeout=timeout) + message=message, + steps=steps, + timeout=timeout, + ) - def login_with_valid_username_and_password(users, i, iterations=10, during_restart=False): - """Login with valid username and password. - """ + def login_with_valid_username_and_password( + users, i, iterations=10, during_restart=False + ): + """Login with valid username and password.""" query = login_and_execute_query if during_restart: query = login_and_execute_query_during_restart @@ -207,12 +252,18 @@ def parallel_login(self, server=None, user_count=10, timeout=300): for i in range(iterations): random_user = users[random.randint(0, len(users) - 1)] - query(username=random_user["cn"], password=random_user["userpassword"], - exitcode=0, message="1", steps=False) + query( + username=random_user["cn"], + password=random_user["userpassword"], + exitcode=0, + message="1", + steps=False, + ) - def login_with_valid_username_and_invalid_password(users, i, iterations=10, during_restart=False): - """Login with valid username and invalid password. 
- """ + def login_with_valid_username_and_invalid_password( + users, i, iterations=10, during_restart=False + ): + """Login with valid username and invalid password.""" query = login_and_execute_query if during_restart: query = login_and_execute_query_during_restart @@ -221,15 +272,18 @@ def parallel_login(self, server=None, user_count=10, timeout=300): for i in range(iterations): random_user = users[random.randint(0, len(users) - 1)] - query(username=random_user["cn"], + query( + username=random_user["cn"], password=(random_user["userpassword"] + randomword(1)), exitcode=4, message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name", - steps=False) + steps=False, + ) - def login_with_invalid_username_and_valid_password(users, i, iterations=10, during_restart=False): - """Login with invalid username and valid password. - """ + def login_with_invalid_username_and_valid_password( + users, i, iterations=10, during_restart=False + ): + """Login with invalid username and valid password.""" query = login_and_execute_query if during_restart: query = login_and_execute_query_during_restart @@ -239,35 +293,51 @@ def parallel_login(self, server=None, user_count=10, timeout=300): random_user = dict(users[random.randint(0, len(users) - 1)]) random_user["cn"] += randomword(1) - query(username=random_user["cn"], + query( + username=random_user["cn"], password=random_user["userpassword"], exitcode=4, message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name", - steps=False) + steps=False, + ) with And("I have a list of checks that I want to run for each user group"): checks = [ login_with_valid_username_and_password, login_with_valid_username_and_invalid_password, - login_with_invalid_username_and_valid_password + login_with_invalid_username_and_valid_password, ] - with And("I create config file to define LDAP external user directory for each LDAP server"): + with And( + "I create config file to define LDAP external user directory for each LDAP server" + ): config = create_entries_ldap_external_user_directory_config_content(entries) with ldap_servers(servers): - with ldap_external_user_directory(server=None, roles=None, restart=True, config=config): - with ldap_users(*user_groups["openldap1_users"], node=self.context.cluster.node("openldap1")): - with ldap_users(*user_groups["openldap2_users"], node=self.context.cluster.node("openldap2")): + with ldap_external_user_directory( + server=None, roles=None, restart=True, config=config + ): + with ldap_users( + *user_groups["openldap1_users"], + node=self.context.cluster.node("openldap1"), + ): + with ldap_users( + *user_groups["openldap2_users"], + node=self.context.cluster.node("openldap2"), + ): with rbac_users(*user_groups["local_users"]): tasks = [] with Pool(4) as pool: try: - with When("I restart the server during parallel login of users in each group"): + with When( + "I restart the server during parallel login of users in each group" + ): for users in user_groups.values(): for check in checks: - tasks.append(pool.submit(check, (users, 0, 25, True))) - + tasks.append( + pool.submit(check, (users, 0, 25, True)) + ) + tasks.append(pool.submit(restart)) finally: with Then("logins during restart should work"): @@ -277,20 +347,25 @@ def parallel_login(self, server=None, user_count=10, timeout=300): tasks = [] with Pool(4) as pool: try: - with When("I perform parallel login of users in each group after restart"): + with When( 
+ "I perform parallel login of users in each group after restart" + ): for users in user_groups.values(): for check in checks: - tasks.append(pool.submit(check, (users, 0, 10, False))) + tasks.append( + pool.submit( + check, (users, 0, 10, False) + ) + ) finally: with Then("logins after restart should work"): for task in tasks: task.result(timeout=timeout) + @TestOutline(Feature) @Name("restart") -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Restart_Server("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Restart_Server("1.0")) def feature(self, servers=None, server=None, node="clickhouse1"): """Check that we can restart ClickHouse server when one or more external user directories are configured. diff --git a/tests/testflows/ldap/external_user_directory/tests/roles.py b/tests/testflows/ldap/external_user_directory/tests/roles.py index 364ee219e48..266abd12eaa 100644 --- a/tests/testflows/ldap/external_user_directory/tests/roles.py +++ b/tests/testflows/ldap/external_user_directory/tests/roles.py @@ -3,10 +3,9 @@ from testflows.core import * from ldap.external_user_directory.tests.common import * from ldap.external_user_directory.requirements import * + @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Role_New("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Role_New("1.0")) def new_role(self, server): """Check that new roles can't be assigned to any LDAP user authenticated using external user directory. @@ -18,17 +17,32 @@ def new_role(self, server): users = [ {"username": f"user0_{uid}", "password": "user0_password"}, - {"username": f"user1_{uid}", "password": "user1_password"} + {"username": f"user1_{uid}", "password": "user1_password"}, ] with rbac_roles(f"role0_{uid}", f"role1_{uid}") as roles: - with table(f"table_{getuid()}", "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") as table_name: + with table( + f"table_{getuid()}", + "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()", + ) as table_name: with ldap_external_user_directory(server=server, roles=roles, restart=True): - with ldap_users(*[{"cn": user["username"], "userpassword": user["password"]} for user in users]): + with ldap_users( + *[ + {"cn": user["username"], "userpassword": user["password"]} + for user in users + ] + ): - with When(f"I login and execute query simple query to cache the LDAP user"): - node.query(f"SELECT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])]) + with When( + f"I login and execute query simple query to cache the LDAP user" + ): + node.query( + f"SELECT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + ) with rbac_roles(f"new_role0_{uid}") as new_roles: @@ -36,20 +50,25 @@ def new_role(self, server): exitcode = 239 with And("I try to grant new role to the cached LDAP user"): - node.query(f"GRANT {new_roles[0]} TO {users[0]['username']}", - exitcode=exitcode, message=message.format(user=users[0]["username"])) + node.query( + f"GRANT {new_roles[0]} TO {users[0]['username']}", + exitcode=exitcode, + message=message.format(user=users[0]["username"]), + ) message = "DB::Exception: There is no role `{user}` in user directories" exitcode = 255 with And("I try to grant new role to the non-cached LDAP user"): - node.query(f"GRANT {new_roles[0]} TO {users[1]['username']}", - exitcode=exitcode, message=message.format(user=users[1]["username"])) + node.query( + f"GRANT {new_roles[0]} TO {users[1]['username']}", + exitcode=exitcode, + 
message=message.format(user=users[1]["username"]), + ) + @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Role_NewPrivilege("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Role_NewPrivilege("1.0")) def add_privilege(self, server): """Check that we can add privilege to a role used in the external user directory configuration. @@ -63,38 +82,74 @@ def add_privilege(self, server): users = [ {"username": f"user0_{uid}", "password": "user0_password"}, - {"username": f"user1_{uid}", "password": "user1_password"} + {"username": f"user1_{uid}", "password": "user1_password"}, ] with rbac_roles(f"role0_{uid}", f"role1_{uid}") as roles: - with table(f"table_{getuid()}", "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") as table_name: + with table( + f"table_{getuid()}", + "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()", + ) as table_name: with ldap_external_user_directory(server=server, roles=roles, restart=True): - with ldap_users(*[{"cn": user["username"], "userpassword": user["password"]} for user in users]): + with ldap_users( + *[ + {"cn": user["username"], "userpassword": user["password"]} + for user in users + ] + ): with When(f"I login and execute query that requires no privileges"): - node.query(f"SELECT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])]) + node.query( + f"SELECT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + ) - with And(f"I login and try to read from the table without having select privilege"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])], - exitcode=exitcode, message=message.format(user=users[0]["username"])) + with And( + f"I login and try to read from the table without having select privilege" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + exitcode=exitcode, + message=message.format(user=users[0]["username"]), + ) - with When(f"I grant select privilege to one of the two roles assigned to LDAP users"): + with When( + f"I grant select privilege to one of the two roles assigned to LDAP users" + ): node.query(f"GRANT SELECT ON {table_name} TO {roles[0]}") - with And(f"I login again and expect that cached LDAP user can successfully read from the table"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])]) + with And( + f"I login again and expect that cached LDAP user can successfully read from the table" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + ) + + with And( + f"I login again and expect that non-cached LDAP user can successfully read from the table" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[1]["username"]), + ("password", users[1]["password"]), + ], + ) - with And(f"I login again and expect that non-cached LDAP user can successfully read from the table"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[1]["username"]), ("password", users[1]["password"])]) @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Role_RemovedPrivilege("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Role_RemovedPrivilege("1.0")) def remove_privilege(self, 
server): """Check that we can remove privilege from a role used in the external user directory configuration. @@ -108,39 +163,73 @@ def remove_privilege(self, server): users = [ {"username": f"user0_{uid}", "password": "user0_password"}, - {"username": f"user1_{uid}", "password": "user1_password"} + {"username": f"user1_{uid}", "password": "user1_password"}, ] with rbac_roles(f"role0_{uid}", f"role1_{uid}") as roles: - with table(f"table_{getuid()}", "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") as table_name: + with table( + f"table_{getuid()}", + "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()", + ) as table_name: - with When(f"I grant select privilege to one of the two roles assigned to LDAP users"): + with When( + f"I grant select privilege to one of the two roles assigned to LDAP users" + ): node.query(f"GRANT SELECT ON {table_name} TO {roles[0]}") with ldap_external_user_directory(server=server, roles=roles, restart=True): - with ldap_users(*[{"cn": user["username"], "userpassword": user["password"]} for user in users]): + with ldap_users( + *[ + {"cn": user["username"], "userpassword": user["password"]} + for user in users + ] + ): - with When(f"I login then LDAP user should be able to read from the table"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])]) + with When( + f"I login then LDAP user should be able to read from the table" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + ) - with When(f"I revoke select privilege from all the roles assigned to LDAP users"): + with When( + f"I revoke select privilege from all the roles assigned to LDAP users" + ): node.query(f"REVOKE SELECT ON {table_name} FROM {roles[0]}") - with When(f"I login again then cached LDAP user should not be able to read from the table"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])], - exitcode=exitcode, message=message.format(user=users[0]["username"])) + with When( + f"I login again then cached LDAP user should not be able to read from the table" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + exitcode=exitcode, + message=message.format(user=users[0]["username"]), + ) + + with When( + f"I login with non-cached LDAP user then the user should also not be able to read from the table" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[1]["username"]), + ("password", users[1]["password"]), + ], + exitcode=exitcode, + message=message.format(user=users[1]["username"]), + ) - with When(f"I login with non-cached LDAP user then the user should also not be able to read from the table"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[1]["username"]), ("password", users[1]["password"])], - exitcode=exitcode, message=message.format(user=users[1]["username"])) @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Role_Removed("2.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Role_Removed("2.0")) def remove_role(self, server): """Check that when a role used in the external user directory configuration is dynamically removed then any LDAP users should still be authenticated using @@ -153,31 +242,50 @@ def 
remove_role(self, server): users = [ {"username": f"user0_{uid}", "password": "user0_password"}, - {"username": f"user1_{uid}", "password": "user1_password"} + {"username": f"user1_{uid}", "password": "user1_password"}, ] with rbac_roles(f"role0_{uid}", f"role1_{uid}") as roles: - with ldap_external_user_directory(server=server, roles=roles, restart=True): - with ldap_users(*[{"cn": user["username"], "userpassword": user["password"]} for user in users]): - with When(f"I login and execute query that requires no privileges"): - node.query(f"SELECT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])]) + with ldap_external_user_directory(server=server, roles=roles, restart=True): + with ldap_users( + *[ + {"cn": user["username"], "userpassword": user["password"]} + for user in users + ] + ): + with When(f"I login and execute query that requires no privileges"): + node.query( + f"SELECT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + ) - with And("I remove one of the roles"): - node.query(f"DROP ROLE {roles[1]}") + with And("I remove one of the roles"): + node.query(f"DROP ROLE {roles[1]}") - with And(f"I try to login using cached LDAP user"): - node.query(f"SELECT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])]) + with And(f"I try to login using cached LDAP user"): + node.query( + f"SELECT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + ) + + with And(f"I try to login again using non-cached LDAP user"): + node.query( + f"SELECT 1", + settings=[ + ("user", users[1]["username"]), + ("password", users[1]["password"]), + ], + ) - with And(f"I try to login again using non-cached LDAP user"): - node.query(f"SELECT 1", - settings=[("user", users[1]["username"]), ("password", users[1]["password"])]) @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Role_Removed_Privileges("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Role_Removed_Privileges("1.0")) def remove_privilege_by_removing_role(self, server): """Check that when the role used in the external user directory configuration is dynamically removed then privileges are removed from all @@ -192,35 +300,59 @@ def remove_privilege_by_removing_role(self, server): users = [ {"username": f"user0_{uid}", "password": "user0_password"}, - {"username": f"user1_{uid}", "password": "user1_password"} + {"username": f"user1_{uid}", "password": "user1_password"}, ] with rbac_roles(f"role0_{uid}", f"role1_{uid}") as roles: - with table(f"table_{getuid()}", "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") as table_name: + with table( + f"table_{getuid()}", + "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()", + ) as table_name: - with When(f"I grant select privilege to one of the two roles assigned to LDAP users"): - node.query(f"GRANT SELECT ON {table_name} TO {roles[0]}") + with When( + f"I grant select privilege to one of the two roles assigned to LDAP users" + ): + node.query(f"GRANT SELECT ON {table_name} TO {roles[0]}") - with ldap_external_user_directory(server=server, roles=roles, restart=True): - with ldap_users(*[{"cn": user["username"], "userpassword": user["password"]} for user in users]): + with ldap_external_user_directory(server=server, roles=roles, restart=True): + with ldap_users( + *[ + {"cn": user["username"], "userpassword": user["password"]} + for user in users + ] + ): - with When(f"I login and expect that LDAP 
user can read from the table"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])]) + with When( + f"I login and expect that LDAP user can read from the table" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + ) - with And("I remove the role that grants the privilege"): - node.query(f"DROP ROLE {roles[0]}") + with And("I remove the role that grants the privilege"): + node.query(f"DROP ROLE {roles[0]}") + + with And( + f"I try to relogin and expect that cached LDAP user can login " + "but does not have privilege that was provided by the removed role" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + exitcode=exitcode, + message=message.format(user=users[0]["username"]), + ) - with And(f"I try to relogin and expect that cached LDAP user can login " - "but does not have privilege that was provided by the removed role"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])], - exitcode=exitcode, message=message.format(user=users[0]["username"])) @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Role_Readded_Privileges("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Role_Readded_Privileges("1.0")) def readd_privilege_by_readding_role(self, server): """Check that when the role used in the external user directory configuration is dynamically removed then all the privileges are removed from any @@ -234,58 +366,103 @@ def readd_privilege_by_readding_role(self, server): users = [ {"username": f"user0_{uid}", "password": "user0_password"}, - {"username": f"user1_{uid}", "password": "user1_password"} + {"username": f"user1_{uid}", "password": "user1_password"}, ] with rbac_roles(f"role0_{uid}", f"role1_{uid}") as roles: - with table(f"table_{getuid()}", "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") as table_name: + with table( + f"table_{getuid()}", + "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()", + ) as table_name: - with When(f"I grant select privilege to one of the two roles assigned to LDAP users"): - node.query(f"GRANT SELECT ON {table_name} TO {roles[0]}") + with When( + f"I grant select privilege to one of the two roles assigned to LDAP users" + ): + node.query(f"GRANT SELECT ON {table_name} TO {roles[0]}") - with ldap_external_user_directory(server=server, roles=roles, restart=True): - with ldap_users(*[{"cn": user["username"], "userpassword": user["password"]} for user in users]): + with ldap_external_user_directory(server=server, roles=roles, restart=True): + with ldap_users( + *[ + {"cn": user["username"], "userpassword": user["password"]} + for user in users + ] + ): - with When(f"I login and expect that LDAP user can read from the table"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])]) + with When( + f"I login and expect that LDAP user can read from the table" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + ) - with And("I remove the role that grants the privilege"): - node.query(f"DROP ROLE {roles[0]}") + with And("I remove the role that grants the privilege"): + 
node.query(f"DROP ROLE {roles[0]}") - message = "DB::Exception: {user}: Not enough privileges." - exitcode = 241 + message = "DB::Exception: {user}: Not enough privileges." + exitcode = 241 - with And(f"I try to relogin and expect that cached LDAP user can login " - "but does not have privilege that was provided by the removed role"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])], - exitcode=exitcode, message=message.format(user=users[0]["username"])) + with And( + f"I try to relogin and expect that cached LDAP user can login " + "but does not have privilege that was provided by the removed role" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + exitcode=exitcode, + message=message.format(user=users[0]["username"]), + ) - with And(f"I try to login using non-cached LDAP user and expect it to succeed"): - node.query(f"SELECT 1", - settings=[("user", users[1]["username"]), ("password", users[1]["password"])]) + with And( + f"I try to login using non-cached LDAP user and expect it to succeed" + ): + node.query( + f"SELECT 1", + settings=[ + ("user", users[1]["username"]), + ("password", users[1]["password"]), + ], + ) - with When("I re-add the role"): - node.query(f"CREATE ROLE {roles[0]}") + with When("I re-add the role"): + node.query(f"CREATE ROLE {roles[0]}") - with And(f"I grant select privilege to the re-added role"): - node.query(f"GRANT SELECT ON {table_name} TO {roles[0]}") + with And(f"I grant select privilege to the re-added role"): + node.query(f"GRANT SELECT ON {table_name} TO {roles[0]}") - with And(f"I try to relogin and expect that cached LDAP user can login " - "and again has the privilege that is provided by the role"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[0]["username"]), ("password", users[0]["password"])]) + with And( + f"I try to relogin and expect that cached LDAP user can login " + "and again has the privilege that is provided by the role" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[0]["username"]), + ("password", users[0]["password"]), + ], + ) + + with And( + "I try to login using non-cached LDAP expect it to also work again and expect" + "for the user also to have privilege provided by the role" + ): + node.query( + f"SELECT * FROM {table_name} LIMIT 1", + settings=[ + ("user", users[1]["username"]), + ("password", users[1]["password"]), + ], + ) - with And("I try to login using non-cached LDAP expect it to also work again and expect" - "for the user also to have privilege provided by the role"): - node.query(f"SELECT * FROM {table_name} LIMIT 1", - settings=[("user", users[1]["username"]), ("password", users[1]["password"])]) @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Role_NotPresent_Added("1.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Role_NotPresent_Added("1.0")) def not_present_role_added(self, server): """Check that when the role used in the external user directory configuration which was not present during LDAP user authentication @@ -299,18 +476,29 @@ def not_present_role_added(self, server): users = [ {"username": f"user0_{uid}", "password": "user0_password"}, - {"username": f"user1_{uid}", "password": "user1_password"} + {"username": f"user1_{uid}", "password": "user1_password"}, ] roles = [f"role0_{uid}", f"role1_{uid}"] - with 
table(f"table_{getuid()}", "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") as table_name: + with table( + f"table_{getuid()}", + "CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()", + ) as table_name: with ldap_external_user_directory(server=server, roles=roles, restart=True): - with ldap_users(*[{"cn": user["username"], "userpassword": user["password"]} for user in users]): + with ldap_users( + *[ + {"cn": user["username"], "userpassword": user["password"]} + for user in users + ] + ): with When(f"I login using clickhouse-client"): with self.context.cluster.shell(node=node.name) as shell: - with shell(f"TERM=dumb clickhouse client --user {users[0]['username']} --password {users[0]['password']} | tee", - asynchronous=True, name="client") as client: + with shell( + f"TERM=dumb clickhouse client --user {users[0]['username']} --password {users[0]['password']} | tee", + asynchronous=True, + name="client", + ) as client: client.app.expect("clickhouse1 :\) ") with When("I execute select on the table"): @@ -321,12 +509,18 @@ def not_present_role_added(self, server): client.app.expect("clickhouse1 :\) ") try: - with Given("I add the role and grant the select privilege to it for the table"): + with Given( + "I add the role and grant the select privilege to it for the table" + ): node.query(f"CREATE ROLE {roles[0]}") - node.query(f"GRANT SELECT ON {table_name} TO {roles[0]}") + node.query( + f"GRANT SELECT ON {table_name} TO {roles[0]}" + ) with When("I re-execute select on the table"): - client.app.send(f"SELECT * FROM {table_name} LIMIT 1") + client.app.send( + f"SELECT * FROM {table_name} LIMIT 1" + ) with Then("I expect to get no errors"): client.app.expect("Ok\.") @@ -336,6 +530,7 @@ def not_present_role_added(self, server): with Finally("I delete the role"): node.query(f"DROP ROLE IF EXISTS {roles[0]}") + @TestFeature @Name("roles") @Requirements( @@ -350,8 +545,11 @@ def feature(self, node="clickhouse1"): servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } user = {"server": "openldap1", "username": "user1", "password": "user1"} diff --git a/tests/testflows/ldap/external_user_directory/tests/server_config.py b/tests/testflows/ldap/external_user_directory/tests/server_config.py index 31e1c42da94..a26713e28cf 100644 --- a/tests/testflows/ldap/external_user_directory/tests/server_config.py +++ b/tests/testflows/ldap/external_user_directory/tests/server_config.py @@ -7,39 +7,55 @@ from ldap.external_user_directory.requirements import * from ldap.authentication.tests.common import invalid_server_config -@TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Name("1.0") -) -def empty_server_name(self, timeout=60): - """Check that empty string as a server name is not allowed. 
- """ - servers = {"": {"host": "foo", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} - invalid_server_config(servers, timeout=timeout) @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Authentication_UnreachableServer("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Name("1.0"), +) +def empty_server_name(self, timeout=60): + """Check that empty string as a server name is not allowed.""" + servers = { + "": { + "host": "foo", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } + invalid_server_config(servers, timeout=timeout) + + +@TestScenario +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), + RQ_SRS_009_LDAP_ExternalUserDirectory_Connection_Authentication_UnreachableServer( + "1.0" + ), ) def invalid_host(self): """Check that server returns an error when LDAP server host name is invalid. """ servers = {"foo": {"host": "foo", "port": "389", "enable_tls": "no"}} - users = [{ - "server": "foo", "username": "user1", "password": "user1", "login": True, - "exitcode": 4, "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name." - }] + users = [ + { + "server": "foo", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name.", + } + ] login(servers, "foo", *users) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Host("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Host("1.0"), ) def empty_host(self, tail=30, timeout=300): """Check that server returns an error when LDAP server @@ -52,10 +68,11 @@ def empty_host(self, tail=30, timeout=300): invalid_server_config(servers, message=message, tail=30, timeout=timeout) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Host("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Host("1.0"), ) def missing_host(self, tail=30, timeout=300): """Check that server returns an error when LDAP server @@ -65,18 +82,27 @@ def missing_host(self, tail=30, timeout=300): message = "DB::Exception: Missing 'host' entry" servers = {"foo": {"port": "389", "enable_tls": "no"}} - users = [{ - "server": "foo", "username": "user1", "password": "user1", "login": True, - "exitcode": 36, "message": "DB::Exception: LDAP server 'foo' is not configured." 
- }] + users = [ + { + "server": "foo", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 36, + "message": "DB::Exception: LDAP server 'foo' is not configured.", + } + ] with Given("I prepare the error log by writting empty lines into it"): - node.command("echo -e \"%s\" > /var/log/clickhouse-server/clickhouse-server.err.log" % ("-\\n" * tail)) + node.command( + 'echo -e "%s" > /var/log/clickhouse-server/clickhouse-server.err.log' + % ("-\\n" * tail) + ) with ldap_servers(servers): with Then("server shall fail to merge the new config"): started = time.time() - command = f"tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep \"{message}\"" + command = f'tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep "{message}"' while time.time() - started < timeout: exitcode = node.command(command, steps=False).exitcode if exitcode == 0: @@ -84,6 +110,7 @@ def missing_host(self, tail=30, timeout=300): time.sleep(1) assert exitcode == 0, error() + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), @@ -93,155 +120,247 @@ def invalid_port(self): port is not valid. """ servers = {"openldap1": {"host": "openldap1", "port": "3890", "enable_tls": "no"}} - users = [{ - "server": "openldap1", "username": "user1", "password": "user1", "login": True, - "exitcode": 4, "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name." - }] + users = [ + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name.", + } + ] login(servers, "openldap1", *users) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Prefix("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Prefix("1.0"), ) def invalid_auth_dn_prefix(self): """Check that server returns an error when LDAP server definition has invalid auth_dn_prefix. """ - servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "foo=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} - users = [{ - "server": "openldap1", "username": "user1", "password": "user1", "login": True, - "exitcode": 4, "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name." - }] + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "foo=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } + users = [ + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name.", + } + ] login(servers, "openldap1", *users) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Suffix("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Suffix("1.0"), ) def invalid_auth_dn_suffix(self): """Check that server returns an error when LDAP server definition has invalid auth_dn_suffix. 
""" - servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",foo=users,dc=company,dc=com" - }} - users = [{ - "server": "openldap1", "username": "user1", "password": "user1", "login": True, - "exitcode": 4, "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name." - }] + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",foo=users,dc=company,dc=com", + } + } + users = [ + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name.", + } + ] login(servers, "openldap1", *users) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_EnableTLS("1.0"), ) def invalid_enable_tls_value(self, timeout=60): """Check that server returns an error when enable_tls option has invalid value. """ message = "Syntax error: Cannot convert to boolean: foo" - servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "foo", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "foo", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } invalid_server_config(servers, message=message, tail=30, timeout=timeout) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSRequireCert("1.0"), ) def invalid_tls_require_cert_value(self): """Check that server returns an error when tls_require_cert option has invalid value. """ - servers = {"openldap2": { - "host": "openldap2", "port": "636", "enable_tls": "yes", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "tls_require_cert": "foo", - "ca_cert_dir": "/container/service/slapd/assets/certs/", - "ca_cert_file": "/container/service/slapd/assets/certs/ca.crt" - }} - users = [{ - "server": "openldap2", "username": "user2", "password": "user2", "login": True, - "exitcode": 4, "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name." 
- }] + servers = { + "openldap2": { + "host": "openldap2", + "port": "636", + "enable_tls": "yes", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "foo", + "ca_cert_dir": "/container/service/slapd/assets/certs/", + "ca_cert_file": "/container/service/slapd/assets/certs/ca.crt", + } + } + users = [ + { + "server": "openldap2", + "username": "user2", + "password": "user2", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name.", + } + ] login(servers, "openldap2", *users) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCACertDir("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCACertDir("1.0"), ) def empty_ca_cert_dir(self): - """Check that server returns an error when ca_cert_dir is empty. - """ - servers = {"openldap2": {"host": "openldap2", "port": "636", "enable_tls": "yes", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "tls_require_cert": "demand", - "ca_cert_dir": "", - "ca_cert_file": "/container/service/slapd/assets/certs/ca.crt" - }} - users = [{ - "server": "openldap2", "username": "user2", "password": "user2", "login": True, - "exitcode": 4, - "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name" - }] + """Check that server returns an error when ca_cert_dir is empty.""" + servers = { + "openldap2": { + "host": "openldap2", + "port": "636", + "enable_tls": "yes", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "demand", + "ca_cert_dir": "", + "ca_cert_file": "/container/service/slapd/assets/certs/ca.crt", + } + } + users = [ + { + "server": "openldap2", + "username": "user2", + "password": "user2", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name", + } + ] login(servers, "openldap2", *users) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCertFile("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCertFile("1.0"), ) def empty_ca_cert_file(self): - """Check that server returns an error when ca_cert_file is empty. - """ - servers = {"openldap2": {"host": "openldap2", "port": "636", "enable_tls": "yes", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "tls_require_cert": "demand", - "ca_cert_dir": "/container/service/slapd/assets/certs/", - "ca_cert_file": "" - }} - users = [{ - "server": "openldap2", "username": "user2", "password": "user2", "login": True, - "exitcode": 4, - "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name." 
- }] + """Check that server returns an error when ca_cert_file is empty.""" + servers = { + "openldap2": { + "host": "openldap2", + "port": "636", + "enable_tls": "yes", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "tls_require_cert": "demand", + "ca_cert_dir": "/container/service/slapd/assets/certs/", + "ca_cert_file": "", + } + } + users = [ + { + "server": "openldap2", + "username": "user2", + "password": "user2", + "login": True, + "exitcode": 4, + "message": "DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name.", + } + ] login(servers, "openldap2", *users) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Value("1.0"), RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Prefix("1.0"), - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Suffix("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_AuthDN_Suffix("1.0"), ) def auth_dn_value(self): """Check that server configuration can properly define the `dn` value of the user.""" servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" - }} - user = {"server": "openldap1", "username": "user1", "password": "user1", "login": True} + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + } + } + user = { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + } login(servers, "openldap1", user) + @TestOutline(Scenario) -@Examples("invalid_value", [ - ("-1", Name("negative int")), - ("foo", Name("string")), - ("", Name("empty string")), - ("36893488147419103232", Name("overflow with extremely large int value")), - ("-36893488147419103232", Name("overflow with extremely large negative int value")), - ("@#", Name("special characters")) -]) +@Examples( + "invalid_value", + [ + ("-1", Name("negative int")), + ("foo", Name("string")), + ("", Name("empty string")), + ("36893488147419103232", Name("overflow with extremely large int value")), + ( + "-36893488147419103232", + Name("overflow with extremely large negative int value"), + ), + ("@#", Name("special characters")), + ], +) @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Invalid("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Invalid( + "1.0" + ) ) def invalid_verification_cooldown_value(self, invalid_value, timeout=300): """Check that server returns an error when LDAP server @@ -250,19 +369,26 @@ def invalid_verification_cooldown_value(self, invalid_value, timeout=300): error_message = f" Syntax error: Not a valid unsigned integer{': ' + invalid_value if invalid_value else invalid_value}" - with Given("LDAP server configuration that uses a negative integer for the verification_cooldown parameter"): - servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", - "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", - "verification_cooldown": f"{invalid_value}" - }} + with Given( + "LDAP server configuration that uses a negative integer for the verification_cooldown parameter" + ): + servers = { + "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + 
"verification_cooldown": f"{invalid_value}", + } + } with When("I try to use this configuration then it should not work"): invalid_server_config(servers, message=error_message, tail=30, timeout=timeout) + @TestScenario -@Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Syntax("2.0") -) +@Requirements(RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Syntax("2.0")) def syntax(self): """Check that server configuration with valid syntax can be loaded. ```xml @@ -293,23 +419,23 @@ def syntax(self): "auth_dn_suffix": ",ou=users,dc=company,dc=com", "verification_cooldown": "0", "enable_tls": "yes", - "tls_minimum_protocol_version": "tls1.2" , + "tls_minimum_protocol_version": "tls1.2", "tls_require_cert": "demand", "tls_cert_file": "/container/service/slapd/assets/certs/ldap.crt", "tls_key_file": "/container/service/slapd/assets/certs/ldap.key", "tls_ca_cert_file": "/container/service/slapd/assets/certs/ca.crt", "tls_ca_cert_dir": "/container/service/slapd/assets/certs/", - "tls_cipher_suite": "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384" + "tls_cipher_suite": "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384", } } with ldap_servers(servers): pass + @TestFeature @Name("server config") def feature(self, node="clickhouse1"): - """Check LDAP server configuration. - """ + """Check LDAP server configuration.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario): scenario() diff --git a/tests/testflows/ldap/external_user_directory/tests/simple.py b/tests/testflows/ldap/external_user_directory/tests/simple.py index c48048833c7..3c2ecc2cce6 100644 --- a/tests/testflows/ldap/external_user_directory/tests/simple.py +++ b/tests/testflows/ldap/external_user_directory/tests/simple.py @@ -3,11 +3,11 @@ from testflows.asserts import error from ldap.external_user_directory.tests.common import login + @TestScenario @Name("simple") def scenario(self, node="clickhouse1"): - """Check that an LDAP external user directory can be used to authenticate a user. - """ + """Check that an LDAP external user directory can be used to authenticate a user.""" self.context.node = self.context.cluster.node(node) servers = { "openldap1": { @@ -15,10 +15,15 @@ def scenario(self, node="clickhouse1"): "port": "389", "enable_tls": "no", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", }, } users = [ - {"server": "openldap1", "username": "user1", "password": "user1", "login": True}, + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + }, ] login(servers, "openldap1", *users) diff --git a/tests/testflows/ldap/regression.py b/tests/testflows/ldap/regression.py index 549977fdbef..5b3ea30ef73 100755 --- a/tests/testflows/ldap/regression.py +++ b/tests/testflows/ldap/regression.py @@ -6,13 +6,19 @@ append_path(sys.path, "..") from helpers.argparser import argparser + @TestModule @Name("ldap") @ArgumentParser(argparser) -def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None): - """ClickHouse LDAP integration regression module. 
- """ - args = {"local": local, "clickhouse_binary_path": clickhouse_binary_path, "clickhouse_version": clickhouse_version} +def regression( + self, local, clickhouse_binary_path, clickhouse_version=None, stress=None +): + """ClickHouse LDAP integration regression module.""" + args = { + "local": local, + "clickhouse_binary_path": clickhouse_binary_path, + "clickhouse_version": clickhouse_version, + } self.context.clickhouse_version = clickhouse_version @@ -21,11 +27,24 @@ def regression(self, local, clickhouse_binary_path, clickhouse_version=None, str with Pool(3) as pool: try: - Feature(test=load("ldap.authentication.regression", "regression"), parallel=True, executor=pool)(**args) - Feature(test=load("ldap.external_user_directory.regression", "regression"), parallel=True, executor=pool)(**args) - Feature(test=load("ldap.role_mapping.regression", "regression"), parallel=True, executor=pool)(**args) + Feature( + test=load("ldap.authentication.regression", "regression"), + parallel=True, + executor=pool, + )(**args) + Feature( + test=load("ldap.external_user_directory.regression", "regression"), + parallel=True, + executor=pool, + )(**args) + Feature( + test=load("ldap.role_mapping.regression", "regression"), + parallel=True, + executor=pool, + )(**args) finally: join() + if main(): regression() diff --git a/tests/testflows/ldap/role_mapping/regression.py b/tests/testflows/ldap/role_mapping/regression.py index 9dd553e8d29..fc2b85dba6f 100755 --- a/tests/testflows/ldap/role_mapping/regression.py +++ b/tests/testflows/ldap/role_mapping/regression.py @@ -12,34 +12,36 @@ from helpers.common import check_clickhouse_version # Cross-outs of known fails xfails = { - "mapping/roles removed and added in parallel": - [(Fail, "known bug")], - "user dn detection/mapping/roles removed and added in parallel": - [(Fail, "known bug")], - "cluster secret/external user directory/:/:/cluster with secret/ldap user/:mapped True/select using mapped role/with privilege on source and distributed": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/34130")] + "mapping/roles removed and added in parallel": [(Fail, "known bug")], + "user dn detection/mapping/roles removed and added in parallel": [ + (Fail, "known bug") + ], + "cluster secret/external user directory/:/:/cluster with secret/ldap user/:mapped True/select using mapped role/with privilege on source and distributed": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/34130") + ], } # Force results without running the test -ffails={ - "cluster secret": - (Skip, "feature available on 20.10+", check_clickhouse_version("<20.10")) +ffails = { + "cluster secret": ( + Skip, + "feature available on 20.10+", + check_clickhouse_version("<20.10"), + ) } + @TestFeature @Name("role mapping") @ArgumentParser(argparser) -@Specifications( - SRS_014_ClickHouse_LDAP_Role_Mapping -) -@Requirements( - RQ_SRS_014_LDAP_RoleMapping("1.0") -) +@Specifications(SRS_014_ClickHouse_LDAP_Role_Mapping) +@Requirements(RQ_SRS_014_LDAP_RoleMapping("1.0")) @XFails(xfails) @FFails(ffails) -def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None): - """ClickHouse LDAP role mapping regression module. 
- """ +def regression( + self, local, clickhouse_binary_path, clickhouse_version=None, stress=None +): + """ClickHouse LDAP role mapping regression module.""" nodes = { "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"), } @@ -52,20 +54,27 @@ def regression(self, local, clickhouse_binary_path, clickhouse_version=None, str from platform import processor as current_cpu folder_name = os.path.basename(current_dir()) - if current_cpu() == 'aarch64': + if current_cpu() == "aarch64": env = f"{folder_name}_env_arm64" else: env = f"{folder_name}_env" - with Cluster(local, clickhouse_binary_path, nodes=nodes, - docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster: + with Cluster( + local, + clickhouse_binary_path, + nodes=nodes, + docker_compose_project_dir=os.path.join(current_dir(), env), + ) as cluster: self.context.cluster = cluster - Scenario(run=load("ldap.authentication.tests.sanity", "scenario"), name="ldap sanity") + Scenario( + run=load("ldap.authentication.tests.sanity", "scenario"), name="ldap sanity" + ) Feature(run=load("ldap.role_mapping.tests.server_config", "feature")) Feature(run=load("ldap.role_mapping.tests.mapping", "feature")) Feature(run=load("ldap.role_mapping.tests.user_dn_detection", "feature")) Feature(run=load("ldap.role_mapping.tests.cluster_secret", "feature")) + if main(): regression() diff --git a/tests/testflows/ldap/role_mapping/requirements/requirements.py b/tests/testflows/ldap/role_mapping/requirements/requirements.py index afa03f973fb..e63e8593e99 100644 --- a/tests/testflows/ldap/role_mapping/requirements/requirements.py +++ b/tests/testflows/ldap/role_mapping/requirements/requirements.py @@ -9,985 +9,1036 @@ from testflows.core import Requirement Heading = Specification.Heading RQ_SRS_014_LDAP_RoleMapping = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support mapping of [LDAP] groups to [RBAC] roles\n' - 'for users authenticated using [LDAP] external user directory.\n' - '\n' - ), + "[ClickHouse] SHALL support mapping of [LDAP] groups to [RBAC] roles\n" + "for users authenticated using [LDAP] external user directory.\n" + "\n" + ), link=None, level=3, - num='4.1.1') + num="4.1.1", +) RQ_SRS_014_LDAP_RoleMapping_WithFixedRoles = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.WithFixedRoles', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.WithFixedRoles", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support mapping of [LDAP] groups to [RBAC] roles\n' - 'for users authenticated using [LDAP] external user directory when\n' - 'one or more roles are specified in the `` section.\n' - '\n' - ), + "[ClickHouse] SHALL support mapping of [LDAP] groups to [RBAC] roles\n" + "for users authenticated using [LDAP] external user directory when\n" + "one or more roles are specified in the `` section.\n" + "\n" + ), link=None, level=3, - num='4.1.2') + num="4.1.2", +) RQ_SRS_014_LDAP_RoleMapping_Search = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Search', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Search", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL perform search on the [LDAP] server and map the results to [RBAC] role names \n' - 'when authenticating users using the [LDAP] external user directory if the `` section is configured\n' - 'as part of the [LDAP] 
external user directory. The matched roles SHALL be assigned to the user.\n' - '\n' - ), + "[ClickHouse] SHALL perform search on the [LDAP] server and map the results to [RBAC] role names \n" + "when authenticating users using the [LDAP] external user directory if the `` section is configured\n" + "as part of the [LDAP] external user directory. The matched roles SHALL be assigned to the user.\n" + "\n" + ), link=None, level=3, - num='4.1.3') + num="4.1.3", +) RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_WithUTF8Characters = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithUTF8Characters', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithUTF8Characters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support mapping [LDAP] search results for users authenticated using [LDAP] external user directory\n' - 'to an [RBAC] role that contains UTF-8 characters.\n' - '\n' - ), + "[ClickHouse] SHALL support mapping [LDAP] search results for users authenticated using [LDAP] external user directory\n" + "to an [RBAC] role that contains UTF-8 characters.\n" + "\n" + ), link=None, level=3, - num='4.2.1') + num="4.2.1", +) RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_Long = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.Long', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.Long", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support mapping [LDAP] search results for users authenticated using [LDAP] external user directory\n' - 'to an [RBAC] role that has a name with more than 128 characters.\n' - '\n' - ), + "[ClickHouse] SHALL support mapping [LDAP] search results for users authenticated using [LDAP] external user directory\n" + "to an [RBAC] role that has a name with more than 128 characters.\n" + "\n" + ), link=None, level=3, - num='4.2.2') + num="4.2.2", +) RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_WithSpecialXMLCharacters = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithSpecialXMLCharacters', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithSpecialXMLCharacters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support mapping [LDAP] search results for users authenticated using [LDAP] external user directory\n' - 'to an [RBAC] role that has a name that contains special characters that need to be escaped in XML.\n' - '\n' - ), + "[ClickHouse] SHALL support mapping [LDAP] search results for users authenticated using [LDAP] external user directory\n" + "to an [RBAC] role that has a name that contains special characters that need to be escaped in XML.\n" + "\n" + ), link=None, level=3, - num='4.2.3') + num="4.2.3", +) RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_WithSpecialRegexCharacters = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithSpecialRegexCharacters', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithSpecialRegexCharacters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support mapping [LDAP] search results for users authenticated using [LDAP] external user directory\n' - 'to an [RBAC] role that has a name that contains special characters that need to be escaped in regex.\n' - '\n' - ), + "[ClickHouse] SHALL support mapping [LDAP] search results for users authenticated using [LDAP] external user directory\n" + "to an [RBAC] role that 
has a name that contains special characters that need to be escaped in regex.\n" + "\n" + ), link=None, level=3, - num='4.2.4') + num="4.2.4", +) RQ_SRS_014_LDAP_RoleMapping_Map_MultipleRoles = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Map.MultipleRoles', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Map.MultipleRoles", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support mapping one or more [LDAP] search results for users authenticated using \n' - '[LDAP] external user directory to one or more [RBAC] role.\n' - '\n' - ), + "[ClickHouse] SHALL support mapping one or more [LDAP] search results for users authenticated using \n" + "[LDAP] external user directory to one or more [RBAC] role.\n" + "\n" + ), link=None, level=3, - num='4.3.1') + num="4.3.1", +) RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_Removed = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.Removed', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.Removed", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not assign [RBAC] role(s) for any users authenticated using [LDAP] external user directory\n' - 'if the corresponding [LDAP] group(s) that map those role(s) are removed. Any users that have active sessions SHALL still\n' - 'have privileges provided by the role(s) until the next time they are authenticated.\n' - '\n' - ), + "[ClickHouse] SHALL not assign [RBAC] role(s) for any users authenticated using [LDAP] external user directory\n" + "if the corresponding [LDAP] group(s) that map those role(s) are removed. Any users that have active sessions SHALL still\n" + "have privileges provided by the role(s) until the next time they are authenticated.\n" + "\n" + ), link=None, level=3, - num='4.4.1') + num="4.4.1", +) RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_RemovedAndAdded_Parallel = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.RemovedAndAdded.Parallel', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.RemovedAndAdded.Parallel", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users using [LDAP] external user directory \n' - 'when [LDAP] groups are removed and added \n' - 'at the same time as [LDAP] user authentications are performed in parallel.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users using [LDAP] external user directory \n" + "when [LDAP] groups are removed and added \n" + "at the same time as [LDAP] user authentications are performed in parallel.\n" + "\n" + ), link=None, level=3, - num='4.4.2') + num="4.4.2", +) RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_UserRemoved = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.UserRemoved', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.UserRemoved", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not assign [RBAC] role(s) for the user authenticated using [LDAP] external user directory\n' - 'if the user has been removed from the corresponding [LDAP] group(s) that map those role(s). \n' - 'Any active user sessions SHALL have privileges provided by the role(s) until the next time the user is authenticated.\n' - '\n' - ), + "[ClickHouse] SHALL not assign [RBAC] role(s) for the user authenticated using [LDAP] external user directory\n" + "if the user has been removed from the corresponding [LDAP] group(s) that map those role(s). 
\n" + "Any active user sessions SHALL have privileges provided by the role(s) until the next time the user is authenticated.\n" + "\n" + ), link=None, level=3, - num='4.4.3') + num="4.4.3", +) RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_UserRemovedAndAdded_Parallel = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.UserRemovedAndAdded.Parallel', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.UserRemovedAndAdded.Parallel", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users using [LDAP] external user directory\n' - 'when [LDAP] users are added and removed from [LDAP] groups used to map to [RBAC] roles\n' - 'at the same time as [LDAP] user authentications are performed in parallel.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users using [LDAP] external user directory\n" + "when [LDAP] users are added and removed from [LDAP] groups used to map to [RBAC] roles\n" + "at the same time as [LDAP] user authentications are performed in parallel.\n" + "\n" + ), link=None, level=3, - num='4.4.4') + num="4.4.4", +) RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_NotPresent = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.NotPresent', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.NotPresent", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not reject authentication attempt using [LDAP] external user directory if any of the roles that are \n' - 'are mapped from [LDAP] but are not present locally.\n' - '\n' - ), + "[ClickHouse] SHALL not reject authentication attempt using [LDAP] external user directory if any of the roles that are \n" + "are mapped from [LDAP] but are not present locally.\n" + "\n" + ), link=None, level=3, - num='4.5.1') + num="4.5.1", +) RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_Added = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Added', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Added", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL add the privileges provided by the [LDAP] mapped role when the\n' - 'role is not present during user authentication using [LDAP] external user directory\n' - 'as soon as the role is added.\n' - '\n' - ), + "[ClickHouse] SHALL add the privileges provided by the [LDAP] mapped role when the\n" + "role is not present during user authentication using [LDAP] external user directory\n" + "as soon as the role is added.\n" + "\n" + ), link=None, level=3, - num='4.5.2') + num="4.5.2", +) RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_Removed = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Removed', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Removed", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL remove the privileges provided by the role from all the\n' - 'users authenticated using [LDAP] external user directory if the [RBAC] role that was mapped\n' - 'as a result of [LDAP] search is removed.\n' - '\n' - ), + "[ClickHouse] SHALL remove the privileges provided by the role from all the\n" + "users authenticated using [LDAP] external user directory if the [RBAC] role that was mapped\n" + "as a result of [LDAP] search is removed.\n" + "\n" + ), link=None, level=3, - num='4.5.3') + num="4.5.3", +) RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_Readded = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Readded', - 
version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Readded", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL reassign the [RBAC] role and add all the privileges provided by the role\n' - 'when it is re-added after removal for all [LDAP] users authenticated using external user directory\n' - 'for any role that was mapped as a result of [LDAP] search.\n' - '\n' - ), + "[ClickHouse] SHALL reassign the [RBAC] role and add all the privileges provided by the role\n" + "when it is re-added after removal for all [LDAP] users authenticated using external user directory\n" + "for any role that was mapped as a result of [LDAP] search.\n" + "\n" + ), link=None, level=3, - num='4.5.4') + num="4.5.4", +) RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_RemovedAndAdded_Parallel = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.RemovedAndAdded.Parallel', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.RemovedAndAdded.Parallel", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authenticating users using [LDAP] external user directory\n' - 'when [RBAC] roles that are mapped by [LDAP] groups\n' - 'are added and removed at the same time as [LDAP] user authentications are performed in parallel.\n' - '\n' - ), + "[ClickHouse] SHALL support authenticating users using [LDAP] external user directory\n" + "when [RBAC] roles that are mapped by [LDAP] groups\n" + "are added and removed at the same time as [LDAP] user authentications are performed in parallel.\n" + "\n" + ), link=None, level=3, - num='4.5.5') + num="4.5.5", +) RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_New = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.New', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.New", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not allow any new roles to be assigned to any\n' - 'users authenticated using [LDAP] external user directory unless the role is specified\n' - 'in the configuration of the external user directory or was mapped as a result of [LDAP] search.\n' - '\n' - ), + "[ClickHouse] SHALL not allow any new roles to be assigned to any\n" + "users authenticated using [LDAP] external user directory unless the role is specified\n" + "in the configuration of the external user directory or was mapped as a result of [LDAP] search.\n" + "\n" + ), link=None, level=3, - num='4.5.6') + num="4.5.6", +) RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_NewPrivilege = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.NewPrivilege', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.NewPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL add new privilege to all the users authenticated using [LDAP] external user directory\n' - 'when new privilege is added to one of the roles that were mapped as a result of [LDAP] search.\n' - '\n' - ), + "[ClickHouse] SHALL add new privilege to all the users authenticated using [LDAP] external user directory\n" + "when new privilege is added to one of the roles that were mapped as a result of [LDAP] search.\n" + "\n" + ), link=None, level=3, - num='4.5.7') + num="4.5.7", +) RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_RemovedPrivilege = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.RemovedPrivilege', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.RemovedPrivilege", + version="1.0", 
priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL remove privilege from all the users authenticated using [LDAP] external user directory\n' - 'when the privilege that was provided by the mapped role is removed from all the roles \n' - 'that were mapped as a result of [LDAP] search.\n' - '\n' - ), + "[ClickHouse] SHALL remove privilege from all the users authenticated using [LDAP] external user directory\n" + "when the privilege that was provided by the mapped role is removed from all the roles \n" + "that were mapped as a result of [LDAP] search.\n" + "\n" + ), link=None, level=3, - num='4.5.8') + num="4.5.8", +) RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of users using [LDAP] server\n' - 'when using [LDAP] external user directory that has role mapping enabled.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of users using [LDAP] server\n" + "when using [LDAP] external user directory that has role mapping enabled.\n" + "\n" + ), link=None, level=3, - num='4.6.1') + num="4.6.1", +) RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.ValidAndInvalid', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.ValidAndInvalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support authentication of valid users and\n' - 'prohibit authentication of invalid users using [LDAP] server\n' - 'in parallel without having invalid attempts affecting valid authentications\n' - 'when using [LDAP] external user directory that has role mapping enabled.\n' - '\n' - ), + "[ClickHouse] SHALL support authentication of valid users and\n" + "prohibit authentication of invalid users using [LDAP] server\n" + "in parallel without having invalid attempts affecting valid authentications\n" + "when using [LDAP] external user directory that has role mapping enabled.\n" + "\n" + ), link=None, level=3, - num='4.6.2') + num="4.6.2", +) RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_MultipleServers = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.MultipleServers', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.MultipleServers", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of external [LDAP] users\n' - 'authenticated using multiple [LDAP] external user directories that have\n' - 'role mapping enabled.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of external [LDAP] users\n" + "authenticated using multiple [LDAP] external user directories that have\n" + "role mapping enabled.\n" + "\n" + ), link=None, level=3, - num='4.6.3') + num="4.6.3", +) RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_LocalOnly = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.LocalOnly', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.LocalOnly", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of users defined only locally\n' - 'when one 
or more [LDAP] external user directories with role mapping\n' - 'are specified in the configuration file.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of users defined only locally\n" + "when one or more [LDAP] external user directories with role mapping\n" + "are specified in the configuration file.\n" + "\n" + ), link=None, level=3, - num='4.6.4') + num="4.6.4", +) RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_LocalAndMultipleLDAP = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.LocalAndMultipleLDAP', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.LocalAndMultipleLDAP", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of local and external [LDAP] users\n' - 'authenticated using multiple [LDAP] external user directories with role mapping enabled.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of local and external [LDAP] users\n" + "authenticated using multiple [LDAP] external user directories with role mapping enabled.\n" + "\n" + ), link=None, level=3, - num='4.6.5') + num="4.6.5", +) RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_SameUser = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.SameUser', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.SameUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parallel authentication of the same external [LDAP] user\n' - 'authenticated using the same [LDAP] external user directory with role mapping enabled.\n' - '\n' - ), + "[ClickHouse] SHALL support parallel authentication of the same external [LDAP] user\n" + "authenticated using the same [LDAP] external user directory with role mapping enabled.\n" + "\n" + ), link=None, level=3, - num='4.6.6') + num="4.6.6", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_BindDN = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.BindDN', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.BindDN", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `` parameter in the `` section\n' - 'of the `config.xml` that SHALL be used to construct the `DN` to bind to.\n' - 'The resulting `DN` SHALL be constructed by replacing all `{user_name}` substrings of the template \n' - 'with the actual user name during each authentication attempt.\n' - '\n' - 'For example, \n' - '\n' - '```xml\n' - '\n' - ' \n' - ' \n' - ' \n' - ' uid={user_name},ou=users,dc=example,dc=com\n' - ' \n' - ' \n' - ' \n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the `` parameter in the `` section\n" + "of the `config.xml` that SHALL be used to construct the `DN` to bind to.\n" + "The resulting `DN` SHALL be constructed by replacing all `{user_name}` substrings of the template \n" + "with the actual user name during each authentication attempt.\n" + "\n" + "For example, \n" + "\n" + "```xml\n" + "\n" + " \n" + " \n" + " \n" + " uid={user_name},ou=users,dc=example,dc=com\n" + " \n" + " \n" + " \n" + "\n" + "```\n" + "\n" + ), link=None, level=4, - num='4.7.1.1') + num="4.7.1.1", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_BindDN_ConflictWith_AuthDN = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.BindDN.ConflictWith.AuthDN', - version='1.0', + 
name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.BindDN.ConflictWith.AuthDN", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if both `` and `` or `` parameters\n' - 'are specified as part of [LDAP] server description in the `` section of the `config.xml`.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if both `` and `` or `` parameters\n" + "are specified as part of [LDAP] server description in the `` section of the `config.xml`.\n" + "\n" + ), link=None, level=4, - num='4.7.1.2') + num="4.7.1.2", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `user_dn_detection` sub-section in the `` section\n' - 'of the `config.xml` that SHALL be used to enable detecting the actual user DN of the bound user. \n' - '\n' - ), + "[ClickHouse] SHALL support the `user_dn_detection` sub-section in the `` section\n" + "of the `config.xml` that SHALL be used to enable detecting the actual user DN of the bound user. \n" + "\n" + ), link=None, level=4, - num='4.7.2.1') + num="4.7.2.1", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_BaseDN = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.BaseDN', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.BaseDN", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `base_dn` parameter in the `user_dn_detection` sub-section in the \n' - '`` section of the `config.xml` that SHALL specify how \n' - 'to construct the base DN for the LDAP search to detect the actual user DN.\n' - '\n' - 'For example,\n' - '\n' - '```xml\n' - '\n' - ' ...\n' - ' CN=Users,DC=example,DC=com\n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `base_dn` parameter in the `user_dn_detection` sub-section in the \n" + "`` section of the `config.xml` that SHALL specify how \n" + "to construct the base DN for the LDAP search to detect the actual user DN.\n" + "\n" + "For example,\n" + "\n" + "```xml\n" + "\n" + " ...\n" + " CN=Users,DC=example,DC=com\n" + "\n" + "```\n" + "\n" + ), link=None, level=4, - num='4.7.2.2') + num="4.7.2.2", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_Scope = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.Scope', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.Scope", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `scope` parameter in the `user_dn_detection` sub-section in the \n' - '`` section of the `config.xml` that SHALL the scope of the \n' - 'LDAP search to detect the actual user DN. The `scope` parameter SHALL support the following values\n' - '\n' - '* `base`\n' - '* `one_level`\n' - '* `children`\n' - '* `subtree`\n' - '\n' - 'For example,\n' - '\n' - '```xml\n' - '\n' - ' ...\n' - ' one_level\n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `scope` parameter in the `user_dn_detection` sub-section in the \n" + "`` section of the `config.xml` that SHALL the scope of the \n" + "LDAP search to detect the actual user DN. 
The `scope` parameter SHALL support the following values\n" + "\n" + "* `base`\n" + "* `one_level`\n" + "* `children`\n" + "* `subtree`\n" + "\n" + "For example,\n" + "\n" + "```xml\n" + "\n" + " ...\n" + " one_level\n" + "\n" + "```\n" + "\n" + ), link=None, level=4, - num='4.7.2.3') + num="4.7.2.3", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_SearchFilter = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.SearchFilter', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.SearchFilter", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `search_filter` parameter in the `user_dn_detection` sub-section in the \n' - '`` section of the `config.xml` that SHALL specify the LDAP search\n' - 'filter used to detect the actual user DN.\n' - '\n' - 'For example,\n' - '\n' - '```xml\n' - '\n' - ' ...\n' - ' (&(objectClass=user)(sAMAccountName={user_name}))\n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `search_filter` parameter in the `user_dn_detection` sub-section in the \n" + "`` section of the `config.xml` that SHALL specify the LDAP search\n" + "filter used to detect the actual user DN.\n" + "\n" + "For example,\n" + "\n" + "```xml\n" + "\n" + " ...\n" + " (&(objectClass=user)(sAMAccountName={user_name}))\n" + "\n" + "```\n" + "\n" + ), link=None, level=4, - num='4.7.2.4') + num="4.7.2.4", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Syntax = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Syntax', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `role_mapping` sub-section in the `` section\n' - 'of the `config.xml`.\n' - '\n' - 'For example,\n' - '\n' - '```xml\n' - '\n' - ' \n' - ' \n' - ' \n' - ' \n' - ' ou=groups,dc=example,dc=com\n' - ' cn\n' - ' subtree\n' - ' (&(objectClass=groupOfNames)(member={bind_dn}))\n' - ' clickhouse_\n' - ' \n' - ' \n' - ' \n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the `role_mapping` sub-section in the `` section\n" + "of the `config.xml`.\n" + "\n" + "For example,\n" + "\n" + "```xml\n" + "\n" + " \n" + " \n" + " \n" + " \n" + " ou=groups,dc=example,dc=com\n" + " cn\n" + " subtree\n" + " (&(objectClass=groupOfNames)(member={bind_dn}))\n" + " clickhouse_\n" + " \n" + " \n" + " \n" + "\n" + "```\n" + "\n" + ), link=None, level=4, - num='4.8.1.1') + num="4.8.1.1", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_SpecialCharactersEscaping = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.SpecialCharactersEscaping', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.SpecialCharactersEscaping", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support properly escaped special XML characters that can be present\n' - 'as part of the values for different configuration parameters inside the\n' - '`` section of the `config.xml` such as\n' - '\n' - '* `` parameter\n' - '* `` parameter\n' - '\n' - ), + "[ClickHouse] SHALL support properly escaped special XML characters that can be present\n" + "as part of the values for different configuration parameters inside the\n" + "`` section of the `config.xml` such 
as\n" + "\n" + "* `` parameter\n" + "* `` parameter\n" + "\n" + ), link=None, level=4, - num='4.8.2.1') + num="4.8.2.1", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_MultipleSections = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.MultipleSections', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.MultipleSections", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support multiple `` sections defined inside the same `` section \n' - 'of the `config.xml` and all of the `` sections SHALL be applied.\n' - '\n' - ), + "[ClickHouse] SHALL support multiple `` sections defined inside the same `` section \n" + "of the `config.xml` and all of the `` sections SHALL be applied.\n" + "\n" + ), link=None, level=4, - num='4.8.3.1') + num="4.8.3.1", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_MultipleSections_IdenticalParameters = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.MultipleSections.IdenticalParameters', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.MultipleSections.IdenticalParameters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not duplicate mapped roles when multiple `` sections \n' - 'with identical parameters are defined inside the `` section \n' - 'of the `config.xml`.\n' - '\n' - ), + "[ClickHouse] SHALL not duplicate mapped roles when multiple `` sections \n" + "with identical parameters are defined inside the `` section \n" + "of the `config.xml`.\n" + "\n" + ), link=None, level=4, - num='4.8.3.2') + num="4.8.3.2", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_BaseDN = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.BaseDN', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.BaseDN", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `` parameter in the `` section \n' - 'of the `config.xml` that SHALL specify the template to be used to construct the base `DN` for the [LDAP] search.\n' - '\n' - 'The resulting `DN` SHALL be constructed by replacing all the `{user_name}`, `{bind_dn}`, and `user_dn` substrings of \n' - 'the template with the actual user name and bind `DN` during each [LDAP] search.\n' - '\n' - ), + "[ClickHouse] SHALL support the `` parameter in the `` section \n" + "of the `config.xml` that SHALL specify the template to be used to construct the base `DN` for the [LDAP] search.\n" + "\n" + "The resulting `DN` SHALL be constructed by replacing all the `{user_name}`, `{bind_dn}`, and `user_dn` substrings of \n" + "the template with the actual user name and bind `DN` during each [LDAP] search.\n" + "\n" + ), link=None, level=4, - num='4.8.4.1') + num="4.8.4.1", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Attribute = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Attribute', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Attribute", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `` parameter in the `` section of \n' - 'the `config.xml` that SHALL specify the name of the attribute whose values SHALL 
be returned by the [LDAP] search.\n' - '\n' - ), + "[ClickHouse] SHALL support the `` parameter in the `` section of \n" + "the `config.xml` that SHALL specify the name of the attribute whose values SHALL be returned by the [LDAP] search.\n" + "\n" + ), link=None, level=4, - num='4.8.5.1') + num="4.8.5.1", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Scope = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `` parameter in the `` section of \n' - 'the `config.xml` that SHALL define the scope of the LDAP search as defined \n' - 'by the https://ldapwiki.com/wiki/LDAP%20Search%20Scopes.\n' - '\n' - ), + "[ClickHouse] SHALL support the `` parameter in the `` section of \n" + "the `config.xml` that SHALL define the scope of the LDAP search as defined \n" + "by the https://ldapwiki.com/wiki/LDAP%20Search%20Scopes.\n" + "\n" + ), link=None, level=4, - num='4.8.6.1') + num="4.8.6.1", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Scope_Value_Base = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Base', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Base", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `base` value for the the `` parameter in the \n' - '`` section of the `config.xml` that SHALL\n' - 'limit the scope as specified by the https://ldapwiki.com/wiki/BaseObject.\n' - '\n' - ), + "[ClickHouse] SHALL support the `base` value for the the `` parameter in the \n" + "`` section of the `config.xml` that SHALL\n" + "limit the scope as specified by the https://ldapwiki.com/wiki/BaseObject.\n" + "\n" + ), link=None, level=4, - num='4.8.6.2') + num="4.8.6.2", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Scope_Value_OneLevel = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.OneLevel', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.OneLevel", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `one_level` value for the the `` parameter in the \n' - '`` section of the `config.xml` that SHALL\n' - 'limit the scope as specified by the https://ldapwiki.com/wiki/SingleLevel.\n' - '\n' - ), + "[ClickHouse] SHALL support the `one_level` value for the the `` parameter in the \n" + "`` section of the `config.xml` that SHALL\n" + "limit the scope as specified by the https://ldapwiki.com/wiki/SingleLevel.\n" + "\n" + ), link=None, level=4, - num='4.8.6.3') + num="4.8.6.3", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Scope_Value_Children = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Children', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Children", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `children` value for the the `` parameter in the \n' - '`` section of the `config.xml` that SHALL\n' - 'limit the scope as specified by the 
https://ldapwiki.com/wiki/SubordinateSubtree.\n' - '\n' - ), + "[ClickHouse] SHALL support the `children` value for the the `` parameter in the \n" + "`` section of the `config.xml` that SHALL\n" + "limit the scope as specified by the https://ldapwiki.com/wiki/SubordinateSubtree.\n" + "\n" + ), link=None, level=4, - num='4.8.6.4') + num="4.8.6.4", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Scope_Value_Subtree = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Subtree', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Subtree", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `children` value for the the `` parameter in the \n' - '`` section of the `config.xml` that SHALL\n' - 'limit the scope as specified by the https://ldapwiki.com/wiki/WholeSubtree.\n' - '\n' - ), + "[ClickHouse] SHALL support the `children` value for the the `` parameter in the \n" + "`` section of the `config.xml` that SHALL\n" + "limit the scope as specified by the https://ldapwiki.com/wiki/WholeSubtree.\n" + "\n" + ), link=None, level=4, - num='4.8.6.5') + num="4.8.6.5", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Scope_Value_Default = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Default', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `subtree` as the default value for the the `` parameter in the \n' - '`` section of the `config.xml` when the `` parameter is not specified.\n' - '\n' - ), + "[ClickHouse] SHALL support the `subtree` as the default value for the the `` parameter in the \n" + "`` section of the `config.xml` when the `` parameter is not specified.\n" + "\n" + ), link=None, level=4, - num='4.8.6.6') + num="4.8.6.6", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_SearchFilter = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.SearchFilter', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.SearchFilter", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `` parameter in the ``\n' - 'section of the `config.xml` that SHALL specify the template used to construct \n' - 'the [LDAP filter](https://ldap.com/ldap-filters/) for the search.\n' - '\n' - 'The resulting filter SHALL be constructed by replacing all `{user_name}`, `{bind_dn}`, `{base_dn}`, and `{user_dn}` substrings \n' - 'of the template with the actual user name, bind `DN`, and base `DN` during each the [LDAP] search.\n' - ' \n' - ), + "[ClickHouse] SHALL support the `` parameter in the ``\n" + "section of the `config.xml` that SHALL specify the template used to construct \n" + "the [LDAP filter](https://ldap.com/ldap-filters/) for the search.\n" + "\n" + "The resulting filter SHALL be constructed by replacing all `{user_name}`, `{bind_dn}`, `{base_dn}`, and `{user_dn}` substrings \n" + "of the template with the actual user name, bind `DN`, and base `DN` during each the [LDAP] search.\n" + " \n" + ), link=None, level=4, - num='4.8.7.1') + num="4.8.7.1", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix = 
Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `` parameter in the ``\n' - 'section of the `config.xml` that SHALL be expected to be in front of each string in \n' - 'the original list of strings returned by the [LDAP] search. \n' - 'Prefix SHALL be removed from the original strings and resulting strings SHALL be treated as [RBAC] role names. \n' - '\n' - ), + "[ClickHouse] SHALL support the `` parameter in the ``\n" + "section of the `config.xml` that SHALL be expected to be in front of each string in \n" + "the original list of strings returned by the [LDAP] search. \n" + "Prefix SHALL be removed from the original strings and resulting strings SHALL be treated as [RBAC] role names. \n" + "\n" + ), link=None, level=4, - num='4.8.8.1') + num="4.8.8.1", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_Default = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.Default', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support empty string as the default value of the `` parameter in \n' - 'the `` section of the `config.xml`.\n' - '\n' - ), + "[ClickHouse] SHALL support empty string as the default value of the `` parameter in \n" + "the `` section of the `config.xml`.\n" + "\n" + ), link=None, level=4, - num='4.8.8.2') + num="4.8.8.2", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithUTF8Characters = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithUTF8Characters', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithUTF8Characters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support UTF8 characters as the value of the `` parameter in\n' - 'the `` section of the `config.xml`.\n' - '\n' - ), + "[ClickHouse] SHALL support UTF8 characters as the value of the `` parameter in\n" + "the `` section of the `config.xml`.\n" + "\n" + ), link=None, level=4, - num='4.8.8.3') + num="4.8.8.3", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialXMLCharacters = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialXMLCharacters', - version='1.0', + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialXMLCharacters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support XML special characters as the value of the `` parameter in\n' - 'the `` section of the `config.xml`.\n' - '\n' - ), + "[ClickHouse] SHALL support XML special characters as the value of the `` parameter in\n" + "the `` section of the `config.xml`.\n" + "\n" + ), link=None, level=4, - num='4.8.8.4') + num="4.8.8.4", +) RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialRegexCharacters = Requirement( - name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialRegexCharacters', - version='1.0', + 
name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialRegexCharacters", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support regex special characters as the value of the `` parameter in\n' - 'the `` section of the `config.xml`.\n' - '\n' - ), + "[ClickHouse] SHALL support regex special characters as the value of the `` parameter in\n" + "the `` section of the `config.xml`.\n" + "\n" + ), link=None, level=4, - num='4.8.8.5') + num="4.8.8.5", +) RQ_SRS_014_LDAP_ClusterWithAndWithoutSecret_DistributedTable = Requirement( - name='RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable', - version='1.0', + name="RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support propagating query user roles and their corresponding privileges\n' - 'when using `Distributed` table to the remote servers for the users that are authenticated\n' - 'using LDAP either via external user directory or defined in `users.xml` when\n' - 'cluster is configured with and without ``.\n' - '\n' - 'For example,\n' - '\n' - '```xml\n' - '\n' - ' \n' - ' \n' - ' qwerty123\n' - ' \n' - ' true\n' - ' \n' - ' dwh\n' - ' host1\n' - ' \n' - ' \n' - ' \n' - ' true\n' - ' \n' - ' dwh\n' - ' host2\n' - ' \n' - ' \n' - ' \n' - ' \n' - '\n' - '```\n' - '\n' - 'or \n' - '\n' - '```xml\n' - '\n' - ' \n' - ' \n' - ' \n' - ' true\n' - ' \n' - ' dwh\n' - ' host1\n' - ' \n' - ' \n' - ' \n' - ' true\n' - ' \n' - ' dwh\n' - ' host2\n' - ' \n' - ' \n' - ' \n' - ' \n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support propagating query user roles and their corresponding privileges\n" + "when using `Distributed` table to the remote servers for the users that are authenticated\n" + "using LDAP either via external user directory or defined in `users.xml` when\n" + "cluster is configured with and without ``.\n" + "\n" + "For example,\n" + "\n" + "```xml\n" + "\n" + " \n" + " \n" + " qwerty123\n" + " \n" + " true\n" + " \n" + " dwh\n" + " host1\n" + " \n" + " \n" + " \n" + " true\n" + " \n" + " dwh\n" + " host2\n" + " \n" + " \n" + " \n" + " \n" + "\n" + "```\n" + "\n" + "or \n" + "\n" + "```xml\n" + "\n" + " \n" + " \n" + " \n" + " true\n" + " \n" + " dwh\n" + " host1\n" + " \n" + " \n" + " \n" + " true\n" + " \n" + " dwh\n" + " host2\n" + " \n" + " \n" + " \n" + " \n" + "\n" + "```\n" + "\n" + ), link=None, level=4, - num='4.9.8.1') + num="4.9.8.1", +) SRS_014_ClickHouse_LDAP_Role_Mapping = Specification( - name='SRS-014 ClickHouse LDAP Role Mapping', + name="SRS-014 ClickHouse LDAP Role Mapping", description=None, author=None, - date=None, - status=None, + date=None, + status=None, approved_by=None, approved_date=None, approved_version=None, @@ -999,83 +1050,261 @@ SRS_014_ClickHouse_LDAP_Role_Mapping = Specification( parent=None, children=None, headings=( - Heading(name='Revision History', level=1, num='1'), - Heading(name='Introduction', level=1, num='2'), - Heading(name='Terminology', level=1, num='3'), - Heading(name='LDAP', level=2, num='3.1'), - Heading(name='Requirements', level=1, num='4'), - Heading(name='General', level=2, num='4.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping', level=3, num='4.1.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.WithFixedRoles', level=3, num='4.1.2'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Search', level=3, num='4.1.3'), - Heading(name='Mapped Role Names', level=2, num='4.2'), - 
Heading(name='RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithUTF8Characters', level=3, num='4.2.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.Long', level=3, num='4.2.2'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithSpecialXMLCharacters', level=3, num='4.2.3'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithSpecialRegexCharacters', level=3, num='4.2.4'), - Heading(name='Multiple Roles', level=2, num='4.3'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Map.MultipleRoles', level=3, num='4.3.1'), - Heading(name='LDAP Groups', level=2, num='4.4'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.Removed', level=3, num='4.4.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.RemovedAndAdded.Parallel', level=3, num='4.4.2'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.UserRemoved', level=3, num='4.4.3'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.UserRemovedAndAdded.Parallel', level=3, num='4.4.4'), - Heading(name='RBAC Roles', level=2, num='4.5'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.NotPresent', level=3, num='4.5.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Added', level=3, num='4.5.2'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Removed', level=3, num='4.5.3'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Readded', level=3, num='4.5.4'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.RemovedAndAdded.Parallel', level=3, num='4.5.5'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.New', level=3, num='4.5.6'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.NewPrivilege', level=3, num='4.5.7'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.RemovedPrivilege', level=3, num='4.5.8'), - Heading(name='Authentication', level=2, num='4.6'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel', level=3, num='4.6.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.ValidAndInvalid', level=3, num='4.6.2'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.MultipleServers', level=3, num='4.6.3'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.LocalOnly', level=3, num='4.6.4'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.LocalAndMultipleLDAP', level=3, num='4.6.5'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.SameUser', level=3, num='4.6.6'), - Heading(name='Server Configuration', level=2, num='4.7'), - Heading(name='BindDN Parameter', level=3, num='4.7.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.BindDN', level=4, num='4.7.1.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.BindDN.ConflictWith.AuthDN', level=4, num='4.7.1.2'), - Heading(name='User DN Detection', level=3, num='4.7.2'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection', level=4, num='4.7.2.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.BaseDN', level=4, num='4.7.2.2'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.Scope', level=4, num='4.7.2.3'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.SearchFilter', level=4, num='4.7.2.4'), - Heading(name='External User Directory Configuration', level=2, num='4.8'), - Heading(name='Syntax', level=3, num='4.8.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Syntax', level=4, num='4.8.1.1'), - 
Heading(name='Special Characters Escaping', level=3, num='4.8.2'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.SpecialCharactersEscaping', level=4, num='4.8.2.1'), - Heading(name='Multiple Sections', level=3, num='4.8.3'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.MultipleSections', level=4, num='4.8.3.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.MultipleSections.IdenticalParameters', level=4, num='4.8.3.2'), - Heading(name='BaseDN Parameter', level=3, num='4.8.4'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.BaseDN', level=4, num='4.8.4.1'), - Heading(name='Attribute Parameter', level=3, num='4.8.5'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Attribute', level=4, num='4.8.5.1'), - Heading(name='Scope Parameter', level=3, num='4.8.6'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope', level=4, num='4.8.6.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Base', level=4, num='4.8.6.2'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.OneLevel', level=4, num='4.8.6.3'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Children', level=4, num='4.8.6.4'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Subtree', level=4, num='4.8.6.5'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Default', level=4, num='4.8.6.6'), - Heading(name='Search Filter Parameter', level=3, num='4.8.7'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.SearchFilter', level=4, num='4.8.7.1'), - Heading(name='Prefix Parameter', level=3, num='4.8.8'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix', level=4, num='4.8.8.1'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.Default', level=4, num='4.8.8.2'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithUTF8Characters', level=4, num='4.8.8.3'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialXMLCharacters', level=4, num='4.8.8.4'), - Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialRegexCharacters', level=4, num='4.8.8.5'), - Heading(name='Cluster With And Without Secret', level=2, num='4.9'), - Heading(name='RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable', level=4, num='4.9.8.1'), - Heading(name='References', level=1, num='5'), + Heading(name="Revision History", level=1, num="1"), + Heading(name="Introduction", level=1, num="2"), + Heading(name="Terminology", level=1, num="3"), + Heading(name="LDAP", level=2, num="3.1"), + Heading(name="Requirements", level=1, num="4"), + Heading(name="General", level=2, num="4.1"), + Heading(name="RQ.SRS-014.LDAP.RoleMapping", level=3, num="4.1.1"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.WithFixedRoles", level=3, num="4.1.2" ), + Heading(name="RQ.SRS-014.LDAP.RoleMapping.Search", level=3, num="4.1.3"), + Heading(name="Mapped Role Names", level=2, num="4.2"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithUTF8Characters", + level=3, + 
num="4.2.1", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.Long", level=3, num="4.2.2" + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithSpecialXMLCharacters", + level=3, + num="4.2.3", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Map.Role.Name.WithSpecialRegexCharacters", + level=3, + num="4.2.4", + ), + Heading(name="Multiple Roles", level=2, num="4.3"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Map.MultipleRoles", level=3, num="4.3.1" + ), + Heading(name="LDAP Groups", level=2, num="4.4"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.Removed", level=3, num="4.4.1" + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.RemovedAndAdded.Parallel", + level=3, + num="4.4.2", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.UserRemoved", + level=3, + num="4.4.3", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.LDAP.Group.UserRemovedAndAdded.Parallel", + level=3, + num="4.4.4", + ), + Heading(name="RBAC Roles", level=2, num="4.5"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.NotPresent", + level=3, + num="4.5.1", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Added", level=3, num="4.5.2" + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Removed", level=3, num="4.5.3" + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.Readded", level=3, num="4.5.4" + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.RemovedAndAdded.Parallel", + level=3, + num="4.5.5", + ), + Heading(name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.New", level=3, num="4.5.6"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.NewPrivilege", + level=3, + num="4.5.7", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.RBAC.Role.RemovedPrivilege", + level=3, + num="4.5.8", + ), + Heading(name="Authentication", level=2, num="4.6"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel", + level=3, + num="4.6.1", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.ValidAndInvalid", + level=3, + num="4.6.2", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.MultipleServers", + level=3, + num="4.6.3", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.LocalOnly", + level=3, + num="4.6.4", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.LocalAndMultipleLDAP", + level=3, + num="4.6.5", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Authentication.Parallel.SameUser", + level=3, + num="4.6.6", + ), + Heading(name="Server Configuration", level=2, num="4.7"), + Heading(name="BindDN Parameter", level=3, num="4.7.1"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.BindDN", + level=4, + num="4.7.1.1", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.BindDN.ConflictWith.AuthDN", + level=4, + num="4.7.1.2", + ), + Heading(name="User DN Detection", level=3, num="4.7.2"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection", + level=4, + num="4.7.2.1", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.BaseDN", + level=4, + num="4.7.2.2", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.Scope", + level=4, + num="4.7.2.3", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.Server.UserDNDetection.SearchFilter", + level=4, + num="4.7.2.4", + ), + Heading(name="External User Directory Configuration", level=2, 
num="4.8"), + Heading(name="Syntax", level=3, num="4.8.1"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Syntax", + level=4, + num="4.8.1.1", + ), + Heading(name="Special Characters Escaping", level=3, num="4.8.2"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.SpecialCharactersEscaping", + level=4, + num="4.8.2.1", + ), + Heading(name="Multiple Sections", level=3, num="4.8.3"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.MultipleSections", + level=4, + num="4.8.3.1", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.MultipleSections.IdenticalParameters", + level=4, + num="4.8.3.2", + ), + Heading(name="BaseDN Parameter", level=3, num="4.8.4"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.BaseDN", + level=4, + num="4.8.4.1", + ), + Heading(name="Attribute Parameter", level=3, num="4.8.5"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Attribute", + level=4, + num="4.8.5.1", + ), + Heading(name="Scope Parameter", level=3, num="4.8.6"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope", + level=4, + num="4.8.6.1", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Base", + level=4, + num="4.8.6.2", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.OneLevel", + level=4, + num="4.8.6.3", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Children", + level=4, + num="4.8.6.4", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Subtree", + level=4, + num="4.8.6.5", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Scope.Value.Default", + level=4, + num="4.8.6.6", + ), + Heading(name="Search Filter Parameter", level=3, num="4.8.7"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.SearchFilter", + level=4, + num="4.8.7.1", + ), + Heading(name="Prefix Parameter", level=3, num="4.8.8"), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix", + level=4, + num="4.8.8.1", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.Default", + level=4, + num="4.8.8.2", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithUTF8Characters", + level=4, + num="4.8.8.3", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialXMLCharacters", + level=4, + num="4.8.8.4", + ), + Heading( + name="RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialRegexCharacters", + level=4, + num="4.8.8.5", + ), + Heading(name="Cluster With And Without Secret", level=2, num="4.9"), + Heading( + name="RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable", + level=4, + num="4.9.8.1", + ), + Heading(name="References", level=1, num="5"), + ), requirements=( RQ_SRS_014_LDAP_RoleMapping, RQ_SRS_014_LDAP_RoleMapping_WithFixedRoles, @@ -1128,8 +1357,8 @@ SRS_014_ClickHouse_LDAP_Role_Mapping = Specification( RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialXMLCharacters, 
RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialRegexCharacters, RQ_SRS_014_LDAP_ClusterWithAndWithoutSecret_DistributedTable, - ), - content=''' + ), + content=""" # SRS-014 ClickHouse LDAP Role Mapping # Software Requirements Specification @@ -1763,4 +1992,5 @@ or [Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/ldap/role_mapping/requirements/requirements.md [Git]: https://git-scm.com/ [GitHub]: https://github.com -''') +""", +) diff --git a/tests/testflows/ldap/role_mapping/tests/cluster_secret.py b/tests/testflows/ldap/role_mapping/tests/cluster_secret.py index 38c751c1026..1309230d345 100644 --- a/tests/testflows/ldap/role_mapping/tests/cluster_secret.py +++ b/tests/testflows/ldap/role_mapping/tests/cluster_secret.py @@ -6,37 +6,24 @@ from ldap.role_mapping.tests.common import * def cluster_node(name): - """Get cluster node instance. - """ + """Get cluster node instance.""" return current().context.cluster.node(name) @TestStep(Given) -def add_sharded_cluster(self, node, name="sharded_cluster_with_secret", with_secret=True, restart=False): - """Add configuration of sharded cluster that uses secret. - """ - entries = { - "remote_servers": { - name: [] - } - } +def add_sharded_cluster( + self, node, name="sharded_cluster_with_secret", with_secret=True, restart=False +): + """Add configuration of sharded cluster that uses secret.""" + entries = {"remote_servers": {name: []}} if with_secret: - entries["remote_servers"][name].append( - { - "secret": "qwerty123" - } - ) + entries["remote_servers"][name].append({"secret": "qwerty123"}) for node_name in self.context.cluster.nodes["clickhouse"]: entries["remote_servers"][name].append( { - "shard": { - "replica": { - "host": node_name, - "port": "9000" - } - }, + "shard": {"replica": {"host": node_name, "port": "9000"}}, }, ) @@ -46,14 +33,13 @@ def add_sharded_cluster(self, node, name="sharded_cluster_with_secret", with_sec @TestStep(Given) def create_table(self, on_cluster, name=None, node=None): - """Create table on cluster. - """ + """Create table on cluster.""" if node is None: node = self.context.node if name is None: name = getuid() - try: + try: node.query( f"CREATE TABLE {name} ON CLUSTER {on_cluster} (d Date, a String, b UInt8, x String, y Int8) " f"ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') " @@ -67,16 +53,17 @@ def create_table(self, on_cluster, name=None, node=None): @TestStep(Given) def create_distributed_table(self, on_cluster, over, name=None, node=None): - """Create distributed table on cluster over some underlying table. 
- """ + """Create distributed table on cluster over some underlying table.""" if node is None: node = self.context.node if name is None: name = getuid() try: - node.query(f"CREATE TABLE {name} ON CLUSTER {on_cluster} AS {over} " - f"ENGINE = Distributed({on_cluster}, default, {over}, rand())") + node.query( + f"CREATE TABLE {name} ON CLUSTER {on_cluster} AS {over} " + f"ENGINE = Distributed({on_cluster}, default, {over}, rand())" + ) yield name finally: with Finally(f"I drop table {name} on cluster {on_cluster} on {node.name}"): @@ -93,9 +80,11 @@ def grant_select(self, cluster, privilege, role_or_user, node=None): try: node.query(f"GRANT ON CLUSTER {cluster} {privilege} TO {role_or_user}") - yield + yield finally: - with Finally(f"I remove privilege '{privilege}' on {cluster} from {role_or_user}"): + with Finally( + f"I remove privilege '{privilege}' on {cluster} from {role_or_user}" + ): node.query(f"REVOKE ON CLUSTER {cluster} {privilege} FROM {role_or_user}") @@ -109,22 +98,24 @@ def select_using_mapped_role(self, cluster, role_name, role_mapped, user): # default cluster node node = cluster_node("clickhouse1") - query_settings=[ - ("user", user["username"]), ("password", user["password"]) - ] + query_settings = [("user", user["username"]), ("password", user["password"])] with Given(f"I create base table on cluster {cluster}"): src_table = create_table(on_cluster=cluster, node=node) - + with And(f"I create distristibuted table over base table on cluster {cluster}"): - dist_table = create_distributed_table(on_cluster=cluster, over=src_table, node=node) + dist_table = create_distributed_table( + on_cluster=cluster, over=src_table, node=node + ) with And("I check that grants for the user"): for name in self.context.cluster.nodes["clickhouse"]: for attempt in retries(timeout=10): with attempt: with By(f"executing query on node {name}", flags=TE): - r = self.context.cluster.node(name).query(f"SHOW GRANTS", settings=query_settings) + r = self.context.cluster.node(name).query( + f"SHOW GRANTS", settings=query_settings + ) if role_mapped: with Then("check that role is mapped"): assert role_name in r.output, error() @@ -133,51 +124,96 @@ def select_using_mapped_role(self, cluster, role_name, role_mapped, user): with When("user tries to read from the source table without privilege"): for name in self.context.cluster.nodes["clickhouse"]: with By(f"executing query on node {name}", flags=TE): - self.context.cluster.node(name).query(f"SELECT * FROM {src_table}", settings=query_settings, - exitcode=241, message=f"DB::Exception:") - + self.context.cluster.node(name).query( + f"SELECT * FROM {src_table}", + settings=query_settings, + exitcode=241, + message=f"DB::Exception:", + ) + with Example("with privilege on source table"): with Given("I grant SELECT on source table to the mapped role"): - grant_select(cluster=cluster, privilege=f"SELECT ON {src_table}", role_or_user=role_name, node=node) + grant_select( + cluster=cluster, + privilege=f"SELECT ON {src_table}", + role_or_user=role_name, + node=node, + ) with Then("user should be able to read from the source table"): for name in self.context.cluster.nodes["clickhouse"]: with By(f"executing query on node {name}", flags=TE): - self.context.cluster.node(name).query(f"SELECT * FROM {src_table}", settings=query_settings, - exitcode=0 if role_mapped else 241, message="" if role_mapped else "DB::Exception:") + self.context.cluster.node(name).query( + f"SELECT * FROM {src_table}", + settings=query_settings, + exitcode=0 if role_mapped else 241, + message="" 
if role_mapped else "DB::Exception:", + ) with Example("with privilege only on distributed table"): with Given("I grant SELECT on distributed table to the mapped role"): - grant_select(cluster=cluster, privilege=f"SELECT ON {dist_table}", role_or_user=role_name, node=node) + grant_select( + cluster=cluster, + privilege=f"SELECT ON {dist_table}", + role_or_user=role_name, + node=node, + ) with Then("user should still not be able to read from distributed table"): for name in self.context.cluster.nodes["clickhouse"]: with By(f"executing query on node {name}", flags=TE): - self.context.cluster.node(name).query(f"SELECT * FROM {dist_table}", settings=query_settings, - exitcode=241, message=f"DB::Exception:") + self.context.cluster.node(name).query( + f"SELECT * FROM {dist_table}", + settings=query_settings, + exitcode=241, + message=f"DB::Exception:", + ) with Example("with privilege only on source but not on distributed table"): with Given("I grant SELECT on source table to the mapped role"): - grant_select(cluster=cluster, privilege=f"SELECT ON {src_table}", role_or_user=role_name, node=node) + grant_select( + cluster=cluster, + privilege=f"SELECT ON {src_table}", + role_or_user=role_name, + node=node, + ) with Then("user should still not be able to read from distributed table"): for name in self.context.cluster.nodes["clickhouse"]: with By(f"executing query on node {name}", flags=TE): - self.context.cluster.node(name).query(f"SELECT * FROM {dist_table}", settings=query_settings, - exitcode=241, message=f"DB::Exception:") + self.context.cluster.node(name).query( + f"SELECT * FROM {dist_table}", + settings=query_settings, + exitcode=241, + message=f"DB::Exception:", + ) with Example("with privilege on source and distributed"): with Given("I grant SELECT on source table to the mapped role"): - grant_select(cluster=cluster, privilege=f"SELECT ON {src_table}", role_or_user=role_name, node=node) + grant_select( + cluster=cluster, + privilege=f"SELECT ON {src_table}", + role_or_user=role_name, + node=node, + ) with And("I grant SELECT on distributed table to the mapped role"): - grant_select(cluster=cluster, privilege=f"SELECT ON {dist_table}", role_or_user=role_name, node=node) + grant_select( + cluster=cluster, + privilege=f"SELECT ON {dist_table}", + role_or_user=role_name, + node=node, + ) with Then("user should be able to read from the distributed table"): for name in self.context.cluster.nodes["clickhouse"]: with By(f"executing query on node {name}", flags=TE): - self.context.cluster.node(name).query(f"SELECT * FROM {dist_table}", settings=query_settings, - exitcode=0 if role_mapped else 241, message="" if role_mapped else "DB::Exception:") + self.context.cluster.node(name).query( + f"SELECT * FROM {dist_table}", + settings=query_settings, + exitcode=0 if role_mapped else 241, + message="" if role_mapped else "DB::Exception:", + ) @TestFeature @@ -195,15 +231,24 @@ def execute_tests(self, role_name, role_mapped, ldap_user, local_user): with Given(f"I grant role {role_name} to local RBAC user"): for name in self.context.cluster.nodes["clickhouse"]: with By(f"on node {name}"): - cluster_node(name).query(f"GRANT {role_name} TO {local_user['username']}") + cluster_node(name).query( + f"GRANT {role_name} TO {local_user['username']}" + ) for scenario in ordered(loads(current_module(), Scenario)): - scenario(cluster="sharded_cluster_" + cluster_type.replace(" ", "_"), - role_name=role_name, role_mapped=role_mapped, user=user) + scenario( + cluster="sharded_cluster_" + + cluster_type.replace(" ", 
"_"), + role_name=role_name, + role_mapped=role_mapped, + user=user, + ) @TestOutline(Feature) -def outline_using_external_user_directory(self, ldap_servers, mapping, ldap_roles_or_groups, rbac_roles, mapped_roles): +def outline_using_external_user_directory( + self, ldap_servers, mapping, ldap_roles_or_groups, rbac_roles, mapped_roles +): """Check using simple and distributed table access when using LDAP external user directory or LDAP authenticated existing RBAC users with and without cluster secret. @@ -222,7 +267,7 @@ def outline_using_external_user_directory(self, ldap_servers, mapping, ldap_role local_user = { "type": "local user", "username": "local_user1", - "password": "local_user1" + "password": "local_user1", } role_mappings = [ @@ -230,7 +275,7 @@ def outline_using_external_user_directory(self, ldap_servers, mapping, ldap_role "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "clickhouse_" + "prefix": "clickhouse_", } ] @@ -239,26 +284,55 @@ def outline_using_external_user_directory(self, ldap_servers, mapping, ldap_role for name in ldap_servers: for group_name in ldap_roles_or_groups: with By(f"adding {group_name}"): - ldap_groups = add_ldap_groups(groups=({"cn": group_name},), node=cluster_node(name)) + ldap_groups = add_ldap_groups( + groups=({"cn": group_name},), node=cluster_node(name) + ) with And("I add LDAP user to the group"): - add_user_to_group_in_ldap(user=ldap_user, group=ldap_groups[0], node=cluster_node(name)) - - with Given(f"I add LDAP external user directory configuration with {mapping} role mapping"): + add_user_to_group_in_ldap( + user=ldap_user, + group=ldap_groups[0], + node=cluster_node(name), + ) + + with Given( + f"I add LDAP external user directory configuration with {mapping} role mapping" + ): for name in self.context.cluster.nodes["clickhouse"]: if mapping == "dynamic": - By(f"on node {name}", test=add_ldap_external_user_directory, parallel=True)( - server="openldap1", role_mappings=role_mappings, - restart=True, node=cluster_node(name)) + By( + f"on node {name}", + test=add_ldap_external_user_directory, + parallel=True, + )( + server="openldap1", + role_mappings=role_mappings, + restart=True, + node=cluster_node(name), + ) elif mapping == "dynamic and static": - By(f"on node {name}", test=add_ldap_external_user_directory, parallel=True)( - server="openldap1", role_mappings=role_mappings, + By( + f"on node {name}", + test=add_ldap_external_user_directory, + parallel=True, + )( + server="openldap1", + role_mappings=role_mappings, roles=ldap_roles_or_groups, - restart=True, node=cluster_node(name)) + restart=True, + node=cluster_node(name), + ) else: - By(f"on node {name}", test=add_ldap_external_user_directory, parallel=True)( - server="openldap1", roles=ldap_roles_or_groups, - restart=True, node=cluster_node(name)) + By( + f"on node {name}", + test=add_ldap_external_user_directory, + parallel=True, + )( + server="openldap1", + roles=ldap_roles_or_groups, + restart=True, + node=cluster_node(name), + ) with And("I add local RBAC user"): for name in self.context.cluster.nodes["clickhouse"]: @@ -270,8 +344,13 @@ def outline_using_external_user_directory(self, ldap_servers, mapping, ldap_role with By(f"on node {name}"): add_rbac_roles(roles=rbac_roles, node=cluster_node(name)) - for role_name in rbac_roles: - execute_tests(role_name=role_name, role_mapped=(role_name in mapped_roles) ,ldap_user=ldap_user, local_user=local_user) + for role_name in 
rbac_roles: + execute_tests( + role_name=role_name, + role_mapped=(role_name in mapped_roles), + ldap_user=ldap_user, + local_user=local_user, + ) @TestFeature @@ -287,13 +366,13 @@ def using_authenticated_users(self, ldap_servers): "username": "myuser", "userpassword": "myuser", "password": "myuser", - "server": "openldap1" + "server": "openldap1", } local_user = { "type": "local user", "username": "local_user2", - "password": "local_user2" + "password": "local_user2", } with Given("I add LDAP user"): @@ -307,7 +386,8 @@ def using_authenticated_users(self, ldap_servers): with And("I add LDAP authenticated users configuration"): for name in self.context.cluster.nodes["clickhouse"]: By(f"on node {name}", test=add_ldap_authenticated_users, parallel=True)( - users=[ldap_user], rbac=True, node=cluster_node(name)) + users=[ldap_user], rbac=True, node=cluster_node(name) + ) with And("I add local RBAC user"): for name in self.context.cluster.nodes["clickhouse"]: @@ -318,18 +398,27 @@ def using_authenticated_users(self, ldap_servers): for name in self.context.cluster.nodes["clickhouse"]: with By(f"on node {name}"): add_rbac_roles(roles=(f"{role_name}",), node=cluster_node(name)) - + with And("I grant role to LDAP authenticated user"): for name in self.context.cluster.nodes["clickhouse"]: with By(f"on node {name}"): - cluster_node(name).query(f"GRANT {role_name} TO {ldap_user['username']}") + cluster_node(name).query( + f"GRANT {role_name} TO {ldap_user['username']}" + ) with And("I grant role to local RBAC user"): for name in self.context.cluster.nodes["clickhouse"]: with By(f"on node {name}"): - cluster_node(name).query(f"GRANT {role_name} TO {local_user['username']}") + cluster_node(name).query( + f"GRANT {role_name} TO {local_user['username']}" + ) - execute_tests(role_name=role_name, role_mapped=role_name, ldap_user=ldap_user, local_user=local_user) + execute_tests( + role_name=role_name, + role_mapped=role_name, + ldap_user=ldap_user, + local_user=local_user, + ) @TestFeature @@ -343,68 +432,111 @@ def using_external_user_directory(self, ldap_servers): with Example(f"{mapping}"): with Example("all mapped roles exist"): if mapping == "dynamic": - ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}"] + ldap_roles_or_groups = [ + f"clickhouse_role0_{uid}", + f"clickhouse_role1_{uid}", + ] elif mapping == "dynamic and static": - ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}", f"role2_{uid}", f"role3_{uid}"] + ldap_roles_or_groups = [ + f"clickhouse_role0_{uid}", + f"clickhouse_role1_{uid}", + f"role2_{uid}", + f"role3_{uid}", + ] else: - ldap_roles_or_groups = [f"role0_{uid}", f"role1_{uid}", f"role2_{uid}", f"role3_{uid}"] - + ldap_roles_or_groups = [ + f"role0_{uid}", + f"role1_{uid}", + f"role2_{uid}", + f"role3_{uid}", + ] + rbac_roles = [f"role0_{uid}", f"role1_{uid}"] mapped_roles = [f"role0_{uid}", f"role1_{uid}"] - outline_using_external_user_directory(ldap_servers=ldap_servers, - mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups, - rbac_roles=rbac_roles, mapped_roles=mapped_roles) + outline_using_external_user_directory( + ldap_servers=ldap_servers, + mapping=mapping, + ldap_roles_or_groups=ldap_roles_or_groups, + rbac_roles=rbac_roles, + mapped_roles=mapped_roles, + ) with Example("some mapped roles exist"): if mapping == "dynamic": - ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}"] + ldap_roles_or_groups = [ + f"clickhouse_role0_{uid}", + f"clickhouse_role1_{uid}", + ] elif mapping == "dynamic 
and static": - ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}", f"role2_{uid}", f"role3_{uid}"] + ldap_roles_or_groups = [ + f"clickhouse_role0_{uid}", + f"clickhouse_role1_{uid}", + f"role2_{uid}", + f"role3_{uid}", + ] else: ldap_roles_or_groups = [f"role0_{uid}", f"role1_{uid}"] - + rbac_roles = [f"role0_{uid}", f"role_not_mapped_{uid}", f"role2_{uid}"] - + if mapping == "dynamic and static": mapped_roles = [f"role0_{uid}", f"role2_{uid}"] else: - mapped_roles = [f"role0_{uid}"] + mapped_roles = [f"role0_{uid}"] - outline_using_external_user_directory(ldap_servers=ldap_servers, - mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups, - rbac_roles=rbac_roles, mapped_roles=mapped_roles) + outline_using_external_user_directory( + ldap_servers=ldap_servers, + mapping=mapping, + ldap_roles_or_groups=ldap_roles_or_groups, + rbac_roles=rbac_roles, + mapped_roles=mapped_roles, + ) with Example("no mapped roles exist"): if mapping == "dynamic": - ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}"] + ldap_roles_or_groups = [ + f"clickhouse_role0_{uid}", + f"clickhouse_role1_{uid}", + ] elif mapping == "dynamic and static": - ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}", f"role2_{uid}", f"role3_{uid}"] + ldap_roles_or_groups = [ + f"clickhouse_role0_{uid}", + f"clickhouse_role1_{uid}", + f"role2_{uid}", + f"role3_{uid}", + ] else: ldap_roles_or_groups = [f"role0_{uid}", f"role1_{uid}"] - + rbac_roles = [f"role_not_mapped0_{uid}", f"role_not_mapped1_{uid}"] mapped_roles = [] - outline_using_external_user_directory(ldap_servers=ldap_servers, - mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups, - rbac_roles=rbac_roles, mapped_roles=mapped_roles) + outline_using_external_user_directory( + ldap_servers=ldap_servers, + mapping=mapping, + ldap_roles_or_groups=ldap_roles_or_groups, + rbac_roles=rbac_roles, + mapped_roles=mapped_roles, + ) with Example("empty roles"): ldap_roles_or_groups = [] rbac_roles = [f"role0_{uid}", f"role1_{uid}"] mapped_roles = [] - outline_using_external_user_directory(ldap_servers=ldap_servers, - mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups, - rbac_roles=rbac_roles, mapped_roles=mapped_roles) + outline_using_external_user_directory( + ldap_servers=ldap_servers, + mapping=mapping, + ldap_roles_or_groups=ldap_roles_or_groups, + rbac_roles=rbac_roles, + mapped_roles=mapped_roles, + ) @TestFeature @Name("cluster secret") -@Requirements( - RQ_SRS_014_LDAP_ClusterWithAndWithoutSecret_DistributedTable("1.0") -) +@Requirements(RQ_SRS_014_LDAP_ClusterWithAndWithoutSecret_DistributedTable("1.0")) def feature(self): """Check using Distributed table when cluster is configured with and without secret using users authenticated via LDAP either through external user directory @@ -415,7 +547,7 @@ def feature(self): "host": "openldap1", "port": "389", "enable_tls": "no", - "bind_dn": "cn={user_name},ou=users,dc=company,dc=com" + "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", }, } @@ -423,20 +555,41 @@ def feature(self): for name in ldap_servers: fix_ldap_permissions(node=cluster_node(name)) - with And("I add LDAP servers configuration on all nodes", description=f"{ldap_servers}"): + with And( + "I add LDAP servers configuration on all nodes", description=f"{ldap_servers}" + ): for name in self.context.cluster.nodes["clickhouse"]: By(f"on node {name}", test=add_ldap_servers_configuration, parallel=True)( - servers=ldap_servers, node=cluster_node(name)) + servers=ldap_servers, 
node=cluster_node(name) + ) with And("I add sharded cluster that uses secrets on all the nodes"): for name in self.context.cluster.nodes["clickhouse"]: - By(f"adding configuration on {name}", test=add_sharded_cluster, parallel=True)( - node=cluster_node(name), name="sharded_cluster_with_secret", with_secret=True) - + By( + f"adding configuration on {name}", + test=add_sharded_cluster, + parallel=True, + )( + node=cluster_node(name), + name="sharded_cluster_with_secret", + with_secret=True, + ) + with And("I add sharded cluster that does not use secrets on all the nodes"): for name in self.context.cluster.nodes["clickhouse"]: - By(f"adding configuration on {name}", test=add_sharded_cluster, parallel=True)( - node=cluster_node(name), name="sharded_cluster_without_secret", with_secret=False) + By( + f"adding configuration on {name}", + test=add_sharded_cluster, + parallel=True, + )( + node=cluster_node(name), + name="sharded_cluster_without_secret", + with_secret=False, + ) - Feature("external user directory", test=using_external_user_directory)(ldap_servers=ldap_servers) - Feature("authenticated users", test=using_authenticated_users)(ldap_servers=ldap_servers) + Feature("external user directory", test=using_external_user_directory)( + ldap_servers=ldap_servers + ) + Feature("authenticated users", test=using_authenticated_users)( + ldap_servers=ldap_servers + ) diff --git a/tests/testflows/ldap/role_mapping/tests/common.py b/tests/testflows/ldap/role_mapping/tests/common.py index 155dac83ec7..ec7cd6b0144 100644 --- a/tests/testflows/ldap/role_mapping/tests/common.py +++ b/tests/testflows/ldap/role_mapping/tests/common.py @@ -4,13 +4,17 @@ from testflows.core import * from testflows.asserts import error from helpers.common import create_xml_config_content, add_config -from ldap.authentication.tests.common import getuid, create_ldap_servers_config_content, ldap_authenticated_users +from ldap.authentication.tests.common import ( + getuid, + create_ldap_servers_config_content, + ldap_authenticated_users, +) from ldap.external_user_directory.tests.common import rbac_roles, rbac_users, ldap_users + @TestStep(Given) def create_table(self, name, create_statement, on_cluster=False, node=None): - """Create table. - """ + """Create table.""" if node is None: node = current().context.node try: @@ -24,19 +28,27 @@ def create_table(self, name, create_statement, on_cluster=False, node=None): else: node.query(f"DROP TABLE IF EXISTS {name}") + @TestStep(Given) -def add_ldap_servers_configuration(self, servers, config=None, config_d_dir="/etc/clickhouse-server/config.d", - config_file="ldap_servers.xml", timeout=60, restart=False, node=None): - """Add LDAP servers configuration to config.xml. - """ +def add_ldap_servers_configuration( + self, + servers, + config=None, + config_d_dir="/etc/clickhouse-server/config.d", + config_file="ldap_servers.xml", + timeout=60, + restart=False, + node=None, +): + """Add LDAP servers configuration to config.xml.""" if config is None: config = create_ldap_servers_config_content(servers, config_d_dir, config_file) return add_config(config, restart=restart, node=node) + @TestStep(Given) def add_ldap_groups(self, groups, node=None): - """Add multiple new groups to the LDAP server. 
- """ + """Add multiple new groups to the LDAP server.""" try: _groups = [] for group in groups: @@ -48,69 +60,89 @@ def add_ldap_groups(self, groups, node=None): for _group in _groups: delete_group_from_ldap(_group, node=node) + @TestStep(Given) -def add_ldap_external_user_directory(self, server, roles=None, role_mappings=None, - config_d_dir="/etc/clickhouse-server/config.d", - config_file=None, timeout=60, restart=True, config=None, node=None): - """Add LDAP external user directory. - """ +def add_ldap_external_user_directory( + self, + server, + roles=None, + role_mappings=None, + config_d_dir="/etc/clickhouse-server/config.d", + config_file=None, + timeout=60, + restart=True, + config=None, + node=None, +): + """Add LDAP external user directory.""" if config_file is None: config_file = f"ldap_external_user_directory_with_role_mapping_{getuid()}.xml" if config is None: - config = create_ldap_external_user_directory_config_content(server=server, roles=roles, - role_mappings=role_mappings, config_d_dir=config_d_dir, config_file=config_file) + config = create_ldap_external_user_directory_config_content( + server=server, + roles=roles, + role_mappings=role_mappings, + config_d_dir=config_d_dir, + config_file=config_file, + ) return add_config(config, restart=restart, node=node) + @TestStep(Given) def add_rbac_roles(self, roles, node=None): - """Add RBAC roles. - """ + """Add RBAC roles.""" with rbac_roles(*roles, node=node) as _roles: yield _roles + @TestStep(Given) def add_rbac_users(self, users, node=None): - """Add RBAC users. - """ + """Add RBAC users.""" if node is None: node = self.context.node try: with Given(f"I create local users on {node}"): for user in users: - username = user.get('username', None) or user['cn'] - password = user.get('password', None) or user['userpassword'] + username = user.get("username", None) or user["cn"] + password = user.get("password", None) or user["userpassword"] with By(f"creating user {username}"): - node.query(f"CREATE USER OR REPLACE {username} IDENTIFIED WITH PLAINTEXT_PASSWORD BY '{password}'") + node.query( + f"CREATE USER OR REPLACE {username} IDENTIFIED WITH PLAINTEXT_PASSWORD BY '{password}'" + ) yield users finally: with Finally(f"I drop local users on {node}"): for user in users: - username = user.get('username', None) or user['cn'] + username = user.get("username", None) or user["cn"] with By(f"dropping user {username}", flags=TE): node.query(f"DROP USER IF EXISTS {username}") + @TestStep(Given) def add_ldap_users(self, users, node=None): - """Add LDAP users. - """ + """Add LDAP users.""" with ldap_users(*users, node=node) as _users: yield _users + @TestStep(Given) -def add_ldap_authenticated_users(self, users, config_file=None, rbac=False, node=None, restart=True): - """Add LDAP authenticated users. - """ +def add_ldap_authenticated_users( + self, users, config_file=None, rbac=False, node=None, restart=True +): + """Add LDAP authenticated users.""" if config_file is None: config_file = f"ldap_users_{getuid()}.xml" - with ldap_authenticated_users(*users, config_file=config_file, restart=restart, rbac=rbac, node=node): + with ldap_authenticated_users( + *users, config_file=config_file, restart=restart, rbac=rbac, node=node + ): yield users + def add_group_to_ldap(cn, gidnumber=None, node=None, _gidnumber=[600], exitcode=0): - """Add new group entry to LDAP. 
- """ + """Add new group entry to LDAP.""" _gidnumber[0] += 1 if node is None: @@ -123,7 +155,7 @@ def add_group_to_ldap(cn, gidnumber=None, node=None, _gidnumber=[600], exitcode= "dn": f"cn={cn},ou=groups,dc=company,dc=com", "objectclass": ["top", "groupOfUniqueNames"], "uniquemember": "cn=admin,dc=company,dc=com", - "_server": node.name + "_server": node.name, } lines = [] @@ -140,29 +172,31 @@ def add_group_to_ldap(cn, gidnumber=None, node=None, _gidnumber=[600], exitcode= ldif = "\n".join(lines) r = node.command( - f"echo -e \"{ldif}\" | ldapadd -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin") + f'echo -e "{ldif}" | ldapadd -x -H ldap://localhost -D "cn=admin,dc=company,dc=com" -w admin' + ) if exitcode is not None: assert r.exitcode == exitcode, error() return group + def delete_group_from_ldap(group, node=None, exitcode=0): - """Delete group entry from LDAP. - """ + """Delete group entry from LDAP.""" if node is None: node = current().context.ldap_node with By(f"deleting group {group['dn']}"): r = node.command( - f"ldapdelete -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin \"{group['dn']}\"") + f"ldapdelete -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin \"{group['dn']}\"" + ) if exitcode is not None: assert r.exitcode == exitcode, error() + def fix_ldap_permissions(node=None, exitcode=0): - """Fix LDAP access permissions. - """ + """Fix LDAP access permissions.""" if node is None: node = current().context.ldap_node @@ -172,63 +206,67 @@ def fix_ldap_permissions(node=None, exitcode=0): "delete: olcAccess\n" "-\n" "add: olcAccess\n" - "olcAccess: to attrs=userPassword,shadowLastChange by self write by dn=\\\"cn=admin,dc=company,dc=com\\\" write by anonymous auth by * none\n" - "olcAccess: to * by self write by dn=\\\"cn=admin,dc=company,dc=com\\\" read by users read by * none" + 'olcAccess: to attrs=userPassword,shadowLastChange by self write by dn=\\"cn=admin,dc=company,dc=com\\" write by anonymous auth by * none\n' + 'olcAccess: to * by self write by dn=\\"cn=admin,dc=company,dc=com\\" read by users read by * none' ) - r = node.command( - f"echo -e \"{ldif}\" | ldapmodify -Y EXTERNAL -Q -H ldapi:///") + r = node.command(f'echo -e "{ldif}" | ldapmodify -Y EXTERNAL -Q -H ldapi:///') if exitcode is not None: assert r.exitcode == exitcode, error() + def add_user_to_group_in_ldap(user, group, node=None, exitcode=0): - """Add user to a group in LDAP. - """ + """Add user to a group in LDAP.""" if node is None: node = current().context.ldap_node - ldif = (f"dn: {group['dn']}\n" + ldif = ( + f"dn: {group['dn']}\n" "changetype: modify\n" "add: uniquemember\n" - f"uniquemember: {user['dn']}") + f"uniquemember: {user['dn']}" + ) with By(f"adding user {user['dn']} to group {group['dn']}"): r = node.command( - f"echo -e \"{ldif}\" | ldapmodify -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin") + f'echo -e "{ldif}" | ldapmodify -x -H ldap://localhost -D "cn=admin,dc=company,dc=com" -w admin' + ) if exitcode is not None: assert r.exitcode == exitcode, error() + def delete_user_from_group_in_ldap(user, group, node=None, exitcode=0): - """Delete user from a group in LDAP. 
- """ + """Delete user from a group in LDAP.""" if node is None: node = current().context.ldap_node - ldif = (f"dn: {group['dn']}\n" + ldif = ( + f"dn: {group['dn']}\n" "changetype: modify\n" "delete: uniquemember\n" - f"uniquemember: {user['dn']}") + f"uniquemember: {user['dn']}" + ) with By(f"deleting user {user['dn']} from group {group['dn']}"): r = node.command( - f"echo -e \"{ldif}\" | ldapmodify -x -H ldap://localhost -D \"cn=admin,dc=company,dc=com\" -w admin") + f'echo -e "{ldif}" | ldapmodify -x -H ldap://localhost -D "cn=admin,dc=company,dc=com" -w admin' + ) if exitcode is not None: assert r.exitcode == exitcode, error() -def create_ldap_external_user_directory_config_content(server=None, roles=None, role_mappings=None, **kwargs): - """Create LDAP external user directory configuration file content. - """ - kwargs["config_file"] = kwargs.pop("config_file", "external_ldap_user_directory.xml") - entries = { - "user_directories": { - "ldap": { - } - } - } +def create_ldap_external_user_directory_config_content( + server=None, roles=None, role_mappings=None, **kwargs +): + """Create LDAP external user directory configuration file content.""" + kwargs["config_file"] = kwargs.pop( + "config_file", "external_ldap_user_directory.xml" + ) + + entries = {"user_directories": {"ldap": {}}} entries["user_directories"]["ldap"] = [] @@ -236,7 +274,9 @@ def create_ldap_external_user_directory_config_content(server=None, roles=None, entries["user_directories"]["ldap"].append({"server": server}) if roles: - entries["user_directories"]["ldap"].append({"roles": [{r: None} for r in roles]}) + entries["user_directories"]["ldap"].append( + {"roles": [{r: None} for r in roles]} + ) if role_mappings: for role_mapping in role_mappings: @@ -244,8 +284,10 @@ def create_ldap_external_user_directory_config_content(server=None, roles=None, return create_xml_config_content(entries, **kwargs) + def create_entries_ldap_external_user_directory_config_content(entries, **kwargs): - """Create LDAP external user directory configuration file content. 
- """ - kwargs["config_file"] = kwargs.pop("config_file", "external_ldap_user_directory.xml") + """Create LDAP external user directory configuration file content.""" + kwargs["config_file"] = kwargs.pop( + "config_file", "external_ldap_user_directory.xml" + ) return create_xml_config_content(entries, **kwargs) diff --git a/tests/testflows/ldap/role_mapping/tests/mapping.py b/tests/testflows/ldap/role_mapping/tests/mapping.py index c8637f53d16..b74e3a073fe 100644 --- a/tests/testflows/ldap/role_mapping/tests/mapping.py +++ b/tests/testflows/ldap/role_mapping/tests/mapping.py @@ -6,22 +6,28 @@ from ldap.role_mapping.requirements import * from ldap.role_mapping.tests.common import * from ldap.external_user_directory.tests.common import randomword -from ldap.external_user_directory.tests.authentications import login_with_valid_username_and_password -from ldap.external_user_directory.tests.authentications import login_with_invalid_username_and_valid_password -from ldap.external_user_directory.tests.authentications import login_with_valid_username_and_invalid_password +from ldap.external_user_directory.tests.authentications import ( + login_with_valid_username_and_password, +) +from ldap.external_user_directory.tests.authentications import ( + login_with_invalid_username_and_valid_password, +) +from ldap.external_user_directory.tests.authentications import ( + login_with_valid_username_and_invalid_password, +) + def remove_ldap_groups_in_parallel(groups, i, iterations=10): - """Remove LDAP groups. - """ + """Remove LDAP groups.""" with When(f"LDAP groups are removed #{i}"): for j in range(iterations): for group in groups: with When(f"I delete group #{j}", description=f"{group}"): delete_group_from_ldap(group, exitcode=None) + def add_ldap_groups_in_parallel(ldap_user, names, i, iterations=10): - """Add LDAP groups. - """ + """Add LDAP groups.""" with When(f"LDAP groups are added #{i}"): for j in range(iterations): for name in names: @@ -29,48 +35,53 @@ def add_ldap_groups_in_parallel(ldap_user, names, i, iterations=10): group = add_group_to_ldap(cn=name, exitcode=None) with When(f"I add user to the group"): - add_user_to_group_in_ldap(user=ldap_user, group=group, exitcode=None) + add_user_to_group_in_ldap( + user=ldap_user, group=group, exitcode=None + ) + def add_user_to_ldap_groups_in_parallel(ldap_user, groups, i, iterations=10): - """Add user to LDAP groups. - """ + """Add user to LDAP groups.""" with When(f"user is added to LDAP groups #{i}"): for j in range(iterations): for group in groups: with When(f"I add user to the group {group['dn']} #{j}"): - add_user_to_group_in_ldap(user=ldap_user, group=group, exitcode=None) + add_user_to_group_in_ldap( + user=ldap_user, group=group, exitcode=None + ) + def remove_user_from_ldap_groups_in_parallel(ldap_user, groups, i, iterations=10): - """Remove user from LDAP groups. - """ + """Remove user from LDAP groups.""" with When(f"user is removed from LDAP groups #{i}"): for j in range(iterations): for group in groups: with When(f"I remove user from the group {group['dn']} #{j}"): - delete_user_from_group_in_ldap(user=ldap_user, group=group, exitcode=None) + delete_user_from_group_in_ldap( + user=ldap_user, group=group, exitcode=None + ) + def add_roles_in_parallel(role_names, i, iterations=10): - """Add roles. 
- """ + """Add roles.""" with When(f"roles are added #{i}"): for j in range(iterations): for role_name in role_names: with When(f"I add role {role_name} #{j}"): current().context.node.query(f"CREATE ROLE OR REPLACE {role_name}") + def remove_roles_in_parallel(role_names, i, iterations=10): - """Remove roles. - """ + """Remove roles.""" with When(f"roles are removed #{i}"): for j in range(iterations): for role_name in role_names: with When(f"I remove role {role_name} #{j}"): current().context.node.query(f"DROP ROLE IF EXISTS {role_name}") + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_Map_MultipleRoles("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_Map_MultipleRoles("1.0")) def multiple_roles(self, ldap_server, ldap_user): """Check that users authenticated using LDAP external user directory can be assigned multiple LDAP mapped roles. @@ -82,12 +93,14 @@ def multiple_roles(self, ldap_server, ldap_user): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix":"" + "prefix": "", } ] with Given("I add LDAP groups"): - groups = add_ldap_groups(groups=({"cn": f"role0_{uid}"}, {"cn": f"role1_{uid}"})) + groups = add_ldap_groups( + groups=({"cn": f"role0_{uid}"}, {"cn": f"role1_{uid}"}) + ) with And("I add LDAP user to each LDAP group"): add_user_to_group_in_ldap(user=ldap_user, group=groups[0]) @@ -97,23 +110,30 @@ def multiple_roles(self, ldap_server, ldap_user): roles = add_rbac_roles(roles=(f"role0_{uid}", f"role1_{uid}")) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as an LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])]) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + ) with Then("I expect the user to have mapped LDAP roles"): with By(f"checking that first role is assigned", description=f"{roles[0]}"): assert roles[0] in r.output, error() - with And(f"checking that second role is also assigned", description=f"{roles[1]}"): + with And( + f"checking that second role is also assigned", description=f"{roles[1]}" + ): assert roles[1] in r.output, error() + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_WithFixedRoles("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_WithFixedRoles("1.0")) def with_fixed_roles(self, ldap_server, ldap_user): """Check that LDAP users can be assigned roles dynamically and statically using the `` section. 
@@ -127,7 +147,7 @@ def with_fixed_roles(self, ldap_server, ldap_user): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] @@ -144,12 +164,18 @@ def with_fixed_roles(self, ldap_server, ldap_user): roles = add_rbac_roles(roles=(f"{fixed_role_name}",)) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, roles=roles, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, roles=roles, restart=True + ) with When(f"I login as an LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])]) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + ) with Then("I expect the user to have mapped and fixed roles"): with By("checking that mapped role is assigned"): @@ -157,17 +183,19 @@ def with_fixed_roles(self, ldap_server, ldap_user): with And("checking that fixed role is assigned"): assert roles[0] in r.output, error() + @TestOutline -def map_role(self, role_name, ldap_server, ldap_user, rbac_role_name=None, role_mappings=None): - """Check that we can map a role with a given name. - """ +def map_role( + self, role_name, ldap_server, ldap_user, rbac_role_name=None, role_mappings=None +): + """Check that we can map a role with a given name.""" if role_mappings is None: role_mappings = [ { "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] @@ -184,45 +212,46 @@ def map_role(self, role_name, ldap_server, ldap_user, rbac_role_name=None, role_ roles = add_rbac_roles(roles=(f"'{rbac_role_name}'",)) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as an LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])]) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + ) with Then("I expect the user to have mapped LDAP role"): with By(f"checking that the role is assigned", description=f"{role_name}"): assert roles[0].strip("'") in r.output, error() + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_WithUTF8Characters("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_WithUTF8Characters("1.0")) def role_name_with_utf8_characters(self, ldap_server, ldap_user): - """Check that we can map a role that contains UTF8 characters. 
- """ + """Check that we can map a role that contains UTF8 characters.""" uid = getuid() role_name = f"role_{uid}_Gãńdåłf_Thê_Gręât" map_role(role_name=role_name, ldap_server=ldap_server, ldap_user=ldap_user) + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_Long("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_Long("1.0")) def role_name_with_more_than_128_characters(self, ldap_server, ldap_user): - """Check that we can map a role that contains more than 128 characters. - """ + """Check that we can map a role that contains more than 128 characters.""" uid = getuid() role_name = f"role_{uid}_{'r'*128}" map_role(role_name=role_name, ldap_server=ldap_server, ldap_user=ldap_user) + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_WithSpecialXMLCharacters("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_Map_Role_Name_WithSpecialXMLCharacters("1.0")) def role_name_with_special_xml_characters(self, ldap_server, ldap_user): """Check that we can map a role that contains special XML characters that must be escaped. @@ -231,7 +260,13 @@ def role_name_with_special_xml_characters(self, ldap_server, ldap_user): role_name = f"role_{uid}_\\<\\>" rbac_role_name = f"role_{uid}_<>" - map_role(role_name=role_name, ldap_server=ldap_server, ldap_user=ldap_user, rbac_role_name=rbac_role_name) + map_role( + role_name=role_name, + ldap_server=ldap_server, + ldap_user=ldap_user, + rbac_role_name=rbac_role_name, + ) + @TestScenario @Requirements( @@ -245,22 +280,37 @@ def role_name_with_special_regex_characters(self, ldap_server, ldap_user): role_name = f"role_{uid}_\\+.?$" rbac_role_name = f"role_{uid}_+.?$" - map_role(role_name=role_name, ldap_server=ldap_server, ldap_user=ldap_user, rbac_role_name=rbac_role_name) + map_role( + role_name=role_name, + ldap_server=ldap_server, + ldap_user=ldap_user, + rbac_role_name=rbac_role_name, + ) + @TestOutline -def map_groups_with_prefixes(self, prefixes, group_names, role_names, - expected, not_expected, ldap_server, ldap_user): - """Check that we can map multiple groups to roles whith one or more prefixes. 
- """ +def map_groups_with_prefixes( + self, + prefixes, + group_names, + role_names, + expected, + not_expected, + ldap_server, + ldap_user, +): + """Check that we can map multiple groups to roles whith one or more prefixes.""" role_mappings = [] for prefix in prefixes: - role_mappings.append({ - "base_dn": "ou=groups,dc=company,dc=com", - "attribute": "cn", - "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": prefix - }) + role_mappings.append( + { + "base_dn": "ou=groups,dc=company,dc=com", + "attribute": "cn", + "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", + "prefix": prefix, + } + ) with Given("I add LDAP group"): groups = add_ldap_groups(groups=({"cn": name} for name in group_names)) @@ -273,27 +323,40 @@ def map_groups_with_prefixes(self, prefixes, group_names, role_names, roles = add_rbac_roles(roles=(f"'{name}'" for name in role_names)) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as an LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])]) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + ) with Then("I expect the user to have mapped roles"): - with By(f"checking that the roles are assigned", description=f"{', '.join(expected)}"): + with By( + f"checking that the roles are assigned", + description=f"{', '.join(expected)}", + ): for name in expected: assert name in r.output, error() with And("I expect the user not to have mapped roles"): - with By(f"checking that the roles are not assigned", description=f"{', '.join(not_expected)}"): + with By( + f"checking that the roles are not assigned", + description=f"{', '.join(not_expected)}", + ): for name in not_expected: assert name not in r.output, error() + @TestScenario @Requirements( RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Syntax("1.0"), - RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix("1.0") + RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix("1.0"), ) def prefix_non_empty(self, ldap_server, ldap_user): """Check that only group names with specified prefix are mapped to roles @@ -302,31 +365,34 @@ def prefix_non_empty(self, ldap_server, ldap_user): uid = getuid() with Given("I define group names"): - group_names=[ - f"clickhouse_role_{uid}", - f"role0_{uid}" - ] + group_names = [f"clickhouse_role_{uid}", f"role0_{uid}"] with And("I define role names"): - role_names=[ - f"role_{uid}", - f"role0_{uid}" - ] + role_names = [f"role_{uid}", f"role0_{uid}"] with And("I define group prefixes to be mapped"): prefixes = ["clickhouse_"] with And("I define the expected mapped and not mapped roles"): - expected=[f"role_{uid}"] - not_expected=[f"role0_{uid}"] + expected = [f"role_{uid}"] + not_expected = [f"role0_{uid}"] + + map_groups_with_prefixes( + ldap_server=ldap_server, + ldap_user=ldap_user, + prefixes=prefixes, + group_names=group_names, + role_names=role_names, + expected=expected, + not_expected=not_expected, + ) - map_groups_with_prefixes(ldap_server=ldap_server, ldap_user=ldap_user, - prefixes=prefixes, group_names=group_names, role_names=role_names, - 
expected=expected, not_expected=not_expected) @TestScenario @Requirements( - RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_Default("1.0") + RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_Default( + "1.0" + ) ) def prefix_default_value(self, ldap_server, ldap_user): """Check that when prefix is not specified the default value of prefix @@ -343,108 +409,124 @@ def prefix_default_value(self, ldap_server, ldap_user): } ] - map_role(role_name=role_name, ldap_server=ldap_server, ldap_user=ldap_user, role_mappings=role_mappings) + map_role( + role_name=role_name, + ldap_server=ldap_server, + ldap_user=ldap_user, + role_mappings=role_mappings, + ) + @TestScenario @Requirements( - RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithUTF8Characters("1.0") + RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithUTF8Characters( + "1.0" + ) ) def prefix_with_utf8_characters(self, ldap_server, ldap_user): - """Check that we can map a role when prefix contains UTF8 characters. - """ + """Check that we can map a role when prefix contains UTF8 characters.""" uid = getuid() with Given("I define group names"): - group_names=[ - f"Gãńdåłf_Thê_Gręât_role_{uid}", - f"role0_{uid}" - ] + group_names = [f"Gãńdåłf_Thê_Gręât_role_{uid}", f"role0_{uid}"] with And("I define role names"): - role_names=[ - f"role_{uid}", - f"role0_{uid}" - ] + role_names = [f"role_{uid}", f"role0_{uid}"] with And("I define group prefixes to be mapped"): prefixes = ["Gãńdåłf_Thê_Gręât_"] with And("I define the expected mapped and not mapped roles"): - expected=[f"role_{uid}"] - not_expected=[f"role0_{uid}"] + expected = [f"role_{uid}"] + not_expected = [f"role0_{uid}"] + + map_groups_with_prefixes( + ldap_server=ldap_server, + ldap_user=ldap_user, + prefixes=prefixes, + group_names=group_names, + role_names=role_names, + expected=expected, + not_expected=not_expected, + ) - map_groups_with_prefixes(ldap_server=ldap_server, ldap_user=ldap_user, - prefixes=prefixes, group_names=group_names, role_names=role_names, - expected=expected, not_expected=not_expected) @TestScenario @Requirements( - RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_SpecialCharactersEscaping("1.0"), - RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialXMLCharacters("1.0") + RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_SpecialCharactersEscaping( + "1.0" + ), + RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialXMLCharacters( + "1.0" + ), ) def prefix_with_special_xml_characters(self, ldap_server, ldap_user): - """Check that we can map a role when prefix contains special XML characters. 
- """ + """Check that we can map a role when prefix contains special XML characters.""" uid = getuid() with Given("I define group names"): - group_names=[ - f"clickhouse\\<\\>_role_{uid}", - f"role0_{uid}" - ] + group_names = [f"clickhouse\\<\\>_role_{uid}", f"role0_{uid}"] with And("I define role names"): - role_names=[ - f"role_{uid}", - f"role0_{uid}" - ] + role_names = [f"role_{uid}", f"role0_{uid}"] with And("I define group prefixes to be mapped"): prefixes = ["clickhouse<>_"] with And("I define the expected mapped and not mapped roles"): - expected=[f"role_{uid}"] - not_expected=[f"role0_{uid}"] + expected = [f"role_{uid}"] + not_expected = [f"role0_{uid}"] + + map_groups_with_prefixes( + ldap_server=ldap_server, + ldap_user=ldap_user, + prefixes=prefixes, + group_names=group_names, + role_names=role_names, + expected=expected, + not_expected=not_expected, + ) - map_groups_with_prefixes(ldap_server=ldap_server, ldap_user=ldap_user, - prefixes=prefixes, group_names=group_names, role_names=role_names, - expected=expected, not_expected=not_expected) @TestScenario @Requirements( - RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialRegexCharacters("1.0") + RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialRegexCharacters( + "1.0" + ) ) def prefix_with_special_regex_characters(self, ldap_server, ldap_user): - """Check that we can map a role when prefix contains special regex characters. - """ + """Check that we can map a role when prefix contains special regex characters.""" uid = getuid() with Given("I define group names"): - group_names=[ - f"clickhouse\\+.?\\$_role_{uid}", - f"role0_{uid}" - ] + group_names = [f"clickhouse\\+.?\\$_role_{uid}", f"role0_{uid}"] with And("I define role names"): - role_names=[ - f"role_{uid}", - f"role0_{uid}" - ] + role_names = [f"role_{uid}", f"role0_{uid}"] with And("I define group prefixes to be mapped"): prefixes = ["clickhouse+.?\\$_"] with And("I define the expected mapped and not mapped roles"): - expected=[f"role_{uid}"] - not_expected=[f"role0_{uid}"] + expected = [f"role_{uid}"] + not_expected = [f"role0_{uid}"] + + map_groups_with_prefixes( + ldap_server=ldap_server, + ldap_user=ldap_user, + prefixes=prefixes, + group_names=group_names, + role_names=role_names, + expected=expected, + not_expected=not_expected, + ) - map_groups_with_prefixes(ldap_server=ldap_server, ldap_user=ldap_user, - prefixes=prefixes, group_names=group_names, role_names=role_names, - expected=expected, not_expected=not_expected) @TestScenario @Requirements( - RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_MultipleSections("1.0") + RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_MultipleSections( + "1.0" + ) ) def multiple_sections_with_different_prefixes(self, ldap_server, ldap_user): """Check that we can map multiple roles with multiple role mapping sections @@ -453,34 +535,35 @@ def multiple_sections_with_different_prefixes(self, ldap_server, ldap_user): uid = getuid() with Given("I define group names"): - group_names=[ + group_names = [ f"clickhouse0_role0_{uid}", f"clickhouse1_role1_{uid}", - f"role2_{uid}" + f"role2_{uid}", ] with And("I define role names"): - role_names=[ - f"role0_{uid}", - f"role1_{uid}", - f"role2_{uid}" - ] + role_names = [f"role0_{uid}", f"role1_{uid}", f"role2_{uid}"] with And("I define group prefixes to be mapped"): prefixes = ["clickhouse0_", "clickhouse1_"] with And("I define the expected mapped and not mapped roles"): - 
expected=[f"role0_{uid}", f"role1_{uid}"] - not_expected=[f"role2_{uid}"] + expected = [f"role0_{uid}", f"role1_{uid}"] + not_expected = [f"role2_{uid}"] + + map_groups_with_prefixes( + ldap_server=ldap_server, + ldap_user=ldap_user, + prefixes=prefixes, + group_names=group_names, + role_names=role_names, + expected=expected, + not_expected=not_expected, + ) - map_groups_with_prefixes(ldap_server=ldap_server, ldap_user=ldap_user, - prefixes=prefixes, group_names=group_names, role_names=role_names, - expected=expected, not_expected=not_expected) @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_Removed("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_Removed("1.0")) def group_removed(self, ldap_server, ldap_user): """Check that roles are not mapped after the corresponding LDAP group is removed. @@ -493,7 +576,7 @@ def group_removed(self, ldap_server, ldap_user): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] @@ -508,12 +591,18 @@ def group_removed(self, ldap_server, ldap_user): roles = add_rbac_roles(roles=(f"{role_name}",)) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as an LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])]) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + ) with Then("I expect the user to have mapped LDAP role"): with By(f"checking that the role is assigned", description=f"{role_name}"): @@ -523,17 +612,21 @@ def group_removed(self, ldap_server, ldap_user): delete_group_from_ldap(group) with When(f"I login as an LDAP user after LDAP group is removed"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])]) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + ) with Then("I expect the user not to have mapped LDAP role"): with By(f"checking that the role is not assigned", description=f"{role_name}"): assert role_name not in r.output, error() + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_UserRemoved("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_UserRemoved("1.0")) def user_removed_from_group(self, ldap_server, ldap_user): """Check that roles are not mapped after the user has been removed from the corresponding LDAP group. 
@@ -546,7 +639,7 @@ def user_removed_from_group(self, ldap_server, ldap_user): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] @@ -560,12 +653,18 @@ def user_removed_from_group(self, ldap_server, ldap_user): roles = add_rbac_roles(roles=(f"{role_name}",)) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as an LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])]) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + ) with Then("I expect the user to have mapped LDAP role"): with By(f"checking that the role is assigned", description=f"{role_name}"): @@ -575,17 +674,21 @@ def user_removed_from_group(self, ldap_server, ldap_user): delete_user_from_group_in_ldap(user=ldap_user, group=groups[0]) with And(f"I login as an LDAP user after user has been removed from the group"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])]) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + ) with Then("I expect the user not to have mapped LDAP role"): with By(f"checking that the role is not assigned", description=f"{role_name}"): assert role_name not in r.output, error() + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_NotPresent("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_NotPresent("1.0")) def role_not_present(self, ldap_server, ldap_user): """Check that LDAP users can still be authenticated even if the mapped role is not present. 
@@ -598,7 +701,7 @@ def role_not_present(self, ldap_server, ldap_user): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] @@ -609,12 +712,19 @@ def role_not_present(self, ldap_server, ldap_user): add_user_to_group_in_ldap(user=ldap_user, group=groups[0]) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as an LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])], no_checks=True) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + no_checks=True, + ) with Then("I expect the login to succeed"): assert r.exitcode == 0, error() @@ -622,10 +732,9 @@ def role_not_present(self, ldap_server, ldap_user): with And("the user not to have any mapped LDAP role"): assert r.output == "", error() + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_NotPresent("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_NotPresent("1.0")) def add_new_role_not_present(self, ldap_server, ldap_user): """Check that LDAP user can still authenticate when the LDAP user is added to a new LDAP group that does not match any existing @@ -639,7 +748,7 @@ def add_new_role_not_present(self, ldap_server, ldap_user): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "clickhouse_" + "prefix": "clickhouse_", } ] @@ -653,12 +762,19 @@ def add_new_role_not_present(self, ldap_server, ldap_user): roles = add_rbac_roles(roles=(f"{role_name}",)) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as an LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])], no_checks=True) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + no_checks=True, + ) with Then("I expect the login to succeed"): assert r.exitcode == 0, error() @@ -667,14 +783,22 @@ def add_new_role_not_present(self, ldap_server, ldap_user): assert f"{role_name}" in r.output, error() with When("I add LDAP group that maps to unknown role"): - unknown_groups = add_ldap_groups(groups=({"cn": "clickhouse_" + role_name + "_unknown"},)) + unknown_groups = add_ldap_groups( + groups=({"cn": "clickhouse_" + role_name + "_unknown"},) + ) with And("I add LDAP user to the group that maps to unknown role"): add_user_to_group_in_ldap(user=ldap_user, group=unknown_groups[0]) with And(f"I again login as an LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])], no_checks=True) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + no_checks=True, + ) with 
Then("I expect the login to succeed"): assert r.exitcode == 0, error() @@ -685,9 +809,17 @@ def add_new_role_not_present(self, ldap_server, ldap_user): with When("I add matching previously unknown RBAC role"): unknown_roles = add_rbac_roles(roles=(f"{role_name}_unknown",)) - with And(f"I again login as an LDAP user after previously unknown RBAC role has been added"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", ldap_user["username"]), ("password", ldap_user["password"])], no_checks=True) + with And( + f"I again login as an LDAP user after previously unknown RBAC role has been added" + ): + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + no_checks=True, + ) with Then("I expect the login to succeed"): assert r.exitcode == 0, error() @@ -698,10 +830,11 @@ def add_new_role_not_present(self, ldap_server, ldap_user): with And("the user should have the previously unknown mapped LDAP role"): assert f"{role_name}_unknown" in r.output, error() + @TestScenario @Requirements( RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_Removed("1.0"), - RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_Readded("1.0") + RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_Readded("1.0"), ) def role_removed_and_readded(self, ldap_server, ldap_user): """Check that when a mapped role is removed the privileges provided by the role @@ -716,7 +849,7 @@ def role_removed_and_readded(self, ldap_server, ldap_user): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] with Given("I add LDAP group"): @@ -729,21 +862,26 @@ def role_removed_and_readded(self, ldap_server, ldap_user): roles = add_rbac_roles(roles=(f"{role_name}",)) with And("I create a table for which the role will provide privilege"): - table_name = create_table(name=f"table_{uid}", - create_statement="CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") + table_name = create_table( + name=f"table_{uid}", + create_statement="CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()", + ) with And("I grant select privilege on the table to the role"): self.context.node.query(f"GRANT SELECT ON {table_name} TO {role_name}") with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as LDAP user using clickhouse-client"): with self.context.cluster.shell(node=self.context.node.name) as shell: with shell( - f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}", - asynchronous=True, name="client") as client: + f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}", + asynchronous=True, + name="client", + ) as client: client.app.expect("clickhouse1 :\) ") with When("I execute SHOW GRANTS"): @@ -767,12 +905,16 @@ def role_removed_and_readded(self, ldap_server, ldap_user): client.app.send(f"SELECT * FROM {table_name} LIMIT 1") with Then("I expect to get not enough privileges error"): - client.app.expect(f"DB::Exception: {ldap_user['username']}: Not enough privileges.") + client.app.expect( + f"DB::Exception: {ldap_user['username']}: Not enough privileges." 
+ ) client.app.expect("clickhouse1 :\) ") with When("I add the role that grant the privilege back"): self.context.node.query(f"CREATE ROLE {role_name}") - self.context.node.query(f"GRANT SELECT ON {table_name} TO {role_name}") + self.context.node.query( + f"GRANT SELECT ON {table_name} TO {role_name}" + ) with And("I execute select on the table after role is added back"): client.app.send(f"SELECT * FROM {table_name} LIMIT 1") @@ -781,10 +923,11 @@ def role_removed_and_readded(self, ldap_server, ldap_user): client.app.expect("Ok\.") client.app.expect("clickhouse1 :\) ") + @TestScenario @Requirements( RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_NewPrivilege("1.0"), - RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_RemovedPrivilege("1.0") + RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_RemovedPrivilege("1.0"), ) def privilege_new_and_removed(self, ldap_server, ldap_user): """Check that when a new privilege is added to the mapped role @@ -800,7 +943,7 @@ def privilege_new_and_removed(self, ldap_server, ldap_user): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] with Given("I add LDAP group"): @@ -813,18 +956,23 @@ def privilege_new_and_removed(self, ldap_server, ldap_user): roles = add_rbac_roles(roles=(f"{role_name}",)) with And("I create a table for which the role will provide privilege"): - table_name = create_table(name=f"table_{uid}", - create_statement="CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") + table_name = create_table( + name=f"table_{uid}", + create_statement="CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()", + ) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as LDAP user using clickhouse-client"): with self.context.cluster.shell(node=self.context.node.name) as shell: with shell( - f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}", - asynchronous=True, name="client") as client: + f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}", + asynchronous=True, + name="client", + ) as client: client.app.expect("clickhouse1 :\) ") with When("I execute SHOW GRANTS"): @@ -834,15 +982,21 @@ def privilege_new_and_removed(self, ldap_server, ldap_user): client.app.expect(f"{role_name}") client.app.expect("clickhouse1 :\) ") - with And("I execute select on the table when the mapped role does not provide this privilege"): + with And( + "I execute select on the table when the mapped role does not provide this privilege" + ): client.app.send(f"SELECT * FROM {table_name} LIMIT 1") with Then("I expect to get not enough privileges error"): - client.app.expect(f"DB::Exception: {ldap_user['username']}: Not enough privileges.") + client.app.expect( + f"DB::Exception: {ldap_user['username']}: Not enough privileges." 
+ ) client.app.expect("clickhouse1 :\) ") with When("I grant select privilege on the table to the mapped role"): - self.context.node.query(f"GRANT SELECT ON {table_name} TO {role_name}") + self.context.node.query( + f"GRANT SELECT ON {table_name} TO {role_name}" + ) with And("I execute select on the table"): client.app.send(f"SELECT * FROM {table_name} LIMIT 1") @@ -852,19 +1006,22 @@ def privilege_new_and_removed(self, ldap_server, ldap_user): client.app.expect("clickhouse1 :\) ") with When("I remove the privilege from the mapped role"): - self.context.node.query(f"REVOKE SELECT ON {table_name} FROM {role_name}") + self.context.node.query( + f"REVOKE SELECT ON {table_name} FROM {role_name}" + ) with And("I re-execute select on the table"): client.app.send(f"SELECT * FROM {table_name} LIMIT 1") with Then("I expect to get not enough privileges error"): - client.app.expect(f"DB::Exception: {ldap_user['username']}: Not enough privileges.") + client.app.expect( + f"DB::Exception: {ldap_user['username']}: Not enough privileges." + ) client.app.expect("clickhouse1 :\) ") + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_Added("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_Added("1.0")) def role_added(self, ldap_server, ldap_user): """Check that when the mapped role is not present during LDAP user authentication but is later added then the authenticated LDAP users is granted the privileges provided @@ -878,7 +1035,7 @@ def role_added(self, ldap_server, ldap_user): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] with Given("I add LDAP group"): @@ -888,18 +1045,23 @@ def role_added(self, ldap_server, ldap_user): add_user_to_group_in_ldap(user=ldap_user, group=groups[0]) with And("I create a table for which the role will provide privilege"): - table_name = create_table(name=f"table_{uid}", - create_statement="CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()") + table_name = create_table( + name=f"table_{uid}", + create_statement="CREATE TABLE {name} (d DATE, s String, i UInt8) ENGINE = Memory()", + ) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as LDAP user using clickhouse-client"): with self.context.cluster.shell(node=self.context.node.name) as shell: with shell( - f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}", - asynchronous=True, name="client") as client: + f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}", + asynchronous=True, + name="client", + ) as client: client.app.expect("clickhouse1 :\) ") with When("I execute SHOW GRANTS"): @@ -913,12 +1075,16 @@ def role_added(self, ldap_server, ldap_user): client.app.send(f"SELECT * FROM {table_name} LIMIT 1") with Then("I expect to get not enough privileges error"): - client.app.expect(f"DB::Exception: {ldap_user['username']}: Not enough privileges.") + client.app.expect( + f"DB::Exception: {ldap_user['username']}: Not enough privileges." 
+ ) client.app.expect("clickhouse1 :\) ") with When("I add the role that grant the privilege"): self.context.node.query(f"CREATE ROLE {role_name}") - self.context.node.query(f"GRANT SELECT ON {table_name} TO {role_name}") + self.context.node.query( + f"GRANT SELECT ON {table_name} TO {role_name}" + ) with And("I execute select on the table after role is added"): client.app.send(f"SELECT * FROM {table_name} LIMIT 1") @@ -927,13 +1093,11 @@ def role_added(self, ldap_server, ldap_user): client.app.expect("Ok\.") client.app.expect("clickhouse1 :\) ") + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_New("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_New("1.0")) def role_new(self, ldap_server, ldap_user): - """Check that no new roles can be granted to LDAP authenticated users. - """ + """Check that no new roles can be granted to LDAP authenticated users.""" uid = getuid() role_name = f"role_{uid}" @@ -942,7 +1106,7 @@ def role_new(self, ldap_server, ldap_user): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] @@ -953,23 +1117,32 @@ def role_new(self, ldap_server, ldap_user): roles = add_rbac_roles(roles=(f"{role_name}",)) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as LDAP user using clickhouse-client"): with self.context.cluster.shell(node=self.context.node.name) as shell: with shell( - f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}", - asynchronous=True, name="client") as client: + f"TERM=dumb clickhouse client --user {ldap_user['username']} --password {ldap_user['password']}", + asynchronous=True, + name="client", + ) as client: client.app.expect("clickhouse1 :\) ") with When("I try to grant new role to user"): - self.context.node.query(f"GRANT {role_name} TO {ldap_user['username']}", - message=message, exitcode=exitcode) + self.context.node.query( + f"GRANT {role_name} TO {ldap_user['username']}", + message=message, + exitcode=exitcode, + ) + @TestScenario @Requirements( - RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_MultipleSections_IdenticalParameters("1.0") + RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_MultipleSections_IdenticalParameters( + "1.0" + ) ) def multiple_sections_with_identical_parameters(self, ldap_server, ldap_user): """Check behaviour when multiple role mapping sections @@ -983,7 +1156,7 @@ def multiple_sections_with_identical_parameters(self, ldap_server, ldap_user): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] * 4 @@ -997,22 +1170,29 @@ def multiple_sections_with_identical_parameters(self, ldap_server, ldap_user): roles = add_rbac_roles(roles=(f"{role_name}",)) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) with When(f"I login as an LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", 
ldap_user["username"]), ("password", ldap_user["password"])]) + r = self.context.node.query( + f"SHOW GRANTS", + settings=[ + ("user", ldap_user["username"]), + ("password", ldap_user["password"]), + ], + ) with Then("I expect the user to have mapped LDAP role"): with By(f"checking that the role is assigned", description=f"{role_name}"): assert roles[0].strip("'") in r.output, error() + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_RemovedAndAdded_Parallel("1.0") -) -def group_removed_and_added_in_parallel(self, ldap_server, ldap_user, count=20, timeout=200): +@Requirements(RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_RemovedAndAdded_Parallel("1.0")) +def group_removed_and_added_in_parallel( + self, ldap_server, ldap_user, count=20, timeout=200 +): """Check that user can be authenticated successfully when LDAP groups are removed and added in parallel. """ @@ -1026,7 +1206,7 @@ def group_removed_and_added_in_parallel(self, ldap_server, ldap_user, count=20, "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] @@ -1043,17 +1223,48 @@ def group_removed_and_added_in_parallel(self, ldap_server, ldap_user, count=20, add_rbac_roles(roles=role_names) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) tasks = [] with Pool(4) as pool: try: - with When("user try to login while LDAP groups are added and removed in parallel"): + with When( + "user try to login while LDAP groups are added and removed in parallel" + ): for i in range(10): - tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,))) - tasks.append(pool.submit(remove_ldap_groups_in_parallel, (groups, i, 10,))) - tasks.append(pool.submit(add_ldap_groups_in_parallel,(ldap_user, role_names, i, 10,))) + tasks.append( + pool.submit( + login_with_valid_username_and_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + remove_ldap_groups_in_parallel, + ( + groups, + i, + 10, + ), + ) + ) + tasks.append( + pool.submit( + add_ldap_groups_in_parallel, + ( + ldap_user, + role_names, + i, + 10, + ), + ) + ) finally: with Finally("it should work", flags=TE): for task in tasks: @@ -1063,11 +1274,14 @@ def group_removed_and_added_in_parallel(self, ldap_server, ldap_user, count=20, for group in groups: delete_group_from_ldap(group, exitcode=None) + @TestScenario @Requirements( RQ_SRS_014_LDAP_RoleMapping_LDAP_Group_UserRemovedAndAdded_Parallel("1.0") ) -def user_removed_and_added_in_ldap_groups_in_parallel(self, ldap_server, ldap_user, count=20, timeout=200): +def user_removed_and_added_in_ldap_groups_in_parallel( + self, ldap_server, ldap_user, count=20, timeout=200 +): """Check that user can be authenticated successfully when it is removed and added from mapping LDAP groups in parallel. 
""" @@ -1081,7 +1295,7 @@ def user_removed_and_added_in_ldap_groups_in_parallel(self, ldap_server, ldap_us "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] @@ -1096,27 +1310,60 @@ def user_removed_and_added_in_ldap_groups_in_parallel(self, ldap_server, ldap_us add_rbac_roles(roles=role_names) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) tasks = [] with Pool(4) as pool: try: - with When("user try to login while user is added and removed from LDAP groups in parallel"): + with When( + "user try to login while user is added and removed from LDAP groups in parallel" + ): for i in range(10): - tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,))) - tasks.append(pool.submit(remove_user_from_ldap_groups_in_parallel, (ldap_user, groups, i, 1,))) - tasks.append(pool.submit(add_user_to_ldap_groups_in_parallel, (ldap_user, groups, i, 1,))) + tasks.append( + pool.submit( + login_with_valid_username_and_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + remove_user_from_ldap_groups_in_parallel, + ( + ldap_user, + groups, + i, + 1, + ), + ) + ) + tasks.append( + pool.submit( + add_user_to_ldap_groups_in_parallel, + ( + ldap_user, + groups, + i, + 1, + ), + ) + ) finally: with Finally("it should work", flags=TE): for task in tasks: task.result(timeout=timeout) + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_RemovedAndAdded_Parallel("1.0") -) -def roles_removed_and_added_in_parallel(self, ldap_server, ldap_user, count=20, timeout=200): +@Requirements(RQ_SRS_014_LDAP_RoleMapping_RBAC_Role_RemovedAndAdded_Parallel("1.0")) +def roles_removed_and_added_in_parallel( + self, ldap_server, ldap_user, count=20, timeout=200 +): """Check that user can be authenticated successfully when roles that are mapped by the LDAP groups are removed and added in parallel. 
""" @@ -1130,7 +1377,7 @@ def roles_removed_and_added_in_parallel(self, ldap_server, ldap_user, count=20, "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "" + "prefix": "", } ] @@ -1146,17 +1393,47 @@ def roles_removed_and_added_in_parallel(self, ldap_server, ldap_user, count=20, add_rbac_roles(roles=role_names) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) tasks = [] with Pool(4) as pool: try: - with When("user try to login while mapped roles are added and removed in parallel"): + with When( + "user try to login while mapped roles are added and removed in parallel" + ): for i in range(10): - tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,))) - tasks.append(pool.submit(remove_roles_in_parallel, (role_names, i, 10,))) - tasks.append(pool.submit(add_roles_in_parallel, (role_names, i, 10,))) + tasks.append( + pool.submit( + login_with_valid_username_and_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + remove_roles_in_parallel, + ( + role_names, + i, + 10, + ), + ) + ) + tasks.append( + pool.submit( + add_roles_in_parallel, + ( + role_names, + i, + 10, + ), + ) + ) finally: with Finally("it should work", flags=TE): for task in tasks: @@ -1167,15 +1444,21 @@ def roles_removed_and_added_in_parallel(self, ldap_server, ldap_user, count=20, with By(f"dropping role {role_name}", flags=TE): self.context.node.query(f"DROP ROLE IF EXISTS {role_name}") + @TestOutline -def parallel_login(self, ldap_server, ldap_user, user_count=10, timeout=200, role_count=10): +def parallel_login( + self, ldap_server, ldap_user, user_count=10, timeout=200, role_count=10 +): """Check that login of valid and invalid LDAP authenticated users with mapped roles works in parallel. 
""" uid = getuid() role_names = [f"role{i}_{uid}" for i in range(role_count)] - users = [{"cn": f"parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)] + users = [ + {"cn": f"parallel_user{i}", "userpassword": randomword(20)} + for i in range(user_count) + ] groups = [{"cn": f"clickhouse_{role_name}"} for role_name in role_names] role_mappings = [ @@ -1183,7 +1466,7 @@ def parallel_login(self, ldap_server, ldap_user, user_count=10, timeout=200, rol "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "clickhouse_" + "prefix": "clickhouse_", } ] @@ -1202,89 +1485,171 @@ def parallel_login(self, ldap_server, ldap_user, user_count=10, timeout=200, rol add_rbac_roles(roles=role_names) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=role_mappings, restart=True + ) tasks = [] with Pool(4) as pool: try: - with When("users try to login in parallel", description=""" + with When( + "users try to login in parallel", + description=""" * with valid username and password * with invalid username and valid password * with valid username and invalid password - """): + """, + ): for i in range(10): - tasks.append(pool.submit(login_with_valid_username_and_password, (users, i, 50,))) - tasks.append(pool.submit(login_with_valid_username_and_invalid_password, (users, i, 50,))) - tasks.append(pool.submit(login_with_invalid_username_and_valid_password, (users, i, 50,))) + tasks.append( + pool.submit( + login_with_valid_username_and_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_valid_username_and_invalid_password, + ( + users, + i, + 50, + ), + ) + ) + tasks.append( + pool.submit( + login_with_invalid_username_and_valid_password, + ( + users, + i, + 50, + ), + ) + ) finally: with Then("it should work"): for task in tasks: task.result(timeout=timeout) + @TestScenario @Requirements( RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel("1.0"), - RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0") + RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0"), ) -def parallel_login_of_multiple_users(self, ldap_server, ldap_user, timeout=200, role_count=10): +def parallel_login_of_multiple_users( + self, ldap_server, ldap_user, timeout=200, role_count=10 +): """Check that valid and invalid logins of multiple LDAP authenticated users with mapped roles works in parallel. """ - parallel_login(user_count=10, ldap_user=ldap_user,ldap_server=ldap_server, - timeout=timeout, role_count=role_count) + parallel_login( + user_count=10, + ldap_user=ldap_user, + ldap_server=ldap_server, + timeout=timeout, + role_count=role_count, + ) + @TestScenario @Requirements( RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_SameUser("1.0"), - RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0") + RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0"), ) -def parallel_login_of_the_same_user(self, ldap_server, ldap_user, timeout=200, role_count=10): +def parallel_login_of_the_same_user( + self, ldap_server, ldap_user, timeout=200, role_count=10 +): """Check that valid and invalid logins of the same LDAP authenticated user with mapped roles works in parallel. 
""" - parallel_login(user_count=10, ldap_user=ldap_user,ldap_server=ldap_server, - timeout=timeout, role_count=role_count) + parallel_login( + user_count=10, + ldap_user=ldap_user, + ldap_server=ldap_server, + timeout=timeout, + role_count=role_count, + ) + @TestScenario @Requirements( RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_MultipleServers("1.0"), - RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0") + RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0"), ) -def parallel_login_of_ldap_users_with_multiple_servers(self, ldap_server, ldap_user, timeout=200): +def parallel_login_of_ldap_users_with_multiple_servers( + self, ldap_server, ldap_user, timeout=200 +): """Check that valid and invalid logins of multiple LDAP users that have mapped roles works in parallel using multiple LDAP external user directories. """ - parallel_login_with_multiple_servers(ldap_server=ldap_server, ldap_user=ldap_user, - user_count=10, role_count=10,timeout=timeout, with_ldap_users=True, with_local_users=False) + parallel_login_with_multiple_servers( + ldap_server=ldap_server, + ldap_user=ldap_user, + user_count=10, + role_count=10, + timeout=timeout, + with_ldap_users=True, + with_local_users=False, + ) + @TestScenario @Requirements( RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_LocalAndMultipleLDAP("1.0"), - RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0") + RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_ValidAndInvalid("1.0"), ) -def parallel_login_of_local_and_ldap_users_with_multiple_servers(self, ldap_server, ldap_user, timeout=200): +def parallel_login_of_local_and_ldap_users_with_multiple_servers( + self, ldap_server, ldap_user, timeout=200 +): """Check that valid and invalid logins of local users and LDAP users that have mapped roles works in parallel using multiple LDAP external user directories. """ - parallel_login_with_multiple_servers(ldap_server=ldap_server, ldap_user=ldap_user, - user_count=10, role_count=10, timeout=timeout, with_local_users=True, with_ldap_users=True) + parallel_login_with_multiple_servers( + ldap_server=ldap_server, + ldap_user=ldap_user, + user_count=10, + role_count=10, + timeout=timeout, + with_local_users=True, + with_ldap_users=True, + ) + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_LocalOnly("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_Authentication_Parallel_LocalOnly("1.0")) def parallel_login_of_local_users(self, ldap_server, ldap_user, timeout=200): """Check that valid and invalid logins of local users works in parallel when multiple LDAP external user directories with role mapping are configured. 
""" - parallel_login_with_multiple_servers(ldap_server=ldap_server, ldap_user=ldap_user, - user_count=10, role_count=10, timeout=timeout, with_local_users=True, with_ldap_users=False) + parallel_login_with_multiple_servers( + ldap_server=ldap_server, + ldap_user=ldap_user, + user_count=10, + role_count=10, + timeout=timeout, + with_local_users=True, + with_ldap_users=False, + ) + @TestOutline -def parallel_login_with_multiple_servers(self, ldap_server, ldap_user, user_count=10, - role_count=10, timeout=200, with_ldap_users=True, with_local_users=False): +def parallel_login_with_multiple_servers( + self, + ldap_server, + ldap_user, + user_count=10, + role_count=10, + timeout=200, + with_ldap_users=True, + with_local_users=False, +): """Check that login of valid and invalid local users or LDAP users that have mapped roles works in parallel using multiple LDAP external user directories. """ @@ -1302,50 +1667,69 @@ def parallel_login_with_multiple_servers(self, ldap_server, ldap_user, user_coun if with_ldap_users: with And("I define a group of users to be created on each LDAP server"): user_groups["openldap1_users"] = [ - {"cn": f"openldap1_parallel_user{i}_{uid}", "userpassword": randomword(20)} for i in range(user_count) + { + "cn": f"openldap1_parallel_user{i}_{uid}", + "userpassword": randomword(20), + } + for i in range(user_count) ] user_groups["openldap2_users"] = [ - {"cn": f"openldap2_parallel_user{i}_{uid}", "userpassword": randomword(20)} for i in range(user_count) + { + "cn": f"openldap2_parallel_user{i}_{uid}", + "userpassword": randomword(20), + } + for i in range(user_count) ] if with_local_users: with And("I define a group of local users to be created"): user_groups["local_users"] = [ - {"cn": f"local_parallel_user{i}_{uid}", "userpassword": randomword(20)} for i in range(user_count) + {"cn": f"local_parallel_user{i}_{uid}", "userpassword": randomword(20)} + for i in range(user_count) ] with And("I have a list of checks that I want to run for each user group"): checks = [ login_with_valid_username_and_password, login_with_valid_username_and_invalid_password, - login_with_invalid_username_and_valid_password + login_with_invalid_username_and_valid_password, ] - with And("I create config file to define LDAP external user directory for each LDAP server"): + with And( + "I create config file to define LDAP external user directory for each LDAP server" + ): entries = { "user_directories": [ - {"ldap": [ - {"server": "openldap1"}, - {"role_mappings" : [ + { + "ldap": [ + {"server": "openldap1"}, { - "base_dn": "ou=groups,dc=company,dc=com", - "attribute": "cn", - "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "clickhouse_" - } - ]} - ]}, - {"ldap": [ - {"server": "openldap2"}, - {"role_mappings": [ + "role_mappings": [ + { + "base_dn": "ou=groups,dc=company,dc=com", + "attribute": "cn", + "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", + "prefix": "clickhouse_", + } + ] + }, + ] + }, + { + "ldap": [ + {"server": "openldap2"}, { - "base_dn": "ou=groups,dc=company,dc=com", - "attribute": "cn", - "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", - "prefix": "clickhouse_" - } - ]} - ]} + "role_mappings": [ + { + "base_dn": "ou=groups,dc=company,dc=com", + "attribute": "cn", + "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))", + "prefix": "clickhouse_", + } + ] + }, + ] + }, ] } config = create_entries_ldap_external_user_directory_config_content(entries) 
@@ -1355,24 +1739,40 @@ def parallel_login_with_multiple_servers(self, ldap_server, ldap_user, user_coun if with_ldap_users: with And("I add LDAP users to each LDAP server"): - openldap1_users = add_ldap_users(users=user_groups["openldap1_users"], node=cluster.node("openldap1")) - openldap2_users = add_ldap_users(users=user_groups["openldap2_users"], node=cluster.node("openldap2")) + openldap1_users = add_ldap_users( + users=user_groups["openldap1_users"], node=cluster.node("openldap1") + ) + openldap2_users = add_ldap_users( + users=user_groups["openldap2_users"], node=cluster.node("openldap2") + ) with And("I add all LDAP groups to each LDAP server"): - openldap1_groups = add_ldap_groups(groups=groups, node=cluster.node("openldap1")) - openldap2_groups = add_ldap_groups(groups=groups, node=cluster.node("openldap2")) + openldap1_groups = add_ldap_groups( + groups=groups, node=cluster.node("openldap1") + ) + openldap2_groups = add_ldap_groups( + groups=groups, node=cluster.node("openldap2") + ) with And("I add all users to LDAP groups on the first LDAP server"): for group in openldap1_groups: for user in openldap1_users: - with By(f"adding LDAP user {user['dn']} to the group {group['dn']}"): - add_user_to_group_in_ldap(user=user, group=group, node=cluster.node("openldap1")) + with By( + f"adding LDAP user {user['dn']} to the group {group['dn']}" + ): + add_user_to_group_in_ldap( + user=user, group=group, node=cluster.node("openldap1") + ) with And("I add all users to LDAP groups on the second LDAP server"): for group in openldap2_groups: for user in openldap2_users: - with By(f"adding LDAP user {user['dn']} to the group {group['dn']}"): - add_user_to_group_in_ldap(user=user, group=group, node=cluster.node("openldap2")) + with By( + f"adding LDAP user {user['dn']} to the group {group['dn']}" + ): + add_user_to_group_in_ldap( + user=user, group=group, node=cluster.node("openldap2") + ) with And("I add RBAC roles"): add_rbac_roles(roles=role_names) @@ -1389,28 +1789,38 @@ def parallel_login_with_multiple_servers(self, ldap_server, ldap_user, user_coun tasks = [] with Pool(4) as pool: try: - with When("users in each group try to login in parallel", description=""" + with When( + "users in each group try to login in parallel", + description=""" * with valid username and password * with invalid username and valid password * with valid username and invalid password - """): + """, + ): for i in range(10): for users in user_groups.values(): for check in checks: - tasks.append(pool.submit(check, (users, i, 50,))) + tasks.append( + pool.submit( + check, + ( + users, + i, + 50, + ), + ) + ) finally: with Then("it should work"): for task in tasks: task.result(timeout=timeout) + @TestFeature @Name("mapping") -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_Search("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_Search("1.0")) def feature(self): - """Check role LDAP role mapping. 
- """ + """Check role LDAP role mapping.""" self.context.node = self.context.cluster.node("clickhouse1") self.context.ldap_node = self.context.cluster.node("openldap1") @@ -1419,7 +1829,7 @@ def feature(self): "host": "openldap1", "port": "389", "enable_tls": "no", - "bind_dn": "cn={user_name},ou=users,dc=company,dc=com" + "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", }, "openldap2": { "host": "openldap2", @@ -1427,12 +1837,17 @@ def feature(self): "enable_tls": "yes", "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", "tls_require_cert": "never", - } + }, } users = [ - {"server": "openldap1", "username": "user1", "password": "user1", "login": True, - "dn": "cn=user1,ou=users,dc=company,dc=com"}, + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + "dn": "cn=user1,ou=users,dc=company,dc=com", + }, ] with Given("I fix LDAP access permissions"): diff --git a/tests/testflows/ldap/role_mapping/tests/server_config.py b/tests/testflows/ldap/role_mapping/tests/server_config.py index 8008d9003d7..b9d308d3833 100644 --- a/tests/testflows/ldap/role_mapping/tests/server_config.py +++ b/tests/testflows/ldap/role_mapping/tests/server_config.py @@ -6,47 +6,55 @@ from ldap.role_mapping.requirements import * from ldap.authentication.tests.common import invalid_server_config from ldap.external_user_directory.tests.common import login + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_BindDN("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_BindDN("1.0")) def valid_bind_dn(self): - """Check that LDAP users can login when `bind_dn` is valid. - """ + """Check that LDAP users can login when `bind_dn` is valid.""" servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "bind_dn": "cn={user_name},ou=users,dc=company,dc=com" + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", } } user = { - "server": "openldap1", "username": "user1", "password": "user1", "login": True, + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, } login(servers, "openldap1", user) + @TestScenario -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_BindDN("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_BindDN("1.0")) def invalid_bind_dn(self): - """Check that LDAP users can't login when `bind_dn` is invalid. - """ + """Check that LDAP users can't login when `bind_dn` is invalid.""" servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", - "bind_dn": "cn={user_name},ou=users,dc=company2,dc=com" - }} + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "bind_dn": "cn={user_name},ou=users,dc=company2,dc=com", + } + } user = { - "server": "openldap1", "username": "user1", "password": "user1", "login": True, + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, "exitcode": 4, - "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name." 
+ "message": "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name.", } login(servers, "openldap1", user) + @TestScenario @Requirements( RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_BindDN_ConflictWith_AuthDN("1.0") @@ -58,10 +66,12 @@ def bind_dn_conflict_with_auth_dn(self, timeout=60): message = "DB::Exception: Deprecated 'auth_dn_prefix' and 'auth_dn_suffix' entries cannot be used with 'bind_dn' entry" servers = { "openldap1": { - "host": "openldap1", "port": "389", "enable_tls": "no", + "host": "openldap1", + "port": "389", + "enable_tls": "no", "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", "auth_dn_prefix": "cn=", - "auth_dn_suffix": ",ou=users,dc=company,dc=com" + "auth_dn_suffix": ",ou=users,dc=company,dc=com", } } @@ -71,8 +81,7 @@ def bind_dn_conflict_with_auth_dn(self, timeout=60): @TestFeature @Name("server config") def feature(self, node="clickhouse1"): - """Check LDAP server configuration. - """ + """Check LDAP server configuration.""" self.context.node = self.context.cluster.node(node) for scenario in loads(current_module(), Scenario): scenario() diff --git a/tests/testflows/ldap/role_mapping/tests/user_dn_detection.py b/tests/testflows/ldap/role_mapping/tests/user_dn_detection.py index 147da8a5dcc..aa81f235108 100644 --- a/tests/testflows/ldap/role_mapping/tests/user_dn_detection.py +++ b/tests/testflows/ldap/role_mapping/tests/user_dn_detection.py @@ -7,10 +7,12 @@ from testflows.asserts import error from ldap.role_mapping.requirements import * from ldap.role_mapping.tests.common import * + @TestOutline -def check_config(self, entries, valid=True, ldap_server="openldap1", user="user1", password="user1"): - """Apply LDAP server configuration and check login. - """ +def check_config( + self, entries, valid=True, ldap_server="openldap1", user="user1", password="user1" +): + """Apply LDAP server configuration and check login.""" if valid: exitcode = 0 message = "1" @@ -19,16 +21,24 @@ def check_config(self, entries, valid=True, ldap_server="openldap1", user="user1 message = "DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name" with Given("I add LDAP server configuration"): - config = create_xml_config_content(entries=entries, config_file="ldap_servers.xml") + config = create_xml_config_content( + entries=entries, config_file="ldap_servers.xml" + ) add_ldap_servers_configuration(servers=None, config=config) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=ldap_server, - role_mappings=None, restart=True) + add_ldap_external_user_directory( + server=ldap_server, role_mappings=None, restart=True + ) with When(f"I login I try to login as an LDAP user"): - r = self.context.node.query(f"SELECT 1", settings=[ - ("user", user), ("password", password)], exitcode=exitcode, message=message) + r = self.context.node.query( + f"SELECT 1", + settings=[("user", user), ("password", password)], + exitcode=exitcode, + message=message, + ) + @TestScenario @Tags("config") @@ -36,8 +46,7 @@ def check_config(self, entries, valid=True, ldap_server="openldap1", user="user1 RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_BaseDN("1.0") ) def config_invalid_base_dn(self): - """Check when invalid `base_dn` is specified in the user_dn_detection section. 
- """ + """Check when invalid `base_dn` is specified in the user_dn_detection section.""" with Given("I define LDAP server configuration with invalid base_dn"): entries = { @@ -50,8 +59,8 @@ def config_invalid_base_dn(self): "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", "user_dn_detection": { "base_dn": "ou=user,dc=company,dc=com", - "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))" - } + "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))", + }, } } ] @@ -59,14 +68,14 @@ def config_invalid_base_dn(self): check_config(entries=entries, valid=False) + @TestScenario @Tags("config") @Requirements( RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_BaseDN("1.0") ) def config_empty_base_dn(self): - """Check when empty `base_dn` is specified in the user_dn_detection section. - """ + """Check when empty `base_dn` is specified in the user_dn_detection section.""" with Given("I define LDAP server configuration with invalid base_dn"): entries = { "ldap_servers": [ @@ -78,8 +87,8 @@ def config_empty_base_dn(self): "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", "user_dn_detection": { "base_dn": "", - "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))" - } + "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))", + }, } } ] @@ -87,14 +96,14 @@ def config_empty_base_dn(self): check_config(entries=entries, valid=False) + @TestScenario @Tags("config") @Requirements( RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_BaseDN("1.0") ) def config_missing_base_dn(self): - """Check when missing `base_dn` is specified in the user_dn_detection section. - """ + """Check when missing `base_dn` is specified in the user_dn_detection section.""" with Given("I define LDAP server configuration with invalid base_dn"): entries = { "ldap_servers": [ @@ -106,7 +115,7 @@ def config_missing_base_dn(self): "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", "user_dn_detection": { "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))" - } + }, } } ] @@ -114,14 +123,14 @@ def config_missing_base_dn(self): check_config(entries=entries, valid=False) + @TestScenario @Tags("config") @Requirements( # FIXME ) def config_invalid_search_filter(self): - """Check when invalid `search_filter` is specified in the user_dn_detection section. - """ + """Check when invalid `search_filter` is specified in the user_dn_detection section.""" with Given("I define LDAP server configuration with invalid search_filter"): entries = { "ldap_servers": [ @@ -133,8 +142,8 @@ def config_invalid_search_filter(self): "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", "user_dn_detection": { "base_dn": "ou=users,dc=company,dc=com", - "search_filter": "(&(objectClass=inetOrgPersons)(uid={user_name}))" - } + "search_filter": "(&(objectClass=inetOrgPersons)(uid={user_name}))", + }, } } ] @@ -142,14 +151,14 @@ def config_invalid_search_filter(self): check_config(entries=entries, valid=False) + @TestScenario @Tags("config") @Requirements( RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_SearchFilter("1.0") ) def config_missing_search_filter(self): - """Check when missing `search_filter` is specified in the user_dn_detection section. 
- """ + """Check when missing `search_filter` is specified in the user_dn_detection section.""" with Given("I define LDAP server configuration with invalid search_filter"): entries = { "ldap_servers": [ @@ -161,7 +170,7 @@ def config_missing_search_filter(self): "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", "user_dn_detection": { "base_dn": "ou=users,dc=company,dc=com", - } + }, } } ] @@ -169,14 +178,14 @@ def config_missing_search_filter(self): check_config(entries=entries, valid=False) + @TestScenario @Tags("config") @Requirements( RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_SearchFilter("1.0") ) def config_empty_search_filter(self): - """Check when empty `search_filter` is specified in the user_dn_detection section. - """ + """Check when empty `search_filter` is specified in the user_dn_detection section.""" with Given("I define LDAP server configuration with invalid search_filter"): entries = { "ldap_servers": [ @@ -188,8 +197,8 @@ def config_empty_search_filter(self): "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", "user_dn_detection": { "base_dn": "ou=users,dc=company,dc=com", - "search_filter": "" - } + "search_filter": "", + }, } } ] @@ -197,15 +206,17 @@ def config_empty_search_filter(self): check_config(entries=entries, valid=False) + @TestScenario @Tags("config") @Requirements( RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_BaseDN("1.0"), - RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_SearchFilter("1.0") + RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_SearchFilter( + "1.0" + ), ) def config_valid(self): - """Check valid config with valid user_dn_detection section. - """ + """Check valid config with valid user_dn_detection section.""" with Given("I define LDAP server configuration"): entries = { "ldap_servers": [ @@ -217,8 +228,8 @@ def config_valid(self): "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", "user_dn_detection": { "base_dn": "ou=users,dc=company,dc=com", - "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))" - } + "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))", + }, } } ] @@ -226,11 +237,14 @@ def config_valid(self): check_config(entries=entries, valid=True) + @TestScenario @Tags("config") @Requirements( RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_BaseDN("1.0"), - RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_SearchFilter("1.0") + RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_SearchFilter( + "1.0" + ), ) def config_valid_tls_connection(self): """Check valid config with valid user_dn_detection section when @@ -248,28 +262,37 @@ def config_valid_tls_connection(self): "tls_require_cert": "never", "user_dn_detection": { "base_dn": "ou=users,dc=company,dc=com", - "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))" - } + "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))", + }, } } ] } - check_config(entries=entries, valid=True, ldap_server="openldap2", user="user2", password="user2") + check_config( + entries=entries, + valid=True, + ldap_server="openldap2", + user="user2", + password="user2", + ) + @TestOutline(Scenario) @Requirements( RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection_Scope("1.0") ) -@Examples("scope base_dn", [ - ("base", "cn=user1,ou=users,dc=company,dc=com"), - ("one_level","ou=users,dc=company,dc=com"), - ("children","ou=users,dc=company,dc=com"), - ("subtree","ou=users,dc=company,dc=com") # default value -]) 
+@Examples( + "scope base_dn", + [ + ("base", "cn=user1,ou=users,dc=company,dc=com"), + ("one_level", "ou=users,dc=company,dc=com"), + ("children", "ou=users,dc=company,dc=com"), + ("subtree", "ou=users,dc=company,dc=com"), # default value + ], +) def check_valid_scope_values(self, scope, base_dn): - """Check configuration with valid scope values. - """ + """Check configuration with valid scope values.""" with Given("I define LDAP server configuration"): entries = { "ldap_servers": [ @@ -282,8 +305,8 @@ def check_valid_scope_values(self, scope, base_dn): "user_dn_detection": { "base_dn": base_dn, "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))", - "scope": scope - } + "scope": scope, + }, } } ] @@ -291,6 +314,7 @@ def check_valid_scope_values(self, scope, base_dn): check_config(entries=entries, valid=True) + @TestSuite def mapping(self): """Run all role mapping tests with both @@ -298,8 +322,13 @@ def mapping(self): user DN detection. """ users = [ - {"server": "openldap1", "username": "user1", "password": "user1", "login": True, - "dn": "cn=user1,ou=users,dc=company,dc=com"}, + { + "server": "openldap1", + "username": "user1", + "password": "user1", + "login": True, + "dn": "cn=user1,ou=users,dc=company,dc=com", + }, ] entries = { @@ -312,8 +341,8 @@ def mapping(self): "bind_dn": "cn={user_name},ou=users,dc=company,dc=com", "user_dn_detection": { "base_dn": "ou=users,dc=company,dc=com", - "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))" - } + "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))", + }, }, "openldap2": { "host": "openldap2", @@ -323,20 +352,25 @@ def mapping(self): "tls_require_cert": "never", "user_dn_detection": { "base_dn": "ou=users,dc=company,dc=com", - "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))" - } - } + "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))", + }, + }, }, ] } with Given("I add LDAP servers configuration"): - config = create_xml_config_content(entries=entries, config_file="ldap_servers.xml") + config = create_xml_config_content( + entries=entries, config_file="ldap_servers.xml" + ) add_ldap_servers_configuration(servers=None, config=config) - for scenario in loads(importlib.import_module("ldap.role_mapping.tests.mapping"), Scenario): + for scenario in loads( + importlib.import_module("ldap.role_mapping.tests.mapping"), Scenario + ): scenario(ldap_server="openldap1", ldap_user=users[0]) + @TestOutline def setup_different_bind_dn_and_user_dn(self, uid, map_by, user_dn_detection): """Check that roles get mapped properly when bind_dn and user_dn are different @@ -361,7 +395,7 @@ def setup_different_bind_dn_and_user_dn(self, uid, map_by, user_dn_detection): entries["ldap_servers"][0]["openldap1"]["user_dn_detection"] = { "base_dn": "ou=users,dc=company,dc=com", "search_filter": "(&(objectClass=inetOrgPerson)(uid={user_name}))", - "scope": "subtree" + "scope": "subtree", } with And("I define role mappings"): @@ -370,21 +404,23 @@ def setup_different_bind_dn_and_user_dn(self, uid, map_by, user_dn_detection): "base_dn": "ou=groups,dc=company,dc=com", "attribute": "cn", "search_filter": f"(&(objectClass=groupOfUniqueNames)(uniquemember={{{map_by}}}))", - "prefix":"" + "prefix": "", } ] with Given("I add LDAP users"): - first_user = add_ldap_users(users=[ - {"cn": f"first_user", "userpassword": "user", "uid": "second_user"} - ])[0] + first_user = add_ldap_users( + users=[{"cn": f"first_user", "userpassword": "user", "uid": "second_user"}] + )[0] - second_user = 
add_ldap_users(users=[ - {"cn": f"second_user", "userpassword": "user", "uid": "first_user"} - ])[0] + second_user = add_ldap_users( + users=[{"cn": f"second_user", "userpassword": "user", "uid": "first_user"}] + )[0] with Given("I add LDAP groups"): - groups = add_ldap_groups(groups=({"cn": f"role0_{uid}"}, {"cn": f"role1_{uid}"})) + groups = add_ldap_groups( + groups=({"cn": f"role0_{uid}"}, {"cn": f"role1_{uid}"}) + ) with And("I add LDAP user to each LDAP group"): with By("adding first group to first user"): @@ -396,12 +432,18 @@ def setup_different_bind_dn_and_user_dn(self, uid, map_by, user_dn_detection): roles = add_rbac_roles(roles=(f"role0_{uid}", f"role1_{uid}")) with Given("I add LDAP server configuration"): - config = create_xml_config_content(entries=entries, config_file="ldap_servers.xml") + config = create_xml_config_content( + entries=entries, config_file="ldap_servers.xml" + ) add_ldap_servers_configuration(servers=None, config=config) with And("I add LDAP external user directory configuration"): - add_ldap_external_user_directory(server=self.context.ldap_node.name, - role_mappings=role_mappings, restart=True) + add_ldap_external_user_directory( + server=self.context.ldap_node.name, + role_mappings=role_mappings, + restart=True, + ) + @TestScenario def map_roles_by_user_dn_when_base_dn_and_user_dn_are_different(self): @@ -414,22 +456,27 @@ def map_roles_by_user_dn_when_base_dn_and_user_dn_are_different(self): """ uid = getuid() - setup_different_bind_dn_and_user_dn(uid=uid, map_by="user_dn", user_dn_detection=True) + setup_different_bind_dn_and_user_dn( + uid=uid, map_by="user_dn", user_dn_detection=True + ) with When(f"I login as first LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", "first_user"), ("password", "user")]) + r = self.context.node.query( + f"SHOW GRANTS", settings=[("user", "first_user"), ("password", "user")] + ) with Then("I expect the first user to have mapped LDAP roles from second user"): assert f"GRANT role1_{uid} TO first_user" in r.output, error() with When(f"I login as second LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", "second_user"), ("password", "user")]) + r = self.context.node.query( + f"SHOW GRANTS", settings=[("user", "second_user"), ("password", "user")] + ) with Then("I expect the second user to have mapped LDAP roles from first user"): assert f"GRANT role0_{uid} TO second_user" in r.output, error() + @TestScenario def map_roles_by_bind_dn_when_base_dn_and_user_dn_are_different(self): """Check the case when we map roles by bind_dn when bind_dn and user_dn @@ -437,30 +484,32 @@ def map_roles_by_bind_dn_when_base_dn_and_user_dn_are_different(self): """ uid = getuid() - setup_different_bind_dn_and_user_dn(uid=uid, map_by="bind_dn", user_dn_detection=True) + setup_different_bind_dn_and_user_dn( + uid=uid, map_by="bind_dn", user_dn_detection=True + ) with When(f"I login as first LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", "first_user"), ("password", "user")]) + r = self.context.node.query( + f"SHOW GRANTS", settings=[("user", "first_user"), ("password", "user")] + ) with Then("I expect the first user to have no mapped LDAP roles"): assert f"GRANT role0_{uid} TO first_user" == r.output, error() with When(f"I login as second LDAP user"): - r = self.context.node.query(f"SHOW GRANTS", settings=[ - ("user", "second_user"), ("password", "user")]) + r = self.context.node.query( + f"SHOW GRANTS", settings=[("user", "second_user"), ("password", 
"user")] + ) with Then("I expect the second user to have no mapped LDAP roles"): assert f"GRANT role1_{uid} TO second_user" in r.output, error() + @TestFeature @Name("user dn detection") -@Requirements( - RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection("1.0") -) +@Requirements(RQ_SRS_014_LDAP_RoleMapping_Configuration_Server_UserDNDetection("1.0")) def feature(self): - """Check LDAP user DN detection. - """ + """Check LDAP user DN detection.""" self.context.node = self.context.cluster.node("clickhouse1") self.context.ldap_node = self.context.cluster.node("openldap1") diff --git a/tests/testflows/map_type/regression.py b/tests/testflows/map_type/regression.py index 16117b04b48..321a6944b2b 100755 --- a/tests/testflows/map_type/regression.py +++ b/tests/testflows/map_type/regression.py @@ -11,121 +11,162 @@ from helpers.argparser import argparser from map_type.requirements import SRS018_ClickHouse_Map_Data_Type xfails = { - "tests/table map with key integer/Int:": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21032")], - "tests/table map with value integer/Int:": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21032")], - "tests/table map with key integer/UInt256": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21031")], - "tests/table map with value integer/UInt256": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21031")], - "tests/select map with key integer/Int64": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21030")], - "tests/select map with value integer/Int64": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21030")], - "tests/cast tuple of two arrays to map/string -> int": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21029")], - "tests/mapcontains/null key in map": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028")], - "tests/mapcontains/null key not in map": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028")], - "tests/mapkeys/null key not in map": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028")], - "tests/mapkeys/null key in map": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028")], - "tests/mapcontains/select nullable key": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21026")], - "tests/mapkeys/select keys from column": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21026")], - "tests/table map select key with value string/LowCardinality:": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406")], - "tests/table map select key with key string/FixedString": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406")], - "tests/table map select key with key string/Nullable": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406")], - "tests/table map select key with key string/Nullable(NULL)": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21026")], - "tests/table map select key with key string/LowCardinality:": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406")], - "tests/table map select key with key integer/Int:": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21032")], - "tests/table map select key with key integer/UInt256": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21031")], - "tests/table map select key with key integer/toNullable": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406")], - "tests/table map select key with key integer/toNullable(NULL)": - [(Fail, 
"https://github.com/ClickHouse/ClickHouse/issues/21026")], - "tests/select map with key integer/Int128": - [(Fail, "large Int128 as key not supported")], - "tests/select map with key integer/Int256": - [(Fail, "large Int256 as key not supported")], - "tests/select map with key integer/UInt256": - [(Fail, "large UInt256 as key not supported")], - "tests/select map with key integer/toNullable": - [(Fail, "Nullable type as key not supported")], - "tests/select map with key integer/toNullable(NULL)": - [(Fail, "Nullable type as key not supported")], - "tests/select map with key string/Nullable": - [(Fail, "Nullable type as key not supported")], - "tests/select map with key string/Nullable(NULL)": - [(Fail, "Nullable type as key not supported")], - "tests/table map queries/select map with nullable value": - [(Fail, "Nullable value not supported")], - "tests/table map with key integer/toNullable": - [(Fail, "Nullable type as key not supported")], - "tests/table map with key integer/toNullable(NULL)": - [(Fail, "Nullable type as key not supported")], - "tests/table map with key string/Nullable": - [(Fail, "Nullable type as key not supported")], - "tests/table map with key string/Nullable(NULL)": - [(Fail, "Nullable type as key not supported")], - "tests/table map with key string/LowCardinality(String)": - [(Fail, "LowCardinality(String) as key not supported")], - "tests/table map with key string/LowCardinality(String) cast from String": - [(Fail, "LowCardinality(String) as key not supported")], - "tests/table map with key string/LowCardinality(String) for key and value": - [(Fail, "LowCardinality(String) as key not supported")], - "tests/table map with key string/LowCardinality(FixedString)": - [(Fail, "LowCardinality(FixedString) as key not supported")], - "tests/table map with value string/LowCardinality(String) for key and value": - [(Fail, "LowCardinality(String) as key not supported")], + "tests/table map with key integer/Int:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21032") + ], + "tests/table map with value integer/Int:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21032") + ], + "tests/table map with key integer/UInt256": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21031") + ], + "tests/table map with value integer/UInt256": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21031") + ], + "tests/select map with key integer/Int64": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21030") + ], + "tests/select map with value integer/Int64": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21030") + ], + "tests/cast tuple of two arrays to map/string -> int": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21029") + ], + "tests/mapcontains/null key in map": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028") + ], + "tests/mapcontains/null key not in map": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028") + ], + "tests/mapkeys/null key not in map": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028") + ], + "tests/mapkeys/null key in map": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21028") + ], + "tests/mapcontains/select nullable key": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21026") + ], + "tests/mapkeys/select keys from column": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21026") + ], + "tests/table map select key with value string/LowCardinality:": [ + (Fail, 
"https://github.com/ClickHouse/ClickHouse/issues/21406") + ], + "tests/table map select key with key string/FixedString": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406") + ], + "tests/table map select key with key string/Nullable": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406") + ], + "tests/table map select key with key string/Nullable(NULL)": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21026") + ], + "tests/table map select key with key string/LowCardinality:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406") + ], + "tests/table map select key with key integer/Int:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21032") + ], + "tests/table map select key with key integer/UInt256": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21031") + ], + "tests/table map select key with key integer/toNullable": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21406") + ], + "tests/table map select key with key integer/toNullable(NULL)": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/21026") + ], + "tests/select map with key integer/Int128": [ + (Fail, "large Int128 as key not supported") + ], + "tests/select map with key integer/Int256": [ + (Fail, "large Int256 as key not supported") + ], + "tests/select map with key integer/UInt256": [ + (Fail, "large UInt256 as key not supported") + ], + "tests/select map with key integer/toNullable": [ + (Fail, "Nullable type as key not supported") + ], + "tests/select map with key integer/toNullable(NULL)": [ + (Fail, "Nullable type as key not supported") + ], + "tests/select map with key string/Nullable": [ + (Fail, "Nullable type as key not supported") + ], + "tests/select map with key string/Nullable(NULL)": [ + (Fail, "Nullable type as key not supported") + ], + "tests/table map queries/select map with nullable value": [ + (Fail, "Nullable value not supported") + ], + "tests/table map with key integer/toNullable": [ + (Fail, "Nullable type as key not supported") + ], + "tests/table map with key integer/toNullable(NULL)": [ + (Fail, "Nullable type as key not supported") + ], + "tests/table map with key string/Nullable": [ + (Fail, "Nullable type as key not supported") + ], + "tests/table map with key string/Nullable(NULL)": [ + (Fail, "Nullable type as key not supported") + ], + "tests/table map with key string/LowCardinality(String)": [ + (Fail, "LowCardinality(String) as key not supported") + ], + "tests/table map with key string/LowCardinality(String) cast from String": [ + (Fail, "LowCardinality(String) as key not supported") + ], + "tests/table map with key string/LowCardinality(String) for key and value": [ + (Fail, "LowCardinality(String) as key not supported") + ], + "tests/table map with key string/LowCardinality(FixedString)": [ + (Fail, "LowCardinality(FixedString) as key not supported") + ], + "tests/table map with value string/LowCardinality(String) for key and value": [ + (Fail, "LowCardinality(String) as key not supported") + ], # JSON related - "tests/table map with duplicated keys/Map(Int64, String))": - [(Fail, "new bug due to JSON changes")], - "tests/table map with key integer/UInt64": - [(Fail, "new bug due to JSON changes")], - "tests/table map with value integer/UInt64": - [(Fail, "new bug due to JSON changes")] + "tests/table map with duplicated keys/Map(Int64, String))": [ + (Fail, "new bug due to JSON changes") + ], + "tests/table map with key integer/UInt64": [(Fail, "new bug due to JSON changes")], + 
"tests/table map with value integer/UInt64": [ + (Fail, "new bug due to JSON changes") + ], } -xflags = { -} +xflags = {} + @TestModule @ArgumentParser(argparser) @XFails(xfails) @XFlags(xflags) @Name("map type") -@Specifications( - SRS018_ClickHouse_Map_Data_Type -) -def regression(self, local, clickhouse_binary_path, clickhouser_version=None, stress=None): - """Map type regression. - """ - nodes = { - "clickhouse": - ("clickhouse1", "clickhouse2", "clickhouse3") - } +@Specifications(SRS018_ClickHouse_Map_Data_Type) +def regression( + self, local, clickhouse_binary_path, clickhouser_version=None, stress=None +): + """Map type regression.""" + nodes = {"clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3")} if stress is not None: self.context.stress = stress self.context.clickhouse_version = clickhouse_version - with Cluster(local, clickhouse_binary_path, nodes=nodes, - docker_compose_project_dir=os.path.join(current_dir(), "map_type_env")) as cluster: + with Cluster( + local, + clickhouse_binary_path, + nodes=nodes, + docker_compose_project_dir=os.path.join(current_dir(), "map_type_env"), + ) as cluster: self.context.cluster = cluster Feature(run=load("map_type.tests.feature", "feature")) + if main(): regression() diff --git a/tests/testflows/map_type/requirements/requirements.py b/tests/testflows/map_type/requirements/requirements.py index 7569f7cc177..d25c6149658 100644 --- a/tests/testflows/map_type/requirements/requirements.py +++ b/tests/testflows/map_type/requirements/requirements.py @@ -9,793 +9,831 @@ from testflows.core import Requirement Heading = Specification.Heading RQ_SRS_018_ClickHouse_Map_DataType = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `Map(key, value)` data type that stores `key:value` pairs.\n' - '\n' - ), + "[ClickHouse] SHALL support `Map(key, value)` data type that stores `key:value` pairs.\n" + "\n" + ), link=None, level=3, - num='3.1.1') + num="3.1.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Performance_Vs_ArrayOfTuples = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.ArrayOfTuples', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.ArrayOfTuples", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL provide comparable performance for `Map(key, value)` data type as\n' - 'compared to `Array(Tuple(K,V))` data type.\n' - '\n' - ), + "[ClickHouse] SHALL provide comparable performance for `Map(key, value)` data type as\n" + "compared to `Array(Tuple(K,V))` data type.\n" + "\n" + ), link=None, level=3, - num='3.2.1') + num="3.2.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Performance_Vs_TupleOfArrays = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.TupleOfArrays', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.TupleOfArrays", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL provide comparable performance for `Map(key, value)` data type as\n' - 'compared to `Tuple(Array(String), Array(String))` data type where the first\n' - 'array defines an array of keys and the second array defines an array of values.\n' - '\n' - ), + "[ClickHouse] SHALL provide comparable performance for `Map(key, value)` data type as\n" + "compared to `Tuple(Array(String), Array(String))` data type where the 
first\n" + "array defines an array of keys and the second array defines an array of values.\n" + "\n" + ), link=None, level=3, - num='3.2.2') + num="3.2.2", +) RQ_SRS_018_ClickHouse_Map_DataType_Key_String = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Key.String', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Key.String", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `Map(key, value)` data type where key is of a [String] type.\n' - '\n' - ), + "[ClickHouse] SHALL support `Map(key, value)` data type where key is of a [String] type.\n" + "\n" + ), link=None, level=3, - num='3.3.1') + num="3.3.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Key_Integer = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Key.Integer', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Key.Integer", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `Map(key, value)` data type where key is of an [Integer] type.\n' - '\n' - ), + "[ClickHouse] SHALL support `Map(key, value)` data type where key is of an [Integer] type.\n" + "\n" + ), link=None, level=3, - num='3.3.2') + num="3.3.2", +) RQ_SRS_018_ClickHouse_Map_DataType_Value_String = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Value.String', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.String", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `Map(key, value)` data type where value is of a [String] type.\n' - '\n' - ), + "[ClickHouse] SHALL support `Map(key, value)` data type where value is of a [String] type.\n" + "\n" + ), link=None, level=3, - num='3.4.1') + num="3.4.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Value_Integer = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Value.Integer', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.Integer", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `Map(key, value)` data type where value is of a [Integer] type.\n' - '\n' - ), + "[ClickHouse] SHALL support `Map(key, value)` data type where value is of a [Integer] type.\n" + "\n" + ), link=None, level=3, - num='3.4.2') + num="3.4.2", +) RQ_SRS_018_ClickHouse_Map_DataType_Value_Array = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Value.Array', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.Array", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `Map(key, value)` data type where value is of a [Array] type.\n' - '\n' - ), + "[ClickHouse] SHALL support `Map(key, value)` data type where value is of a [Array] type.\n" + "\n" + ), link=None, level=3, - num='3.4.3') + num="3.4.3", +) RQ_SRS_018_ClickHouse_Map_DataType_Invalid_Nullable = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Invalid.Nullable', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Invalid.Nullable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not support creating table columns that have `Nullable(Map(key, value))` data type.\n' - '\n' - ), + "[ClickHouse] SHALL not support creating table columns that have `Nullable(Map(key, value))` data type.\n" + "\n" + ), link=None, level=3, - num='3.5.1') + num="3.5.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Invalid_NothingNothing = Requirement( - 
name='RQ.SRS-018.ClickHouse.Map.DataType.Invalid.NothingNothing', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Invalid.NothingNothing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not support creating table columns that have `Map(Nothing, Nothing))` data type.\n' - '\n' - ), + "[ClickHouse] SHALL not support creating table columns that have `Map(Nothing, Nothing))` data type.\n" + "\n" + ), link=None, level=3, - num='3.5.2') + num="3.5.2", +) RQ_SRS_018_ClickHouse_Map_DataType_DuplicatedKeys = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.DuplicatedKeys', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.DuplicatedKeys", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY support `Map(key, value)` data type with duplicated keys.\n' - '\n' - ), + "[ClickHouse] MAY support `Map(key, value)` data type with duplicated keys.\n" + "\n" + ), link=None, level=3, - num='3.6.1') + num="3.6.1", +) RQ_SRS_018_ClickHouse_Map_DataType_ArrayOfMaps = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.ArrayOfMaps', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.ArrayOfMaps", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `Array(Map(key, value))` data type.\n' - '\n' - ), + "[ClickHouse] SHALL support `Array(Map(key, value))` data type.\n" "\n" + ), link=None, level=3, - num='3.7.1') + num="3.7.1", +) RQ_SRS_018_ClickHouse_Map_DataType_NestedWithMaps = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.NestedWithMaps', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.NestedWithMaps", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support defining `Map(key, value)` data type inside the [Nested] data type.\n' - '\n' - ), + "[ClickHouse] SHALL support defining `Map(key, value)` data type inside the [Nested] data type.\n" + "\n" + ), link=None, level=3, - num='3.8.1') + num="3.8.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support getting the value from a `Map(key, value)` data type using `map[key]` syntax.\n' - 'If `key` has duplicates then the first `key:value` pair MAY be returned. \n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL support getting the value from a `Map(key, value)` data type using `map[key]` syntax.\n" + "If `key` has duplicates then the first `key:value` pair MAY be returned. 
\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT a['key2'] FROM table_map;\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=3, - num='3.9.1') + num="3.9.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyInvalid = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyInvalid', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyInvalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when key does not match the key type.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT map(1,2) AS m, m[1024]\n' - '```\n' - '\n' - 'Exceptions:\n' - '\n' - '* when key is `NULL` the return value MAY be `NULL`\n' - '* when key value is not valid for the key type, for example it is out of range for [Integer] type, \n' - ' when reading from a table column it MAY return the default value for key data type\n' - '\n' - ), + "[ClickHouse] SHALL return an error when key does not match the key type.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT map(1,2) AS m, m[1024]\n" + "```\n" + "\n" + "Exceptions:\n" + "\n" + "* when key is `NULL` the return value MAY be `NULL`\n" + "* when key value is not valid for the key type, for example it is out of range for [Integer] type, \n" + " when reading from a table column it MAY return the default value for key data type\n" + "\n" + ), link=None, level=3, - num='3.9.2') + num="3.9.2", +) RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyNotFound = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyNotFound', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyNotFound", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return default value for the data type of the value\n' + "[ClickHouse] SHALL return default value for the data type of the value\n" "when there's no corresponding `key` defined in the `Map(key, value)` data type. 
\n" - '\n' - '\n' - ), + "\n" + "\n" + ), link=None, level=3, - num='3.9.3') + num="3.9.3", +) RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysToMap = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysToMap', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysToMap", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support converting [Tuple(Array, Array)] to `Map(key, value)` using the [CAST] function.\n' - '\n' - '``` sql\n' + "[ClickHouse] SHALL support converting [Tuple(Array, Array)] to `Map(key, value)` using the [CAST] function.\n" + "\n" + "``` sql\n" "SELECT CAST(([1, 2, 3], ['Ready', 'Steady', 'Go']), 'Map(UInt8, String)') AS map;\n" - '```\n' - '\n' - '``` text\n' - '┌─map───────────────────────────┐\n' + "```\n" + "\n" + "``` text\n" + "┌─map───────────────────────────┐\n" "│ {1:'Ready',2:'Steady',3:'Go'} │\n" - '└───────────────────────────────┘\n' - '```\n' - '\n' - ), + "└───────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=3, - num='3.10.1') + num="3.10.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysMap_Invalid = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysMap.Invalid', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysMap.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY return an error when casting [Tuple(Array, Array)] to `Map(key, value)`\n' - '\n' - '* when arrays are not of equal size\n' - '\n' - ' For example,\n' - '\n' - ' ```sql\n' + "[ClickHouse] MAY return an error when casting [Tuple(Array, Array)] to `Map(key, value)`\n" + "\n" + "* when arrays are not of equal size\n" + "\n" + " For example,\n" + "\n" + " ```sql\n" " SELECT CAST(([2, 1, 1023], ['', '']), 'Map(UInt8, String)') AS map, map[10]\n" - ' ```\n' - '\n' - ), + " ```\n" + "\n" + ), link=None, level=3, - num='3.10.2') + num="3.10.2", +) RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support converting [Array(Tuple(K,V))] to `Map(key, value)` using the [CAST] function.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL support converting [Array(Tuple(K,V))] to `Map(key, value)` using the [CAST] function.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT CAST(([(1,2),(3)]), 'Map(UInt8, UInt8)') AS map\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=3, - num='3.11.1') + num="3.11.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap_Invalid = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap.Invalid', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap.Invalid", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY return an error when casting [Array(Tuple(K, V))] to `Map(key, value)`\n' - '\n' - '* when element is not a [Tuple]\n' - '\n' - ' ```sql\n' + "[ClickHouse] MAY return an error when casting [Array(Tuple(K, V))] to `Map(key, value)`\n" + "\n" + "* when element is not a 
[Tuple]\n" + "\n" + " ```sql\n" " SELECT CAST(([(1,2),(3)]), 'Map(UInt8, UInt8)') AS map\n" - ' ```\n' - '\n' - '* when [Tuple] does not contain two elements\n' - '\n' - ' ```sql\n' + " ```\n" + "\n" + "* when [Tuple] does not contain two elements\n" + "\n" + " ```sql\n" " SELECT CAST(([(1,2),(3,)]), 'Map(UInt8, UInt8)') AS map\n" - ' ```\n' - '\n' - ), + " ```\n" + "\n" + ), link=None, level=3, - num='3.11.2') + num="3.11.2", +) RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Keys = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `keys` subcolumn in the `Map(key, value)` type that can be used \n' - 'to retrieve an [Array] of map keys.\n' - '\n' - '```sql\n' - 'SELECT m.keys FROM t_map;\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `keys` subcolumn in the `Map(key, value)` type that can be used \n" + "to retrieve an [Array] of map keys.\n" + "\n" + "```sql\n" + "SELECT m.keys FROM t_map;\n" + "```\n" + "\n" + ), link=None, level=3, - num='3.12.1') + num="3.12.1", +) RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Keys_ArrayFunctions = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.ArrayFunctions', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.ArrayFunctions", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support applying [Array] functions to the `keys` subcolumn in the `Map(key, value)` type.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL support applying [Array] functions to the `keys` subcolumn in the `Map(key, value)` type.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT * FROM t_map WHERE has(m.keys, 'a');\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=3, - num='3.12.2') + num="3.12.2", +) RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Keys_InlineDefinedMap = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.InlineDefinedMap', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.InlineDefinedMap", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using inline defined map to get `keys` subcolumn.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] MAY not support using inline defined map to get `keys` subcolumn.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT map( 'aa', 4, '44' , 5) as c, c.keys\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=3, - num='3.12.3') + num="3.12.3", +) RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Values = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `values` subcolumn in the `Map(key, value)` type that can be used \n' - 'to retrieve an [Array] of map values.\n' - '\n' - '```sql\n' - 'SELECT m.values FROM t_map;\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `values` subcolumn in the `Map(key, value)` type that can be used \n" + "to retrieve an [Array] of map values.\n" + "\n" + "```sql\n" + "SELECT m.values FROM t_map;\n" + "```\n" + "\n" + ), link=None, level=3, - num='3.12.4') + num="3.12.4", +) 
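
Every entry in this generated requirements module follows the same constructor shape that the hunks above reformat, and test scenarios pin themselves to a requirement version by calling the object, as the LDAP scenarios earlier in this diff do with `@Requirements(RQ_...("1.0"))`. A condensed sketch of the post-format shape and of that linkage follows; the description text is trimmed, the scenario is hypothetical, and the explicit imports assume `Requirement`, `TestScenario`, and `Requirements` are importable from `testflows.core`, as the imports in these files suggest.

```python
# Condensed sketch of one post-format Requirement entry from this file
# (description trimmed) and of how a scenario can pin itself to it.
from testflows.core import Requirement, TestScenario, Requirements

RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Values = Requirement(
    name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values",
    version="1.0",
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        "[ClickHouse] SHALL support `values` subcolumn in the `Map(key, value)` type that can be used \n"
        "to retrieve an [Array] of map values.\n"
    ),
    link=None,
    level=3,
    num="3.12.4",
)


@TestScenario
@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Values("1.0"))
def values_subcolumn(self):
    """Hypothetical scenario pinned to the requirement above (illustrative only)."""
    ...
```
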
RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Values_ArrayFunctions = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.ArrayFunctions', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.ArrayFunctions", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support applying [Array] functions to the `values` subcolumn in the `Map(key, value)` type.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL support applying [Array] functions to the `values` subcolumn in the `Map(key, value)` type.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT * FROM t_map WHERE has(m.values, 'a');\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=3, - num='3.12.5') + num="3.12.5", +) RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Values_InlineDefinedMap = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.InlineDefinedMap', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.InlineDefinedMap", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] MAY not support using inline defined map to get `values` subcolumn.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] MAY not support using inline defined map to get `values` subcolumn.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT map( 'aa', 4, '44' , 5) as c, c.values\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=3, - num='3.12.6') + num="3.12.6", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_InlineDefinedMap = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.InlineDefinedMap', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.InlineDefinedMap", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using inline defined maps as an argument to map functions.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL support using inline defined maps as an argument to map functions.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT map( 'aa', 4, '44' , 5) as c, mapKeys(c)\n" "SELECT map( 'aa', 4, '44' , 5) as c, mapValues(c)\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=3, - num='3.13.1') + num="3.13.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_Length = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Length', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Length", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `Map(key, value)` data type as an argument to the [length] function\n' - 'that SHALL return number of keys in the map.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT length(map(1,2,3,4))\n' - 'SELECT length(map())\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `Map(key, value)` data type as an argument to the [length] function\n" + "that SHALL return number of keys in the map.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT length(map(1,2,3,4))\n" + "SELECT length(map())\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.13.2.1') + num="3.13.2.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_Empty = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Empty', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Empty", + version="1.0", priority=None, group=None, type=None, uid=None, 
description=( - '[ClickHouse] SHALL support `Map(key, value)` data type as an argument to the [empty] function\n' - 'that SHALL return 1 if number of keys in the map is 0 otherwise if the number of keys is \n' - 'greater or equal to 1 it SHALL return 0.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT empty(map(1,2,3,4))\n' - 'SELECT empty(map())\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `Map(key, value)` data type as an argument to the [empty] function\n" + "that SHALL return 1 if number of keys in the map is 0 otherwise if the number of keys is \n" + "greater or equal to 1 it SHALL return 0.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT empty(map(1,2,3,4))\n" + "SELECT empty(map())\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.13.3.1') + num="3.13.3.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_NotEmpty = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.NotEmpty', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.NotEmpty", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `Map(key, value)` data type as an argument to the [notEmpty] function\n' - 'that SHALL return 0 if number if keys in the map is 0 otherwise if the number of keys is\n' - 'greater or equal to 1 it SHALL return 1.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT notEmpty(map(1,2,3,4))\n' - 'SELECT notEmpty(map())\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `Map(key, value)` data type as an argument to the [notEmpty] function\n" + "that SHALL return 0 if number if keys in the map is 0 otherwise if the number of keys is\n" + "greater or equal to 1 it SHALL return 1.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT notEmpty(map(1,2,3,4))\n" + "SELECT notEmpty(map())\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.13.4.1') + num="3.13.4.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support arranging `key, value` pairs into `Map(key, value)` data type\n' - 'using `map` function.\n' - '\n' - '**Syntax** \n' - '\n' - '``` sql\n' - 'map(key1, value1[, key2, value2, ...])\n' - '```\n' - '\n' - 'For example,\n' - '\n' - '``` sql\n' + "[ClickHouse] SHALL support arranging `key, value` pairs into `Map(key, value)` data type\n" + "using `map` function.\n" + "\n" + "**Syntax** \n" + "\n" + "``` sql\n" + "map(key1, value1[, key2, value2, ...])\n" + "```\n" + "\n" + "For example,\n" + "\n" + "``` sql\n" "SELECT map('key1', number, 'key2', number * 2) FROM numbers(3);\n" - '\n' + "\n" "┌─map('key1', number, 'key2', multiply(number, 2))─┐\n" "│ {'key1':0,'key2':0} │\n" "│ {'key1':1,'key2':2} │\n" "│ {'key1':2,'key2':4} │\n" - '└──────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "└──────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.13.5.1') + num="3.13.5.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_InvalidNumberOfArguments = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.InvalidNumberOfArguments', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.InvalidNumberOfArguments", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] 
SHALL return an error when `map` function is called with non even number of arguments.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `map` function is called with non even number of arguments.\n" + "\n" + ), link=None, level=4, - num='3.13.5.2') + num="3.13.5.2", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MixedKeyOrValueTypes = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MixedKeyOrValueTypes', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MixedKeyOrValueTypes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `map` function is called with mixed key or value types.\n' - '\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `map` function is called with mixed key or value types.\n" + "\n" + "\n" + ), link=None, level=4, - num='3.13.5.3') + num="3.13.5.3", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapAdd = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapAdd', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapAdd", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support converting the results of `mapAdd` function to a `Map(key, value)` data type.\n' - '\n' - 'For example,\n' - '\n' - '``` sql\n' + "[ClickHouse] SHALL support converting the results of `mapAdd` function to a `Map(key, value)` data type.\n" + "\n" + "For example,\n" + "\n" + "``` sql\n" 'SELECT CAST(mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])), "Map(Int8,Int8)")\n' - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=4, - num='3.13.5.4') + num="3.13.5.4", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapSubstract = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapSubstract', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapSubstract", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support converting the results of `mapSubstract` function to a `Map(key, value)` data type.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL support converting the results of `mapSubstract` function to a `Map(key, value)` data type.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" 'SELECT CAST(mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])), "Map(Int8,Int8)")\n' - '```\n' - ), + "```\n" + ), link=None, level=4, - num='3.13.5.5') + num="3.13.5.5", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapPopulateSeries = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapPopulateSeries', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapPopulateSeries", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support converting the results of `mapPopulateSeries` function to a `Map(key, value)` data type.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL support converting the results of `mapPopulateSeries` function to a `Map(key, value)` data type.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" 'SELECT CAST(mapPopulateSeries([1,2,4], [11,22,44], 5), "Map(Int8,Int8)")\n' - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=4, - num='3.13.5.6') + num="3.13.5.6", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapContains = Requirement( - 
name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapContains', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapContains", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `mapContains(map, key)` function to check weather `map.keys` contains the `key`.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL support `mapContains(map, key)` function to check weather `map.keys` contains the `key`.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT mapContains(a, 'abc') from table_map;\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=4, - num='3.13.6.1') + num="3.13.6.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapKeys = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapKeys', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapKeys", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `mapKeys(map)` function to return all the map keys in the [Array] format.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT mapKeys(a) from table_map;\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `mapKeys(map)` function to return all the map keys in the [Array] format.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT mapKeys(a) from table_map;\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.13.7.1') + num="3.13.7.1", +) RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapValues = Requirement( - name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapValues', - version='1.0', + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapValues", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `mapValues(map)` function to return all the map values in the [Array] format.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT mapValues(a) from table_map;\n' - '```\n' - '\n' - '[Nested]: https://clickhouse.com/docs/en/sql-reference/data-types/nested-data-structures/nested/\n' - '[length]: https://clickhouse.com/docs/en/sql-reference/functions/array-functions/#array_functions-length\n' - '[empty]: https://clickhouse.com/docs/en/sql-reference/functions/array-functions/#function-empty\n' - '[notEmpty]: https://clickhouse.com/docs/en/sql-reference/functions/array-functions/#function-notempty\n' - '[CAST]: https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#type_conversion_function-cast\n' - '[Tuple]: https://clickhouse.com/docs/en/sql-reference/data-types/tuple/\n' - '[Tuple(Array,Array)]: https://clickhouse.com/docs/en/sql-reference/data-types/tuple/\n' - '[Array]: https://clickhouse.com/docs/en/sql-reference/data-types/array/ \n' - '[String]: https://clickhouse.com/docs/en/sql-reference/data-types/string/\n' - '[Integer]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/\n' - '[ClickHouse]: https://clickhouse.com\n' - '[GitHub Repository]: https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/map_type/requirements/requirements.md \n' - '[Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/map_type/requirements/requirements.md\n' - '[Git]: https://git-scm.com/\n' - '[GitHub]: https://github.com\n' - ), + "[ClickHouse] SHALL support `mapValues(map)` function to return all the map values in the [Array] format.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT mapValues(a) from table_map;\n" + "```\n" + 
"\n" + "[Nested]: https://clickhouse.com/docs/en/sql-reference/data-types/nested-data-structures/nested/\n" + "[length]: https://clickhouse.com/docs/en/sql-reference/functions/array-functions/#array_functions-length\n" + "[empty]: https://clickhouse.com/docs/en/sql-reference/functions/array-functions/#function-empty\n" + "[notEmpty]: https://clickhouse.com/docs/en/sql-reference/functions/array-functions/#function-notempty\n" + "[CAST]: https://clickhouse.com/docs/en/sql-reference/functions/type-conversion-functions/#type_conversion_function-cast\n" + "[Tuple]: https://clickhouse.com/docs/en/sql-reference/data-types/tuple/\n" + "[Tuple(Array,Array)]: https://clickhouse.com/docs/en/sql-reference/data-types/tuple/\n" + "[Array]: https://clickhouse.com/docs/en/sql-reference/data-types/array/ \n" + "[String]: https://clickhouse.com/docs/en/sql-reference/data-types/string/\n" + "[Integer]: https://clickhouse.com/docs/en/sql-reference/data-types/int-uint/\n" + "[ClickHouse]: https://clickhouse.com\n" + "[GitHub Repository]: https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/map_type/requirements/requirements.md \n" + "[Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/map_type/requirements/requirements.md\n" + "[Git]: https://git-scm.com/\n" + "[GitHub]: https://github.com\n" + ), link=None, level=4, - num='3.13.8.1') + num="3.13.8.1", +) SRS018_ClickHouse_Map_Data_Type = Specification( - name='SRS018 ClickHouse Map Data Type', + name="SRS018 ClickHouse Map Data Type", description=None, author=None, - date=None, - status=None, + date=None, + status=None, approved_by=None, approved_date=None, approved_version=None, @@ -807,69 +845,211 @@ SRS018_ClickHouse_Map_Data_Type = Specification( parent=None, children=None, headings=( - Heading(name='Revision History', level=1, num='1'), - Heading(name='Introduction', level=1, num='2'), - Heading(name='Requirements', level=1, num='3'), - Heading(name='General', level=2, num='3.1'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType', level=3, num='3.1.1'), - Heading(name='Performance', level=2, num='3.2'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.ArrayOfTuples', level=3, num='3.2.1'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.TupleOfArrays', level=3, num='3.2.2'), - Heading(name='Key Types', level=2, num='3.3'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Key.String', level=3, num='3.3.1'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Key.Integer', level=3, num='3.3.2'), - Heading(name='Value Types', level=2, num='3.4'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Value.String', level=3, num='3.4.1'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Value.Integer', level=3, num='3.4.2'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Value.Array', level=3, num='3.4.3'), - Heading(name='Invalid Types', level=2, num='3.5'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Invalid.Nullable', level=3, num='3.5.1'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Invalid.NothingNothing', level=3, num='3.5.2'), - Heading(name='Duplicated Keys', level=2, num='3.6'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.DuplicatedKeys', level=3, num='3.6.1'), - Heading(name='Array of Maps', level=2, num='3.7'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.ArrayOfMaps', level=3, num='3.7.1'), - Heading(name='Nested With Maps', level=2, num='3.8'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.NestedWithMaps', level=3, 
num='3.8.1'), - Heading(name='Value Retrieval', level=2, num='3.9'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval', level=3, num='3.9.1'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyInvalid', level=3, num='3.9.2'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyNotFound', level=3, num='3.9.3'), - Heading(name='Converting Tuple(Array, Array) to Map', level=2, num='3.10'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysToMap', level=3, num='3.10.1'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysMap.Invalid', level=3, num='3.10.2'), - Heading(name='Converting Array(Tuple(K,V)) to Map', level=2, num='3.11'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap', level=3, num='3.11.1'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap.Invalid', level=3, num='3.11.2'), - Heading(name='Keys and Values Subcolumns', level=2, num='3.12'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys', level=3, num='3.12.1'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.ArrayFunctions', level=3, num='3.12.2'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.InlineDefinedMap', level=3, num='3.12.3'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values', level=3, num='3.12.4'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.ArrayFunctions', level=3, num='3.12.5'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.InlineDefinedMap', level=3, num='3.12.6'), - Heading(name='Functions', level=2, num='3.13'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.InlineDefinedMap', level=3, num='3.13.1'), - Heading(name='`length`', level=3, num='3.13.2'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Length', level=4, num='3.13.2.1'), - Heading(name='`empty`', level=3, num='3.13.3'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Empty', level=4, num='3.13.3.1'), - Heading(name='`notEmpty`', level=3, num='3.13.4'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.NotEmpty', level=4, num='3.13.4.1'), - Heading(name='`map`', level=3, num='3.13.5'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map', level=4, num='3.13.5.1'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.InvalidNumberOfArguments', level=4, num='3.13.5.2'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MixedKeyOrValueTypes', level=4, num='3.13.5.3'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapAdd', level=4, num='3.13.5.4'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapSubstract', level=4, num='3.13.5.5'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapPopulateSeries', level=4, num='3.13.5.6'), - Heading(name='`mapContains`', level=3, num='3.13.6'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapContains', level=4, num='3.13.6.1'), - Heading(name='`mapKeys`', level=3, num='3.13.7'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapKeys', level=4, num='3.13.7.1'), - Heading(name='`mapValues`', level=3, num='3.13.8'), - Heading(name='RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapValues', level=4, num='3.13.8.1'), + Heading(name="Revision History", level=1, num="1"), + Heading(name="Introduction", level=1, num="2"), + 
Heading(name="Requirements", level=1, num="3"), + Heading(name="General", level=2, num="3.1"), + Heading(name="RQ.SRS-018.ClickHouse.Map.DataType", level=3, num="3.1.1"), + Heading(name="Performance", level=2, num="3.2"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.ArrayOfTuples", + level=3, + num="3.2.1", ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Performance.Vs.TupleOfArrays", + level=3, + num="3.2.2", + ), + Heading(name="Key Types", level=2, num="3.3"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Key.String", level=3, num="3.3.1" + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Key.Integer", level=3, num="3.3.2" + ), + Heading(name="Value Types", level=2, num="3.4"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.String", level=3, num="3.4.1" + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.Integer", + level=3, + num="3.4.2", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.Array", level=3, num="3.4.3" + ), + Heading(name="Invalid Types", level=2, num="3.5"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Invalid.Nullable", + level=3, + num="3.5.1", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Invalid.NothingNothing", + level=3, + num="3.5.2", + ), + Heading(name="Duplicated Keys", level=2, num="3.6"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.DuplicatedKeys", + level=3, + num="3.6.1", + ), + Heading(name="Array of Maps", level=2, num="3.7"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.ArrayOfMaps", level=3, num="3.7.1" + ), + Heading(name="Nested With Maps", level=2, num="3.8"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.NestedWithMaps", + level=3, + num="3.8.1", + ), + Heading(name="Value Retrieval", level=2, num="3.9"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval", + level=3, + num="3.9.1", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyInvalid", + level=3, + num="3.9.2", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Value.Retrieval.KeyNotFound", + level=3, + num="3.9.3", + ), + Heading(name="Converting Tuple(Array, Array) to Map", level=2, num="3.10"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysToMap", + level=3, + num="3.10.1", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.TupleOfArraysMap.Invalid", + level=3, + num="3.10.2", + ), + Heading(name="Converting Array(Tuple(K,V)) to Map", level=2, num="3.11"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap", + level=3, + num="3.11.1", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Conversion.From.ArrayOfTuplesToMap.Invalid", + level=3, + num="3.11.2", + ), + Heading(name="Keys and Values Subcolumns", level=2, num="3.12"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys", + level=3, + num="3.12.1", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.ArrayFunctions", + level=3, + num="3.12.2", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Keys.InlineDefinedMap", + level=3, + num="3.12.3", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values", + level=3, + num="3.12.4", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.ArrayFunctions", + level=3, + num="3.12.5", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.SubColumns.Values.InlineDefinedMap", + level=3, + 
num="3.12.6", + ), + Heading(name="Functions", level=2, num="3.13"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.InlineDefinedMap", + level=3, + num="3.13.1", + ), + Heading(name="`length`", level=3, num="3.13.2"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Length", + level=4, + num="3.13.2.1", + ), + Heading(name="`empty`", level=3, num="3.13.3"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Empty", + level=4, + num="3.13.3.1", + ), + Heading(name="`notEmpty`", level=3, num="3.13.4"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.NotEmpty", + level=4, + num="3.13.4.1", + ), + Heading(name="`map`", level=3, num="3.13.5"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map", + level=4, + num="3.13.5.1", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.InvalidNumberOfArguments", + level=4, + num="3.13.5.2", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MixedKeyOrValueTypes", + level=4, + num="3.13.5.3", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapAdd", + level=4, + num="3.13.5.4", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapSubstract", + level=4, + num="3.13.5.5", + ), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.Map.MapPopulateSeries", + level=4, + num="3.13.5.6", + ), + Heading(name="`mapContains`", level=3, num="3.13.6"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapContains", + level=4, + num="3.13.6.1", + ), + Heading(name="`mapKeys`", level=3, num="3.13.7"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapKeys", + level=4, + num="3.13.7.1", + ), + Heading(name="`mapValues`", level=3, num="3.13.8"), + Heading( + name="RQ.SRS-018.ClickHouse.Map.DataType.Functions.MapValues", + level=4, + num="3.13.8.1", + ), + ), requirements=( RQ_SRS_018_ClickHouse_Map_DataType, RQ_SRS_018_ClickHouse_Map_DataType_Performance_Vs_ArrayOfTuples, @@ -910,8 +1090,8 @@ SRS018_ClickHouse_Map_Data_Type = Specification( RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapContains, RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapKeys, RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapValues, - ), - content=''' + ), + content=""" # SRS018 ClickHouse Map Data Type # Software Requirements Specification @@ -1424,4 +1604,5 @@ SELECT mapValues(a) from table_map; [Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/map_type/requirements/requirements.md [Git]: https://git-scm.com/ [GitHub]: https://github.com -''') +""", +) diff --git a/tests/testflows/map_type/tests/common.py b/tests/testflows/map_type/tests/common.py index 6ce1b6ab8a6..754d5b75a38 100644 --- a/tests/testflows/map_type/tests/common.py +++ b/tests/testflows/map_type/tests/common.py @@ -5,17 +5,20 @@ from testflows.core import * from testflows.core.name import basename, parentname from testflows._core.testtype import TestSubType + def getuid(): if current().subtype == TestSubType.Example: - testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}" + testname = ( + f"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}" + ) else: testname = f"{basename(current().name).replace(' ', '_').replace(',','')}" - return testname + "_" + str(uuid.uuid1()).replace('-', '_') + return testname + "_" + str(uuid.uuid1()).replace("-", "_") + @TestStep(Given) def create_table(self, name, statement, on_cluster=False): - 
"""Create table. - """ + """Create table.""" node = current().context.node try: with Given(f"I have a {name} table"): diff --git a/tests/testflows/map_type/tests/feature.py b/tests/testflows/map_type/tests/feature.py index 5d7c900d591..0aee235c1ed 100755 --- a/tests/testflows/map_type/tests/feature.py +++ b/tests/testflows/map_type/tests/feature.py @@ -7,10 +7,10 @@ from testflows.asserts import error from map_type.requirements import * from map_type.tests.common import * + @TestOutline def select_map(self, map, output, exitcode=0, message=None): - """Create a map using select statement. - """ + """Create a map using select statement.""" node = self.context.node with When("I create a map using select", description=map): @@ -19,10 +19,20 @@ def select_map(self, map, output, exitcode=0, message=None): with Then("I expect output to match", description=output): assert r.output == output, error() + @TestOutline -def table_map(self, type, data, select, filter, exitcode, message, check_insert=False, order_by=None): - """Check using a map column in a table. - """ +def table_map( + self, + type, + data, + select, + filter, + exitcode, + message, + check_insert=False, + order_by=None, +): + """Check using a map column in a table.""" uid = getuid() node = self.context.node @@ -30,344 +40,973 @@ def table_map(self, type, data, select, filter, exitcode, message, check_insert= order_by = "m" with Given(f"table definition with {type}"): - sql = "CREATE TABLE {name} (m " + type + ") ENGINE = MergeTree() ORDER BY " + order_by + sql = ( + "CREATE TABLE {name} (m " + + type + + ") ENGINE = MergeTree() ORDER BY " + + order_by + ) with And(f"I create a table", description=sql): table = create_table(name=uid, statement=sql) with When("I insert data into the map column"): if check_insert: - node.query(f"INSERT INTO {table} VALUES {data}", exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table} VALUES {data}", exitcode=exitcode, message=message + ) else: node.query(f"INSERT INTO {table} VALUES {data}") if not check_insert: with And("I try to read from the table"): - node.query(f"SELECT {select} FROM {table} WHERE {filter} FORMAT JSONEachRow", exitcode=exitcode, message=message) + node.query( + f"SELECT {select} FROM {table} WHERE {filter} FORMAT JSONEachRow", + exitcode=exitcode, + message=message, + ) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Key_String("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Key_String("1.0")) +@Examples( + "map output", + [ + ("map('',1)", "{'':1}", Name("empty string")), + ("map('hello',1)", "{'hello':1}", Name("non-empty string")), + ("map('Gãńdåłf_Thê_Gręât',1)", "{'Gãńdåłf_Thê_Gręât':1}", Name("utf-8 string")), + ("map('hello there',1)", "{'hello there':1}", Name("multi word string")), + ("map('hello',1,'there',2)", "{'hello':1,'there':2}", Name("multiple keys")), + ("map(toString(1),1)", "{'1':1}", Name("toString")), + ("map(toFixedString('1',1),1)", "{'1':1}", Name("toFixedString")), + ("map(toNullable('1'),1)", "{'1':1}", Name("Nullable")), + ("map(toNullable(NULL),1)", "{NULL:1}", Name("Nullable(NULL)")), + ("map(toLowCardinality('1'),1)", "{'1':1}", Name("LowCardinality(String)")), + ( + "map(toLowCardinality(toFixedString('1',1)),1)", + "{'1':1}", + Name("LowCardinality(FixedString)"), + ), + ], + row_format="%20s,%20s", ) -@Examples("map output", [ - ("map('',1)", "{'':1}", Name("empty string")), - ("map('hello',1)", "{'hello':1}", Name("non-empty string")), - ("map('Gãńdåłf_Thê_Gręât',1)", 
"{'Gãńdåłf_Thê_Gręât':1}", Name("utf-8 string")), - ("map('hello there',1)", "{'hello there':1}", Name("multi word string")), - ("map('hello',1,'there',2)", "{'hello':1,'there':2}", Name("multiple keys")), - ("map(toString(1),1)", "{'1':1}", Name("toString")), - ("map(toFixedString('1',1),1)", "{'1':1}", Name("toFixedString")), - ("map(toNullable('1'),1)", "{'1':1}", Name("Nullable")), - ("map(toNullable(NULL),1)", "{NULL:1}", Name("Nullable(NULL)")), - ("map(toLowCardinality('1'),1)", "{'1':1}", Name("LowCardinality(String)")), - ("map(toLowCardinality(toFixedString('1',1)),1)", "{'1':1}", Name("LowCardinality(FixedString)")), -], row_format="%20s,%20s") def select_map_with_key_string(self, map, output): - """Create a map using select that has key string type. - """ + """Create a map using select that has key string type.""" select_map(map=map, output=output) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_String("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_String("1.0")) +@Examples( + "map output", + [ + ("map('key','')", "{'key':''}", Name("empty string")), + ("map('key','hello')", "{'key':'hello'}", Name("non-empty string")), + ( + "map('key','Gãńdåłf_Thê_Gręât')", + "{'key':'Gãńdåłf_Thê_Gręât'}", + Name("utf-8 string"), + ), + ( + "map('key','hello there')", + "{'key':'hello there'}", + Name("multi word string"), + ), + ( + "map('key','hello','key2','there')", + "{'key':'hello','key2':'there'}", + Name("multiple keys"), + ), + ("map('key',toString(1))", "{'key':'1'}", Name("toString")), + ("map('key',toFixedString('1',1))", "{'key':'1'}", Name("toFixedString")), + ("map('key',toNullable('1'))", "{'key':'1'}", Name("Nullable")), + ("map('key',toNullable(NULL))", "{'key':NULL}", Name("Nullable(NULL)")), + ( + "map('key',toLowCardinality('1'))", + "{'key':'1'}", + Name("LowCardinality(String)"), + ), + ( + "map('key',toLowCardinality(toFixedString('1',1)))", + "{'key':'1'}", + Name("LowCardinality(FixedString)"), + ), + ], + row_format="%20s,%20s", ) -@Examples("map output", [ - ("map('key','')", "{'key':''}", Name("empty string")), - ("map('key','hello')", "{'key':'hello'}", Name("non-empty string")), - ("map('key','Gãńdåłf_Thê_Gręât')", "{'key':'Gãńdåłf_Thê_Gręât'}", Name("utf-8 string")), - ("map('key','hello there')", "{'key':'hello there'}", Name("multi word string")), - ("map('key','hello','key2','there')", "{'key':'hello','key2':'there'}", Name("multiple keys")), - ("map('key',toString(1))", "{'key':'1'}", Name("toString")), - ("map('key',toFixedString('1',1))", "{'key':'1'}", Name("toFixedString")), - ("map('key',toNullable('1'))", "{'key':'1'}", Name("Nullable")), - ("map('key',toNullable(NULL))", "{'key':NULL}", Name("Nullable(NULL)")), - ("map('key',toLowCardinality('1'))", "{'key':'1'}", Name("LowCardinality(String)")), - ("map('key',toLowCardinality(toFixedString('1',1)))", "{'key':'1'}", Name("LowCardinality(FixedString)")), -], row_format="%20s,%20s") def select_map_with_value_string(self, map, output): - """Create a map using select that has value string type. 
- """ + """Create a map using select that has value string type.""" select_map(map=map, output=output) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_Array("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_Array("1.0")) +@Examples( + "map output", + [ + ("map('key',[])", "{'key':[]}", Name("empty Array")), + ("map('key',[1,2,3])", "{'key':[1,2,3]}", Name("non-empty array of ints")), + ( + "map('key',['1','2','3'])", + "{'key':['1','2','3']}", + Name("non-empty array of strings"), + ), + ( + "map('key',[map(1,2),map(2,3)])", + "{'key':[{1:2},{2:3}]}", + Name("non-empty array of maps"), + ), + ( + "map('key',[map(1,[map(1,[1])]),map(2,[map(2,[3])])])", + "{'key':[{1:[{1:[1]}]},{2:[{2:[3]}]}]}", + Name("non-empty array of maps of array of maps"), + ), + ], ) -@Examples("map output", [ - ("map('key',[])", "{'key':[]}", Name("empty Array")), - ("map('key',[1,2,3])", "{'key':[1,2,3]}", Name("non-empty array of ints")), - ("map('key',['1','2','3'])", "{'key':['1','2','3']}", Name("non-empty array of strings")), - ("map('key',[map(1,2),map(2,3)])", "{'key':[{1:2},{2:3}]}", Name("non-empty array of maps")), - ("map('key',[map(1,[map(1,[1])]),map(2,[map(2,[3])])])", "{'key':[{1:[{1:[1]}]},{2:[{2:[3]}]}]}", Name("non-empty array of maps of array of maps")), -]) def select_map_with_value_array(self, map, output): - """Create a map using select that has value array type. - """ + """Create a map using select that has value array type.""" select_map(map=map, output=output) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_Integer("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_Integer("1.0")) +@Examples( + "map output", + [ + ("(map(1,127,2,0,3,-128))", "{1:127,2:0,3:-128}", Name("Int8")), + ("(map(1,0,2,255))", "{1:0,2:255}", Name("UInt8")), + ("(map(1,32767,2,0,3,-32768))", "{1:32767,2:0,3:-32768}", Name("Int16")), + ("(map(1,0,2,65535))", "{1:0,2:65535}", Name("UInt16")), + ( + "(map(1,2147483647,2,0,3,-2147483648))", + "{1:2147483647,2:0,3:-2147483648}", + Name("Int32"), + ), + ("(map(1,0,2,4294967295))", "{1:0,2:4294967295}", Name("UInt32")), + ( + "(map(1,9223372036854775807,2,0,3,-9223372036854775808))", + '{1:"9223372036854775807",2:"0",3:"-9223372036854775808"}', + Name("Int64"), + ), + ( + "(map(1,0,2,18446744073709551615))", + "{1:0,2:18446744073709551615}", + Name("UInt64"), + ), + ( + "(map(1,170141183460469231731687303715884105727,2,0,3,-170141183460469231731687303715884105728))", + "{1:1.7014118346046923e38,2:0,3:-1.7014118346046923e38}", + Name("Int128"), + ), + ( + "(map(1,57896044618658097711785492504343953926634992332820282019728792003956564819967,2,0,3,-57896044618658097711785492504343953926634992332820282019728792003956564819968))", + "{1:5.78960446186581e76,2:0,3:-5.78960446186581e76}", + Name("Int256"), + ), + ( + "(map(1,0,2,115792089237316195423570985008687907853269984665640564039457584007913129639935))", + "{1:0,2:1.157920892373162e77}", + Name("UInt256"), + ), + ("(map(1,toNullable(1)))", "{1:1}", Name("toNullable")), + ("(map(1,toNullable(NULL)))", "{1:NULL}", Name("toNullable(NULL)")), + ], ) -@Examples("map output", [ - ("(map(1,127,2,0,3,-128))", '{1:127,2:0,3:-128}', Name("Int8")), - ("(map(1,0,2,255))", '{1:0,2:255}', Name("UInt8")), - ("(map(1,32767,2,0,3,-32768))", '{1:32767,2:0,3:-32768}', Name("Int16")), - ("(map(1,0,2,65535))", '{1:0,2:65535}', Name("UInt16")), - ("(map(1,2147483647,2,0,3,-2147483648))", '{1:2147483647,2:0,3:-2147483648}', Name("Int32")), - 
("(map(1,0,2,4294967295))", '{1:0,2:4294967295}', Name("UInt32")), - ("(map(1,9223372036854775807,2,0,3,-9223372036854775808))", '{1:"9223372036854775807",2:"0",3:"-9223372036854775808"}', Name("Int64")), - ("(map(1,0,2,18446744073709551615))", '{1:0,2:18446744073709551615}', Name("UInt64")), - ("(map(1,170141183460469231731687303715884105727,2,0,3,-170141183460469231731687303715884105728))", '{1:1.7014118346046923e38,2:0,3:-1.7014118346046923e38}', Name("Int128")), - ("(map(1,57896044618658097711785492504343953926634992332820282019728792003956564819967,2,0,3,-57896044618658097711785492504343953926634992332820282019728792003956564819968))", '{1:5.78960446186581e76,2:0,3:-5.78960446186581e76}', Name("Int256")), - ("(map(1,0,2,115792089237316195423570985008687907853269984665640564039457584007913129639935))", '{1:0,2:1.157920892373162e77}', Name("UInt256")), - ("(map(1,toNullable(1)))", '{1:1}', Name("toNullable")), - ("(map(1,toNullable(NULL)))", '{1:NULL}', Name("toNullable(NULL)")), -]) def select_map_with_value_integer(self, map, output): - """Create a map using select that has value integer type. - """ + """Create a map using select that has value integer type.""" select_map(map=map, output=output) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Key_Integer("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Key_Integer("1.0")) +@Examples( + "map output", + [ + ("(map(127,1,0,1,-128,1))", "{127:1,0:1,-128:1}", Name("Int8")), + ("(map(0,1,255,1))", "{0:1,255:1}", Name("UInt8")), + ("(map(32767,1,0,1,-32768,1))", "{32767:1,0:1,-32768:1}", Name("Int16")), + ("(map(0,1,65535,1))", "{0:1,65535:1}", Name("UInt16")), + ( + "(map(2147483647,1,0,1,-2147483648,1))", + "{2147483647:1,0:1,-2147483648:1}", + Name("Int32"), + ), + ("(map(0,1,4294967295,1))", "{0:1,4294967295:1}", Name("UInt32")), + ( + "(map(9223372036854775807,1,0,1,-9223372036854775808,1))", + '{"9223372036854775807":1,"0":1,"-9223372036854775808":1}', + Name("Int64"), + ), + ( + "(map(0,1,18446744073709551615,1))", + "{0:1,18446744073709551615:1}", + Name("UInt64"), + ), + ( + "(map(170141183460469231731687303715884105727,1,0,1,-170141183460469231731687303715884105728,1))", + "{1.7014118346046923e38:1,0:1,-1.7014118346046923e38:1}", + Name("Int128"), + ), + ( + "(map(57896044618658097711785492504343953926634992332820282019728792003956564819967,1,0,1,-57896044618658097711785492504343953926634992332820282019728792003956564819968,1))", + "{5.78960446186581e76:1,0:1,-5.78960446186581e76:1}", + Name("Int256"), + ), + ( + "(map(0,1,115792089237316195423570985008687907853269984665640564039457584007913129639935,1))", + "{0:1,1.157920892373162e77:1}", + Name("UInt256"), + ), + ("(map(toNullable(1),1))", "{1:1}", Name("toNullable")), + ("(map(toNullable(NULL),1))", "{NULL:1}", Name("toNullable(NULL)")), + ], ) -@Examples("map output", [ - ("(map(127,1,0,1,-128,1))", '{127:1,0:1,-128:1}', Name("Int8")), - ("(map(0,1,255,1))", '{0:1,255:1}', Name("UInt8")), - ("(map(32767,1,0,1,-32768,1))", '{32767:1,0:1,-32768:1}', Name("Int16")), - ("(map(0,1,65535,1))", '{0:1,65535:1}', Name("UInt16")), - ("(map(2147483647,1,0,1,-2147483648,1))", '{2147483647:1,0:1,-2147483648:1}', Name("Int32")), - ("(map(0,1,4294967295,1))", '{0:1,4294967295:1}', Name("UInt32")), - ("(map(9223372036854775807,1,0,1,-9223372036854775808,1))", '{"9223372036854775807":1,"0":1,"-9223372036854775808":1}', Name("Int64")), - ("(map(0,1,18446744073709551615,1))", '{0:1,18446744073709551615:1}', Name("UInt64")), - 
("(map(170141183460469231731687303715884105727,1,0,1,-170141183460469231731687303715884105728,1))", '{1.7014118346046923e38:1,0:1,-1.7014118346046923e38:1}', Name("Int128")), - ("(map(57896044618658097711785492504343953926634992332820282019728792003956564819967,1,0,1,-57896044618658097711785492504343953926634992332820282019728792003956564819968,1))", '{5.78960446186581e76:1,0:1,-5.78960446186581e76:1}', Name("Int256")), - ("(map(0,1,115792089237316195423570985008687907853269984665640564039457584007913129639935,1))", '{0:1,1.157920892373162e77:1}', Name("UInt256")), - ("(map(toNullable(1),1))", '{1:1}', Name("toNullable")), - ("(map(toNullable(NULL),1))", '{NULL:1}', Name("toNullable(NULL)")), -]) def select_map_with_key_integer(self, map, output): - """Create a map using select that has key integer type. - """ + """Create a map using select that has key integer type.""" select_map(map=map, output=output) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Key_String("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Key_String("1.0")) +@Examples( + "type data output", + [ + ( + "Map(String, Int8)", + "('2020-01-01', map('',1))", + '{"d":"2020-01-01","m":{"":1}}', + Name("empty string"), + ), + ( + "Map(String, Int8)", + "('2020-01-01', map('hello',1))", + '{"d":"2020-01-01","m":{"hello":1}}', + Name("non-empty string"), + ), + ( + "Map(String, Int8)", + "('2020-01-01', map('Gãńdåłf_Thê_Gręât',1))", + '{"d":"2020-01-01","m":{"Gãńdåłf_Thê_Gręât":1}}', + Name("utf-8 string"), + ), + ( + "Map(String, Int8)", + "('2020-01-01', map('hello there',1))", + '{"d":"2020-01-01","m":{"hello there":1}}', + Name("multi word string"), + ), + ( + "Map(String, Int8)", + "('2020-01-01', map('hello',1,'there',2))", + '{"d":"2020-01-01","m":{"hello":1,"there":2}}', + Name("multiple keys"), + ), + ( + "Map(String, Int8)", + "('2020-01-01', map(toString(1),1))", + '{"d":"2020-01-01","m":{"1":1}}', + Name("toString"), + ), + ( + "Map(FixedString(1), Int8)", + "('2020-01-01', map(toFixedString('1',1),1))", + '{"d":"2020-01-01","m":{"1":1}}', + Name("FixedString"), + ), + ( + "Map(Nullable(String), Int8)", + "('2020-01-01', map(toNullable('1'),1))", + '{"d":"2020-01-01","m":{"1":1}}', + Name("Nullable"), + ), + ( + "Map(Nullable(String), Int8)", + "('2020-01-01', map(toNullable(NULL),1))", + '{"d":"2020-01-01","m":{null:1}}', + Name("Nullable(NULL)"), + ), + ( + "Map(LowCardinality(String), Int8)", + "('2020-01-01', map(toLowCardinality('1'),1))", + '{"d":"2020-01-01","m":{"1":1}}', + Name("LowCardinality(String)"), + ), + ( + "Map(LowCardinality(String), Int8)", + "('2020-01-01', map('1',1))", + '{"d":"2020-01-01","m":{"1":1}}', + Name("LowCardinality(String) cast from String"), + ), + ( + "Map(LowCardinality(String), LowCardinality(String))", + "('2020-01-01', map('1','1'))", + '{"d":"2020-01-01","m":{"1":"1"}}', + Name("LowCardinality(String) for key and value"), + ), + ( + "Map(LowCardinality(FixedString(1)), Int8)", + "('2020-01-01', map(toLowCardinality(toFixedString('1',1)),1))", + '{"d":"2020-01-01","m":{"1":1}}', + Name("LowCardinality(FixedString)"), + ), + ], ) -@Examples("type data output", [ - ("Map(String, Int8)", "('2020-01-01', map('',1))", '{"d":"2020-01-01","m":{"":1}}', Name("empty string")), - ("Map(String, Int8)", "('2020-01-01', map('hello',1))", '{"d":"2020-01-01","m":{"hello":1}}', Name("non-empty string")), - ("Map(String, Int8)", "('2020-01-01', map('Gãńdåłf_Thê_Gręât',1))", '{"d":"2020-01-01","m":{"Gãńdåłf_Thê_Gręât":1}}', Name("utf-8 string")), - 
("Map(String, Int8)", "('2020-01-01', map('hello there',1))", '{"d":"2020-01-01","m":{"hello there":1}}', Name("multi word string")), - ("Map(String, Int8)", "('2020-01-01', map('hello',1,'there',2))", '{"d":"2020-01-01","m":{"hello":1,"there":2}}', Name("multiple keys")), - ("Map(String, Int8)", "('2020-01-01', map(toString(1),1))", '{"d":"2020-01-01","m":{"1":1}}', Name("toString")), - ("Map(FixedString(1), Int8)", "('2020-01-01', map(toFixedString('1',1),1))", '{"d":"2020-01-01","m":{"1":1}}', Name("FixedString")), - ("Map(Nullable(String), Int8)", "('2020-01-01', map(toNullable('1'),1))", '{"d":"2020-01-01","m":{"1":1}}', Name("Nullable")), - ("Map(Nullable(String), Int8)", "('2020-01-01', map(toNullable(NULL),1))", '{"d":"2020-01-01","m":{null:1}}', Name("Nullable(NULL)")), - ("Map(LowCardinality(String), Int8)", "('2020-01-01', map(toLowCardinality('1'),1))", '{"d":"2020-01-01","m":{"1":1}}', Name("LowCardinality(String)")), - ("Map(LowCardinality(String), Int8)", "('2020-01-01', map('1',1))", '{"d":"2020-01-01","m":{"1":1}}', Name("LowCardinality(String) cast from String")), - ("Map(LowCardinality(String), LowCardinality(String))", "('2020-01-01', map('1','1'))", '{"d":"2020-01-01","m":{"1":"1"}}', Name("LowCardinality(String) for key and value")), - ("Map(LowCardinality(FixedString(1)), Int8)", "('2020-01-01', map(toLowCardinality(toFixedString('1',1)),1))", '{"d":"2020-01-01","m":{"1":1}}', Name("LowCardinality(FixedString)")), -]) def table_map_with_key_string(self, type, data, output): - """Check what values we can insert into map type column with key string. - """ + """Check what values we can insert into map type column with key string.""" insert_into_table(type=type, data=data, output=output) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Key_String("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Key_String("1.0")) +@Examples( + "type data output select", + [ + ( + "Map(String, Int8)", + "('2020-01-01', map('',1))", + '{"m":1}', + "m[''] AS m", + Name("empty string"), + ), + ( + "Map(String, Int8)", + "('2020-01-01', map('hello',1))", + '{"m":1}', + "m['hello'] AS m", + Name("non-empty string"), + ), + ( + "Map(String, Int8)", + "('2020-01-01', map('Gãńdåłf_Thê_Gręât',1))", + '{"m":1}', + "m['Gãńdåłf_Thê_Gręât'] AS m", + Name("utf-8 string"), + ), + ( + "Map(String, Int8)", + "('2020-01-01', map('hello there',1))", + '{"m":1}', + "m['hello there'] AS m", + Name("multi word string"), + ), + ( + "Map(String, Int8)", + "('2020-01-01', map('hello',1,'there',2))", + '{"m":1}', + "m['hello'] AS m", + Name("multiple keys"), + ), + ( + "Map(String, Int8)", + "('2020-01-01', map(toString(1),1))", + '{"m":1}', + "m['1'] AS m", + Name("toString"), + ), + ( + "Map(FixedString(1), Int8)", + "('2020-01-01', map(toFixedString('1',1),1))", + '{"m":1}', + "m['1'] AS m", + Name("FixedString"), + ), + ( + "Map(Nullable(String), Int8)", + "('2020-01-01', map(toNullable('1'),1))", + '{"m":1}}', + "m['1'] AS m", + Name("Nullable"), + ), + ( + "Map(Nullable(String), Int8)", + "('2020-01-01', map(toNullable(NULL),1))", + '{"m":1}', + "m[null] AS m", + Name("Nullable(NULL)"), + ), + ( + "Map(LowCardinality(String), Int8)", + "('2020-01-01', map(toLowCardinality('1'),1))", + '{"m":1}}', + "m['1'] AS m", + Name("LowCardinality(String)"), + ), + ( + "Map(LowCardinality(String), Int8)", + "('2020-01-01', map('1',1))", + '{"m":1}', + "m['1'] AS m", + Name("LowCardinality(String) cast from String"), + ), + ( + "Map(LowCardinality(String), LowCardinality(String))", + 
"('2020-01-01', map('1','1'))", + '{"m":"1"}', + "m['1'] AS m", + Name("LowCardinality(String) for key and value"), + ), + ( + "Map(LowCardinality(FixedString(1)), Int8)", + "('2020-01-01', map(toLowCardinality(toFixedString('1',1)),1))", + '{"m":1}', + "m['1'] AS m", + Name("LowCardinality(FixedString)"), + ), + ], ) -@Examples("type data output select", [ - ("Map(String, Int8)", "('2020-01-01', map('',1))", '{"m":1}', "m[''] AS m", Name("empty string")), - ("Map(String, Int8)", "('2020-01-01', map('hello',1))", '{"m":1}', "m['hello'] AS m", Name("non-empty string")), - ("Map(String, Int8)", "('2020-01-01', map('Gãńdåłf_Thê_Gręât',1))", '{"m":1}', "m['Gãńdåłf_Thê_Gręât'] AS m", Name("utf-8 string")), - ("Map(String, Int8)", "('2020-01-01', map('hello there',1))", '{"m":1}', "m['hello there'] AS m", Name("multi word string")), - ("Map(String, Int8)", "('2020-01-01', map('hello',1,'there',2))", '{"m":1}', "m['hello'] AS m", Name("multiple keys")), - ("Map(String, Int8)", "('2020-01-01', map(toString(1),1))", '{"m":1}', "m['1'] AS m", Name("toString")), - ("Map(FixedString(1), Int8)", "('2020-01-01', map(toFixedString('1',1),1))", '{"m":1}', "m['1'] AS m", Name("FixedString")), - ("Map(Nullable(String), Int8)", "('2020-01-01', map(toNullable('1'),1))", '{"m":1}}', "m['1'] AS m", Name("Nullable")), - ("Map(Nullable(String), Int8)", "('2020-01-01', map(toNullable(NULL),1))", '{"m":1}', "m[null] AS m", Name("Nullable(NULL)")), - ("Map(LowCardinality(String), Int8)", "('2020-01-01', map(toLowCardinality('1'),1))", '{"m":1}}', "m['1'] AS m", Name("LowCardinality(String)")), - ("Map(LowCardinality(String), Int8)", "('2020-01-01', map('1',1))", '{"m":1}', "m['1'] AS m", Name("LowCardinality(String) cast from String")), - ("Map(LowCardinality(String), LowCardinality(String))", "('2020-01-01', map('1','1'))", '{"m":"1"}', "m['1'] AS m", Name("LowCardinality(String) for key and value")), - ("Map(LowCardinality(FixedString(1)), Int8)", "('2020-01-01', map(toLowCardinality(toFixedString('1',1)),1))", '{"m":1}', "m['1'] AS m", Name("LowCardinality(FixedString)")), -]) def table_map_select_key_with_key_string(self, type, data, output, select): - """Check what values we can insert into map type column with key string and if key can be selected. 
- """ + """Check what values we can insert into map type column with key string and if key can be selected.""" insert_into_table(type=type, data=data, output=output, select=select) -@TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_String("1.0") -) -@Examples("type data output", [ - ("Map(String, String)", "('2020-01-01', map('key',''))", '{"d":"2020-01-01","m":{"key":""}}', Name("empty string")), - ("Map(String, String)", "('2020-01-01', map('key','hello'))", '{"d":"2020-01-01","m":{"key":"hello"}}', Name("non-empty string")), - ("Map(String, String)", "('2020-01-01', map('key','Gãńdåłf_Thê_Gręât'))", '{"d":"2020-01-01","m":{"key":"Gãńdåłf_Thê_Gręât"}}', Name("utf-8 string")), - ("Map(String, String)", "('2020-01-01', map('key', 'hello there'))", '{"d":"2020-01-01","m":{"key":"hello there"}}', Name("multi word string")), - ("Map(String, String)", "('2020-01-01', map('key','hello','key2','there'))", '{"d":"2020-01-01","m":{"key":"hello","key2":"there"}}', Name("multiple keys")), - ("Map(String, String)", "('2020-01-01', map('key', toString(1)))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("toString")), - ("Map(String, FixedString(1))", "('2020-01-01', map('key',toFixedString('1',1)))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("FixedString")), - ("Map(String, Nullable(String))", "('2020-01-01', map('key',toNullable('1')))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("Nullable")), - ("Map(String, Nullable(String))", "('2020-01-01', map('key',toNullable(NULL)))", '{"d":"2020-01-01","m":{"key":null}}', Name("Nullable(NULL)")), - ("Map(String, LowCardinality(String))", "('2020-01-01', map('key',toLowCardinality('1')))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("LowCardinality(String)")), - ("Map(String, LowCardinality(String))", "('2020-01-01', map('key','1'))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("LowCardinality(String) cast from String")), - ("Map(LowCardinality(String), LowCardinality(String))", "('2020-01-01', map('1','1'))", '{"d":"2020-01-01","m":{"1":"1"}}', Name("LowCardinality(String) for key and value")), - ("Map(String, LowCardinality(FixedString(1)))", "('2020-01-01', map('key',toLowCardinality(toFixedString('1',1))))", '{"d":"2020-01-01","m":{"key":"1"}}', Name("LowCardinality(FixedString)")) -]) -def table_map_with_value_string(self, type, data, output): - """Check what values we can insert into map type column with value string. 
- """ - insert_into_table(type=type, data=data, output=output) @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_String("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_String("1.0")) +@Examples( + "type data output", + [ + ( + "Map(String, String)", + "('2020-01-01', map('key',''))", + '{"d":"2020-01-01","m":{"key":""}}', + Name("empty string"), + ), + ( + "Map(String, String)", + "('2020-01-01', map('key','hello'))", + '{"d":"2020-01-01","m":{"key":"hello"}}', + Name("non-empty string"), + ), + ( + "Map(String, String)", + "('2020-01-01', map('key','Gãńdåłf_Thê_Gręât'))", + '{"d":"2020-01-01","m":{"key":"Gãńdåłf_Thê_Gręât"}}', + Name("utf-8 string"), + ), + ( + "Map(String, String)", + "('2020-01-01', map('key', 'hello there'))", + '{"d":"2020-01-01","m":{"key":"hello there"}}', + Name("multi word string"), + ), + ( + "Map(String, String)", + "('2020-01-01', map('key','hello','key2','there'))", + '{"d":"2020-01-01","m":{"key":"hello","key2":"there"}}', + Name("multiple keys"), + ), + ( + "Map(String, String)", + "('2020-01-01', map('key', toString(1)))", + '{"d":"2020-01-01","m":{"key":"1"}}', + Name("toString"), + ), + ( + "Map(String, FixedString(1))", + "('2020-01-01', map('key',toFixedString('1',1)))", + '{"d":"2020-01-01","m":{"key":"1"}}', + Name("FixedString"), + ), + ( + "Map(String, Nullable(String))", + "('2020-01-01', map('key',toNullable('1')))", + '{"d":"2020-01-01","m":{"key":"1"}}', + Name("Nullable"), + ), + ( + "Map(String, Nullable(String))", + "('2020-01-01', map('key',toNullable(NULL)))", + '{"d":"2020-01-01","m":{"key":null}}', + Name("Nullable(NULL)"), + ), + ( + "Map(String, LowCardinality(String))", + "('2020-01-01', map('key',toLowCardinality('1')))", + '{"d":"2020-01-01","m":{"key":"1"}}', + Name("LowCardinality(String)"), + ), + ( + "Map(String, LowCardinality(String))", + "('2020-01-01', map('key','1'))", + '{"d":"2020-01-01","m":{"key":"1"}}', + Name("LowCardinality(String) cast from String"), + ), + ( + "Map(LowCardinality(String), LowCardinality(String))", + "('2020-01-01', map('1','1'))", + '{"d":"2020-01-01","m":{"1":"1"}}', + Name("LowCardinality(String) for key and value"), + ), + ( + "Map(String, LowCardinality(FixedString(1)))", + "('2020-01-01', map('key',toLowCardinality(toFixedString('1',1))))", + '{"d":"2020-01-01","m":{"key":"1"}}', + Name("LowCardinality(FixedString)"), + ), + ], +) +def table_map_with_value_string(self, type, data, output): + """Check what values we can insert into map type column with value string.""" + insert_into_table(type=type, data=data, output=output) + + +@TestOutline(Scenario) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_String("1.0")) +@Examples( + "type data output", + [ + ( + "Map(String, String)", + "('2020-01-01', map('key',''))", + '{"m":""}', + Name("empty string"), + ), + ( + "Map(String, String)", + "('2020-01-01', map('key','hello'))", + '{"m":"hello"}', + Name("non-empty string"), + ), + ( + "Map(String, String)", + "('2020-01-01', map('key','Gãńdåłf_Thê_Gręât'))", + '{"m":"Gãńdåłf_Thê_Gręât"}', + Name("utf-8 string"), + ), + ( + "Map(String, String)", + "('2020-01-01', map('key', 'hello there'))", + '{"m":"hello there"}', + Name("multi word string"), + ), + ( + "Map(String, String)", + "('2020-01-01', map('key','hello','key2','there'))", + '{"m":"hello"}', + Name("multiple keys"), + ), + ( + "Map(String, String)", + "('2020-01-01', map('key', toString(1)))", + '{"m":"1"}', + Name("toString"), + ), + ( + "Map(String, FixedString(1))", + "('2020-01-01', 
map('key',toFixedString('1',1)))", + '{"m":"1"}', + Name("FixedString"), + ), + ( + "Map(String, Nullable(String))", + "('2020-01-01', map('key',toNullable('1')))", + '{"m":"1"}', + Name("Nullable"), + ), + ( + "Map(String, Nullable(String))", + "('2020-01-01', map('key',toNullable(NULL)))", + '{"m":null}', + Name("Nullable(NULL)"), + ), + ( + "Map(String, LowCardinality(String))", + "('2020-01-01', map('key',toLowCardinality('1')))", + '{"m":"1"}', + Name("LowCardinality(String)"), + ), + ( + "Map(String, LowCardinality(String))", + "('2020-01-01', map('key','1'))", + '{"m":"1"}', + Name("LowCardinality(String) cast from String"), + ), + ( + "Map(LowCardinality(String), LowCardinality(String))", + "('2020-01-01', map('key','1'))", + '{"m":"1"}', + Name("LowCardinality(String) for key and value"), + ), + ( + "Map(String, LowCardinality(FixedString(1)))", + "('2020-01-01', map('key',toLowCardinality(toFixedString('1',1))))", + '{"m":"1"}', + Name("LowCardinality(FixedString)"), + ), + ], ) -@Examples("type data output", [ - ("Map(String, String)", "('2020-01-01', map('key',''))", '{"m":""}', Name("empty string")), - ("Map(String, String)", "('2020-01-01', map('key','hello'))", '{"m":"hello"}', Name("non-empty string")), - ("Map(String, String)", "('2020-01-01', map('key','Gãńdåłf_Thê_Gręât'))", '{"m":"Gãńdåłf_Thê_Gręât"}', Name("utf-8 string")), - ("Map(String, String)", "('2020-01-01', map('key', 'hello there'))", '{"m":"hello there"}', Name("multi word string")), - ("Map(String, String)", "('2020-01-01', map('key','hello','key2','there'))", '{"m":"hello"}', Name("multiple keys")), - ("Map(String, String)", "('2020-01-01', map('key', toString(1)))", '{"m":"1"}', Name("toString")), - ("Map(String, FixedString(1))", "('2020-01-01', map('key',toFixedString('1',1)))", '{"m":"1"}', Name("FixedString")), - ("Map(String, Nullable(String))", "('2020-01-01', map('key',toNullable('1')))", '{"m":"1"}', Name("Nullable")), - ("Map(String, Nullable(String))", "('2020-01-01', map('key',toNullable(NULL)))", '{"m":null}', Name("Nullable(NULL)")), - ("Map(String, LowCardinality(String))", "('2020-01-01', map('key',toLowCardinality('1')))", '{"m":"1"}', Name("LowCardinality(String)")), - ("Map(String, LowCardinality(String))", "('2020-01-01', map('key','1'))", '{"m":"1"}', Name("LowCardinality(String) cast from String")), - ("Map(LowCardinality(String), LowCardinality(String))", "('2020-01-01', map('key','1'))", '{"m":"1"}', Name("LowCardinality(String) for key and value")), - ("Map(String, LowCardinality(FixedString(1)))", "('2020-01-01', map('key',toLowCardinality(toFixedString('1',1))))", '{"m":"1"}', Name("LowCardinality(FixedString)")) -]) def table_map_select_key_with_value_string(self, type, data, output): - """Check what values we can insert into map type column with value string and if it can be selected by key. 
- """ + """Check what values we can insert into map type column with value string and if it can be selected by key.""" insert_into_table(type=type, data=data, output=output, select="m['key'] AS m") + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_Integer("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_Integer("1.0")) +@Examples( + "type data output", + [ + ( + "Map(Int8, Int8)", + "('2020-01-01', map(1,127,2,0,3,-128))", + '{"d":"2020-01-01","m":{"1":127,"2":0,"3":-128}}', + Name("Int8"), + ), + ( + "Map(Int8, UInt8)", + "('2020-01-01', map(1,0,2,255))", + '{"d":"2020-01-01","m":{"1":0,"2":255}}', + Name("UInt8"), + ), + ( + "Map(Int8, Int16)", + "('2020-01-01', map(1,127,2,0,3,-128))", + '{"d":"2020-01-01","m":{"1":32767,"2":0,"3":-32768}}', + Name("Int16"), + ), + ( + "Map(Int8, UInt16)", + "('2020-01-01', map(1,0,2,65535))", + '{"d":"2020-01-01","m":{"1":0,"2":65535}}', + Name("UInt16"), + ), + ( + "Map(Int8, Int32)", + "('2020-01-01', map(1,127,2,0,3,-128))", + '{"d":"2020-01-01","m":{"1":2147483647,"2":0,"3":-2147483648}}', + Name("Int32"), + ), + ( + "Map(Int8, UInt32)", + "('2020-01-01', map(1,0,2,4294967295))", + '{"d":"2020-01-01","m":{"1":0,"2":4294967295}}', + Name("UInt32"), + ), + ( + "Map(Int8, Int64)", + "('2020-01-01', map(1,9223372036854775807,2,0,3,-9223372036854775808))", + '{"d":"2020-01-01","m":{1:"9223372036854775807",2:"0",3:"-9223372036854775808"}}', + Name("Int64"), + ), + ( + "Map(Int8, UInt64)", + "('2020-01-01', map(1,0,2,18446744073709551615))", + '{"d":"2020-01-01","m":{1:"0",2:"18446744073709551615"}}', + Name("UInt64"), + ), + ( + "Map(Int8, Int128)", + "('2020-01-01', map(1,170141183460469231731687303715884105727,2,0,3,-170141183460469231731687303715884105728))", + '{"d":"2020-01-01","m":{1:"170141183460469231731687303715884105727",2:"0",3:"-170141183460469231731687303715884105728"}}', + Name("Int128"), + ), + ( + "Map(Int8, Int256)", + "('2020-01-01', map(1,57896044618658097711785492504343953926634992332820282019728792003956564819967,2,0,3,-57896044618658097711785492504343953926634992332820282019728792003956564819968))", + '{"d":"2020-01-01","m":{1:"57896044618658097711785492504343953926634992332820282019728792003956564819967",2:"0",3:"-57896044618658097711785492504343953926634992332820282019728792003956564819968"}}', + Name("Int256"), + ), + ( + "Map(Int8, UInt256)", + "('2020-01-01', map(1,0,2,115792089237316195423570985008687907853269984665640564039457584007913129639935))", + '{"d":"2020-01-01","m":{1:"0",2:"115792089237316195423570985008687907853269984665640564039457584007913129639935"}}', + Name("UInt256"), + ), + ( + "Map(Int8, Nullable(Int8))", + "('2020-01-01', map(1,toNullable(1)))", + '{"d":"2020-01-01","m":{"1":1}}', + Name("toNullable"), + ), + ( + "Map(Int8, Nullable(Int8))", + "('2020-01-01', map(1,toNullable(NULL)))", + '{"d":"2020-01-01","m":{"1":null}}', + Name("toNullable(NULL)"), + ), + ], ) -@Examples("type data output", [ - ("Map(Int8, Int8)", "('2020-01-01', map(1,127,2,0,3,-128))", '{"d":"2020-01-01","m":{"1":127,"2":0,"3":-128}}', Name("Int8")), - ("Map(Int8, UInt8)", "('2020-01-01', map(1,0,2,255))", '{"d":"2020-01-01","m":{"1":0,"2":255}}', Name("UInt8")), - ("Map(Int8, Int16)", "('2020-01-01', map(1,127,2,0,3,-128))", '{"d":"2020-01-01","m":{"1":32767,"2":0,"3":-32768}}', Name("Int16")), - ("Map(Int8, UInt16)", "('2020-01-01', map(1,0,2,65535))", '{"d":"2020-01-01","m":{"1":0,"2":65535}}', Name("UInt16")), - ("Map(Int8, Int32)", "('2020-01-01', map(1,127,2,0,3,-128))", 
'{"d":"2020-01-01","m":{"1":2147483647,"2":0,"3":-2147483648}}', Name("Int32")), - ("Map(Int8, UInt32)", "('2020-01-01', map(1,0,2,4294967295))", '{"d":"2020-01-01","m":{"1":0,"2":4294967295}}', Name("UInt32")), - ("Map(Int8, Int64)", "('2020-01-01', map(1,9223372036854775807,2,0,3,-9223372036854775808))", '{"d":"2020-01-01","m":{1:"9223372036854775807",2:"0",3:"-9223372036854775808"}}', Name("Int64")), - ("Map(Int8, UInt64)", "('2020-01-01', map(1,0,2,18446744073709551615))", '{"d":"2020-01-01","m":{1:"0",2:"18446744073709551615"}}', Name("UInt64")), - ("Map(Int8, Int128)", "('2020-01-01', map(1,170141183460469231731687303715884105727,2,0,3,-170141183460469231731687303715884105728))", '{"d":"2020-01-01","m":{1:"170141183460469231731687303715884105727",2:"0",3:"-170141183460469231731687303715884105728"}}', Name("Int128")), - ("Map(Int8, Int256)", "('2020-01-01', map(1,57896044618658097711785492504343953926634992332820282019728792003956564819967,2,0,3,-57896044618658097711785492504343953926634992332820282019728792003956564819968))", '{"d":"2020-01-01","m":{1:"57896044618658097711785492504343953926634992332820282019728792003956564819967",2:"0",3:"-57896044618658097711785492504343953926634992332820282019728792003956564819968"}}', Name("Int256")), - ("Map(Int8, UInt256)", "('2020-01-01', map(1,0,2,115792089237316195423570985008687907853269984665640564039457584007913129639935))", '{"d":"2020-01-01","m":{1:"0",2:"115792089237316195423570985008687907853269984665640564039457584007913129639935"}}', Name("UInt256")), - ("Map(Int8, Nullable(Int8))", "('2020-01-01', map(1,toNullable(1)))", '{"d":"2020-01-01","m":{"1":1}}', Name("toNullable")), - ("Map(Int8, Nullable(Int8))", "('2020-01-01', map(1,toNullable(NULL)))", '{"d":"2020-01-01","m":{"1":null}}', Name("toNullable(NULL)")), -]) def table_map_with_value_integer(self, type, data, output): - """Check what values we can insert into map type column with value integer. 
- """ + """Check what values we can insert into map type column with value integer.""" insert_into_table(type=type, data=data, output=output) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_Array("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_Array("1.0")) +@Examples( + "type data output", + [ + ( + "Map(String, Array(Int8))", + "('2020-01-01', map('key',[]))", + '{"d":"2020-01-01","m":{"key":[]}}', + Name("empty array"), + ), + ( + "Map(String, Array(Int8))", + "('2020-01-01', map('key',[1,2,3]))", + '{"d":"2020-01-01","m":{"key":[1,2,3]}}', + Name("non-empty array of ints"), + ), + ( + "Map(String, Array(String))", + "('2020-01-01', map('key',['1','2','3']))", + '{"d":"2020-01-01","m":{"key":["1","2","3"]}}', + Name("non-empty array of strings"), + ), + ( + "Map(String, Array(Map(Int8, Int8)))", + "('2020-01-01', map('key',[map(1,2),map(2,3)]))", + '{"d":"2020-01-01","m":{"key":[{"1":2},{"2":3}]}}', + Name("non-empty array of maps"), + ), + ( + "Map(String, Array(Map(Int8, Array(Map(Int8, Array(Int8))))))", + "('2020-01-01', map('key',[map(1,[map(1,[1])]),map(2,[map(2,[3])])]))", + '{"d":"2020-01-01","m":{"key":[{"1":[{"1":[1]}]},{"2":[{"2":[3]}]}]}}', + Name("non-empty array of maps of array of maps"), + ), + ], ) -@Examples("type data output", [ - ("Map(String, Array(Int8))", "('2020-01-01', map('key',[]))", '{"d":"2020-01-01","m":{"key":[]}}', Name("empty array")), - ("Map(String, Array(Int8))", "('2020-01-01', map('key',[1,2,3]))", '{"d":"2020-01-01","m":{"key":[1,2,3]}}', Name("non-empty array of ints")), - ("Map(String, Array(String))", "('2020-01-01', map('key',['1','2','3']))", '{"d":"2020-01-01","m":{"key":["1","2","3"]}}', Name("non-empty array of strings")), - ("Map(String, Array(Map(Int8, Int8)))", "('2020-01-01', map('key',[map(1,2),map(2,3)]))", '{"d":"2020-01-01","m":{"key":[{"1":2},{"2":3}]}}', Name("non-empty array of maps")), - ("Map(String, Array(Map(Int8, Array(Map(Int8, Array(Int8))))))", "('2020-01-01', map('key',[map(1,[map(1,[1])]),map(2,[map(2,[3])])]))", '{"d":"2020-01-01","m":{"key":[{"1":[{"1":[1]}]},{"2":[{"2":[3]}]}]}}', Name("non-empty array of maps of array of maps")), -]) def table_map_with_value_array(self, type, data, output): - """Check what values we can insert into map type column with value Array. 
- """ + """Check what values we can insert into map type column with value Array.""" insert_into_table(type=type, data=data, output=output) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Key_Integer("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Key_Integer("1.0")) +@Examples( + "type data output", + [ + ( + "Map(Int8, Int8)", + "('2020-01-01', map(127,1,0,1,-128,1))", + '{"d":"2020-01-01","m":{"127":1,"0":1,"-128":1}}', + Name("Int8"), + ), + ( + "Map(UInt8, Int8)", + "('2020-01-01', map(0,1,255,1))", + '{"d":"2020-01-01","m":{"0":1,"255":1}}', + Name("UInt8"), + ), + ( + "Map(Int16, Int8)", + "('2020-01-01', map(127,1,0,1,-128,1))", + '{"d":"2020-01-01","m":{"32767":1,"0":1,"-32768":1}}', + Name("Int16"), + ), + ( + "Map(UInt16, Int8)", + "('2020-01-01', map(0,1,65535,1))", + '{"d":"2020-01-01","m":{"0":1,"65535":1}}', + Name("UInt16"), + ), + ( + "Map(Int32, Int8)", + "('2020-01-01', map(2147483647,1,0,1,-2147483648,1))", + '{"d":"2020-01-01","m":{"2147483647":1,"0":1,"-2147483648":1}}', + Name("Int32"), + ), + ( + "Map(UInt32, Int8)", + "('2020-01-01', map(0,1,4294967295,1))", + '{"d":"2020-01-01","m":{"0":1,"4294967295":1}}', + Name("UInt32"), + ), + ( + "Map(Int64, Int8)", + "('2020-01-01', map(9223372036854775807,1,0,1,-9223372036854775808,1))", + '{"d":"2020-01-01","m":{"9223372036854775807":1,"0":1,"-9223372036854775808":1}}', + Name("Int64"), + ), + ( + "Map(UInt64, Int8)", + "('2020-01-01', map(0,1,18446744073709551615,1))", + '{"d":"2020-01-01","m":{"0":1,"18446744073709551615":1}}', + Name("UInt64"), + ), + ( + "Map(Int128, Int8)", + "('2020-01-01', map(170141183460469231731687303715884105727,1,0,1,-170141183460469231731687303715884105728,1))", + '{"d":"2020-01-01","m":{170141183460469231731687303715884105727:1,0:1,"-170141183460469231731687303715884105728":1}}', + Name("Int128"), + ), + ( + "Map(Int256, Int8)", + "('2020-01-01', map(57896044618658097711785492504343953926634992332820282019728792003956564819967,1,0,1,-57896044618658097711785492504343953926634992332820282019728792003956564819968,1))", + '{"d":"2020-01-01","m":{"57896044618658097711785492504343953926634992332820282019728792003956564819967":1,"0":1,"-57896044618658097711785492504343953926634992332820282019728792003956564819968":1}}', + Name("Int256"), + ), + ( + "Map(UInt256, Int8)", + "('2020-01-01', map(0,1,115792089237316195423570985008687907853269984665640564039457584007913129639935,1))", + '{"d":"2020-01-01","m":{"0":1,"115792089237316195423570985008687907853269984665640564039457584007913129639935":1}}', + Name("UInt256"), + ), + ( + "Map(Nullable(Int8), Int8)", + "('2020-01-01', map(toNullable(1),1))", + '{"d":"2020-01-01","m":{1:1}}', + Name("toNullable"), + ), + ( + "Map(Nullable(Int8), Int8)", + "('2020-01-01', map(toNullable(NULL),1))", + '{"d":"2020-01-01","m":{null:1}}', + Name("toNullable(NULL)"), + ), + ], ) -@Examples("type data output", [ - ("Map(Int8, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"d":"2020-01-01","m":{"127":1,"0":1,"-128":1}}', Name("Int8")), - ("Map(UInt8, Int8)", "('2020-01-01', map(0,1,255,1))", '{"d":"2020-01-01","m":{"0":1,"255":1}}', Name("UInt8")), - ("Map(Int16, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"d":"2020-01-01","m":{"32767":1,"0":1,"-32768":1}}', Name("Int16")), - ("Map(UInt16, Int8)", "('2020-01-01', map(0,1,65535,1))", '{"d":"2020-01-01","m":{"0":1,"65535":1}}', Name("UInt16")), - ("Map(Int32, Int8)", "('2020-01-01', map(2147483647,1,0,1,-2147483648,1))", 
'{"d":"2020-01-01","m":{"2147483647":1,"0":1,"-2147483648":1}}', Name("Int32")), - ("Map(UInt32, Int8)", "('2020-01-01', map(0,1,4294967295,1))", '{"d":"2020-01-01","m":{"0":1,"4294967295":1}}', Name("UInt32")), - ("Map(Int64, Int8)", "('2020-01-01', map(9223372036854775807,1,0,1,-9223372036854775808,1))", '{"d":"2020-01-01","m":{"9223372036854775807":1,"0":1,"-9223372036854775808":1}}', Name("Int64")), - ("Map(UInt64, Int8)", "('2020-01-01', map(0,1,18446744073709551615,1))", '{"d":"2020-01-01","m":{"0":1,"18446744073709551615":1}}', Name("UInt64")), - ("Map(Int128, Int8)", "('2020-01-01', map(170141183460469231731687303715884105727,1,0,1,-170141183460469231731687303715884105728,1))", '{"d":"2020-01-01","m":{170141183460469231731687303715884105727:1,0:1,"-170141183460469231731687303715884105728":1}}', Name("Int128")), - ("Map(Int256, Int8)", "('2020-01-01', map(57896044618658097711785492504343953926634992332820282019728792003956564819967,1,0,1,-57896044618658097711785492504343953926634992332820282019728792003956564819968,1))", '{"d":"2020-01-01","m":{"57896044618658097711785492504343953926634992332820282019728792003956564819967":1,"0":1,"-57896044618658097711785492504343953926634992332820282019728792003956564819968":1}}', Name("Int256")), - ("Map(UInt256, Int8)", "('2020-01-01', map(0,1,115792089237316195423570985008687907853269984665640564039457584007913129639935,1))", '{"d":"2020-01-01","m":{"0":1,"115792089237316195423570985008687907853269984665640564039457584007913129639935":1}}', Name("UInt256")), - ("Map(Nullable(Int8), Int8)", "('2020-01-01', map(toNullable(1),1))", '{"d":"2020-01-01","m":{1:1}}', Name("toNullable")), - ("Map(Nullable(Int8), Int8)", "('2020-01-01', map(toNullable(NULL),1))", '{"d":"2020-01-01","m":{null:1}}', Name("toNullable(NULL)")), -]) def table_map_with_key_integer(self, type, data, output): - """Check what values we can insert into map type column with key integer. 
- """ + """Check what values we can insert into map type column with key integer.""" insert_into_table(type=type, data=data, output=output) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Key_Integer("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Key_Integer("1.0")) +@Examples( + "type data output select", + [ + ( + "Map(Int8, Int8)", + "('2020-01-01', map(127,1,0,1,-128,1))", + '{"m":1}', + "m[127] AS m", + Name("Int8"), + ), + ( + "Map(UInt8, Int8)", + "('2020-01-01', map(0,1,255,1))", + '{"m":2}', + "(m[255] + m[0]) AS m", + Name("UInt8"), + ), + ( + "Map(Int16, Int8)", + "('2020-01-01', map(127,1,0,1,-128,1))", + '{"m":3}', + "(m[-128] + m[0] + m[-128]) AS m", + Name("Int16"), + ), + ( + "Map(UInt16, Int8)", + "('2020-01-01', map(0,1,65535,1))", + '{"m":2}', + "(m[0] + m[65535]) AS m", + Name("UInt16"), + ), + ( + "Map(Int32, Int8)", + "('2020-01-01', map(2147483647,1,0,1,-2147483648,1))", + '{"m":3}', + "(m[2147483647] + m[0] + m[-2147483648]) AS m", + Name("Int32"), + ), + ( + "Map(UInt32, Int8)", + "('2020-01-01', map(0,1,4294967295,1))", + '{"m":2}', + "(m[0] + m[4294967295]) AS m", + Name("UInt32"), + ), + ( + "Map(Int64, Int8)", + "('2020-01-01', map(9223372036854775807,1,0,1,-9223372036854775808,1))", + '{"m":3}', + "(m[9223372036854775807] + m[0] + m[-9223372036854775808]) AS m", + Name("Int64"), + ), + ( + "Map(UInt64, Int8)", + "('2020-01-01', map(0,1,18446744073709551615,1))", + '{"m":2}', + "(m[0] + m[18446744073709551615]) AS m", + Name("UInt64"), + ), + ( + "Map(Int128, Int8)", + "('2020-01-01', map(170141183460469231731687303715884105727,1,0,1,-170141183460469231731687303715884105728,1))", + '{"m":3}', + "(m[170141183460469231731687303715884105727] + m[0] + m[-170141183460469231731687303715884105728]) AS m", + Name("Int128"), + ), + ( + "Map(Int256, Int8)", + "('2020-01-01', map(57896044618658097711785492504343953926634992332820282019728792003956564819967,1,0,1,-57896044618658097711785492504343953926634992332820282019728792003956564819968,1))", + '{"m":3}', + "(m[57896044618658097711785492504343953926634992332820282019728792003956564819967] + m[0] + m[-57896044618658097711785492504343953926634992332820282019728792003956564819968]) AS m", + Name("Int256"), + ), + ( + "Map(UInt256, Int8)", + "('2020-01-01', map(0,1,115792089237316195423570985008687907853269984665640564039457584007913129639935,1))", + '{"m":2}', + "(m[0] + m[115792089237316195423570985008687907853269984665640564039457584007913129639935]) AS m", + Name("UInt256"), + ), + ( + "Map(Nullable(Int8), Int8)", + "('2020-01-01', map(toNullable(1),1))", + '{"m":1}', + "m[1] AS m", + Name("toNullable"), + ), + ( + "Map(Nullable(Int8), Int8)", + "('2020-01-01', map(toNullable(NULL),1))", + '{"m":1}', + "m[null] AS m", + Name("toNullable(NULL)"), + ), + ], ) -@Examples("type data output select", [ - ("Map(Int8, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"m":1}', "m[127] AS m", Name("Int8")), - ("Map(UInt8, Int8)", "('2020-01-01', map(0,1,255,1))", '{"m":2}', "(m[255] + m[0]) AS m", Name("UInt8")), - ("Map(Int16, Int8)", "('2020-01-01', map(127,1,0,1,-128,1))", '{"m":3}', "(m[-128] + m[0] + m[-128]) AS m", Name("Int16")), - ("Map(UInt16, Int8)", "('2020-01-01', map(0,1,65535,1))", '{"m":2}', "(m[0] + m[65535]) AS m", Name("UInt16")), - ("Map(Int32, Int8)", "('2020-01-01', map(2147483647,1,0,1,-2147483648,1))", '{"m":3}', "(m[2147483647] + m[0] + m[-2147483648]) AS m", Name("Int32")), - ("Map(UInt32, Int8)", "('2020-01-01', map(0,1,4294967295,1))", '{"m":2}', "(m[0] + 
m[4294967295]) AS m", Name("UInt32")), - ("Map(Int64, Int8)", "('2020-01-01', map(9223372036854775807,1,0,1,-9223372036854775808,1))", '{"m":3}', "(m[9223372036854775807] + m[0] + m[-9223372036854775808]) AS m", Name("Int64")), - ("Map(UInt64, Int8)", "('2020-01-01', map(0,1,18446744073709551615,1))", '{"m":2}', "(m[0] + m[18446744073709551615]) AS m", Name("UInt64")), - ("Map(Int128, Int8)", "('2020-01-01', map(170141183460469231731687303715884105727,1,0,1,-170141183460469231731687303715884105728,1))", '{"m":3}', "(m[170141183460469231731687303715884105727] + m[0] + m[-170141183460469231731687303715884105728]) AS m", Name("Int128")), - ("Map(Int256, Int8)", "('2020-01-01', map(57896044618658097711785492504343953926634992332820282019728792003956564819967,1,0,1,-57896044618658097711785492504343953926634992332820282019728792003956564819968,1))", '{"m":3}', "(m[57896044618658097711785492504343953926634992332820282019728792003956564819967] + m[0] + m[-57896044618658097711785492504343953926634992332820282019728792003956564819968]) AS m", Name("Int256")), - ("Map(UInt256, Int8)", "('2020-01-01', map(0,1,115792089237316195423570985008687907853269984665640564039457584007913129639935,1))", '{"m":2}', "(m[0] + m[115792089237316195423570985008687907853269984665640564039457584007913129639935]) AS m", Name("UInt256")), - ("Map(Nullable(Int8), Int8)", "('2020-01-01', map(toNullable(1),1))", '{"m":1}', "m[1] AS m", Name("toNullable")), - ("Map(Nullable(Int8), Int8)", "('2020-01-01', map(toNullable(NULL),1))", '{"m":1}', "m[null] AS m", Name("toNullable(NULL)")), -]) def table_map_select_key_with_key_integer(self, type, data, output, select): - """Check what values we can insert into map type column with key integer and if we can use the key to select the value. - """ + """Check what values we can insert into map type column with key integer and if we can use the key to select the value.""" insert_into_table(type=type, data=data, output=output, select=select) + @TestOutline(Scenario) @Requirements( RQ_SRS_018_ClickHouse_Map_DataType_ArrayOfMaps("1.0"), - RQ_SRS_018_ClickHouse_Map_DataType_NestedWithMaps("1.0") + RQ_SRS_018_ClickHouse_Map_DataType_NestedWithMaps("1.0"), +) +@Examples( + "type data output partition_by", + [ + ( + "Array(Map(String, Int8))", + "('2020-01-01', [map('hello',1),map('hello',1,'there',2)])", + '{"d":"2020-01-01","m":[{"hello":1},{"hello":1,"there":2}]}', + "m", + Name("Array(Map(String, Int8))"), + ), + ( + "Nested(x Map(String, Int8))", + "('2020-01-01', [map('hello',1)])", + '{"d":"2020-01-01","m.x":[{"hello":1}]}', + "m.x", + Name("Nested(x Map(String, Int8)"), + ), + ], ) -@Examples("type data output partition_by", [ - ("Array(Map(String, Int8))", - "('2020-01-01', [map('hello',1),map('hello',1,'there',2)])", - '{"d":"2020-01-01","m":[{"hello":1},{"hello":1,"there":2}]}', - "m", - Name("Array(Map(String, Int8))")), - ("Nested(x Map(String, Int8))", - "('2020-01-01', [map('hello',1)])", - '{"d":"2020-01-01","m.x":[{"hello":1}]}', - "m.x", - Name("Nested(x Map(String, Int8)")) -]) def table_with_map_inside_another_type(self, type, data, output, partition_by): - """Check what values we can insert into a type that has map type. - """ + """Check what values we can insert into a type that has map type.""" insert_into_table(type=type, data=data, output=output, partition_by=partition_by) + @TestOutline def insert_into_table(self, type, data, output, partition_by="m", select="*"): - """Check we can insert data into a table. 
- """ + """Check we can insert data into a table.""" uid = getuid() node = self.context.node with Given(f"table definition with {type}"): - sql = "CREATE TABLE {name} (d DATE, m " + type + ") ENGINE = MergeTree() PARTITION BY " + partition_by + " ORDER BY d" + sql = ( + "CREATE TABLE {name} (d DATE, m " + + type + + ") ENGINE = MergeTree() PARTITION BY " + + partition_by + + " ORDER BY d" + ) with Given(f"I create a table", description=sql): table = create_table(name=uid, statement=sql) @@ -382,30 +1021,34 @@ def insert_into_table(self, type, data, output, partition_by="m", select="*"): with Then("I expect output to match", description=output): assert r.output == output, error() + @TestScenario @Requirements( RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MixedKeyOrValueTypes("1.0") ) def select_map_with_invalid_mixed_key_and_value_types(self): - """Check that creating a map with mixed key types fails. - """ + """Check that creating a map with mixed key types fails.""" node = self.context.node exitcode = 130 message = "DB::Exception: There is no supertype for types String, UInt8 because some of them are String/FixedString and some of them are not" - with Check("attempt to create a map using SELECT with mixed key types then it fails"): + with Check( + "attempt to create a map using SELECT with mixed key types then it fails" + ): node.query("SELECT map('hello',1,2,3)", exitcode=exitcode, message=message) - with Check("attempt to create a map using SELECT with mixed value types then it fails"): + with Check( + "attempt to create a map using SELECT with mixed value types then it fails" + ): node.query("SELECT map(1,'hello',2,2)", exitcode=exitcode, message=message) + @TestScenario @Requirements( RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_InvalidNumberOfArguments("1.0") ) def select_map_with_invalid_number_of_arguments(self): - """Check that creating a map with invalid number of arguments fails. - """ + """Check that creating a map with invalid number of arguments fails.""" node = self.context.node exitcode = 42 message = "DB::Exception: Function map requires even number of arguments" @@ -413,10 +1056,10 @@ def select_map_with_invalid_number_of_arguments(self): with When("I create a map using SELECT with invalid number of arguments"): node.query("SELECT map(1,2,3)", exitcode=exitcode, message=message) + @TestScenario def select_map_empty(self): - """Check that we can can create a empty map by not passing any arguments. - """ + """Check that we can can create a empty map by not passing any arguments.""" node = self.context.node with When("I create a map using SELECT with no arguments"): @@ -425,10 +1068,10 @@ def select_map_empty(self): with Then("it should create an empty map"): assert r.output == "{}", error() + @TestScenario def insert_invalid_mixed_key_and_value_types(self): - """Check that inserting a map with mixed key or value types fails. 
- """ + """Check that inserting a map with mixed key or value types fails.""" uid = getuid() node = self.context.node exitcode = 130 @@ -448,47 +1091,64 @@ def insert_invalid_mixed_key_and_value_types(self): sql = f"INSERT INTO {table} VALUES ('2020-01-01', map(1,'hello',2,2))" node.query(sql, exitcode=exitcode, message=message) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_DuplicatedKeys("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_DuplicatedKeys("1.0")) +@Examples( + "type data output", + [ + ( + "Map(String, String)", + "('2020-01-01', map('hello','there','hello','over there'))", + '{"d":"2020-01-01","m":{"hello":"there","hello":"over there"}}', + Name("Map(String, String))"), + ), + ( + "Map(Int64, String)", + "('2020-01-01', map(12345,'there',12345,'over there'))", + '{"d":"2020-01-01","m":{"12345":"there","12345":"over there"}}', + Name("Map(Int64, String))"), + ), + ], ) -@Examples("type data output", [ - ("Map(String, String)", - "('2020-01-01', map('hello','there','hello','over there'))", - '{"d":"2020-01-01","m":{"hello":"there","hello":"over there"}}', - Name("Map(String, String))")), - ("Map(Int64, String)", - "('2020-01-01', map(12345,'there',12345,'over there'))", - '{"d":"2020-01-01","m":{"12345":"there","12345":"over there"}}', - Name("Map(Int64, String))")), -]) def table_map_with_duplicated_keys(self, type, data, output): - """Check that map supports duplicated keys. - """ + """Check that map supports duplicated keys.""" insert_into_table(type=type, data=data, output=output) -@TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_DuplicatedKeys("1.0") -) -@Examples("map output", [ - ("map('hello','there','hello','over there')", "{'hello':'there','hello':'over there'}", Name("String")), - ("map(12345,'there',12345,'over there')", "{12345:'there',12345:'over there'}", Name("Integer")) -]) -def select_map_with_duplicated_keys(self, map, output): - """Check creating a map with duplicated keys. 
- """ - select_map(map=map, output=output) @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyNotFound("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_DuplicatedKeys("1.0")) +@Examples( + "map output", + [ + ( + "map('hello','there','hello','over there')", + "{'hello':'there','hello':'over there'}", + Name("String"), + ), + ( + "map(12345,'there',12345,'over there')", + "{12345:'there',12345:'over there'}", + Name("Integer"), + ), + ], ) +def select_map_with_duplicated_keys(self, map, output): + """Check creating a map with duplicated keys.""" + select_map(map=map, output=output) + + +@TestOutline(Scenario) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyNotFound("1.0")) def select_map_key_not_found(self): node = self.context.node with When("map is empty"): - node.query("SELECT map() AS m, m[1]", exitcode=43, message="DB::Exception: Illegal types of arguments") + node.query( + "SELECT map() AS m, m[1]", + exitcode=43, + message="DB::Exception: Illegal types of arguments", + ) with When("map has integer values"): r = node.query("SELECT map(1,2) AS m, m[2] FORMAT Values") @@ -505,19 +1165,48 @@ def select_map_key_not_found(self): with Then("empty array be returned for key that is not found"): assert r.output == "({1:[2]},[])", error() + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyNotFound("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyNotFound("1.0")) +@Examples( + "type data select exitcode message", + [ + ( + "Map(UInt8, UInt8), y Int8", + "(y) VALUES (1)", + "m[1] AS v", + 0, + '{"v":0}', + Name("empty map"), + ), + ( + "Map(UInt8, UInt8)", + "VALUES (map(1,2))", + "m[2] AS v", + 0, + '{"v":0}', + Name("map has integer values"), + ), + ( + "Map(UInt8, String)", + "VALUES (map(1,'2'))", + "m[2] AS v", + 0, + '{"v":""}', + Name("map has string values"), + ), + ( + "Map(UInt8, Array(Int8))", + "VALUES (map(1,[2]))", + "m[2] AS v", + 0, + '{"v":[]}', + Name("map has array values"), + ), + ], ) -@Examples("type data select exitcode message", [ - ("Map(UInt8, UInt8), y Int8", "(y) VALUES (1)", "m[1] AS v", 0, '{"v":0}', Name("empty map")), - ("Map(UInt8, UInt8)", "VALUES (map(1,2))", "m[2] AS v", 0, '{"v":0}', Name("map has integer values")), - ("Map(UInt8, String)", "VALUES (map(1,'2'))", "m[2] AS v", 0, '{"v":""}', Name("map has string values")), - ("Map(UInt8, Array(Int8))", "VALUES (map(1,[2]))", "m[2] AS v", 0, '{"v":[]}', Name("map has array values")), -]) def table_map_key_not_found(self, type, data, select, exitcode, message, order_by=None): - """Check values returned from a map column when key is not found. 
- """ + """Check values returned from a map column when key is not found.""" uid = getuid() node = self.context.node @@ -525,7 +1214,12 @@ def table_map_key_not_found(self, type, data, select, exitcode, message, order_b order_by = "m" with Given(f"table definition with {type}"): - sql = "CREATE TABLE {name} (m " + type + ") ENGINE = MergeTree() ORDER BY " + order_by + sql = ( + "CREATE TABLE {name} (m " + + type + + ") ENGINE = MergeTree() ORDER BY " + + order_by + ) with And(f"I create a table", description=sql): table = create_table(name=uid, statement=sql) @@ -534,67 +1228,185 @@ def table_map_key_not_found(self, type, data, select, exitcode, message, order_b node.query(f"INSERT INTO {table} {data}") with And("I try to read from the table"): - node.query(f"SELECT {select} FROM {table} FORMAT JSONEachRow", exitcode=exitcode, message=message) + node.query( + f"SELECT {select} FROM {table} FORMAT JSONEachRow", + exitcode=exitcode, + message=message, + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyInvalid("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyInvalid("1.0")) def invalid_key(self): - """Check when key is not valid. - """ + """Check when key is not valid.""" node = self.context.node with When("I try to use an integer key that is too large"): - node.query("SELECT map(1,2) AS m, m[256]", exitcode=43, message="DB::Exception: Illegal types of arguments") + node.query( + "SELECT map(1,2) AS m, m[256]", + exitcode=43, + message="DB::Exception: Illegal types of arguments", + ) with When("I try to use an integer key that is negative when key is unsigned"): - node.query("SELECT map(1,2) AS m, m[-1]", exitcode=43, message="DB::Exception: Illegal types of arguments") + node.query( + "SELECT map(1,2) AS m, m[-1]", + exitcode=43, + message="DB::Exception: Illegal types of arguments", + ) with When("I try to use a string key when key is an integer"): - node.query("SELECT map(1,2) AS m, m['1']", exitcode=43, message="DB::Exception: Illegal types of arguments") + node.query( + "SELECT map(1,2) AS m, m['1']", + exitcode=43, + message="DB::Exception: Illegal types of arguments", + ) with When("I try to use an integer key when key is a string"): - r = node.query("SELECT map('1',2) AS m, m[1]", exitcode=43, message="DB::Exception: Illegal types of arguments") + r = node.query( + "SELECT map('1',2) AS m, m[1]", + exitcode=43, + message="DB::Exception: Illegal types of arguments", + ) with When("I try to use an empty key when key is a string"): - r = node.query("SELECT map('1',2) AS m, m[]", exitcode=62, message="DB::Exception: Syntax error: failed at position") + r = node.query( + "SELECT map('1',2) AS m, m[]", + exitcode=62, + message="DB::Exception: Syntax error: failed at position", + ) with When("I try to use wrong type conversion in key"): - r = node.query("SELECT map(1,2) AS m, m[toInt8('1')]", exitcode=43, message="DB::Exception: Illegal types of arguments") + r = node.query( + "SELECT map(1,2) AS m, m[toInt8('1')]", + exitcode=43, + message="DB::Exception: Illegal types of arguments", + ) - with When("in array of maps I try to use an integer key that is negative when key is unsigned"): - node.query("SELECT [map(1,2)] AS m, m[1][-1]", exitcode=43, message="DB::Exception: Illegal types of arguments") + with When( + "in array of maps I try to use an integer key that is negative when key is unsigned" + ): + node.query( + "SELECT [map(1,2)] AS m, m[1][-1]", + exitcode=43, + message="DB::Exception: Illegal types of arguments", 
+ ) with When("I try to use a NULL key when key is not nullable"): r = node.query("SELECT map(1,2) AS m, m[NULL] FORMAT Values") with Then("it should return NULL"): assert r.output == "({1:2},NULL)", error() + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyInvalid("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval_KeyInvalid("1.0")) +@Examples( + "type data select exitcode message order_by", + [ + ( + "Map(UInt8, UInt8)", + "(map(1,2))", + "m[256] AS v", + 0, + '{"v":0}', + "m", + Name("key too large)"), + ), + ( + "Map(UInt8, UInt8)", + "(map(1,2))", + "m[-1] AS v", + 0, + '{"v":0}', + "m", + Name("key is negative"), + ), + ( + "Map(UInt8, UInt8)", + "(map(1,2))", + "m['1'] AS v", + 43, + "DB::Exception: Illegal types of arguments", + "m", + Name("string when key is integer"), + ), + ( + "Map(String, UInt8)", + "(map('1',2))", + "m[1] AS v", + 43, + "DB::Exception: Illegal types of arguments", + "m", + Name("integer when key is string"), + ), + ( + "Map(String, UInt8)", + "(map('1',2))", + "m[] AS v", + 62, + "DB::Exception: Syntax error: failed at position", + "m", + Name("empty when key is string"), + ), + ( + "Map(UInt8, UInt8)", + "(map(1,2))", + "m[toInt8('1')] AS v", + 0, + '{"v":2}', + "m", + Name("wrong type conversion when key is integer"), + ), + ( + "Map(String, UInt8)", + "(map('1',2))", + "m[toFixedString('1',1)] AS v", + 0, + '{"v":2}', + "m", + Name("wrong type conversion when key is string"), + ), + ( + "Map(UInt8, UInt8)", + "(map(1,2))", + "m[NULL] AS v", + 0, + '{"v":null}', + "m", + Name("NULL key when key is not nullable"), + ), + ( + "Array(Map(UInt8, UInt8))", + "([map(1,2)])", + "m[1]['1'] AS v", + 43, + "DB::Exception: Illegal types of arguments", + "m", + Name("string when key is integer in array of maps"), + ), + ( + "Nested(x Map(UInt8, UInt8))", + "([map(1,2)])", + "m.x[1]['1'] AS v", + 43, + "DB::Exception: Illegal types of arguments", + "m.x", + Name("string when key is integer in nested map"), + ), + ], ) -@Examples("type data select exitcode message order_by", [ - ("Map(UInt8, UInt8)", "(map(1,2))", "m[256] AS v", 0, '{"v":0}', "m", Name("key too large)")), - ("Map(UInt8, UInt8)", "(map(1,2))", "m[-1] AS v", 0, '{"v":0}', "m", Name("key is negative")), - ("Map(UInt8, UInt8)", "(map(1,2))", "m['1'] AS v", 43, "DB::Exception: Illegal types of arguments", "m", Name("string when key is integer")), - ("Map(String, UInt8)", "(map('1',2))", "m[1] AS v", 43, "DB::Exception: Illegal types of arguments", "m", Name("integer when key is string")), - ("Map(String, UInt8)", "(map('1',2))", "m[] AS v", 62, "DB::Exception: Syntax error: failed at position", "m", Name("empty when key is string")), - ("Map(UInt8, UInt8)", "(map(1,2))", "m[toInt8('1')] AS v", 0, '{"v":2}', "m", Name("wrong type conversion when key is integer")), - ("Map(String, UInt8)", "(map('1',2))", "m[toFixedString('1',1)] AS v", 0, '{"v":2}', "m", Name("wrong type conversion when key is string")), - ("Map(UInt8, UInt8)", "(map(1,2))", "m[NULL] AS v", 0, '{"v":null}', "m", Name("NULL key when key is not nullable")), - ("Array(Map(UInt8, UInt8))", "([map(1,2)])", "m[1]['1'] AS v", 43, "DB::Exception: Illegal types of arguments", "m", Name("string when key is integer in array of maps")), - ("Nested(x Map(UInt8, UInt8))", "([map(1,2)])", "m.x[1]['1'] AS v", 43, "DB::Exception: Illegal types of arguments", "m.x", Name("string when key is integer in nested map")), -]) def table_map_invalid_key(self, type, data, select, exitcode, message, 
order_by="m"): - """Check selecting values from a map column using an invalid key. - """ + """Check selecting values from a map column using an invalid key.""" uid = getuid() node = self.context.node with Given(f"table definition with {type}"): - sql = "CREATE TABLE {name} (m " + type + ") ENGINE = MergeTree() ORDER BY " + order_by + sql = ( + "CREATE TABLE {name} (m " + + type + + ") ENGINE = MergeTree() ORDER BY " + + order_by + ) with And(f"I create a table", description=sql): table = create_table(name=uid, statement=sql) @@ -603,35 +1415,114 @@ def table_map_invalid_key(self, type, data, select, exitcode, message, order_by= node.query(f"INSERT INTO {table} VALUES {data}") with And("I try to read from the table"): - node.query(f"SELECT {select} FROM {table} FORMAT JSONEachRow", exitcode=exitcode, message=message) + node.query( + f"SELECT {select} FROM {table} FORMAT JSONEachRow", + exitcode=exitcode, + message=message, + ) + @TestOutline(Scenario) -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval("1.0") +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Value_Retrieval("1.0")) +@Examples( + "type data select filter exitcode message order_by", + [ + ( + "Map(UInt8, UInt8)", + "(map(1,1)),(map(1,2)),(map(2,3))", + "m[1] AS v", + "1=1 ORDER BY m[1]", + 0, + '{"v":0}\n{"v":1}\n{"v":2}', + None, + Name("select the same key from all the rows"), + ), + ( + "Map(String, String)", + "(map('a','b')),(map('c','d','e','f')),(map('e','f'))", + "m", + "m = map('e','f','c','d')", + 0, + "", + None, + Name("filter rows by map having different pair order"), + ), + ( + "Map(String, String)", + "(map('a','b')),(map('c','d','e','f')),(map('e','f'))", + "m", + "m = map('c','d','e','f')", + 0, + '{"m":{"c":"d","e":"f"}}', + None, + Name("filter rows by map having the same pair order"), + ), + ( + "Map(String, String)", + "(map('a','b')),(map('e','f'))", + "m", + "m = map()", + 0, + "", + None, + Name("filter rows by empty map"), + ), + ( + "Map(String, Int8)", + "(map('a',1,'b',2)),(map('a',2)),(map('b',3))", + "m", + "m['a'] = 1", + 0, + '{"m":{"a":1,"b":2}}', + None, + Name("filter rows by map key value"), + ), + ( + "Map(String, Int8)", + "(map('a',1,'b',2)),(map('a',2)),(map('b',3))", + "m", + "m['a'] = 1 AND m['b'] = 2", + 0, + '{"m":{"a":1,"b":2}}', + None, + Name("filter rows by map multiple key value combined with AND"), + ), + ( + "Map(String, Int8)", + "(map('a',1,'b',2)),(map('a',2)),(map('b',3))", + "m", + "m['a'] = 1 OR m['b'] = 3", + 0, + '{"m":{"a":1,"b":2}}\n{"m":{"b":3}}', + None, + Name("filter rows by map multiple key value combined with OR"), + ), + ( + "Map(String, Array(Int8))", + "(map('a',[])),(map('b',[1])),(map('c',[2]))", + "m['b'] AS v", + "m['b'] IN ([1],[2])", + 0, + '{"v":[1]}', + None, + Name("filter rows by map array value using IN"), + ), + ( + "Map(String, Nullable(String))", + "(map('a',NULL)),(map('a',1))", + "m", + "isNull(m['a']) = 1", + 0, + '{"m":{"a":null}}', + None, + Name("select map with nullable value"), + ), + ], ) -@Examples("type data select filter exitcode message order_by", [ - ("Map(UInt8, UInt8)", "(map(1,1)),(map(1,2)),(map(2,3))", "m[1] AS v", "1=1 ORDER BY m[1]", 0, '{"v":0}\n{"v":1}\n{"v":2}', None, - Name("select the same key from all the rows")), - ("Map(String, String)", "(map('a','b')),(map('c','d','e','f')),(map('e','f'))", "m", "m = map('e','f','c','d')", 0, '', None, - Name("filter rows by map having different pair order")), - ("Map(String, String)", "(map('a','b')),(map('c','d','e','f')),(map('e','f'))", "m", "m = 
map('c','d','e','f')", 0, '{"m":{"c":"d","e":"f"}}', None, - Name("filter rows by map having the same pair order")), - ("Map(String, String)", "(map('a','b')),(map('e','f'))", "m", "m = map()", 0, '', None, - Name("filter rows by empty map")), - ("Map(String, Int8)", "(map('a',1,'b',2)),(map('a',2)),(map('b',3))", "m", "m['a'] = 1", 0, '{"m":{"a":1,"b":2}}', None, - Name("filter rows by map key value")), - ("Map(String, Int8)", "(map('a',1,'b',2)),(map('a',2)),(map('b',3))", "m", "m['a'] = 1 AND m['b'] = 2", 0, '{"m":{"a":1,"b":2}}', None, - Name("filter rows by map multiple key value combined with AND")), - ("Map(String, Int8)", "(map('a',1,'b',2)),(map('a',2)),(map('b',3))", "m", "m['a'] = 1 OR m['b'] = 3", 0, '{"m":{"a":1,"b":2}}\n{"m":{"b":3}}', None, - Name("filter rows by map multiple key value combined with OR")), - ("Map(String, Array(Int8))", "(map('a',[])),(map('b',[1])),(map('c',[2]))", "m['b'] AS v", "m['b'] IN ([1],[2])", 0, '{"v":[1]}', None, - Name("filter rows by map array value using IN")), - ("Map(String, Nullable(String))", "(map('a',NULL)),(map('a',1))", "m", "isNull(m['a']) = 1", 0, '{"m":{"a":null}}', None, - Name("select map with nullable value")) -]) -def table_map_queries(self, type, data, select, filter, exitcode, message, order_by=None): - """Check retrieving map values and using maps in queries. - """ +def table_map_queries( + self, type, data, select, filter, exitcode, message, order_by=None +): + """Check retrieving map values and using maps in queries.""" uid = getuid() node = self.context.node @@ -639,7 +1530,12 @@ def table_map_queries(self, type, data, select, filter, exitcode, message, order order_by = "m" with Given(f"table definition with {type}"): - sql = "CREATE TABLE {name} (m " + type + ") ENGINE = MergeTree() ORDER BY " + order_by + sql = ( + "CREATE TABLE {name} (m " + + type + + ") ENGINE = MergeTree() ORDER BY " + + order_by + ) with And(f"I create a table", description=sql): table = create_table(name=uid, statement=sql) @@ -648,24 +1544,37 @@ def table_map_queries(self, type, data, select, filter, exitcode, message, order node.query(f"INSERT INTO {table} VALUES {data}") with And("I try to read from the table"): - node.query(f"SELECT {select} FROM {table} WHERE {filter} FORMAT JSONEachRow", exitcode=exitcode, message=message) + node.query( + f"SELECT {select} FROM {table} WHERE {filter} FORMAT JSONEachRow", + exitcode=exitcode, + message=message, + ) + @TestOutline(Scenario) @Requirements( RQ_SRS_018_ClickHouse_Map_DataType_Invalid_Nullable("1.0"), - RQ_SRS_018_ClickHouse_Map_DataType_Invalid_NothingNothing("1.0") + RQ_SRS_018_ClickHouse_Map_DataType_Invalid_NothingNothing("1.0"), +) +@Examples( + "type exitcode message", + [ + ( + "Nullable(Map(String, String))", + 43, + "DB::Exception: Nested type Map(String,String) cannot be inside Nullable type", + Name("nullable map"), + ), + ( + "Map(Nothing, Nothing)", + 37, + "DB::Exception: Column `m` with type Map(Nothing,Nothing) is not allowed in key expression, it's not comparable", + Name("map with nothing type for key and value"), + ), + ], ) -@Examples("type exitcode message", [ - ("Nullable(Map(String, String))", - 43, "DB::Exception: Nested type Map(String,String) cannot be inside Nullable type", - Name("nullable map")), - ("Map(Nothing, Nothing)", - 37, "DB::Exception: Column `m` with type Map(Nothing,Nothing) is not allowed in key expression, it's not comparable", - Name("map with nothing type for key and value")) -]) def table_map_unsupported_types(self, type, exitcode, message): - """Check 
creating a table with unsupported map column types. - """ + """Check creating a table with unsupported map column types.""" uid = getuid() node = self.context.node @@ -677,109 +1586,265 @@ def table_map_unsupported_types(self, type, exitcode, message): with Finally("drop table if any"): node.query(f"DROP TABLE IF EXISTS {uid}") + @TestOutline(Scenario) @Requirements( RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysToMap("1.0"), - RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysMap_Invalid("1.0") + RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysMap_Invalid("1.0"), +) +@Examples( + "tuple type exitcode message", + [ + ( + "([1, 2, 3], ['Ready', 'Steady', 'Go'])", + "Map(UInt8, String)", + 0, + "{1:'Ready',2:'Steady',3:'Go'}", + Name("int -> int"), + ), + ( + "([1, 2, 3], ['Ready', 'Steady', 'Go'])", + "Map(String, String)", + 0, + "{'1':'Ready','2':'Steady','3':'Go'}", + Name("int -> string"), + ), + ( + "(['1', '2', '3'], ['Ready', 'Steady', 'Go'])", + "Map(UInt8, String)", + 0, + "{1:'Ready',187:'Steady',143:'Go'}", + Name("string -> int"), + ), + ( + "([],[])", + "Map(String, String)", + 0, + "{}", + Name("empty arrays to map str:str"), + ), + ( + "([],[])", + "Map(UInt8, Array(Int8))", + 0, + "{}", + Name("empty arrays to map uint8:array"), + ), + ( + "([[1]],['hello'])", + "Map(String, String)", + 0, + "{'[1]':'hello'}", + Name("array -> string"), + ), + ( + "([(1,2),(3,4)])", + "Map(UInt8, UInt8)", + 0, + "{1:2,3:4}", + Name("array of two tuples"), + ), + ( + "([1, 2], ['Ready', 'Steady', 'Go'])", + "Map(UInt8, String)", + 53, + "DB::Exception: CAST AS Map can only be performed from tuple of arrays with equal sizes", + Name("unequal array sizes"), + ), + ], ) -@Examples("tuple type exitcode message", [ - ("([1, 2, 3], ['Ready', 'Steady', 'Go'])", "Map(UInt8, String)", - 0, "{1:'Ready',2:'Steady',3:'Go'}", Name("int -> int")), - ("([1, 2, 3], ['Ready', 'Steady', 'Go'])", "Map(String, String)", - 0, "{'1':'Ready','2':'Steady','3':'Go'}", Name("int -> string")), - ("(['1', '2', '3'], ['Ready', 'Steady', 'Go'])", "Map(UInt8, String)", - 0, "{1:'Ready',187:'Steady',143:'Go'}", Name("string -> int")), - ("([],[])", "Map(String, String)", - 0, "{}", Name("empty arrays to map str:str")), - ("([],[])", "Map(UInt8, Array(Int8))", - 0, "{}", Name("empty arrays to map uint8:array")), - ("([[1]],['hello'])", "Map(String, String)", - 0, "{'[1]':'hello'}", Name("array -> string")), - ("([(1,2),(3,4)])", "Map(UInt8, UInt8)", - 0, "{1:2,3:4}", Name("array of two tuples")), - ("([1, 2], ['Ready', 'Steady', 'Go'])", "Map(UInt8, String)", - 53, "DB::Exception: CAST AS Map can only be performed from tuple of arrays with equal sizes", - Name("unequal array sizes")), -]) def cast_tuple_of_two_arrays_to_map(self, tuple, type, exitcode, message): - """Check casting Tuple(Array, Array) to a map type. 
- """ + """Check casting Tuple(Array, Array) to a map type.""" node = self.context.node with When("I try to cast tuple", description=tuple): - node.query(f"SELECT CAST({tuple}, '{type}') AS map", exitcode=exitcode, message=message) + node.query( + f"SELECT CAST({tuple}, '{type}') AS map", exitcode=exitcode, message=message + ) + @TestOutline(Scenario) @Requirements( RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysToMap("1.0"), - RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysMap_Invalid("1.0") + RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_TupleOfArraysMap_Invalid("1.0"), ) -@Examples("tuple type exitcode message check_insert", [ - ("(([1, 2, 3], ['Ready', 'Steady', 'Go']))", "Map(UInt8, String)", - 0, '{"m":{"1":"Ready","2":"Steady","3":"Go"}}', False, Name("int -> int")), - ("(([1, 2, 3], ['Ready', 'Steady', 'Go']))", "Map(String, String)", - 0, '{"m":{"1":"Ready","2":"Steady","3":"Go"}}', False, Name("int -> string")), - ("((['1', '2', '3'], ['Ready', 'Steady', 'Go']))", "Map(UInt8, String)", - 0, '', True, Name("string -> int")), - ("(([],[]))", "Map(String, String)", - 0, '{"m":{}}', False, Name("empty arrays to map str:str")), - ("(([],[]))", "Map(UInt8, Array(Int8))", - 0, '{"m":{}}', False, Name("empty arrays to map uint8:array")), - ("(([[1]],['hello']))", "Map(String, String)", - 53, 'DB::Exception: Type mismatch in IN or VALUES section', True, Name("array -> string")), - ("(([(1,2),(3,4)]))", "Map(UInt8, UInt8)", - 0, '{"m":{"1":2,"3":4}}', False, Name("array of two tuples")), - ("(([1, 2], ['Ready', 'Steady', 'Go']))", "Map(UInt8, String)", - 53, "DB::Exception: CAST AS Map can only be performed from tuple of arrays with equal sizes", True, - Name("unequal array sizes")), -]) -def table_map_cast_tuple_of_arrays_to_map(self, tuple, type, exitcode, message, check_insert): - """Check converting Tuple(Array, Array) into map on insert into a map type column. 
- """ - table_map(type=type, data=tuple, select="*", filter="1=1", exitcode=exitcode, message=message, check_insert=check_insert) +@Examples( + "tuple type exitcode message check_insert", + [ + ( + "(([1, 2, 3], ['Ready', 'Steady', 'Go']))", + "Map(UInt8, String)", + 0, + '{"m":{"1":"Ready","2":"Steady","3":"Go"}}', + False, + Name("int -> int"), + ), + ( + "(([1, 2, 3], ['Ready', 'Steady', 'Go']))", + "Map(String, String)", + 0, + '{"m":{"1":"Ready","2":"Steady","3":"Go"}}', + False, + Name("int -> string"), + ), + ( + "((['1', '2', '3'], ['Ready', 'Steady', 'Go']))", + "Map(UInt8, String)", + 0, + "", + True, + Name("string -> int"), + ), + ( + "(([],[]))", + "Map(String, String)", + 0, + '{"m":{}}', + False, + Name("empty arrays to map str:str"), + ), + ( + "(([],[]))", + "Map(UInt8, Array(Int8))", + 0, + '{"m":{}}', + False, + Name("empty arrays to map uint8:array"), + ), + ( + "(([[1]],['hello']))", + "Map(String, String)", + 53, + "DB::Exception: Type mismatch in IN or VALUES section", + True, + Name("array -> string"), + ), + ( + "(([(1,2),(3,4)]))", + "Map(UInt8, UInt8)", + 0, + '{"m":{"1":2,"3":4}}', + False, + Name("array of two tuples"), + ), + ( + "(([1, 2], ['Ready', 'Steady', 'Go']))", + "Map(UInt8, String)", + 53, + "DB::Exception: CAST AS Map can only be performed from tuple of arrays with equal sizes", + True, + Name("unequal array sizes"), + ), + ], +) +def table_map_cast_tuple_of_arrays_to_map( + self, tuple, type, exitcode, message, check_insert +): + """Check converting Tuple(Array, Array) into map on insert into a map type column.""" + table_map( + type=type, + data=tuple, + select="*", + filter="1=1", + exitcode=exitcode, + message=message, + check_insert=check_insert, + ) + @TestOutline(Scenario) @Requirements( RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap("1.0"), - RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap_Invalid("1.0") + RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap_Invalid( + "1.0" + ), +) +@Examples( + "tuple type exitcode message", + [ + ( + "([(1,2),(3,4)])", + "Map(UInt8, UInt8)", + 0, + "{1:2,3:4}", + Name("array of two tuples"), + ), + ( + "([(1,2),(3)])", + "Map(UInt8, UInt8)", + 130, + "DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), UInt8 because some of them are Tuple and some of them are not", + Name("not a tuple"), + ), + ( + "([(1,2),(3,)])", + "Map(UInt8, UInt8)", + 130, + "DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), Tuple(UInt8) because Tuples have different sizes", + Name("invalid tuple"), + ), + ], ) -@Examples("tuple type exitcode message", [ - ("([(1,2),(3,4)])", "Map(UInt8, UInt8)", 0, "{1:2,3:4}", - Name("array of two tuples")), - ("([(1,2),(3)])", "Map(UInt8, UInt8)", 130, - "DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), UInt8 because some of them are Tuple and some of them are not", - Name("not a tuple")), - ("([(1,2),(3,)])", "Map(UInt8, UInt8)", 130, - "DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), Tuple(UInt8) because Tuples have different sizes", - Name("invalid tuple")), -]) def cast_array_of_two_tuples_to_map(self, tuple, type, exitcode, message): - """Check casting Array(Tuple(K,V)) to a map type. 
- """ + """Check casting Array(Tuple(K,V)) to a map type.""" node = self.context.node with When("I try to cast tuple", description=tuple): - node.query(f"SELECT CAST({tuple}, '{type}') AS map", exitcode=exitcode, message=message) + node.query( + f"SELECT CAST({tuple}, '{type}') AS map", exitcode=exitcode, message=message + ) + @TestOutline(Scenario) @Requirements( RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap("1.0"), - RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap_Invalid("1.0") + RQ_SRS_018_ClickHouse_Map_DataType_Conversion_From_ArrayOfTuplesToMap_Invalid( + "1.0" + ), ) -@Examples("tuple type exitcode message check_insert", [ - ("(([(1,2),(3,4)]))", "Map(UInt8, UInt8)", 0, '{"m":{"1":2,"3":4}}', False, - Name("array of two tuples")), - ("(([(1,2),(3)]))", "Map(UInt8, UInt8)", 130, - "DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), UInt8 because some of them are Tuple and some of them are not", True, - Name("not a tuple")), - ("(([(1,2),(3,)]))", "Map(UInt8, UInt8)", 130, - "DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), Tuple(UInt8) because Tuples have different sizes", True, - Name("invalid tuple")), -]) -def table_map_cast_array_of_two_tuples_to_map(self, tuple, type, exitcode, message, check_insert): - """Check converting Array(Tuple(K,V),...) into map on insert into a map type column. - """ - table_map(type=type, data=tuple, select="*", filter="1=1", exitcode=exitcode, message=message, check_insert=check_insert) +@Examples( + "tuple type exitcode message check_insert", + [ + ( + "(([(1,2),(3,4)]))", + "Map(UInt8, UInt8)", + 0, + '{"m":{"1":2,"3":4}}', + False, + Name("array of two tuples"), + ), + ( + "(([(1,2),(3)]))", + "Map(UInt8, UInt8)", + 130, + "DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), UInt8 because some of them are Tuple and some of them are not", + True, + Name("not a tuple"), + ), + ( + "(([(1,2),(3,)]))", + "Map(UInt8, UInt8)", + 130, + "DB::Exception: There is no supertype for types Tuple(UInt8, UInt8), Tuple(UInt8) because Tuples have different sizes", + True, + Name("invalid tuple"), + ), + ], +) +def table_map_cast_array_of_two_tuples_to_map( + self, tuple, type, exitcode, message, check_insert +): + """Check converting Array(Tuple(K,V),...) 
into map on insert into a map type column.""" + table_map( + type=type, + data=tuple, + select="*", + filter="1=1", + exitcode=exitcode, + message=message, + check_insert=check_insert, + ) + @TestScenario @Requirements( @@ -791,7 +1856,12 @@ def subcolumns_keys_using_inline_defined_map(self): message = "DB::Exception: Missing columns: 'c.keys'" with When("I try to access keys sub-column using an inline defined map"): - node.query("SELECT map( 'aa', 4, '44' , 5) as c, c.keys", exitcode=exitcode, message=message) + node.query( + "SELECT map( 'aa', 4, '44' , 5) as c, c.keys", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( @@ -803,261 +1873,373 @@ def subcolumns_values_using_inline_defined_map(self): message = "DB::Exception: Missing columns: 'c.values'" with When("I try to access values sub-column using an inline defined map"): - node.query("SELECT map( 'aa', 4, '44' , 5) as c, c.values", exitcode=exitcode, message=message) + node.query( + "SELECT map( 'aa', 4, '44' , 5) as c, c.values", + exitcode=exitcode, + message=message, + ) + @TestOutline(Scenario) @Requirements( RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Keys("1.0"), RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Keys_ArrayFunctions("1.0"), RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Values("1.0"), - RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Values_ArrayFunctions("1.0") + RQ_SRS_018_ClickHouse_Map_DataType_SubColumns_Values_ArrayFunctions("1.0"), +) +@Examples( + "type data select filter exitcode message", + [ + # keys + ( + "Map(String, String)", + "(map('a','b','c','d')),(map('e','f'))", + "m.keys AS keys", + "1=1", + 0, + '{"keys":["a","c"]}\n{"keys":["e"]}', + Name("select keys"), + ), + ( + "Map(String, String)", + "(map('a','b','c','d')),(map('e','f'))", + "m.keys AS keys", + "has(m.keys, 'e')", + 0, + '{"keys":["e"]}', + Name("filter by using keys in an array function"), + ), + ( + "Map(String, String)", + "(map('a','b','c','d')),(map('e','f'))", + "has(m.keys, 'e') AS r", + "1=1", + 0, + '{"r":0}\n{"r":1}', + Name("column that uses keys in an array function"), + ), + # values + ( + "Map(String, String)", + "(map('a','b','c','d')),(map('e','f'))", + "m.values AS values", + "1=1", + 0, + '{"values":["b","d"]}\n{"values":["f"]}', + Name("select values"), + ), + ( + "Map(String, String)", + "(map('a','b','c','d')),(map('e','f'))", + "m.values AS values", + "has(m.values, 'f')", + 0, + '{"values":["f"]}', + Name("filter by using values in an array function"), + ), + ( + "Map(String, String)", + "(map('a','b','c','d')),(map('e','f'))", + "has(m.values, 'f') AS r", + "1=1", + 0, + '{"r":0}\n{"r":1}', + Name("column that uses values in an array function"), + ), + ], ) -@Examples("type data select filter exitcode message", [ - # keys - ("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "m.keys AS keys", "1=1", - 0, '{"keys":["a","c"]}\n{"keys":["e"]}', Name("select keys")), - ("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "m.keys AS keys", "has(m.keys, 'e')", - 0, '{"keys":["e"]}', Name("filter by using keys in an array function")), - ("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "has(m.keys, 'e') AS r", "1=1", - 0, '{"r":0}\n{"r":1}', Name("column that uses keys in an array function")), - # values - ("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "m.values AS values", "1=1", - 0, '{"values":["b","d"]}\n{"values":["f"]}', Name("select values")), - ("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "m.values AS values", 
"has(m.values, 'f')", - 0, '{"values":["f"]}', Name("filter by using values in an array function")), - ("Map(String, String)", "(map('a','b','c','d')),(map('e','f'))", "has(m.values, 'f') AS r", "1=1", - 0, '{"r":0}\n{"r":1}', Name("column that uses values in an array function")) -]) def subcolumns(self, type, data, select, filter, exitcode, message, order_by=None): - """Check usage of sub-columns in queries. - """ - table_map(type=type, data=data, select=select, filter=filter, exitcode=exitcode, message=message, order_by=order_by) + """Check usage of sub-columns in queries.""" + table_map( + type=type, + data=data, + select=select, + filter=filter, + exitcode=exitcode, + message=message, + order_by=order_by, + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Functions_Length("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Functions_Length("1.0")) def length(self): - """Check usage of length function with map data type. - """ - table_map(type="Map(String, String)", + """Check usage of length function with map data type.""" + table_map( + type="Map(String, String)", data="(map('a','b','c','d')),(map('e','f'))", select="length(m) AS len, m", filter="length(m) = 1", - exitcode=0, message='{"len":"1","m":{"e":"f"}}') + exitcode=0, + message='{"len":"1","m":{"e":"f"}}', + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Functions_Empty("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Functions_Empty("1.0")) def empty(self): - """Check usage of empty function with map data type. - """ - table_map(type="Map(String, String)", + """Check usage of empty function with map data type.""" + table_map( + type="Map(String, String)", data="(map('e','f'))", select="empty(m) AS em, m", filter="empty(m) <> 1", - exitcode=0, message='{"em":0,"m":{"e":"f"}}') + exitcode=0, + message='{"em":0,"m":{"e":"f"}}', + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Functions_NotEmpty("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Functions_NotEmpty("1.0")) def notempty(self): - """Check usage of notEmpty function with map data type. - """ - table_map(type="Map(String, String)", + """Check usage of notEmpty function with map data type.""" + table_map( + type="Map(String, String)", data="(map('e','f'))", select="notEmpty(m) AS em, m", filter="notEmpty(m) = 1", - exitcode=0, message='{"em":1,"m":{"e":"f"}}') + exitcode=0, + message='{"em":1,"m":{"e":"f"}}', + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapAdd("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapAdd("1.0")) def cast_from_mapadd(self): - """Check converting the result of mapAdd function to a map data type. - """ - select_map(map="CAST(mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])), 'Map(Int8, Int8)')", output="{1:2,2:2}") + """Check converting the result of mapAdd function to a map data type.""" + select_map( + map="CAST(mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])), 'Map(Int8, Int8)')", + output="{1:2,2:2}", + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapSubstract("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapSubstract("1.0")) def cast_from_mapsubstract(self): - """Check converting the result of mapSubstract function to a map data type. 
- """ - select_map(map="CAST(mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])), 'Map(Int8, Int8)')", output="{1:-1,2:0}") + """Check converting the result of mapSubstract function to a map data type.""" + select_map( + map="CAST(mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt32(2), 1])), 'Map(Int8, Int8)')", + output="{1:-1,2:0}", + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapPopulateSeries("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map_MapPopulateSeries("1.0")) def cast_from_mappopulateseries(self): - """Check converting the result of mapPopulateSeries function to a map data type. - """ - select_map(map="CAST(mapPopulateSeries([1,2,4], [11,22,44], 5), 'Map(Int8, Int8)')", output="{1:11,2:22,3:0,4:44,5:0}") + """Check converting the result of mapPopulateSeries function to a map data type.""" + select_map( + map="CAST(mapPopulateSeries([1,2,4], [11,22,44], 5), 'Map(Int8, Int8)')", + output="{1:11,2:22,3:0,4:44,5:0}", + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapContains("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapContains("1.0")) def mapcontains(self): - """Check usages of mapContains function with map data type. - """ + """Check usages of mapContains function with map data type.""" node = self.context.node with Example("key in map"): - table_map(type="Map(String, String)", + table_map( + type="Map(String, String)", data="(map('e','f')),(map('a','b'))", select="m", filter="mapContains(m, 'a')", - exitcode=0, message='{"m":{"a":"b"}}') + exitcode=0, + message='{"m":{"a":"b"}}', + ) with Example("key not in map"): - table_map(type="Map(String, String)", + table_map( + type="Map(String, String)", data="(map('e','f')),(map('a','b'))", select="m", filter="NOT mapContains(m, 'a')", - exitcode=0, message='{"m":{"e":"f"}}') + exitcode=0, + message='{"m":{"e":"f"}}', + ) with Example("null key not in map"): - table_map(type="Map(Nullable(String), String)", + table_map( + type="Map(Nullable(String), String)", data="(map('e','f')),(map('a','b'))", select="m", filter="mapContains(m, NULL)", - exitcode=0, message='') + exitcode=0, + message="", + ) with Example("null key in map"): - table_map(type="Map(Nullable(String), String)", + table_map( + type="Map(Nullable(String), String)", data="(map('e','f')),(map('a','b')),(map(NULL,'c'))", select="m", filter="mapContains(m, NULL)", - exitcode=0, message='{null:"c"}') + exitcode=0, + message='{null:"c"}', + ) with Example("select nullable key"): - node.query("SELECT map(NULL, 1, 2, 3) AS m, mapContains(m, toNullable(toUInt8(2)))", exitcode=0, message="{2:3}") + node.query( + "SELECT map(NULL, 1, 2, 3) AS m, mapContains(m, toNullable(toUInt8(2)))", + exitcode=0, + message="{2:3}", + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapKeys("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapKeys("1.0")) def mapkeys(self): - """Check usages of mapKeys function with map data type. 
- """ + """Check usages of mapKeys function with map data type.""" with Example("key in map"): - table_map(type="Map(String, String)", + table_map( + type="Map(String, String)", data="(map('e','f')),(map('a','b'))", select="m", filter="has(mapKeys(m), 'a')", - exitcode=0, message='{"m":{"a":"b"}}') + exitcode=0, + message='{"m":{"a":"b"}}', + ) with Example("key not in map"): - table_map(type="Map(String, String)", + table_map( + type="Map(String, String)", data="(map('e','f')),(map('a','b'))", select="m", filter="NOT has(mapKeys(m), 'a')", - exitcode=0, message='{"m":{"e":"f"}}') + exitcode=0, + message='{"m":{"e":"f"}}', + ) with Example("null key not in map"): - table_map(type="Map(Nullable(String), String)", + table_map( + type="Map(Nullable(String), String)", data="(map('e','f')),(map('a','b'))", select="m", filter="has(mapKeys(m), NULL)", - exitcode=0, message='') + exitcode=0, + message="", + ) with Example("null key in map"): - table_map(type="Map(Nullable(String), String)", + table_map( + type="Map(Nullable(String), String)", data="(map('e','f')),(map('a','b')),(map(NULL,'c'))", select="m", filter="has(mapKeys(m), NULL)", - exitcode=0, message='{"m":{null:"c"}}') + exitcode=0, + message='{"m":{null:"c"}}', + ) with Example("select keys from column"): - table_map(type="Map(Nullable(String), String)", + table_map( + type="Map(Nullable(String), String)", data="(map('e','f')),(map('a','b')),(map(NULL,'c'))", select="mapKeys(m) AS keys", filter="1 = 1", - exitcode=0, message='{"keys":["a"]}\n{"keys":["e"]}\n{"keys":[null]}') + exitcode=0, + message='{"keys":["a"]}\n{"keys":["e"]}\n{"keys":[null]}', + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapValues("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Functions_MapValues("1.0")) def mapvalues(self): - """Check usages of mapValues function with map data type. 
- """ + """Check usages of mapValues function with map data type.""" with Example("value in map"): - table_map(type="Map(String, String)", + table_map( + type="Map(String, String)", data="(map('e','f')),(map('a','b'))", select="m", filter="has(mapValues(m), 'b')", - exitcode=0, message='{"m":{"a":"b"}}') + exitcode=0, + message='{"m":{"a":"b"}}', + ) with Example("value not in map"): - table_map(type="Map(String, String)", + table_map( + type="Map(String, String)", data="(map('e','f')),(map('a','b'))", select="m", filter="NOT has(mapValues(m), 'b')", - exitcode=0, message='{"m":{"e":"f"}}') + exitcode=0, + message='{"m":{"e":"f"}}', + ) with Example("null value not in map"): - table_map(type="Map(String, Nullable(String))", + table_map( + type="Map(String, Nullable(String))", data="(map('e','f')),(map('a','b'))", select="m", filter="has(mapValues(m), NULL)", - exitcode=0, message='') + exitcode=0, + message="", + ) with Example("null value in map"): - table_map(type="Map(String, Nullable(String))", + table_map( + type="Map(String, Nullable(String))", data="(map('e','f')),(map('a','b')),(map('c',NULL))", select="m", filter="has(mapValues(m), NULL)", - exitcode=0, message='{"m":{"c":null}}') + exitcode=0, + message='{"m":{"c":null}}', + ) with Example("select values from column"): - table_map(type="Map(String, Nullable(String))", + table_map( + type="Map(String, Nullable(String))", data="(map('e','f')),(map('a','b')),(map('c',NULL))", select="mapValues(m) AS values", filter="1 = 1", - exitcode=0, message='{"values":["b"]}\n{"values":[null]}\n{"values":["f"]}') + exitcode=0, + message='{"values":["b"]}\n{"values":[null]}\n{"values":["f"]}', + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Functions_InlineDefinedMap("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Functions_InlineDefinedMap("1.0")) def functions_with_inline_defined_map(self): """Check that a map defined inline inside the select statement can be used with functions that work with maps. """ with Example("mapKeys"): - select_map(map="map(1,2,3,4) as map, mapKeys(map) AS keys", output="{1:2,3:4}\t[1,3]") + select_map( + map="map(1,2,3,4) as map, mapKeys(map) AS keys", output="{1:2,3:4}\t[1,3]" + ) with Example("mapValyes"): - select_map(map="map(1,2,3,4) as map, mapValues(map) AS values", output="{1:2,3:4}\t[2,4]") + select_map( + map="map(1,2,3,4) as map, mapValues(map) AS values", + output="{1:2,3:4}\t[2,4]", + ) with Example("mapContains"): - select_map(map="map(1,2,3,4) as map, mapContains(map, 1) AS contains", output="{1:2,3:4}\t1") + select_map( + map="map(1,2,3,4) as map, mapContains(map, 1) AS contains", + output="{1:2,3:4}\t1", + ) + @TestScenario def empty_map(self): """Check creating of an empty map `{}` using the map() function when inserting data into a map type table column. """ - table_map(type="Map(String, String)", + table_map( + type="Map(String, String)", data="(map('e','f')),(map())", select="m", filter="1=1", - exitcode=0, message='{"m":{}}\n{"m":{"e":"f"}}') + exitcode=0, + message='{"m":{}}\n{"m":{"e":"f"}}', + ) + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Performance_Vs_TupleOfArrays("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Performance_Vs_TupleOfArrays("1.0")) def performance_vs_two_tuple_of_arrays(self, len=10, rows=6000000): - """Check performance of using map data type vs Tuple(Array, Array). 
- """ + """Check performance of using map data type vs Tuple(Array, Array).""" uid = getuid() node = self.context.node @@ -1073,7 +2255,9 @@ def performance_vs_two_tuple_of_arrays(self, len=10, rows=6000000): keys = range(len) values = range(len) start_time = time.time() - node.query(f"INSERT INTO {tuple_table} SELECT ({keys},{values}) FROM numbers({rows})") + node.query( + f"INSERT INTO {tuple_table} SELECT ({keys},{values}) FROM numbers({rows})" + ) tuple_insert_time = time.time() - start_time metric("tuple insert time", tuple_insert_time, "sec") @@ -1081,34 +2265,40 @@ def performance_vs_two_tuple_of_arrays(self, len=10, rows=6000000): keys = range(len) values = range(len) start_time = time.time() - node.query(f"INSERT INTO {map_table} SELECT ({keys},{values}) FROM numbers({rows})") + node.query( + f"INSERT INTO {map_table} SELECT ({keys},{values}) FROM numbers({rows})" + ) map_insert_time = time.time() - start_time metric("map insert time", map_insert_time, "sec") with And("I retrieve particular key value from table with tuples"): start_time = time.time() - node.query(f"SELECT sum(arrayFirst((v, k) -> k = {len-1}, tupleElement(pairs, 2), tupleElement(pairs, 1))) AS sum FROM {tuple_table}", - exitcode=0, message=f"{rows*(len-1)}") + node.query( + f"SELECT sum(arrayFirst((v, k) -> k = {len-1}, tupleElement(pairs, 2), tupleElement(pairs, 1))) AS sum FROM {tuple_table}", + exitcode=0, + message=f"{rows*(len-1)}", + ) tuple_select_time = time.time() - start_time metric("tuple(array, array) select time", tuple_select_time, "sec") with And("I retrieve particular key value from table with map"): start_time = time.time() - node.query(f"SELECT sum(pairs[{len-1}]) AS sum FROM {map_table}", - exitcode=0, message=f"{rows*(len-1)}") + node.query( + f"SELECT sum(pairs[{len-1}]) AS sum FROM {map_table}", + exitcode=0, + message=f"{rows*(len-1)}", + ) map_select_time = time.time() - start_time metric("map select time", map_select_time, "sec") - metric("insert difference", (1 - map_insert_time/tuple_insert_time) * 100, "%") - metric("select difference", (1 - map_select_time/tuple_select_time) * 100, "%") + metric("insert difference", (1 - map_insert_time / tuple_insert_time) * 100, "%") + metric("select difference", (1 - map_select_time / tuple_select_time) * 100, "%") + @TestScenario -@Requirements( - RQ_SRS_018_ClickHouse_Map_DataType_Performance_Vs_ArrayOfTuples("1.0") -) +@Requirements(RQ_SRS_018_ClickHouse_Map_DataType_Performance_Vs_ArrayOfTuples("1.0")) def performance_vs_array_of_tuples(self, len=10, rows=6000000): - """Check performance of using map data type vs Array(Tuple(K,V)). 
- """ + """Check performance of using map data type vs Array(Tuple(K,V)).""" uid = getuid() node = self.context.node @@ -1121,7 +2311,7 @@ def performance_vs_array_of_tuples(self, len=10, rows=6000000): map_table = create_table(name=f"map_{uid}", statement=sql) with When("I insert data into table with an array of tuples"): - pairs = list(zip(range(len),range(len))) + pairs = list(zip(range(len), range(len))) start_time = time.time() node.query(f"INSERT INTO {array_table} SELECT ({pairs}) FROM numbers({rows})") array_insert_time = time.time() - start_time @@ -1131,31 +2321,39 @@ def performance_vs_array_of_tuples(self, len=10, rows=6000000): keys = range(len) values = range(len) start_time = time.time() - node.query(f"INSERT INTO {map_table} SELECT ({keys},{values}) FROM numbers({rows})") + node.query( + f"INSERT INTO {map_table} SELECT ({keys},{values}) FROM numbers({rows})" + ) map_insert_time = time.time() - start_time metric("map insert time", map_insert_time, "sec") with And("I retrieve particular key value from table with an array of tuples"): start_time = time.time() - node.query(f"SELECT sum(arrayFirst((v) -> v.1 = {len-1}, pairs).2) AS sum FROM {array_table}", - exitcode=0, message=f"{rows*(len-1)}") + node.query( + f"SELECT sum(arrayFirst((v) -> v.1 = {len-1}, pairs).2) AS sum FROM {array_table}", + exitcode=0, + message=f"{rows*(len-1)}", + ) array_select_time = time.time() - start_time metric("array(tuple(k,v)) select time", array_select_time, "sec") with And("I retrieve particular key value from table with map"): start_time = time.time() - node.query(f"SELECT sum(pairs[{len-1}]) AS sum FROM {map_table}", - exitcode=0, message=f"{rows*(len-1)}") + node.query( + f"SELECT sum(pairs[{len-1}]) AS sum FROM {map_table}", + exitcode=0, + message=f"{rows*(len-1)}", + ) map_select_time = time.time() - start_time metric("map select time", map_select_time, "sec") - metric("insert difference", (1 - map_insert_time/array_insert_time) * 100, "%") - metric("select difference", (1 - map_select_time/array_select_time) * 100, "%") + metric("insert difference", (1 - map_insert_time / array_insert_time) * 100, "%") + metric("select difference", (1 - map_select_time / array_select_time) * 100, "%") + @TestScenario def performance(self, len=10, rows=6000000): - """Check insert and select performance of using map data type. 
- """ + """Check insert and select performance of using map data type.""" uid = getuid() node = self.context.node @@ -1164,26 +2362,33 @@ def performance(self, len=10, rows=6000000): map_table = create_table(name=f"map_{uid}", statement=sql) with When("I insert data into table with a map"): - values = [x for pair in zip(range(len),range(len)) for x in pair] + values = [x for pair in zip(range(len), range(len)) for x in pair] start_time = time.time() - node.query(f"INSERT INTO {map_table} SELECT (map({','.join([str(v) for v in values])})) FROM numbers({rows})") + node.query( + f"INSERT INTO {map_table} SELECT (map({','.join([str(v) for v in values])})) FROM numbers({rows})" + ) map_insert_time = time.time() - start_time metric("map insert time", map_insert_time, "sec") with And("I retrieve particular key value from table with map"): start_time = time.time() - node.query(f"SELECT sum(pairs[{len-1}]) AS sum FROM {map_table}", - exitcode=0, message=f"{rows*(len-1)}") + node.query( + f"SELECT sum(pairs[{len-1}]) AS sum FROM {map_table}", + exitcode=0, + message=f"{rows*(len-1)}", + ) map_select_time = time.time() - start_time metric("map select time", map_select_time, "sec") + # FIXME: add tests for different table engines + @TestFeature @Name("tests") @Requirements( RQ_SRS_018_ClickHouse_Map_DataType("1.0"), - RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map("1.0") + RQ_SRS_018_ClickHouse_Map_DataType_Functions_Map("1.0"), ) def feature(self, node="clickhouse1"): self.context.node = self.context.cluster.node(node) diff --git a/tests/testflows/rbac/helper/common.py b/tests/testflows/rbac/helper/common.py index b1d4da536dd..352ce6cb77e 100755 --- a/tests/testflows/rbac/helper/common.py +++ b/tests/testflows/rbac/helper/common.py @@ -11,15 +11,20 @@ from testflows.core import * from helpers.common import instrument_clickhouse_server_log from rbac.helper.tables import table_types + def permutations(table_count=1): - return [*range((1 << table_count)-1)] + return [*range((1 << table_count) - 1)] + def getuid(): if current().subtype == TestSubType.Example: - testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}" + testname = ( + f"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}" + ) else: testname = f"{basename(current().name).replace(' ', '_').replace(',','')}" - return testname + "_" + str(uuid.uuid1()).replace('-', '_') + return testname + "_" + str(uuid.uuid1()).replace("-", "_") + @contextmanager def table(node, name, table_type_name="MergeTree"): @@ -35,10 +40,13 @@ def table(node, name, table_type_name="MergeTree"): for name in names: with Finally(f"I drop the table {name}"): if table_type.cluster: - node.query(f"DROP TABLE IF EXISTS {name} ON CLUSTER {table_type.cluster}") + node.query( + f"DROP TABLE IF EXISTS {name} ON CLUSTER {table_type.cluster}" + ) else: node.query(f"DROP TABLE IF EXISTS {name}") + @contextmanager def user(node, name): try: @@ -52,6 +60,7 @@ def user(node, name): with Finally("I drop the user"): node.query(f"DROP USER IF EXISTS {name}") + @contextmanager def role(node, role): try: @@ -65,10 +74,10 @@ def role(node, role): with Finally("I drop the role"): node.query(f"DROP ROLE IF EXISTS {role}") + @TestStep(Given) def row_policy(self, name, table, node=None): - """Create a row policy with a given name on a given table. 
- """ + """Create a row policy with a given name on a given table.""" if node is None: node = self.context.node @@ -81,33 +90,41 @@ def row_policy(self, name, table, node=None): with Finally(f"I delete row policy {name}"): node.query(f"DROP ROW POLICY IF EXISTS {name} ON {table}") + tables = { - "table0" : 1 << 0, - "table1" : 1 << 1, - "table2" : 1 << 2, - "table3" : 1 << 3, - "table4" : 1 << 4, - "table5" : 1 << 5, - "table6" : 1 << 6, - "table7" : 1 << 7, + "table0": 1 << 0, + "table1": 1 << 1, + "table2": 1 << 2, + "table3": 1 << 3, + "table4": 1 << 4, + "table5": 1 << 5, + "table6": 1 << 6, + "table7": 1 << 7, } + @contextmanager def grant_select_on_table(node, grants, target_name, *table_names): try: tables_granted = [] for table_number in range(len(table_names)): - if(grants & tables[f"table{table_number}"]): + if grants & tables[f"table{table_number}"]: with When(f"I grant select privilege on {table_names[table_number]}"): - node.query(f"GRANT SELECT ON {table_names[table_number]} TO {target_name}") + node.query( + f"GRANT SELECT ON {table_names[table_number]} TO {target_name}" + ) - tables_granted.append(f'{table_names[table_number]}') + tables_granted.append(f"{table_names[table_number]}") - yield (', ').join(tables_granted) + yield (", ").join(tables_granted) finally: for table_number in range(len(table_names)): - with Finally(f"I revoke the select privilege on {table_names[table_number]}"): - node.query(f"REVOKE SELECT ON {table_names[table_number]} FROM {target_name}") + with Finally( + f"I revoke the select privilege on {table_names[table_number]}" + ): + node.query( + f"REVOKE SELECT ON {table_names[table_number]} FROM {target_name}" + ) diff --git a/tests/testflows/rbac/helper/errors.py b/tests/testflows/rbac/helper/errors.py index 65fdd3a8e42..fc8c88dbfc7 100755 --- a/tests/testflows/rbac/helper/errors.py +++ b/tests/testflows/rbac/helper/errors.py @@ -4,120 +4,183 @@ not_found = "Exception: There is no {type} `{name}`" + def user_not_found_in_disk(name): - return (192,not_found.format(type="user",name=name)) + return (192, not_found.format(type="user", name=name)) + def role_not_found_in_disk(name): - return (255,not_found.format(type="role",name=name)) + return (255, not_found.format(type="role", name=name)) + def settings_profile_not_found_in_disk(name): - return (180,not_found.format(type="settings profile",name=name)) + return (180, not_found.format(type="settings profile", name=name)) + def quota_not_found_in_disk(name): - return (199,not_found.format(type="quota",name=name)) + return (199, not_found.format(type="quota", name=name)) + def row_policy_not_found_in_disk(name): - return (11,not_found.format(type="row policy",name=name)) + return (11, not_found.format(type="row policy", name=name)) + def table_does_not_exist(name): - return(60,"Exception: Table {name} doesn't exist".format(name=name)) + return (60, "Exception: Table {name} doesn't exist".format(name=name)) + # Errors: cannot_rename cannot_rename = "Exception: {type} `{name}`: cannot rename to `{name_new}` because {type} `{name_new}` already exists" cannot_rename_exitcode = 237 -def cannot_rename_user(name,name_new): - return (cannot_rename_exitcode, cannot_rename.format(type="user", name=name, name_new=name_new)) -def cannot_rename_role(name,name_new): - return (cannot_rename_exitcode, cannot_rename.format(type="role", name=name, name_new=name_new)) +def cannot_rename_user(name, name_new): + return ( + cannot_rename_exitcode, + cannot_rename.format(type="user", name=name, name_new=name_new), + ) -def 
cannot_rename_settings_profile(name,name_new): - return (cannot_rename_exitcode, cannot_rename.format(type="settings profile", name=name, name_new=name_new)) -def cannot_rename_quota(name,name_new): - return (cannot_rename_exitcode, cannot_rename.format(type="quota", name=name, name_new=name_new)) +def cannot_rename_role(name, name_new): + return ( + cannot_rename_exitcode, + cannot_rename.format(type="role", name=name, name_new=name_new), + ) + + +def cannot_rename_settings_profile(name, name_new): + return ( + cannot_rename_exitcode, + cannot_rename.format(type="settings profile", name=name, name_new=name_new), + ) + + +def cannot_rename_quota(name, name_new): + return ( + cannot_rename_exitcode, + cannot_rename.format(type="quota", name=name, name_new=name_new), + ) + + +def cannot_rename_row_policy(name, name_new): + return ( + cannot_rename_exitcode, + cannot_rename.format(type="row policy", name=name, name_new=name_new), + ) -def cannot_rename_row_policy(name,name_new): - return (cannot_rename_exitcode, cannot_rename.format(type="row policy", name=name, name_new=name_new)) # Errors: cannot insert -cannot_insert = "Exception: {type} `{name}`: cannot insert because {type} `{name}` already exists" +cannot_insert = ( + "Exception: {type} `{name}`: cannot insert because {type} `{name}` already exists" +) cannot_insert_exitcode = 237 + def cannot_insert_user(name): - return (cannot_insert_exitcode, cannot_insert.format(type="user",name=name)) + return (cannot_insert_exitcode, cannot_insert.format(type="user", name=name)) + def cannot_insert_role(name): - return (cannot_insert_exitcode, cannot_insert.format(type="role",name=name)) + return (cannot_insert_exitcode, cannot_insert.format(type="role", name=name)) + def cannot_insert_settings_profile(name): - return (cannot_insert_exitcode, cannot_insert.format(type="settings profile",name=name)) + return ( + cannot_insert_exitcode, + cannot_insert.format(type="settings profile", name=name), + ) + def cannot_insert_quota(name): - return (cannot_insert_exitcode, cannot_insert.format(type="quota",name=name)) + return (cannot_insert_exitcode, cannot_insert.format(type="quota", name=name)) + def cannot_insert_row_policy(name): - return (cannot_insert_exitcode, cannot_insert.format(type="row policy",name=name)) + return (cannot_insert_exitcode, cannot_insert.format(type="row policy", name=name)) + # Error: default is readonly cannot_remove_default = "Exception: Cannot remove {type} `default` from users.xml because this storage is readonly" cannot_remove_default_exitcode = 239 + def cannot_update_default(): - return (cannot_remove_default_exitcode, "Exception: Cannot update user `default` in users.xml because this storage is readonly") + return ( + cannot_remove_default_exitcode, + "Exception: Cannot update user `default` in users.xml because this storage is readonly", + ) + def cannot_remove_user_default(): return (cannot_remove_default_exitcode, cannot_remove_default.format(type="user")) + def cannot_remove_settings_profile_default(): - return (cannot_remove_default_exitcode, cannot_remove_default.format(type="settings profile")) + return ( + cannot_remove_default_exitcode, + cannot_remove_default.format(type="settings profile"), + ) + def cannot_remove_quota_default(): return (cannot_remove_default_exitcode, cannot_remove_default.format(type="quota")) + # Other syntax errors + def unknown_setting(setting): return (115, f"Exception: Unknown setting {setting}.") + def cluster_not_found(cluster): return (170, f"Exception: Requested cluster '{cluster}' 
not found.") + ## Privileges + def not_enough_privileges(name): return (241, f"Exception: {name}: Not enough privileges.") + def cannot_parse_string_as_float(string): return (6, f"Exception: Cannot parse string '{string}' as Float64") + def missing_columns(name): return (47, f"Exception: Missing columns: '{name}' while processing query") + # Errors: wrong name wrong_name = "Exception: Wrong {type} name. Cannot find {type} `{name}` to drop" + def wrong_column_name(name): - return (10, wrong_name.format(type="column",name=name)) + return (10, wrong_name.format(type="column", name=name)) + def wrong_index_name(name): - return (36, wrong_name.format(type="index",name=name)) + return (36, wrong_name.format(type="index", name=name)) + def wrong_constraint_name(name): - return (36, wrong_name.format(type="constraint",name=name)) + return (36, wrong_name.format(type="constraint", name=name)) + # Errors: cannot add cannot_add = "Exception: Cannot add index {name}: index with this name already exists" cannot_add_exitcode = 44 + def cannot_add_index(name): return (cannot_add_exitcode, cannot_add.format(name=name)) + def cannot_add_constraint(name): return (cannot_add_exitcode, cannot_add.format(name=name)) diff --git a/tests/testflows/rbac/helper/tables.py b/tests/testflows/rbac/helper/tables.py index ee6289bcbb5..fc8242c0303 100755 --- a/tests/testflows/rbac/helper/tables.py +++ b/tests/testflows/rbac/helper/tables.py @@ -3,39 +3,102 @@ from collections import namedtuple table_tuple = namedtuple("table_tuple", "create_statement cluster") table_types = { - "MergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = MergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None), - "ReplacingMergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = ReplacingMergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None), - "SummingMergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8 DEFAULT 1, x String, y Int8) ENGINE = SummingMergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None), - "AggregatingMergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = AggregatingMergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None), - "CollapsingMergeTree": table_tuple("CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, sign Int8 DEFAULT 1) ENGINE = CollapsingMergeTree(sign) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None), - "VersionedCollapsingMergeTree": table_tuple("CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, version UInt64, sign Int8 DEFAULT 1) ENGINE = VersionedCollapsingMergeTree(sign, version) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None), - "GraphiteMergeTree": table_tuple("CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, Path String, Time DateTime, Value Float64, col UInt64, Timestamp Int64) ENGINE = GraphiteMergeTree('graphite_rollup_example') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None), - "ReplicatedMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8, x String, y Int8) \ - ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"), - "ReplicatedMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8, x String, y Int8) \ - ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"), - "ReplicatedReplacingMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8, x String, y Int8) \ - ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"), - "ReplicatedReplacingMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8, x String, y Int8) \ - ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"), - "ReplicatedSummingMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8 DEFAULT 1, x String, y Int8) \ - ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"), - "ReplicatedSummingMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8 DEFAULT 1, x String, y Int8) \ - ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"), - "ReplicatedAggregatingMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8, x String, y Int8) \ - ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"), - "ReplicatedAggregatingMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8, x String, y Int8) \ - ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"), - "ReplicatedCollapsingMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d Date, a String, b UInt8, x String, y Int8, sign Int8 DEFAULT 1) \ - ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"), - "ReplicatedCollapsingMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d Date, a String, b UInt8, x String, y Int8, sign Int8 DEFAULT 1) \ - ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"), - "ReplicatedVersionedCollapsingMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d Date, a String, b UInt8, x String, y Int8, version UInt64, sign Int8 DEFAULT 1) \ - ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign, version) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"), - "ReplicatedVersionedCollapsingMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d Date, a String, b UInt8, x String, y Int8, version UInt64, sign Int8 DEFAULT 1) \ - ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign, version) PARTITION BY y ORDER BY (b, d) PRIMARY 
KEY b", "one_shard_cluster"), - "ReplicatedGraphiteMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d Date, a String, b UInt8, x String, y Int8, Path String, Time DateTime, Value Float64, col UInt64, Timestamp Int64) \ - ENGINE = ReplicatedGraphiteMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', 'graphite_rollup_example') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"), - "ReplicatedGraphiteMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d Date, a String, b UInt8, x String, y Int8, Path String, Time DateTime, Value Float64, col UInt64, Timestamp Int64) \ - ENGINE = ReplicatedGraphiteMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', 'graphite_rollup_example') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"), + "MergeTree": table_tuple( + "CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = MergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + None, + ), + "ReplacingMergeTree": table_tuple( + "CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = ReplacingMergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + None, + ), + "SummingMergeTree": table_tuple( + "CREATE TABLE {name} (d DATE, a String, b UInt8 DEFAULT 1, x String, y Int8) ENGINE = SummingMergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + None, + ), + "AggregatingMergeTree": table_tuple( + "CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = AggregatingMergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + None, + ), + "CollapsingMergeTree": table_tuple( + "CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, sign Int8 DEFAULT 1) ENGINE = CollapsingMergeTree(sign) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + None, + ), + "VersionedCollapsingMergeTree": table_tuple( + "CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, version UInt64, sign Int8 DEFAULT 1) ENGINE = VersionedCollapsingMergeTree(sign, version) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + None, + ), + "GraphiteMergeTree": table_tuple( + "CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, Path String, Time DateTime, Value Float64, col UInt64, Timestamp Int64) ENGINE = GraphiteMergeTree('graphite_rollup_example') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + None, + ), + "ReplicatedMergeTree-sharded_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8, x String, y Int8) \ + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "sharded_cluster", + ), + "ReplicatedMergeTree-one_shard_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8, x String, y Int8) \ + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "one_shard_cluster", + ), + "ReplicatedReplacingMergeTree-sharded_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8, x String, y Int8) \ + ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "sharded_cluster", + ), + "ReplicatedReplacingMergeTree-one_shard_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8, x String, y Int8) \ + ENGINE = 
ReplicatedReplacingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "one_shard_cluster", + ), + "ReplicatedSummingMergeTree-sharded_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8 DEFAULT 1, x String, y Int8) \ + ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "sharded_cluster", + ), + "ReplicatedSummingMergeTree-one_shard_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8 DEFAULT 1, x String, y Int8) \ + ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "one_shard_cluster", + ), + "ReplicatedAggregatingMergeTree-sharded_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8, x String, y Int8) \ + ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "sharded_cluster", + ), + "ReplicatedAggregatingMergeTree-one_shard_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8, x String, y Int8) \ + ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "one_shard_cluster", + ), + "ReplicatedCollapsingMergeTree-sharded_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER sharded_cluster (d Date, a String, b UInt8, x String, y Int8, sign Int8 DEFAULT 1) \ + ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "sharded_cluster", + ), + "ReplicatedCollapsingMergeTree-one_shard_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER one_shard_cluster (d Date, a String, b UInt8, x String, y Int8, sign Int8 DEFAULT 1) \ + ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "one_shard_cluster", + ), + "ReplicatedVersionedCollapsingMergeTree-sharded_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER sharded_cluster (d Date, a String, b UInt8, x String, y Int8, version UInt64, sign Int8 DEFAULT 1) \ + ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign, version) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "sharded_cluster", + ), + "ReplicatedVersionedCollapsingMergeTree-one_shard_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER one_shard_cluster (d Date, a String, b UInt8, x String, y Int8, version UInt64, sign Int8 DEFAULT 1) \ + ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign, version) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "one_shard_cluster", + ), + "ReplicatedGraphiteMergeTree-sharded_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER sharded_cluster (d Date, a String, b UInt8, x String, y Int8, Path String, Time DateTime, Value Float64, col UInt64, Timestamp Int64) \ + ENGINE = ReplicatedGraphiteMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', 'graphite_rollup_example') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "sharded_cluster", + ), + "ReplicatedGraphiteMergeTree-one_shard_cluster": table_tuple( + "CREATE TABLE {name} ON CLUSTER 
one_shard_cluster (d Date, a String, b UInt8, x String, y Int8, Path String, Time DateTime, Value Float64, col UInt64, Timestamp Int64) \ + ENGINE = ReplicatedGraphiteMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', 'graphite_rollup_example') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", + "one_shard_cluster", + ), } diff --git a/tests/testflows/rbac/regression.py b/tests/testflows/rbac/regression.py index 173785ea78f..eb1d6c9acf7 100755 --- a/tests/testflows/rbac/regression.py +++ b/tests/testflows/rbac/regression.py @@ -34,148 +34,197 @@ issue_25413 = "https://github.com/ClickHouse/ClickHouse/issues/25413" issue_26746 = "https://github.com/ClickHouse/ClickHouse/issues/26746" xfails = { - "syntax/show create quota/I show create quota current": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/12495")], - "views/:/create with subquery privilege granted directly or via role/:": - [(Fail, issue_14091)], - "views/:/create with join query privilege granted directly or via role/:": - [(Fail, issue_14091)], - "views/:/create with union query privilege granted directly or via role/:": - [(Fail, issue_14091)], - "views/:/create with join union subquery privilege granted directly or via role/:": - [(Fail, issue_14091)], - "views/:/create with nested views privilege granted directly or via role/:": - [(Fail, issue_14091)], - "views/view/select with join query privilege granted directly or via role/:": - [(Fail, issue_14149)], - "views/view/select with join union subquery privilege granted directly or via role/:": - [(Fail, issue_14149)], - "views/view/select with nested views privilege granted directly or via role/:": - [(Fail, issue_14149)], - "views/live view/refresh with privilege granted directly or via role/:": - [(Fail, issue_14224)], - "views/live view/refresh with privilege revoked directly or from role/:": - [(Fail, issue_14224)], - "views/live view/select:": - [(Fail, issue_14418)], - "views/live view/select:/:": - [(Fail, issue_14418)], - "views/materialized view/select with:": - [(Fail, issue_14451)], - "views/materialized view/select with:/:": - [(Fail, issue_14451)], - "views/materialized view/modify query:": - [(Fail, issue_14674)], - "views/materialized view/modify query:/:": - [(Fail, issue_14674)], - "views/materialized view/insert on source table privilege granted directly or via role/:": - [(Fail, issue_14810)], - "privileges/alter ttl/table_type=:/user with some privileges": - [(Fail, issue_14566)], - "privileges/alter ttl/table_type=:/role with some privileges": - [(Fail, issue_14566)], - "privileges/alter ttl/table_type=:/user with privileges on cluster": - [(Fail, issue_14566)], - "privileges/alter ttl/table_type=:/user with privileges from user with grant option": - [(Fail, issue_14566)], - "privileges/alter ttl/table_type=:/user with privileges from role with grant option": - [(Fail, issue_14566)], - "privileges/alter ttl/table_type=:/role with privileges from user with grant option": - [(Fail, issue_14566)], - "privileges/alter ttl/table_type=:/role with privileges from role with grant option": - [(Fail, issue_14566)], - "privileges/distributed table/:/special cases/insert with table on source table of materialized view:": - [(Fail, issue_14810)], - "privileges/distributed table/cluster tests/cluster='sharded*": - [(Fail, issue_15165)], - "privileges/distributed table/cluster tests/cluster=:/special cases/insert with table on source table of materialized view privilege granted directly or via role/:": - [(Fail, issue_14810)], - "views/materialized 
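For context on the reformatted `table_types` mapping above, a minimal sketch of how one `table_tuple.create_statement` is typically filled in; `table0` is a placeholder name and only the plain `MergeTree` entry is reproduced.

```python
# Minimal sketch: the MergeTree entry from table_types above, with its {name}
# placeholder filled in via str.format(), which is how the create_statement
# templates above are meant to be consumed.
from collections import namedtuple

table_tuple = namedtuple("table_tuple", "create_statement cluster")

merge_tree = table_tuple(
    "CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) "
    "ENGINE = MergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b",
    None,
)

statement = merge_tree.create_statement.format(name="table0")
assert statement.startswith("CREATE TABLE table0 (d DATE")
```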
view/select from implicit target table privilege granted directly or via role/select from implicit target table, privilege granted directly": - [(Fail, ".inner table is not created as expected")], - "views/materialized view/insert on target table privilege granted directly or via role/insert on target table, privilege granted through a role": - [(Fail, ".inner table is not created as expected")], - "views/materialized view/select from implicit target table privilege granted directly or via role/select from implicit target table, privilege granted through a role": - [(Fail, ".inner table is not created as expected")], - "views/materialized view/insert on target table privilege granted directly or via role/insert on target table, privilege granted directly": - [(Fail, ".inner table is not created as expected")], - "views/materialized view/select from source table privilege granted directly or via role/select from implicit target table, privilege granted directly": - [(Fail, ".inner table is not created as expected")], - "views/materialized view/select from source table privilege granted directly or via role/select from implicit target table, privilege granted through a role": - [(Fail, ".inner table is not created as expected")], - "privileges/alter move/:/:/:/:/move partition to implicit target table of a materialized view": - [(Fail, ".inner table is not created as expected")], - "privileges/alter move/:/:/:/:/user without ALTER MOVE PARTITION privilege/": - [(Fail, issue_16403)], - "privileges/alter move/:/:/:/:/user with revoked ALTER MOVE PARTITION privilege/": - [(Fail, issue_16403)], - "privileges/create table/create with join query privilege granted directly or via role/:": - [(Fail, issue_17653)], - "privileges/create table/create with join union subquery privilege granted directly or via role/:": - [(Fail, issue_17653)], - "privileges/create table/create with nested tables privilege granted directly or via role/:": - [(Fail, issue_17653)], - "privileges/kill mutation/no privilege/kill mutation on cluster": - [(Fail, issue_17146)], - "privileges/kill query/privilege granted directly or via role/:/": - [(Fail, issue_17147)], - "privileges/show dictionaries/:/check privilege/:/exists/EXISTS with privilege": - [(Fail, issue_17655)], - "privileges/public tables/sensitive tables": - [(Fail, issue_18110)], - "privileges/: row policy/nested live:": - [(Fail, issue_21083)], - "privileges/: row policy/nested mat:": - [(Fail, issue_21084)], - "privileges/show dictionaries/:/check privilege/check privilege=SHOW DICTIONARIES/show dict/SHOW DICTIONARIES with privilege": - [(Fail, "new bug")], - "privileges/show dictionaries/:/check privilege/check privilege=CREATE DICTIONARY/show dict/SHOW DICTIONARIES with privilege": - [(Fail, "new bug")], - "privileges/show dictionaries/:/check privilege/check privilege=DROP DICTIONARY/show dict/SHOW DICTIONARIES with privilege": - [(Fail, "new bug")], - "privileges/kill mutation/:/:/KILL ALTER : without privilege": - [(Fail, issue_25413)], - "privileges/kill mutation/:/:/KILL ALTER : with revoked privilege": - [(Fail, issue_25413)], - "privileges/kill mutation/:/:/KILL ALTER : with revoked ALL privilege": - [(Fail, issue_25413)], - "privileges/create table/create with subquery privilege granted directly or via role/create with subquery, privilege granted directly": - [(Fail, issue_26746)], - "privileges/create table/create with subquery privilege granted directly or via role/create with subquery, privilege granted through a role": - [(Fail, issue_26746)], - 
"views/live view/create with join subquery privilege granted directly or via role/create with join subquery, privilege granted directly": - [(Fail, issue_26746)], - "views/live view/create with join subquery privilege granted directly or via role/create with join subquery, privilege granted through a role": - [(Fail, issue_26746)], + "syntax/show create quota/I show create quota current": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/12495") + ], + "views/:/create with subquery privilege granted directly or via role/:": [ + (Fail, issue_14091) + ], + "views/:/create with join query privilege granted directly or via role/:": [ + (Fail, issue_14091) + ], + "views/:/create with union query privilege granted directly or via role/:": [ + (Fail, issue_14091) + ], + "views/:/create with join union subquery privilege granted directly or via role/:": [ + (Fail, issue_14091) + ], + "views/:/create with nested views privilege granted directly or via role/:": [ + (Fail, issue_14091) + ], + "views/view/select with join query privilege granted directly or via role/:": [ + (Fail, issue_14149) + ], + "views/view/select with join union subquery privilege granted directly or via role/:": [ + (Fail, issue_14149) + ], + "views/view/select with nested views privilege granted directly or via role/:": [ + (Fail, issue_14149) + ], + "views/live view/refresh with privilege granted directly or via role/:": [ + (Fail, issue_14224) + ], + "views/live view/refresh with privilege revoked directly or from role/:": [ + (Fail, issue_14224) + ], + "views/live view/select:": [(Fail, issue_14418)], + "views/live view/select:/:": [(Fail, issue_14418)], + "views/materialized view/select with:": [(Fail, issue_14451)], + "views/materialized view/select with:/:": [(Fail, issue_14451)], + "views/materialized view/modify query:": [(Fail, issue_14674)], + "views/materialized view/modify query:/:": [(Fail, issue_14674)], + "views/materialized view/insert on source table privilege granted directly or via role/:": [ + (Fail, issue_14810) + ], + "privileges/alter ttl/table_type=:/user with some privileges": [ + (Fail, issue_14566) + ], + "privileges/alter ttl/table_type=:/role with some privileges": [ + (Fail, issue_14566) + ], + "privileges/alter ttl/table_type=:/user with privileges on cluster": [ + (Fail, issue_14566) + ], + "privileges/alter ttl/table_type=:/user with privileges from user with grant option": [ + (Fail, issue_14566) + ], + "privileges/alter ttl/table_type=:/user with privileges from role with grant option": [ + (Fail, issue_14566) + ], + "privileges/alter ttl/table_type=:/role with privileges from user with grant option": [ + (Fail, issue_14566) + ], + "privileges/alter ttl/table_type=:/role with privileges from role with grant option": [ + (Fail, issue_14566) + ], + "privileges/distributed table/:/special cases/insert with table on source table of materialized view:": [ + (Fail, issue_14810) + ], + "privileges/distributed table/cluster tests/cluster='sharded*": [ + (Fail, issue_15165) + ], + "privileges/distributed table/cluster tests/cluster=:/special cases/insert with table on source table of materialized view privilege granted directly or via role/:": [ + (Fail, issue_14810) + ], + "views/materialized view/select from implicit target table privilege granted directly or via role/select from implicit target table, privilege granted directly": [ + (Fail, ".inner table is not created as expected") + ], + "views/materialized view/insert on target table privilege granted directly or via role/insert on 
target table, privilege granted through a role": [ + (Fail, ".inner table is not created as expected") + ], + "views/materialized view/select from implicit target table privilege granted directly or via role/select from implicit target table, privilege granted through a role": [ + (Fail, ".inner table is not created as expected") + ], + "views/materialized view/insert on target table privilege granted directly or via role/insert on target table, privilege granted directly": [ + (Fail, ".inner table is not created as expected") + ], + "views/materialized view/select from source table privilege granted directly or via role/select from implicit target table, privilege granted directly": [ + (Fail, ".inner table is not created as expected") + ], + "views/materialized view/select from source table privilege granted directly or via role/select from implicit target table, privilege granted through a role": [ + (Fail, ".inner table is not created as expected") + ], + "privileges/alter move/:/:/:/:/move partition to implicit target table of a materialized view": [ + (Fail, ".inner table is not created as expected") + ], + "privileges/alter move/:/:/:/:/user without ALTER MOVE PARTITION privilege/": [ + (Fail, issue_16403) + ], + "privileges/alter move/:/:/:/:/user with revoked ALTER MOVE PARTITION privilege/": [ + (Fail, issue_16403) + ], + "privileges/create table/create with join query privilege granted directly or via role/:": [ + (Fail, issue_17653) + ], + "privileges/create table/create with join union subquery privilege granted directly or via role/:": [ + (Fail, issue_17653) + ], + "privileges/create table/create with nested tables privilege granted directly or via role/:": [ + (Fail, issue_17653) + ], + "privileges/kill mutation/no privilege/kill mutation on cluster": [ + (Fail, issue_17146) + ], + "privileges/kill query/privilege granted directly or via role/:/": [ + (Fail, issue_17147) + ], + "privileges/show dictionaries/:/check privilege/:/exists/EXISTS with privilege": [ + (Fail, issue_17655) + ], + "privileges/public tables/sensitive tables": [(Fail, issue_18110)], + "privileges/: row policy/nested live:": [(Fail, issue_21083)], + "privileges/: row policy/nested mat:": [(Fail, issue_21084)], + "privileges/show dictionaries/:/check privilege/check privilege=SHOW DICTIONARIES/show dict/SHOW DICTIONARIES with privilege": [ + (Fail, "new bug") + ], + "privileges/show dictionaries/:/check privilege/check privilege=CREATE DICTIONARY/show dict/SHOW DICTIONARIES with privilege": [ + (Fail, "new bug") + ], + "privileges/show dictionaries/:/check privilege/check privilege=DROP DICTIONARY/show dict/SHOW DICTIONARIES with privilege": [ + (Fail, "new bug") + ], + "privileges/kill mutation/:/:/KILL ALTER : without privilege": [ + (Fail, issue_25413) + ], + "privileges/kill mutation/:/:/KILL ALTER : with revoked privilege": [ + (Fail, issue_25413) + ], + "privileges/kill mutation/:/:/KILL ALTER : with revoked ALL privilege": [ + (Fail, issue_25413) + ], + "privileges/create table/create with subquery privilege granted directly or via role/create with subquery, privilege granted directly": [ + (Fail, issue_26746) + ], + "privileges/create table/create with subquery privilege granted directly or via role/create with subquery, privilege granted through a role": [ + (Fail, issue_26746) + ], + "views/live view/create with join subquery privilege granted directly or via role/create with join subquery, privilege granted directly": [ + (Fail, issue_26746) + ], + "views/live view/create with join subquery 
privilege granted directly or via role/create with join subquery, privilege granted through a role": [ + (Fail, issue_26746) + ], } xflags = { - "privileges/alter index/table_type='ReplicatedVersionedCollapsingMergeTree-sharded_cluster'/role with privileges from role with grant option/granted=:/I try to ALTER INDEX with given privileges/I check order by when privilege is granted": - (SKIP, 0) + "privileges/alter index/table_type='ReplicatedVersionedCollapsingMergeTree-sharded_cluster'/role with privileges from role with grant option/granted=:/I try to ALTER INDEX with given privileges/I check order by when privilege is granted": ( + SKIP, + 0, + ) } -ffails ={ - "/clickhouse/rbac/privileges/:/table_type='ReplicatedReplacingMergeTree-sharded_cluster": - (Skip, "Causes clickhouse timeout on 21.10", (lambda test: check_clickhouse_version(">=21.10")(test) and check_clickhouse_version("<21.11")(test))), - "/clickhouse/rbac/views": - (Skip, "Does not work on clickhouse 21.09", (lambda test: check_clickhouse_version(">=21.9")(test) and check_clickhouse_version("<21.10")(test))) +ffails = { + "/clickhouse/rbac/privileges/:/table_type='ReplicatedReplacingMergeTree-sharded_cluster": ( + Skip, + "Causes clickhouse timeout on 21.10", + ( + lambda test: check_clickhouse_version(">=21.10")(test) + and check_clickhouse_version("<21.11")(test) + ), + ), + "/clickhouse/rbac/views": ( + Skip, + "Does not work on clickhouse 21.09", + ( + lambda test: check_clickhouse_version(">=21.9")(test) + and check_clickhouse_version("<21.10")(test) + ), + ), } + @TestModule @ArgumentParser(argparser) @XFails(xfails) @XFlags(xflags) @FFails(ffails) @Name("rbac") -@Specifications( - SRS_006_ClickHouse_Role_Based_Access_Control -) -def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None): - """RBAC regression. 
- """ - nodes = { - "clickhouse": - ("clickhouse1", "clickhouse2", "clickhouse3") - } +@Specifications(SRS_006_ClickHouse_Role_Based_Access_Control) +def regression( + self, local, clickhouse_binary_path, clickhouse_version=None, stress=None +): + """RBAC regression.""" + nodes = {"clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3")} self.context.clickhouse_version = clickhouse_version @@ -185,18 +234,23 @@ def regression(self, local, clickhouse_binary_path, clickhouse_version=None, str from platform import processor as current_cpu folder_name = os.path.basename(current_dir()) - if current_cpu() == 'aarch64': + if current_cpu() == "aarch64": env = f"{folder_name}_env_arm64" else: env = f"{folder_name}_env" - with Cluster(local, clickhouse_binary_path, nodes=nodes, - docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster: + with Cluster( + local, + clickhouse_binary_path, + nodes=nodes, + docker_compose_project_dir=os.path.join(current_dir(), env), + ) as cluster: self.context.cluster = cluster Feature(run=load("rbac.tests.syntax.feature", "feature")) Feature(run=load("rbac.tests.privileges.feature", "feature")) Feature(run=load("rbac.tests.views.feature", "feature")) + if main(): regression() diff --git a/tests/testflows/rbac/requirements/__init__.py b/tests/testflows/rbac/requirements/__init__.py index 75e9d5b4bb8..02f7d430154 100644 --- a/tests/testflows/rbac/requirements/__init__.py +++ b/tests/testflows/rbac/requirements/__init__.py @@ -1 +1 @@ -from .requirements import * \ No newline at end of file +from .requirements import * diff --git a/tests/testflows/rbac/requirements/requirements.py b/tests/testflows/rbac/requirements/requirements.py index d970ff629da..552588e49b9 100755 --- a/tests/testflows/rbac/requirements/requirements.py +++ b/tests/testflows/rbac/requirements/requirements.py @@ -9,8853 +9,9362 @@ from testflows.core import Requirement Heading = Specification.Heading RQ_SRS_006_RBAC = Requirement( - name='RQ.SRS-006.RBAC', - version='1.0', + name="RQ.SRS-006.RBAC", + version="1.0", priority=None, group=None, type=None, uid=None, - description=( - '[ClickHouse] SHALL support role based access control.\n' - '\n' - ), + description=("[ClickHouse] SHALL support role based access control.\n" "\n"), link=None, level=3, - num='5.1.1') + num="5.1.1", +) RQ_SRS_006_RBAC_Login = Requirement( - name='RQ.SRS-006.RBAC.Login', - version='1.0', + name="RQ.SRS-006.RBAC.Login", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only allow access to the server for a given\n' - 'user only when correct username and password are used during\n' - 'the connection to the server.\n' - '\n' - ), + "[ClickHouse] SHALL only allow access to the server for a given\n" + "user only when correct username and password are used during\n" + "the connection to the server.\n" + "\n" + ), link=None, level=3, - num='5.2.1') + num="5.2.1", +) RQ_SRS_006_RBAC_Login_DefaultUser = Requirement( - name='RQ.SRS-006.RBAC.Login.DefaultUser', - version='1.0', + name="RQ.SRS-006.RBAC.Login.DefaultUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use the **default user** when no username and password\n' - 'are specified during the connection to the server.\n' - '\n' - ), + "[ClickHouse] SHALL use the **default user** when no username and password\n" + "are specified during the connection to the server.\n" + "\n" + ), link=None, level=3, - num='5.2.2') + num="5.2.2", +) RQ_SRS_006_RBAC_User = 
Requirement( - name='RQ.SRS-006.RBAC.User', - version='1.0', + name="RQ.SRS-006.RBAC.User", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creation and manipulation of\n' - 'one or more **user** accounts to which roles, privileges,\n' - 'settings profile, quotas and row policies can be assigned.\n' - '\n' - ), + "[ClickHouse] SHALL support creation and manipulation of\n" + "one or more **user** accounts to which roles, privileges,\n" + "settings profile, quotas and row policies can be assigned.\n" + "\n" + ), link=None, level=3, - num='5.3.1') + num="5.3.1", +) RQ_SRS_006_RBAC_User_Roles = Requirement( - name='RQ.SRS-006.RBAC.User.Roles', - version='1.0', + name="RQ.SRS-006.RBAC.User.Roles", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning one or more **roles**\n' - 'to a **user**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning one or more **roles**\n" + "to a **user**.\n" + "\n" + ), link=None, level=3, - num='5.3.2') + num="5.3.2", +) RQ_SRS_006_RBAC_User_Privileges = Requirement( - name='RQ.SRS-006.RBAC.User.Privileges', - version='1.0', + name="RQ.SRS-006.RBAC.User.Privileges", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning one or more privileges to a **user**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning one or more privileges to a **user**.\n" + "\n" + ), link=None, level=3, - num='5.3.3') + num="5.3.3", +) RQ_SRS_006_RBAC_User_Variables = Requirement( - name='RQ.SRS-006.RBAC.User.Variables', - version='1.0', + name="RQ.SRS-006.RBAC.User.Variables", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning one or more variables to a **user**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning one or more variables to a **user**.\n" + "\n" + ), link=None, level=3, - num='5.3.4') + num="5.3.4", +) RQ_SRS_006_RBAC_User_Variables_Constraints = Requirement( - name='RQ.SRS-006.RBAC.User.Variables.Constraints', - version='1.0', + name="RQ.SRS-006.RBAC.User.Variables.Constraints", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning min, max and read-only constraints\n' - 'for the variables that can be set and read by the **user**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning min, max and read-only constraints\n" + "for the variables that can be set and read by the **user**.\n" + "\n" + ), link=None, level=3, - num='5.3.5') + num="5.3.5", +) RQ_SRS_006_RBAC_User_SettingsProfile = Requirement( - name='RQ.SRS-006.RBAC.User.SettingsProfile', - version='1.0', + name="RQ.SRS-006.RBAC.User.SettingsProfile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning one or more **settings profiles**\n' - 'to a **user**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning one or more **settings profiles**\n" + "to a **user**.\n" + "\n" + ), link=None, level=3, - num='5.3.6') + num="5.3.6", +) RQ_SRS_006_RBAC_User_Quotas = Requirement( - name='RQ.SRS-006.RBAC.User.Quotas', - version='1.0', + name="RQ.SRS-006.RBAC.User.Quotas", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning one or more **quotas** to a **user**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning one or more **quotas** to a **user**.\n" + "\n" 
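One detail that makes the large requirements.py hunk above and below easier to review: each multi-line `description` is a sequence of adjacent string literals, which Python concatenates at compile time, so the re-quoting and re-wrapping is value-preserving. A tiny standalone check, using the first requirement's text:

```python
# The re-quoted description is identical to the old one: adjacent string
# literals are concatenated at compile time, so only the quoting style changed.
old_style = (
    '[ClickHouse] SHALL support role based access control.\n'
    '\n'
)
new_style = "[ClickHouse] SHALL support role based access control.\n" "\n"

assert old_style == new_style
assert new_style == "[ClickHouse] SHALL support role based access control.\n\n"
```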
+ ), link=None, level=3, - num='5.3.7') + num="5.3.7", +) RQ_SRS_006_RBAC_User_RowPolicies = Requirement( - name='RQ.SRS-006.RBAC.User.RowPolicies', - version='1.0', + name="RQ.SRS-006.RBAC.User.RowPolicies", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning one or more **row policies** to a **user**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning one or more **row policies** to a **user**.\n" + "\n" + ), link=None, level=3, - num='5.3.8') + num="5.3.8", +) RQ_SRS_006_RBAC_User_DefaultRole = Requirement( - name='RQ.SRS-006.RBAC.User.DefaultRole', - version='1.0', + name="RQ.SRS-006.RBAC.User.DefaultRole", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning a default role to a **user**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning a default role to a **user**.\n" "\n" + ), link=None, level=3, - num='5.3.9') + num="5.3.9", +) RQ_SRS_006_RBAC_User_RoleSelection = Requirement( - name='RQ.SRS-006.RBAC.User.RoleSelection', - version='1.0', + name="RQ.SRS-006.RBAC.User.RoleSelection", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support selection of one or more **roles** from the available roles\n' - 'that are assigned to a **user** using `SET ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support selection of one or more **roles** from the available roles\n" + "that are assigned to a **user** using `SET ROLE` statement.\n" + "\n" + ), link=None, level=3, - num='5.3.10') + num="5.3.10", +) RQ_SRS_006_RBAC_User_ShowCreate = Requirement( - name='RQ.SRS-006.RBAC.User.ShowCreate', - version='1.0', + name="RQ.SRS-006.RBAC.User.ShowCreate", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing the command of how **user** account was created.\n' - '\n' - ), + "[ClickHouse] SHALL support showing the command of how **user** account was created.\n" + "\n" + ), link=None, level=3, - num='5.3.11') + num="5.3.11", +) RQ_SRS_006_RBAC_User_ShowPrivileges = Requirement( - name='RQ.SRS-006.RBAC.User.ShowPrivileges', - version='1.0', + name="RQ.SRS-006.RBAC.User.ShowPrivileges", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support listing the privileges of the **user**.\n' - '\n' - ), + "[ClickHouse] SHALL support listing the privileges of the **user**.\n" "\n" + ), link=None, level=3, - num='5.3.12') + num="5.3.12", +) RQ_SRS_006_RBAC_User_Use_DefaultRole = Requirement( - name='RQ.SRS-006.RBAC.User.Use.DefaultRole', - version='1.0', + name="RQ.SRS-006.RBAC.User.Use.DefaultRole", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL by default use default role or roles assigned\n' - 'to the user if specified.\n' - '\n' - ), + "[ClickHouse] SHALL by default use default role or roles assigned\n" + "to the user if specified.\n" + "\n" + ), link=None, level=3, - num='5.3.13') + num="5.3.13", +) RQ_SRS_006_RBAC_User_Use_AllRolesWhenNoDefaultRole = Requirement( - name='RQ.SRS-006.RBAC.User.Use.AllRolesWhenNoDefaultRole', - version='1.0', + name="RQ.SRS-006.RBAC.User.Use.AllRolesWhenNoDefaultRole", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL by default use all the roles assigned to the user\n' - 'if no default role or roles are specified for the user.\n' - '\n' - ), + "[ClickHouse] SHALL 
by default use all the roles assigned to the user\n" + "if no default role or roles are specified for the user.\n" + "\n" + ), link=None, level=3, - num='5.3.14') + num="5.3.14", +) RQ_SRS_006_RBAC_User_Create = Requirement( - name='RQ.SRS-006.RBAC.User.Create', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creating **user** accounts using `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support creating **user** accounts using `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.1') + num="5.3.15.1", +) RQ_SRS_006_RBAC_User_Create_IfNotExists = Requirement( - name='RQ.SRS-006.RBAC.User.Create.IfNotExists', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.IfNotExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE USER` statement\n' - 'to skip raising an exception if a user with the same **name** already exists.\n' - 'If the `IF NOT EXISTS` clause is not specified then an exception SHALL be\n' - 'raised if a user with the same **name** already exists.\n' - '\n' - ), + "[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE USER` statement\n" + "to skip raising an exception if a user with the same **name** already exists.\n" + "If the `IF NOT EXISTS` clause is not specified then an exception SHALL be\n" + "raised if a user with the same **name** already exists.\n" + "\n" + ), link=None, level=4, - num='5.3.15.2') + num="5.3.15.2", +) RQ_SRS_006_RBAC_User_Create_Replace = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Replace', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Replace", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE USER` statement\n' - 'to replace existing user account if already exists.\n' - '\n' - ), + "[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE USER` statement\n" + "to replace existing user account if already exists.\n" + "\n" + ), link=None, level=4, - num='5.3.15.3') + num="5.3.15.3", +) RQ_SRS_006_RBAC_User_Create_Password_NoPassword = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.NoPassword', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.NoPassword", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying no password when creating\n' - 'user account using `IDENTIFIED WITH NO_PASSWORD` clause .\n' - '\n' - ), + "[ClickHouse] SHALL support specifying no password when creating\n" + "user account using `IDENTIFIED WITH NO_PASSWORD` clause .\n" + "\n" + ), link=None, level=4, - num='5.3.15.4') + num="5.3.15.4", +) RQ_SRS_006_RBAC_User_Create_Password_NoPassword_Login = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.NoPassword.Login', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.NoPassword.Login", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use no password for the user when connecting to the server\n' - 'when an account was created with `IDENTIFIED WITH NO_PASSWORD` clause.\n' - '\n' - ), + "[ClickHouse] SHALL use no password for the user when connecting to the server\n" + "when an account was created with `IDENTIFIED WITH NO_PASSWORD` clause.\n" + "\n" + ), link=None, level=4, - num='5.3.15.5') + num="5.3.15.5", +) 
RQ_SRS_006_RBAC_User_Create_Password_PlainText = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.PlainText', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.PlainText", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying plaintext password when creating\n' - 'user account using `IDENTIFIED WITH PLAINTEXT_PASSWORD BY` clause.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying plaintext password when creating\n" + "user account using `IDENTIFIED WITH PLAINTEXT_PASSWORD BY` clause.\n" + "\n" + ), link=None, level=4, - num='5.3.15.6') + num="5.3.15.6", +) RQ_SRS_006_RBAC_User_Create_Password_PlainText_Login = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.PlainText.Login', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.PlainText.Login", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL use the plaintext password passed by the user when connecting to the server\n' - 'when an account was created with `IDENTIFIED WITH PLAINTEXT_PASSWORD` clause\n' - 'and compare the password with the one used in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL use the plaintext password passed by the user when connecting to the server\n" + "when an account was created with `IDENTIFIED WITH PLAINTEXT_PASSWORD` clause\n" + "and compare the password with the one used in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.7') + num="5.3.15.7", +) RQ_SRS_006_RBAC_User_Create_Password_Sha256Password = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Password', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.Sha256Password", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying the result of applying SHA256\n' - 'to some password when creating user account using `IDENTIFIED WITH SHA256_PASSWORD BY` or `IDENTIFIED BY`\n' - 'clause.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying the result of applying SHA256\n" + "to some password when creating user account using `IDENTIFIED WITH SHA256_PASSWORD BY` or `IDENTIFIED BY`\n" + "clause.\n" + "\n" + ), link=None, level=4, - num='5.3.15.8') + num="5.3.15.8", +) RQ_SRS_006_RBAC_User_Create_Password_Sha256Password_Login = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Password.Login', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.Sha256Password.Login", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL calculate `SHA256` of the password passed by the user when connecting to the server\n' + "[ClickHouse] SHALL calculate `SHA256` of the password passed by the user when connecting to the server\n" "when an account was created with `IDENTIFIED WITH SHA256_PASSWORD` or with 'IDENTIFIED BY' clause\n" - 'and compare the calculated hash to the one used in the `CREATE USER` statement.\n' - '\n' - ), + "and compare the calculated hash to the one used in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.9') + num="5.3.15.9", +) RQ_SRS_006_RBAC_User_Create_Password_Sha256Hash = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying the result of applying 
SHA256\n' - 'to some already calculated hash when creating user account using `IDENTIFIED WITH SHA256_HASH`\n' - 'clause.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying the result of applying SHA256\n" + "to some already calculated hash when creating user account using `IDENTIFIED WITH SHA256_HASH`\n" + "clause.\n" + "\n" + ), link=None, level=4, - num='5.3.15.10') + num="5.3.15.10", +) RQ_SRS_006_RBAC_User_Create_Password_Sha256Hash_Login = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash.Login', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash.Login", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL calculate `SHA256` of the already calculated hash passed by\n' - 'the user when connecting to the server\n' - 'when an account was created with `IDENTIFIED WITH SHA256_HASH` clause\n' - 'and compare the calculated hash to the one used in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL calculate `SHA256` of the already calculated hash passed by\n" + "the user when connecting to the server\n" + "when an account was created with `IDENTIFIED WITH SHA256_HASH` clause\n" + "and compare the calculated hash to the one used in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.11') + num="5.3.15.11", +) RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Password = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying the result of applying SHA1 two times\n' - 'to a password when creating user account using `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD`\n' - 'clause.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying the result of applying SHA1 two times\n" + "to a password when creating user account using `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD`\n" + "clause.\n" + "\n" + ), link=None, level=4, - num='5.3.15.12') + num="5.3.15.12", +) RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Password_Login = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password.Login', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password.Login", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL calculate `SHA1` two times over the password passed by\n' - 'the user when connecting to the server\n' - 'when an account was created with `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause\n' - 'and compare the calculated value to the one used in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL calculate `SHA1` two times over the password passed by\n" + "the user when connecting to the server\n" + "when an account was created with `IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause\n" + "and compare the calculated value to the one used in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.13') + num="5.3.15.13", +) RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Hash = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying the result of applying SHA1 two times\n' - 'to a hash when creating user account using `IDENTIFIED WITH 
DOUBLE_SHA1_HASH`\n' - 'clause.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying the result of applying SHA1 two times\n" + "to a hash when creating user account using `IDENTIFIED WITH DOUBLE_SHA1_HASH`\n" + "clause.\n" + "\n" + ), link=None, level=4, - num='5.3.15.14') + num="5.3.15.14", +) RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Hash_Login = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash.Login', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash.Login", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL calculate `SHA1` two times over the hash passed by\n' - 'the user when connecting to the server\n' - 'when an account was created with `IDENTIFIED WITH DOUBLE_SHA1_HASH` clause\n' - 'and compare the calculated value to the one used in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL calculate `SHA1` two times over the hash passed by\n" + "the user when connecting to the server\n" + "when an account was created with `IDENTIFIED WITH DOUBLE_SHA1_HASH` clause\n" + "and compare the calculated value to the one used in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.15') + num="5.3.15.15", +) RQ_SRS_006_RBAC_User_Create_Host_Name = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Host.Name', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Host.Name", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying one or more hostnames from\n' - 'which user can access the server using the `HOST NAME` clause\n' - 'in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying one or more hostnames from\n" + "which user can access the server using the `HOST NAME` clause\n" + "in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.16') + num="5.3.15.16", +) RQ_SRS_006_RBAC_User_Create_Host_Regexp = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Host.Regexp', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Host.Regexp", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying one or more regular expressions\n' - 'to match hostnames from which user can access the server\n' - 'using the `HOST REGEXP` clause in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying one or more regular expressions\n" + "to match hostnames from which user can access the server\n" + "using the `HOST REGEXP` clause in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.17') + num="5.3.15.17", +) RQ_SRS_006_RBAC_User_Create_Host_IP = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Host.IP', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Host.IP", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying one or more IP address or subnet from\n' - 'which user can access the server using the `HOST IP` clause in the\n' - '`CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying one or more IP address or subnet from\n" + "which user can access the server using the `HOST IP` clause in the\n" + "`CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.18') + num="5.3.15.18", +) RQ_SRS_006_RBAC_User_Create_Host_Any = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Host.Any', - version='1.0', + 
name="RQ.SRS-006.RBAC.User.Create.Host.Any", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `HOST ANY` clause in the `CREATE USER` statement\n' - 'to indicate that user can access the server from any host.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying `HOST ANY` clause in the `CREATE USER` statement\n" + "to indicate that user can access the server from any host.\n" + "\n" + ), link=None, level=4, - num='5.3.15.19') + num="5.3.15.19", +) RQ_SRS_006_RBAC_User_Create_Host_None = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Host.None', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Host.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support fobidding access from any host using `HOST NONE` clause in the\n' - '`CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support fobidding access from any host using `HOST NONE` clause in the\n" + "`CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.20') + num="5.3.15.20", +) RQ_SRS_006_RBAC_User_Create_Host_Local = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Host.Local', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Host.Local", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting user access to local only using `HOST LOCAL` clause in the\n' - '`CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting user access to local only using `HOST LOCAL` clause in the\n" + "`CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.21') + num="5.3.15.21", +) RQ_SRS_006_RBAC_User_Create_Host_Like = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Host.Like', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Host.Like", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying host using `LIKE` command syntax using the\n' - '`HOST LIKE` clause in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying host using `LIKE` command syntax using the\n" + "`HOST LIKE` clause in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.22') + num="5.3.15.22", +) RQ_SRS_006_RBAC_User_Create_Host_Default = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Host.Default', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Host.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support user access to server from any host\n' - 'if no `HOST` clause is specified in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support user access to server from any host\n" + "if no `HOST` clause is specified in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.23') + num="5.3.15.23", +) RQ_SRS_006_RBAC_User_Create_DefaultRole = Requirement( - name='RQ.SRS-006.RBAC.User.Create.DefaultRole', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.DefaultRole", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying one or more default roles\n' - 'using `DEFAULT ROLE` clause in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying one or more default roles\n" + "using `DEFAULT ROLE` clause in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.24') + num="5.3.15.24", +) 
RQ_SRS_006_RBAC_User_Create_DefaultRole_None = Requirement( - name='RQ.SRS-006.RBAC.User.Create.DefaultRole.None', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.DefaultRole.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying no default roles\n' - 'using `DEFAULT ROLE NONE` clause in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying no default roles\n" + "using `DEFAULT ROLE NONE` clause in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.25') + num="5.3.15.25", +) RQ_SRS_006_RBAC_User_Create_DefaultRole_All = Requirement( - name='RQ.SRS-006.RBAC.User.Create.DefaultRole.All', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.DefaultRole.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying all roles to be used as default\n' - 'using `DEFAULT ROLE ALL` clause in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying all roles to be used as default\n" + "using `DEFAULT ROLE ALL` clause in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.26') + num="5.3.15.26", +) RQ_SRS_006_RBAC_User_Create_Settings = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Settings', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Settings", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying settings and profile\n' - 'using `SETTINGS` clause in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying settings and profile\n" + "using `SETTINGS` clause in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.27') + num="5.3.15.27", +) RQ_SRS_006_RBAC_User_Create_OnCluster = Requirement( - name='RQ.SRS-006.RBAC.User.Create.OnCluster', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.OnCluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying cluster on which the user\n' - 'will be created using `ON CLUSTER` clause in the `CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying cluster on which the user\n" + "will be created using `ON CLUSTER` clause in the `CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.15.28') + num="5.3.15.28", +) RQ_SRS_006_RBAC_User_Create_Syntax = Requirement( - name='RQ.SRS-006.RBAC.User.Create.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.User.Create.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for `CREATE USER` statement.\n' - '\n' - '```sql\n' - 'CREATE USER [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]\n' + "[ClickHouse] SHALL support the following syntax for `CREATE USER` statement.\n" + "\n" + "```sql\n" + "CREATE USER [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]\n" " [IDENTIFIED [WITH {NO_PASSWORD|PLAINTEXT_PASSWORD|SHA256_PASSWORD|SHA256_HASH|DOUBLE_SHA1_PASSWORD|DOUBLE_SHA1_HASH}] BY {'password'|'hash'}]\n" " [HOST {LOCAL | NAME 'name' | NAME REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] 
| ANY | NONE]\n" - ' [DEFAULT ROLE role [,...]]\n' + " [DEFAULT ROLE role [,...]]\n" " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=4, - num='5.3.15.29') + num="5.3.15.29", +) RQ_SRS_006_RBAC_User_Alter = Requirement( - name='RQ.SRS-006.RBAC.User.Alter', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering **user** accounts using `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering **user** accounts using `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.1') + num="5.3.16.1", +) RQ_SRS_006_RBAC_User_Alter_OrderOfEvaluation = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.OrderOfEvaluation', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.OrderOfEvaluation", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support evaluating `ALTER USER` statement from left to right\n' - 'where things defined on the right override anything that was previously defined on\n' - 'the left.\n' - '\n' - ), + "[ClickHouse] SHALL support evaluating `ALTER USER` statement from left to right\n" + "where things defined on the right override anything that was previously defined on\n" + "the left.\n" + "\n" + ), link=None, level=4, - num='5.3.16.2') + num="5.3.16.2", +) RQ_SRS_006_RBAC_User_Alter_IfExists = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.IfExists', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.IfExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER USER` statement\n' - 'to skip raising an exception (producing a warning instead) if a user with the specified **name** does not exist.\n' - 'If the `IF EXISTS` clause is not specified then an exception SHALL be raised if a user with the **name** does not exist.\n' - '\n' - ), + "[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER USER` statement\n" + "to skip raising an exception (producing a warning instead) if a user with the specified **name** does not exist.\n" + "If the `IF EXISTS` clause is not specified then an exception SHALL be raised if a user with the **name** does not exist.\n" + "\n" + ), link=None, level=4, - num='5.3.16.3') + num="5.3.16.3", +) RQ_SRS_006_RBAC_User_Alter_Cluster = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying the cluster the user is on\n' - 'when altering user account using `ON CLUSTER` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying the cluster the user is on\n" + "when altering user account using `ON CLUSTER` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.4') + num="5.3.16.4", +) RQ_SRS_006_RBAC_User_Alter_Rename = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Rename', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Rename", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying a new name for the user when\n' - 'altering user account using `RENAME` clause in the `ALTER USER` statement.\n' - '\n' - ), + 
"[ClickHouse] SHALL support specifying a new name for the user when\n" + "altering user account using `RENAME` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.5') + num="5.3.16.5", +) RQ_SRS_006_RBAC_User_Alter_Password_PlainText = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Password.PlainText', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Password.PlainText", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying plaintext password when altering\n' - 'user account using `IDENTIFIED WITH PLAINTEXT_PASSWORD BY` or\n' - 'using shorthand `IDENTIFIED BY` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying plaintext password when altering\n" + "user account using `IDENTIFIED WITH PLAINTEXT_PASSWORD BY` or\n" + "using shorthand `IDENTIFIED BY` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.6') + num="5.3.16.6", +) RQ_SRS_006_RBAC_User_Alter_Password_Sha256Password = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Password.Sha256Password', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Password.Sha256Password", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying the result of applying SHA256\n' - 'to some password as identification when altering user account using\n' - '`IDENTIFIED WITH SHA256_PASSWORD` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying the result of applying SHA256\n" + "to some password as identification when altering user account using\n" + "`IDENTIFIED WITH SHA256_PASSWORD` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.7') + num="5.3.16.7", +) RQ_SRS_006_RBAC_User_Alter_Password_DoubleSha1Password = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Password.DoubleSha1Password', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Password.DoubleSha1Password", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying the result of applying Double SHA1\n' - 'to some password as identification when altering user account using\n' - '`IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying the result of applying Double SHA1\n" + "to some password as identification when altering user account using\n" + "`IDENTIFIED WITH DOUBLE_SHA1_PASSWORD` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.8') + num="5.3.16.8", +) RQ_SRS_006_RBAC_User_Alter_Host_AddDrop = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Host.AddDrop', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Host.AddDrop", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering user by adding and dropping access to hosts\n' - 'with the `ADD HOST` or the `DROP HOST` in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering user by adding and dropping access to hosts\n" + "with the `ADD HOST` or the `DROP HOST` in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.9') + num="5.3.16.9", +) RQ_SRS_006_RBAC_User_Alter_Host_Local = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Host.Local', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Host.Local", + version="1.0", priority=None, group=None, 
type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting user access to local only using `HOST LOCAL` clause in the\n' - '`ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting user access to local only using `HOST LOCAL` clause in the\n" + "`ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.10') + num="5.3.16.10", +) RQ_SRS_006_RBAC_User_Alter_Host_Name = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Host.Name', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Host.Name", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying one or more hostnames from\n' - 'which user can access the server using the `HOST NAME` clause\n' - 'in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying one or more hostnames from\n" + "which user can access the server using the `HOST NAME` clause\n" + "in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.11') + num="5.3.16.11", +) RQ_SRS_006_RBAC_User_Alter_Host_Regexp = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Host.Regexp', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Host.Regexp", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying one or more regular expressions\n' - 'to match hostnames from which user can access the server\n' - 'using the `HOST REGEXP` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying one or more regular expressions\n" + "to match hostnames from which user can access the server\n" + "using the `HOST REGEXP` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.12') + num="5.3.16.12", +) RQ_SRS_006_RBAC_User_Alter_Host_IP = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Host.IP', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Host.IP", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying one or more IP address or subnet from\n' - 'which user can access the server using the `HOST IP` clause in the\n' - '`ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying one or more IP address or subnet from\n" + "which user can access the server using the `HOST IP` clause in the\n" + "`ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.13') + num="5.3.16.13", +) RQ_SRS_006_RBAC_User_Alter_Host_Like = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Host.Like', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Host.Like", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying one or more similar hosts using `LIKE` command syntax\n' - 'using the `HOST LIKE` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying one or more similar hosts using `LIKE` command syntax\n" + "using the `HOST LIKE` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.14') + num="5.3.16.14", +) RQ_SRS_006_RBAC_User_Alter_Host_Any = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Host.Any', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Host.Any", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying `HOST ANY` clause in the `ALTER USER` statement\n' - 'to indicate that user can access the server from any host.\n' - 
'\n' - ), + "[ClickHouse] SHALL support specifying `HOST ANY` clause in the `ALTER USER` statement\n" + "to indicate that user can access the server from any host.\n" + "\n" + ), link=None, level=4, - num='5.3.16.15') + num="5.3.16.15", +) RQ_SRS_006_RBAC_User_Alter_Host_None = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Host.None', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Host.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support fobidding access from any host using `HOST NONE` clause in the\n' - '`ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support fobidding access from any host using `HOST NONE` clause in the\n" + "`ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.16') + num="5.3.16.16", +) RQ_SRS_006_RBAC_User_Alter_DefaultRole = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.DefaultRole', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.DefaultRole", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying one or more default roles\n' - 'using `DEFAULT ROLE` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying one or more default roles\n" + "using `DEFAULT ROLE` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.17') + num="5.3.16.17", +) RQ_SRS_006_RBAC_User_Alter_DefaultRole_All = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.DefaultRole.All', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.DefaultRole.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying all roles to be used as default\n' - 'using `DEFAULT ROLE ALL` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying all roles to be used as default\n" + "using `DEFAULT ROLE ALL` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.18') + num="5.3.16.18", +) RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.DefaultRole.AllExcept', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.DefaultRole.AllExcept", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying one or more roles which will not be used as default\n' - 'using `DEFAULT ROLE ALL EXCEPT` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying one or more roles which will not be used as default\n" + "using `DEFAULT ROLE ALL EXCEPT` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.19') + num="5.3.16.19", +) RQ_SRS_006_RBAC_User_Alter_Settings = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Settings', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Settings", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying one or more variables\n' - 'using `SETTINGS` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying one or more variables\n" + "using `SETTINGS` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.20') + num="5.3.16.20", +) RQ_SRS_006_RBAC_User_Alter_Settings_Min = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Settings.Min', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Settings.Min", + version="1.0", priority=None, 
group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying a minimum value for the variable specifed using `SETTINGS` with `MIN` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying a minimum value for the variable specified using `SETTINGS` with `MIN` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.21') + num="5.3.16.21", +) RQ_SRS_006_RBAC_User_Alter_Settings_Max = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Settings.Max', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Settings.Max", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying a maximum value for the variable specifed using `SETTINGS` with `MAX` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying a maximum value for the variable specified using `SETTINGS` with `MAX` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.22') + num="5.3.16.22", +) RQ_SRS_006_RBAC_User_Alter_Settings_Profile = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Settings.Profile', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Settings.Profile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying the name of a profile for the variable specifed using `SETTINGS` with `PROFILE` clause in the `ALTER USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying the name of a profile for the variable specified using `SETTINGS` with `PROFILE` clause in the `ALTER USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.16.23') + num="5.3.16.23", +) RQ_SRS_006_RBAC_User_Alter_Syntax = Requirement( - name='RQ.SRS-006.RBAC.User.Alter.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.User.Alter.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `ALTER USER` statement.\n' - '\n' - '```sql\n' - 'ALTER USER [IF EXISTS] name [ON CLUSTER cluster_name]\n' - ' [RENAME TO new_name]\n' + "[ClickHouse] SHALL support the following syntax for the `ALTER USER` statement.\n" + "\n" + "```sql\n" + "ALTER USER [IF EXISTS] name [ON CLUSTER cluster_name]\n" + " [RENAME TO new_name]\n" " [IDENTIFIED [WITH {PLAINTEXT_PASSWORD|SHA256_PASSWORD|DOUBLE_SHA1_PASSWORD}] BY {'password'|'hash'}]\n" " [[ADD|DROP] HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]\n" - ' [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] ]\n' + " [DEFAULT ROLE role [,...] | ALL | ALL EXCEPT role [,...] 
]\n" " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=4, - num='5.3.16.24') + num="5.3.16.24", +) RQ_SRS_006_RBAC_User_ShowCreateUser = Requirement( - name='RQ.SRS-006.RBAC.User.ShowCreateUser', - version='1.0', + name="RQ.SRS-006.RBAC.User.ShowCreateUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the current user object\n' - 'using the `SHOW CREATE USER` statement with `CURRENT_USER` or no argument.\n' - '\n' - ), + "[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the current user object\n" + "using the `SHOW CREATE USER` statement with `CURRENT_USER` or no argument.\n" + "\n" + ), link=None, level=4, - num='5.3.17.1') + num="5.3.17.1", +) RQ_SRS_006_RBAC_User_ShowCreateUser_For = Requirement( - name='RQ.SRS-006.RBAC.User.ShowCreateUser.For', - version='1.0', + name="RQ.SRS-006.RBAC.User.ShowCreateUser.For", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the specified user object\n' - 'using the `FOR` clause in the `SHOW CREATE USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support showing the `CREATE USER` statement used to create the specified user object\n" + "using the `FOR` clause in the `SHOW CREATE USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.17.2') + num="5.3.17.2", +) RQ_SRS_006_RBAC_User_ShowCreateUser_Syntax = Requirement( - name='RQ.SRS-006.RBAC.User.ShowCreateUser.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.User.ShowCreateUser.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing the following syntax for `SHOW CREATE USER` statement.\n' - '\n' - '```sql\n' - 'SHOW CREATE USER [name | CURRENT_USER]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support showing the following syntax for `SHOW CREATE USER` statement.\n" + "\n" + "```sql\n" + "SHOW CREATE USER [name | CURRENT_USER]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.3.17.3') + num="5.3.17.3", +) RQ_SRS_006_RBAC_User_Drop = Requirement( - name='RQ.SRS-006.RBAC.User.Drop', - version='1.0', + name="RQ.SRS-006.RBAC.User.Drop", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support removing a user account using `DROP USER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support removing a user account using `DROP USER` statement.\n" + "\n" + ), link=None, level=4, - num='5.3.18.1') + num="5.3.18.1", +) RQ_SRS_006_RBAC_User_Drop_IfExists = Requirement( - name='RQ.SRS-006.RBAC.User.Drop.IfExists', - version='1.0', + name="RQ.SRS-006.RBAC.User.Drop.IfExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP USER` statement\n' - 'to skip raising an exception if the user account does not exist.\n' - 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' - 'raised if a user does not exist.\n' - '\n' - ), + "[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP USER` statement\n" + "to skip raising an exception if the user account does not exist.\n" + "If the `IF EXISTS` clause is not specified then an exception SHALL be\n" + "raised if a user does 
not exist.\n" + "\n" + ), link=None, level=4, - num='5.3.18.2') + num="5.3.18.2", +) RQ_SRS_006_RBAC_User_Drop_OnCluster = Requirement( - name='RQ.SRS-006.RBAC.User.Drop.OnCluster', - version='1.0', + name="RQ.SRS-006.RBAC.User.Drop.OnCluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP USER` statement\n' - 'to specify the name of the cluster the user should be dropped from.\n' - '\n' - ), + "[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP USER` statement\n" + "to specify the name of the cluster the user should be dropped from.\n" + "\n" + ), link=None, level=4, - num='5.3.18.3') + num="5.3.18.3", +) RQ_SRS_006_RBAC_User_Drop_Syntax = Requirement( - name='RQ.SRS-006.RBAC.User.Drop.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.User.Drop.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for `DROP USER` statement\n' - '\n' - '```sql\n' - 'DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for `DROP USER` statement\n" + "\n" + "```sql\n" + "DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.3.18.4') + num="5.3.18.4", +) RQ_SRS_006_RBAC_Role = Requirement( - name='RQ.SRS-006.RBAC.Role', - version='1.0', + name="RQ.SRS-006.RBAC.Role", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClikHouse] SHALL support creation and manipulation of **roles**\n' - 'to which privileges, settings profile, quotas and row policies can be\n' - 'assigned.\n' - '\n' - ), + "[ClikHouse] SHALL support creation and manipulation of **roles**\n" + "to which privileges, settings profile, quotas and row policies can be\n" + "assigned.\n" + "\n" + ), link=None, level=3, - num='5.4.1') + num="5.4.1", +) RQ_SRS_006_RBAC_Role_Privileges = Requirement( - name='RQ.SRS-006.RBAC.Role.Privileges', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Privileges", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning one or more privileges to a **role**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning one or more privileges to a **role**.\n" + "\n" + ), link=None, level=3, - num='5.4.2') + num="5.4.2", +) RQ_SRS_006_RBAC_Role_Variables = Requirement( - name='RQ.SRS-006.RBAC.Role.Variables', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Variables", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning one or more variables to a **role**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning one or more variables to a **role**.\n" + "\n" + ), link=None, level=3, - num='5.4.3') + num="5.4.3", +) RQ_SRS_006_RBAC_Role_SettingsProfile = Requirement( - name='RQ.SRS-006.RBAC.Role.SettingsProfile', - version='1.0', + name="RQ.SRS-006.RBAC.Role.SettingsProfile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning one or more **settings profiles**\n' - 'to a **role**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning one or more **settings profiles**\n" + "to a **role**.\n" + "\n" + ), link=None, level=3, - num='5.4.4') + num="5.4.4", +) RQ_SRS_006_RBAC_Role_Quotas = Requirement( - name='RQ.SRS-006.RBAC.Role.Quotas', - version='1.0', + 
name="RQ.SRS-006.RBAC.Role.Quotas", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning one or more **quotas** to a **role**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning one or more **quotas** to a **role**.\n" + "\n" + ), link=None, level=3, - num='5.4.5') + num="5.4.5", +) RQ_SRS_006_RBAC_Role_RowPolicies = Requirement( - name='RQ.SRS-006.RBAC.Role.RowPolicies', - version='1.0', + name="RQ.SRS-006.RBAC.Role.RowPolicies", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning one or more **row policies** to a **role**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning one or more **row policies** to a **role**.\n" + "\n" + ), link=None, level=3, - num='5.4.6') + num="5.4.6", +) RQ_SRS_006_RBAC_Role_Create = Requirement( - name='RQ.SRS-006.RBAC.Role.Create', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Create", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creating a **role** using `CREATE ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support creating a **role** using `CREATE ROLE` statement.\n" + "\n" + ), link=None, level=4, - num='5.4.7.1') + num="5.4.7.1", +) RQ_SRS_006_RBAC_Role_Create_IfNotExists = Requirement( - name='RQ.SRS-006.RBAC.Role.Create.IfNotExists', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Create.IfNotExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE ROLE` statement\n' - 'to raising an exception if a role with the same **name** already exists.\n' - 'If the `IF NOT EXISTS` clause is not specified then an exception SHALL be\n' - 'raised if a role with the same **name** already exists.\n' - '\n' - ), + "[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE ROLE` statement\n" + "to raising an exception if a role with the same **name** already exists.\n" + "If the `IF NOT EXISTS` clause is not specified then an exception SHALL be\n" + "raised if a role with the same **name** already exists.\n" + "\n" + ), link=None, level=4, - num='5.4.7.2') + num="5.4.7.2", +) RQ_SRS_006_RBAC_Role_Create_Replace = Requirement( - name='RQ.SRS-006.RBAC.Role.Create.Replace', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Create.Replace", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROLE` statement\n' - 'to replace existing role if it already exists.\n' - '\n' - ), + "[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROLE` statement\n" + "to replace existing role if it already exists.\n" + "\n" + ), link=None, level=4, - num='5.4.7.3') + num="5.4.7.3", +) RQ_SRS_006_RBAC_Role_Create_Settings = Requirement( - name='RQ.SRS-006.RBAC.Role.Create.Settings', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Create.Settings", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying settings and profile using `SETTINGS`\n' - 'clause in the `CREATE ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying settings and profile using `SETTINGS`\n" + "clause in the `CREATE ROLE` statement.\n" + "\n" + ), link=None, level=4, - num='5.4.7.4') + num="5.4.7.4", +) RQ_SRS_006_RBAC_Role_Create_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Role.Create.Syntax', - version='1.0', + 
name="RQ.SRS-006.RBAC.Role.Create.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `CREATE ROLE` statement\n' - '\n' - '``` sql\n' - 'CREATE ROLE [IF NOT EXISTS | OR REPLACE] name\n' + "[ClickHouse] SHALL support the following syntax for the `CREATE ROLE` statement\n" + "\n" + "``` sql\n" + "CREATE ROLE [IF NOT EXISTS | OR REPLACE] name\n" " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=4, - num='5.4.7.5') + num="5.4.7.5", +) RQ_SRS_006_RBAC_Role_Alter = Requirement( - name='RQ.SRS-006.RBAC.Role.Alter', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Alter", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering one **role** using `ALTER ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering one **role** using `ALTER ROLE` statement.\n" + "\n" + ), link=None, level=4, - num='5.4.8.1') + num="5.4.8.1", +) RQ_SRS_006_RBAC_Role_Alter_IfExists = Requirement( - name='RQ.SRS-006.RBAC.Role.Alter.IfExists', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Alter.IfExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering one **role** using `ALTER ROLE IF EXISTS` statement, where no exception\n' - 'will be thrown if the role does not exist.\n' - '\n' - ), + "[ClickHouse] SHALL support altering one **role** using `ALTER ROLE IF EXISTS` statement, where no exception\n" + "will be thrown if the role does not exist.\n" + "\n" + ), link=None, level=4, - num='5.4.8.2') + num="5.4.8.2", +) RQ_SRS_006_RBAC_Role_Alter_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Role.Alter.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Alter.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering one **role** using `ALTER ROLE role ON CLUSTER` statement to specify the\n' - 'cluster location of the specified role.\n' - '\n' - ), + "[ClickHouse] SHALL support altering one **role** using `ALTER ROLE role ON CLUSTER` statement to specify the\n" + "cluster location of the specified role.\n" + "\n" + ), link=None, level=4, - num='5.4.8.3') + num="5.4.8.3", +) RQ_SRS_006_RBAC_Role_Alter_Rename = Requirement( - name='RQ.SRS-006.RBAC.Role.Alter.Rename', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Alter.Rename", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering one **role** using `ALTER ROLE role RENAME TO` statement which renames the\n' - 'role to a specified new name. If the new name already exists, that an exception SHALL be raised unless the\n' - '`IF EXISTS` clause is specified, by which no exception will be raised and nothing will change.\n' - '\n' - ), + "[ClickHouse] SHALL support altering one **role** using `ALTER ROLE role RENAME TO` statement which renames the\n" + "role to a specified new name. 
If the new name already exists, that an exception SHALL be raised unless the\n" + "`IF EXISTS` clause is specified, by which no exception will be raised and nothing will change.\n" + "\n" + ), link=None, level=4, - num='5.4.8.4') + num="5.4.8.4", +) RQ_SRS_006_RBAC_Role_Alter_Settings = Requirement( - name='RQ.SRS-006.RBAC.Role.Alter.Settings', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Alter.Settings", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering the settings of one **role** using `ALTER ROLE role SETTINGS ...` statement.\n' - 'Altering variable values, creating max and min values, specifying readonly or writable, and specifying the\n' - 'profiles for which this alter change shall be applied to, are all supported, using the following syntax.\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL support altering the settings of one **role** using `ALTER ROLE role SETTINGS ...` statement.\n" + "Altering variable values, creating max and min values, specifying readonly or writable, and specifying the\n" + "profiles for which this alter change shall be applied to, are all supported, using the following syntax.\n" + "\n" + "```sql\n" "[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n" - '```\n' - '\n' - 'One or more variables and profiles may be specified as shown above.\n' - '\n' - ), + "```\n" + "\n" + "One or more variables and profiles may be specified as shown above.\n" + "\n" + ), link=None, level=4, - num='5.4.8.5') + num="5.4.8.5", +) RQ_SRS_006_RBAC_Role_Alter_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Role.Alter.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Alter.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '```sql\n' - 'ALTER ROLE [IF EXISTS] name [ON CLUSTER cluster_name]\n' - ' [RENAME TO new_name]\n' + "```sql\n" + "ALTER ROLE [IF EXISTS] name [ON CLUSTER cluster_name]\n" + " [RENAME TO new_name]\n" " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | PROFILE 'profile_name'] [,...]\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=4, - num='5.4.8.6') + num="5.4.8.6", +) RQ_SRS_006_RBAC_Role_Drop = Requirement( - name='RQ.SRS-006.RBAC.Role.Drop', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Drop", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support removing one or more roles using `DROP ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support removing one or more roles using `DROP ROLE` statement.\n" + "\n" + ), link=None, level=4, - num='5.4.9.1') + num="5.4.9.1", +) RQ_SRS_006_RBAC_Role_Drop_IfExists = Requirement( - name='RQ.SRS-006.RBAC.Role.Drop.IfExists', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Drop.IfExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP ROLE` statement\n' - 'to skip raising an exception if the role does not exist.\n' - 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' - 'raised if a role does not exist.\n' - '\n' - ), + "[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP ROLE` statement\n" + "to skip raising an exception if the role does not exist.\n" + "If the `IF EXISTS` clause is not specified then an exception SHALL be\n" + "raised if a role does not exist.\n" + "\n" + ), link=None, level=4, - 
num='5.4.9.2') + num="5.4.9.2", +) RQ_SRS_006_RBAC_Role_Drop_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Role.Drop.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Drop.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP ROLE` statement to specify the cluster from which to drop the specified role.\n' - '\n' - ), + "[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP ROLE` statement to specify the cluster from which to drop the specified role.\n" + "\n" + ), link=None, level=4, - num='5.4.9.3') + num="5.4.9.3", +) RQ_SRS_006_RBAC_Role_Drop_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Role.Drop.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Role.Drop.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `DROP ROLE` statement\n' - '\n' - '``` sql\n' - 'DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `DROP ROLE` statement\n" + "\n" + "``` sql\n" + "DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.4.9.4') + num="5.4.9.4", +) RQ_SRS_006_RBAC_Role_ShowCreate = Requirement( - name='RQ.SRS-006.RBAC.Role.ShowCreate', - version='1.0', + name="RQ.SRS-006.RBAC.Role.ShowCreate", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support viewing the settings for a role upon creation with the `SHOW CREATE ROLE`\n' - 'statement.\n' - '\n' - ), + "[ClickHouse] SHALL support viewing the settings for a role upon creation with the `SHOW CREATE ROLE`\n" + "statement.\n" + "\n" + ), link=None, level=4, - num='5.4.10.1') + num="5.4.10.1", +) RQ_SRS_006_RBAC_Role_ShowCreate_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Role.ShowCreate.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Role.ShowCreate.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `SHOW CREATE ROLE` command.\n' - '\n' - '```sql\n' - 'SHOW CREATE ROLE name\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `SHOW CREATE ROLE` command.\n" + "\n" + "```sql\n" + "SHOW CREATE ROLE name\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.4.10.2') + num="5.4.10.2", +) RQ_SRS_006_RBAC_PartialRevokes = Requirement( - name='RQ.SRS-006.RBAC.PartialRevokes', - version='1.0', + name="RQ.SRS-006.RBAC.PartialRevokes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support partial revoking of privileges granted\n' - 'to a **user** or a **role**.\n' - '\n' - ), + "[ClickHouse] SHALL support partial revoking of privileges granted\n" + "to a **user** or a **role**.\n" + "\n" + ), link=None, level=3, - num='5.5.1') + num="5.5.1", +) RQ_SRS_006_RBAC_PartialRevoke_Syntax = Requirement( - name='RQ.SRS-006.RBAC.PartialRevoke.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.PartialRevoke.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support partial revokes by using `partial_revokes` variable\n' - 'that can be set or unset using the following syntax.\n' - '\n' - 'To disable partial revokes the `partial_revokes` variable SHALL be set to `0`\n' - '\n' - '```sql\n' - 'SET partial_revokes = 0\n' - 
'```\n' - '\n' - 'To enable partial revokes the `partial revokes` variable SHALL be set to `1`\n' - '\n' - '```sql\n' - 'SET partial_revokes = 1\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support partial revokes by using `partial_revokes` variable\n" + "that can be set or unset using the following syntax.\n" + "\n" + "To disable partial revokes the `partial_revokes` variable SHALL be set to `0`\n" + "\n" + "```sql\n" + "SET partial_revokes = 0\n" + "```\n" + "\n" + "To enable partial revokes the `partial revokes` variable SHALL be set to `1`\n" + "\n" + "```sql\n" + "SET partial_revokes = 1\n" + "```\n" + "\n" + ), link=None, level=3, - num='5.5.2') + num="5.5.2", +) RQ_SRS_006_RBAC_SettingsProfile = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creation and manipulation of **settings profiles**\n' - 'that can include value definition for one or more variables and can\n' - 'can be assigned to one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support creation and manipulation of **settings profiles**\n" + "that can include value definition for one or more variables and can\n" + "can be assigned to one or more **users** or **roles**.\n" + "\n" + ), link=None, level=3, - num='5.6.1') + num="5.6.1", +) RQ_SRS_006_RBAC_SettingsProfile_Constraints = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Constraints', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Constraints", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning min, max and read-only constraints\n' - 'for the variables specified in the **settings profile**.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning min, max and read-only constraints\n" + "for the variables specified in the **settings profile**.\n" + "\n" + ), link=None, level=3, - num='5.6.2') + num="5.6.2", +) RQ_SRS_006_RBAC_SettingsProfile_Create = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creating settings profile using the `CREATE SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support creating settings profile using the `CREATE SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.3.1') + num="5.6.3.1", +) RQ_SRS_006_RBAC_SettingsProfile_Create_IfNotExists = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.IfNotExists', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create.IfNotExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE SETTINGS PROFILE` statement\n' - 'to skip raising an exception if a settings profile with the same **name** already exists.\n' - 'If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n' - 'a settings profile with the same **name** already exists.\n' - '\n' - ), + "[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE SETTINGS PROFILE` statement\n" + "to skip raising an exception if a settings profile with the same **name** already exists.\n" + "If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n" + "a settings profile with the same **name** 
already exists.\n" + "\n" + ), link=None, level=4, - num='5.6.3.2') + num="5.6.3.2", +) RQ_SRS_006_RBAC_SettingsProfile_Create_Replace = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.Replace', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Replace", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE SETTINGS PROFILE` statement\n' - 'to replace existing settings profile if it already exists.\n' - '\n' - ), + "[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE SETTINGS PROFILE` statement\n" + "to replace existing settings profile if it already exists.\n" + "\n" + ), link=None, level=4, - num='5.6.3.3') + num="5.6.3.3", +) RQ_SRS_006_RBAC_SettingsProfile_Create_Variables = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Variables", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning values and constraints to one or more\n' - 'variables in the `CREATE SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning values and constraints to one or more\n" + "variables in the `CREATE SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.3.4') + num="5.6.3.4", +) RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Value = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning variable value in the `CREATE SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning variable value in the `CREATE SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.3.5') + num="5.6.3.5", +) RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting `MIN`, `MAX`, `READONLY`, and `WRITABLE`\n' - 'constraints for the variables in the `CREATE SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support setting `MIN`, `MAX`, `READONLY`, and `WRITABLE`\n" + "constraints for the variables in the `CREATE SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.3.6') + num="5.6.3.6", +) RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning settings profile to one or more users\n' - 'or roles in the `CREATE SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning settings profile to one or more users\n" + "or roles in the `CREATE SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.3.7') + num="5.6.3.7", +) RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_None = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.None', - version='1.0', + 
name="RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning settings profile to no users or roles using\n' - '`TO NONE` clause in the `CREATE SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning settings profile to no users or roles using\n" + "`TO NONE` clause in the `CREATE SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.3.8') + num="5.6.3.8", +) RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_All = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.All', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning settings profile to all current users and roles\n' - 'using `TO ALL` clause in the `CREATE SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning settings profile to all current users and roles\n" + "using `TO ALL` clause in the `CREATE SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.3.9') + num="5.6.3.9", +) RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_AllExcept = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.AllExcept', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.AllExcept", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support excluding assignment to one or more users or roles using\n' - 'the `ALL EXCEPT` clause in the `CREATE SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support excluding assignment to one or more users or roles using\n" + "the `ALL EXCEPT` clause in the `CREATE SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.3.10') + num="5.6.3.10", +) RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.Inherit', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Inherit", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support inheriting profile settings from indicated profile using\n' - 'the `INHERIT` clause in the `CREATE SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support inheriting profile settings from indicated profile using\n" + "the `INHERIT` clause in the `CREATE SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.3.11') + num="5.6.3.11", +) RQ_SRS_006_RBAC_SettingsProfile_Create_OnCluster = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.OnCluster', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create.OnCluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying what cluster to create settings profile on\n' - 'using `ON CLUSTER` clause in the `CREATE SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying what cluster to create settings profile on\n" + "using `ON CLUSTER` clause in the `CREATE SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.3.12') + num="5.6.3.12", +) RQ_SRS_006_RBAC_SettingsProfile_Create_Syntax = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Create.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Syntax", + version="1.0", priority=None, group=None, 
type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `CREATE SETTINGS PROFILE` statement.\n' - '\n' - '``` sql\n' - 'CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name\n' - ' [ON CLUSTER cluster_name]\n' + "[ClickHouse] SHALL support the following syntax for the `CREATE SETTINGS PROFILE` statement.\n" + "\n" + "``` sql\n" + "CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name\n" + " [ON CLUSTER cluster_name]\n" " [SET varname [= value] [MIN min] [MAX max] [READONLY|WRITABLE] | [INHERIT 'profile_name'] [,...]]\n" - ' [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]}]\n' - '```\n' - '\n' - ), + " [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]}]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.6.3.13') + num="5.6.3.13", +) RQ_SRS_006_RBAC_SettingsProfile_Alter = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering settings profile using the `ALTER STETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering settings profile using the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.4.1') + num="5.6.4.1", +) RQ_SRS_006_RBAC_SettingsProfile_Alter_IfExists = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.IfExists', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.IfExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER SETTINGS PROFILE` statement\n' - 'to not raise exception if a settings profile does not exist.\n' - 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' - 'raised if a settings profile does not exist.\n' - '\n' - ), + "[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER SETTINGS PROFILE` statement\n" + "to not raise exception if a settings profile does not exist.\n" + "If the `IF EXISTS` clause is not specified then an exception SHALL be\n" + "raised if a settings profile does not exist.\n" + "\n" + ), link=None, level=4, - num='5.6.4.2') + num="5.6.4.2", +) RQ_SRS_006_RBAC_SettingsProfile_Alter_Rename = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Rename', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Rename", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support renaming settings profile using the `RANAME TO` clause\n' - 'in the `ALTER SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support renaming settings profile using the `RENAME TO` clause\n" + "in the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.4.3') + num="5.6.4.3", +) RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering values and constraints of one or more\n' - 'variables in the `ALTER SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering values and constraints of one or more\n" + "variables in the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.4.4') + num="5.6.4.4", 
+) RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering value of the variable in the `ALTER SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering value of the variable in the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.4.5') + num="5.6.4.5", +) RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering `MIN`, `MAX`, `READONLY`, and `WRITABLE`\n' - 'constraints for the variables in the `ALTER SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering `MIN`, `MAX`, `READONLY`, and `WRITABLE`\n" + "constraints for the variables in the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.4.6') + num="5.6.4.6", +) RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support reassigning settings profile to one or more users\n' - 'or roles using the `TO` clause in the `ALTER SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support reassigning settings profile to one or more users\n" + "or roles using the `TO` clause in the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.4.7') + num="5.6.4.7", +) RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_None = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.None', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support reassigning settings profile to no users or roles using the\n' - '`TO NONE` clause in the `ALTER SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support reassigning settings profile to no users or roles using the\n" + "`TO NONE` clause in the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.4.8') + num="5.6.4.8", +) RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_All = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.All', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support reassigning settings profile to all current users and roles\n' - 'using the `TO ALL` clause in the `ALTER SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support reassigning settings profile to all current users and roles\n" + "using the `TO ALL` clause in the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.4.9') + num="5.6.4.9", +) RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_AllExcept = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.AllExcept', - version='1.0', + 
name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.AllExcept", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support excluding assignment to one or more users or roles using\n' - 'the `TO ALL EXCEPT` clause in the `ALTER SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support excluding assignment to one or more users or roles using\n" + "the `TO ALL EXCEPT` clause in the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.4.10') + num="5.6.4.10", +) RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_Inherit = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Inherit', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Inherit", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering the settings profile by inheriting settings from\n' - 'specified profile using `INHERIT` clause in the `ALTER SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering the settings profile by inheriting settings from\n" + "specified profile using `INHERIT` clause in the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.4.11') + num="5.6.4.11", +) RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_OnCluster = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.OnCluster', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.OnCluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering the settings profile on a specified cluster using\n' - '`ON CLUSTER` clause in the `ALTER SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering the settings profile on a specified cluster using\n" + "`ON CLUSTER` clause in the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.4.12') + num="5.6.4.12", +) RQ_SRS_006_RBAC_SettingsProfile_Alter_Syntax = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `ALTER SETTINGS PROFILE` statement.\n' - '\n' - '``` sql\n' - 'ALTER SETTINGS PROFILE [IF EXISTS] name\n' - ' [ON CLUSTER cluster_name]\n' - ' [RENAME TO new_name]\n' + "[ClickHouse] SHALL support the following syntax for the `ALTER SETTINGS PROFILE` statement.\n" + "\n" + "``` sql\n" + "ALTER SETTINGS PROFILE [IF EXISTS] name\n" + " [ON CLUSTER cluster_name]\n" + " [RENAME TO new_name]\n" " [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [READONLY|WRITABLE] | INHERIT 'profile_name'] [,...]\n" - ' [TO {user_or_role [,...] | NONE | ALL | ALL EXCEPT user_or_role [,...]]}\n' - '```\n' - '\n' - ), + " [TO {user_or_role [,...] 
| NONE | ALL | ALL EXCEPT user_or_role [,...]]}\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.6.4.13') + num="5.6.4.13", +) RQ_SRS_006_RBAC_SettingsProfile_Drop = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Drop', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Drop", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support removing one or more settings profiles using the `DROP SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support removing one or more settings profiles using the `DROP SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.5.1') + num="5.6.5.1", +) RQ_SRS_006_RBAC_SettingsProfile_Drop_IfExists = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Drop.IfExists', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Drop.IfExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP SETTINGS PROFILE` statement\n' - 'to skip raising an exception if the settings profile does not exist.\n' - 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' - 'raised if a settings profile does not exist.\n' - '\n' - ), + "[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP SETTINGS PROFILE` statement\n" + "to skip raising an exception if the settings profile does not exist.\n" + "If the `IF EXISTS` clause is not specified then an exception SHALL be\n" + "raised if a settings profile does not exist.\n" + "\n" + ), link=None, level=4, - num='5.6.5.2') + num="5.6.5.2", +) RQ_SRS_006_RBAC_SettingsProfile_Drop_OnCluster = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Drop.OnCluster', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Drop.OnCluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support dropping one or more settings profiles on specified cluster using\n' - '`ON CLUSTER` clause in the `DROP SETTINGS PROFILE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support dropping one or more settings profiles on specified cluster using\n" + "`ON CLUSTER` clause in the `DROP SETTINGS PROFILE` statement.\n" + "\n" + ), link=None, level=4, - num='5.6.5.3') + num="5.6.5.3", +) RQ_SRS_006_RBAC_SettingsProfile_Drop_Syntax = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.Drop.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.Drop.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `DROP SETTINGS PROFILE` statement\n' - '\n' - '``` sql\n' - 'DROP SETTINGS PROFILE [IF EXISTS] name [,name,...]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `DROP SETTINGS PROFILE` statement\n" + "\n" + "``` sql\n" + "DROP SETTINGS PROFILE [IF EXISTS] name [,name,...]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.6.5.4') + num="5.6.5.4", +) RQ_SRS_006_RBAC_SettingsProfile_ShowCreateSettingsProfile = Requirement( - name='RQ.SRS-006.RBAC.SettingsProfile.ShowCreateSettingsProfile', - version='1.0', + name="RQ.SRS-006.RBAC.SettingsProfile.ShowCreateSettingsProfile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing the `CREATE SETTINGS PROFILE` statement used to create the settings profile\n' - 'using the `SHOW CREATE SETTINGS PROFILE` statement with the following syntax\n' - '\n' - 
'``` sql\n' - 'SHOW CREATE SETTINGS PROFILE name\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support showing the `CREATE SETTINGS PROFILE` statement used to create the settings profile\n" + "using the `SHOW CREATE SETTINGS PROFILE` statement with the following syntax\n" + "\n" + "``` sql\n" + "SHOW CREATE SETTINGS PROFILE name\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.6.6.1') + num="5.6.6.1", +) RQ_SRS_006_RBAC_Quotas = Requirement( - name='RQ.SRS-006.RBAC.Quotas', - version='1.0', + name="RQ.SRS-006.RBAC.Quotas", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creation and manipulation of **quotas**\n' - 'that can be used to limit resource usage by a **user** or a **role**\n' - 'over a period of time.\n' - '\n' - ), + "[ClickHouse] SHALL support creation and manipulation of **quotas**\n" + "that can be used to limit resource usage by a **user** or a **role**\n" + "over a period of time.\n" + "\n" + ), link=None, level=3, - num='5.7.1') + num="5.7.1", +) RQ_SRS_006_RBAC_Quotas_Keyed = Requirement( - name='RQ.SRS-006.RBAC.Quotas.Keyed', - version='1.0', + name="RQ.SRS-006.RBAC.Quotas.Keyed", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creating **quotas** that are keyed\n' - 'so that a quota is tracked separately for each key value.\n' - '\n' - ), + "[ClickHouse] SHALL support creating **quotas** that are keyed\n" + "so that a quota is tracked separately for each key value.\n" + "\n" + ), link=None, level=3, - num='5.7.2') + num="5.7.2", +) RQ_SRS_006_RBAC_Quotas_Queries = Requirement( - name='RQ.SRS-006.RBAC.Quotas.Queries', - version='1.0', + name="RQ.SRS-006.RBAC.Quotas.Queries", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting **queries** quota to limit the total number of requests.\n' - '\n' - ), + "[ClickHouse] SHALL support setting **queries** quota to limit the total number of requests.\n" + "\n" + ), link=None, level=3, - num='5.7.3') + num="5.7.3", +) RQ_SRS_006_RBAC_Quotas_Errors = Requirement( - name='RQ.SRS-006.RBAC.Quotas.Errors', - version='1.0', + name="RQ.SRS-006.RBAC.Quotas.Errors", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting **errors** quota to limit the number of queries that threw an exception.\n' - '\n' - ), + "[ClickHouse] SHALL support setting **errors** quota to limit the number of queries that threw an exception.\n" + "\n" + ), link=None, level=3, - num='5.7.4') + num="5.7.4", +) RQ_SRS_006_RBAC_Quotas_ResultRows = Requirement( - name='RQ.SRS-006.RBAC.Quotas.ResultRows', - version='1.0', + name="RQ.SRS-006.RBAC.Quotas.ResultRows", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting **result rows** quota to limit the\n' - 'the total number of rows given as the result.\n' - '\n' - ), + "[ClickHouse] SHALL support setting **result rows** quota to limit the\n" + "the total number of rows given as the result.\n" + "\n" + ), link=None, level=3, - num='5.7.5') + num="5.7.5", +) RQ_SRS_006_RBAC_Quotas_ReadRows = Requirement( - name='RQ.SRS-006.RBAC.Quotas.ReadRows', - version='1.0', + name="RQ.SRS-006.RBAC.Quotas.ReadRows", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting **read rows** quota to limit the total\n' - 'number of source rows read from tables for running 
the query on all remote servers.\n' - '\n' - ), + "[ClickHouse] SHALL support setting **read rows** quota to limit the total\n" + "number of source rows read from tables for running the query on all remote servers.\n" + "\n" + ), link=None, level=3, - num='5.7.6') + num="5.7.6", +) RQ_SRS_006_RBAC_Quotas_ResultBytes = Requirement( - name='RQ.SRS-006.RBAC.Quotas.ResultBytes', - version='1.0', + name="RQ.SRS-006.RBAC.Quotas.ResultBytes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting **result bytes** quota to limit the total number\n' - 'of bytes that can be returned as the result.\n' - '\n' - ), + "[ClickHouse] SHALL support setting **result bytes** quota to limit the total number\n" + "of bytes that can be returned as the result.\n" + "\n" + ), link=None, level=3, - num='5.7.7') + num="5.7.7", +) RQ_SRS_006_RBAC_Quotas_ReadBytes = Requirement( - name='RQ.SRS-006.RBAC.Quotas.ReadBytes', - version='1.0', + name="RQ.SRS-006.RBAC.Quotas.ReadBytes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting **read bytes** quota to limit the total number\n' - 'of source bytes read from tables for running the query on all remote servers.\n' - '\n' - ), + "[ClickHouse] SHALL support setting **read bytes** quota to limit the total number\n" + "of source bytes read from tables for running the query on all remote servers.\n" + "\n" + ), link=None, level=3, - num='5.7.8') + num="5.7.8", +) RQ_SRS_006_RBAC_Quotas_ExecutionTime = Requirement( - name='RQ.SRS-006.RBAC.Quotas.ExecutionTime', - version='1.0', + name="RQ.SRS-006.RBAC.Quotas.ExecutionTime", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting **execution time** quota to limit the maximum\n' - 'query execution time.\n' - '\n' - ), + "[ClickHouse] SHALL support setting **execution time** quota to limit the maximum\n" + "query execution time.\n" + "\n" + ), link=None, level=3, - num='5.7.9') + num="5.7.9", +) RQ_SRS_006_RBAC_Quota_Create = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creating quotas using the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support creating quotas using the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.1') + num="5.7.10.1", +) RQ_SRS_006_RBAC_Quota_Create_IfNotExists = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.IfNotExists', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.IfNotExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE QUOTA` statement\n' - 'to skip raising an exception if a quota with the same **name** already exists.\n' - 'If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n' - 'a quota with the same **name** already exists.\n' - '\n' - ), + "[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE QUOTA` statement\n" + "to skip raising an exception if a quota with the same **name** already exists.\n" + "If `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n" + "a quota with the same **name** already exists.\n" + "\n" + ), link=None, level=4, - num='5.7.10.2') + num="5.7.10.2", +) RQ_SRS_006_RBAC_Quota_Create_Replace = 
Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.Replace', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.Replace", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE QUOTA` statement\n' - 'to replace existing quota if it already exists.\n' - '\n' - ), + "[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE QUOTA` statement\n" + "to replace existing quota if it already exists.\n" + "\n" + ), link=None, level=4, - num='5.7.10.3') + num="5.7.10.3", +) RQ_SRS_006_RBAC_Quota_Create_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creating quotas on a specific cluster with the\n' - '`ON CLUSTER` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support creating quotas on a specific cluster with the\n" + "`ON CLUSTER` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.4') + num="5.7.10.4", +) RQ_SRS_006_RBAC_Quota_Create_Interval = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.Interval', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.Interval", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support defining the quota interval that specifies\n' - 'a period of time over for which the quota SHALL apply using the\n' - '`FOR INTERVAL` clause in the `CREATE QUOTA` statement.\n' - '\n' - 'This statement SHALL also support a number and a time period which will be one\n' - 'of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:\n' - '\n' - '`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some real number\n' - 'to define the interval.\n' - '\n' - ), + "[ClickHouse] SHALL support defining the quota interval that specifies\n" + "a period of time over for which the quota SHALL apply using the\n" + "`FOR INTERVAL` clause in the `CREATE QUOTA` statement.\n" + "\n" + "This statement SHALL also support a number and a time period which will be one\n" + "of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:\n" + "\n" + "`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some real number\n" + "to define the interval.\n" + "\n" + ), link=None, level=4, - num='5.7.10.5') + num="5.7.10.5", +) RQ_SRS_006_RBAC_Quota_Create_Interval_Randomized = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.Interval.Randomized', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.Interval.Randomized", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support defining the quota randomized interval that specifies\n' - 'a period of time over for which the quota SHALL apply using the\n' - '`FOR RANDOMIZED INTERVAL` clause in the `CREATE QUOTA` statement.\n' - '\n' - 'This statement SHALL also support a number and a time period which will be one\n' - 'of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. 
Thus, the complete syntax SHALL be:\n' - '\n' - '`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some\n' - 'real number to define the interval.\n' - '\n' - ), + "[ClickHouse] SHALL support defining the quota randomized interval that specifies\n" + "a period of time over for which the quota SHALL apply using the\n" + "`FOR RANDOMIZED INTERVAL` clause in the `CREATE QUOTA` statement.\n" + "\n" + "This statement SHALL also support a number and a time period which will be one\n" + "of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:\n" + "\n" + "`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some\n" + "real number to define the interval.\n" + "\n" + ), link=None, level=4, - num='5.7.10.6') + num="5.7.10.6", +) RQ_SRS_006_RBAC_Quota_Create_Queries = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.Queries', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.Queries", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting number of requests over a period of time\n' - 'using the `QUERIES` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting number of requests over a period of time\n" + "using the `QUERIES` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.7') + num="5.7.10.7", +) RQ_SRS_006_RBAC_Quota_Create_Errors = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.Errors', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.Errors", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting number of queries that threw an exception\n' - 'using the `ERRORS` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting number of queries that threw an exception\n" + "using the `ERRORS` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.8') + num="5.7.10.8", +) RQ_SRS_006_RBAC_Quota_Create_ResultRows = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.ResultRows', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.ResultRows", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting the total number of rows given as the result\n' - 'using the `RESULT ROWS` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting the total number of rows given as the result\n" + "using the `RESULT ROWS` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.9') + num="5.7.10.9", +) RQ_SRS_006_RBAC_Quota_Create_ReadRows = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.ReadRows', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.ReadRows", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting the total number of source rows read from tables\n' - 'for running the query on all remote servers\n' - 'using the `READ ROWS` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting the total number of source rows read from tables\n" + "for running the query on all remote servers\n" + "using the `READ ROWS` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.10') + num="5.7.10.10", +) RQ_SRS_006_RBAC_Quota_Create_ResultBytes = Requirement( - 
name='RQ.SRS-006.RBAC.Quota.Create.ResultBytes', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.ResultBytes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting the total number of bytes that can be returned as the result\n' - 'using the `RESULT BYTES` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting the total number of bytes that can be returned as the result\n" + "using the `RESULT BYTES` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.11') + num="5.7.10.11", +) RQ_SRS_006_RBAC_Quota_Create_ReadBytes = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.ReadBytes', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.ReadBytes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting the total number of source bytes read from tables\n' - 'for running the query on all remote servers\n' - 'using the `READ BYTES` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting the total number of source bytes read from tables\n" + "for running the query on all remote servers\n" + "using the `READ BYTES` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.12') + num="5.7.10.12", +) RQ_SRS_006_RBAC_Quota_Create_ExecutionTime = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.ExecutionTime', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.ExecutionTime", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting the maximum query execution time\n' - 'using the `EXECUTION TIME` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting the maximum query execution time\n" + "using the `EXECUTION TIME` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.13') + num="5.7.10.13", +) RQ_SRS_006_RBAC_Quota_Create_NoLimits = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.NoLimits', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.NoLimits", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting the maximum query execution time\n' - 'using the `NO LIMITS` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting the maximum query execution time\n" + "using the `NO LIMITS` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.14') + num="5.7.10.14", +) RQ_SRS_006_RBAC_Quota_Create_TrackingOnly = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.TrackingOnly', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.TrackingOnly", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting the maximum query execution time\n' - 'using the `TRACKING ONLY` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting the maximum query execution time\n" + "using the `TRACKING ONLY` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.15') + num="5.7.10.15", +) RQ_SRS_006_RBAC_Quota_Create_KeyedBy = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.KeyedBy', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.KeyedBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support to 
track quota for some key\n' - 'following the `KEYED BY` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support to track quota for some key\n" + "following the `KEYED BY` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.16') + num="5.7.10.16", +) RQ_SRS_006_RBAC_Quota_Create_KeyedByOptions = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.KeyedByOptions', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.KeyedByOptions", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support to track quota separately for some parameter\n' + "[ClickHouse] SHALL support to track quota separately for some parameter\n" "using the `KEYED BY 'parameter'` clause in the `CREATE QUOTA` statement.\n" - '\n' + "\n" "'parameter' can be one of:\n" "`{'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}`\n" - '\n' - ), + "\n" + ), link=None, level=4, - num='5.7.10.17') + num="5.7.10.17", +) RQ_SRS_006_RBAC_Quota_Create_Assignment = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.Assignment', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.Assignment", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning quota to one or more users\n' - 'or roles using the `TO` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning quota to one or more users\n" + "or roles using the `TO` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.18') + num="5.7.10.18", +) RQ_SRS_006_RBAC_Quota_Create_Assignment_None = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.Assignment.None', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.Assignment.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning quota to no users or roles using\n' - '`TO NONE` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning quota to no users or roles using\n" + "`TO NONE` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.19') + num="5.7.10.19", +) RQ_SRS_006_RBAC_Quota_Create_Assignment_All = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.Assignment.All', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.Assignment.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning quota to all current users and roles\n' - 'using `TO ALL` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning quota to all current users and roles\n" + "using `TO ALL` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.10.20') + num="5.7.10.20", +) RQ_SRS_006_RBAC_Quota_Create_Assignment_Except = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.Assignment.Except', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.Assignment.Except", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using\n' - 'the `EXCEPT` clause in the `CREATE QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using\n" + "the `EXCEPT` clause in the `CREATE QUOTA` statement.\n" + "\n" + ), link=None, level=4, - 
num='5.7.10.21') + num="5.7.10.21", +) RQ_SRS_006_RBAC_Quota_Create_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Quota.Create.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Create.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `CREATE QUOTA` statement\n' - '\n' - '```sql\n' - 'CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]\n' + "[ClickHouse] SHALL support the following syntax for the `CREATE QUOTA` statement\n" + "\n" + "```sql\n" + "CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]\n" " [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]\n" - ' [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY}\n' - ' {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |\n' - ' NO LIMITS | TRACKING ONLY} [,...]]\n' - ' [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]\n' - '```\n' - '\n' - ), + " [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY}\n" + " {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |\n" + " NO LIMITS | TRACKING ONLY} [,...]]\n" + " [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.7.10.22') + num="5.7.10.22", +) RQ_SRS_006_RBAC_Quota_Alter = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering quotas using the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering quotas using the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.1') + num="5.7.11.1", +) RQ_SRS_006_RBAC_Quota_Alter_IfExists = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.IfExists', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.IfExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER QUOTA` statement\n' - 'to skip raising an exception if a quota does not exist.\n' - 'If the `IF EXISTS` clause is not specified then an exception SHALL be raised if\n' - 'a quota does not exist.\n' - '\n' - ), + "[ClickHouse] SHALL support `IF EXISTS` clause in the `ALTER QUOTA` statement\n" + "to skip raising an exception if a quota does not exist.\n" + "If the `IF EXISTS` clause is not specified then an exception SHALL be raised if\n" + "a quota does not exist.\n" + "\n" + ), link=None, level=4, - num='5.7.11.2') + num="5.7.11.2", +) RQ_SRS_006_RBAC_Quota_Alter_Rename = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.Rename', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.Rename", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `RENAME TO` clause in the `ALTER QUOTA` statement\n' - 'to rename the quota to the specified name.\n' - '\n' - ), + "[ClickHouse] SHALL support `RENAME TO` clause in the `ALTER QUOTA` statement\n" + "to rename the quota to the specified name.\n" + "\n" + ), link=None, level=4, - num='5.7.11.3') + num="5.7.11.3", +) RQ_SRS_006_RBAC_Quota_Alter_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.Cluster", + version="1.0", 
priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering quotas on a specific cluster with the\n' - '`ON CLUSTER` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering quotas on a specific cluster with the\n" + "`ON CLUSTER` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.4') + num="5.7.11.4", +) RQ_SRS_006_RBAC_Quota_Alter_Interval = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.Interval', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.Interval", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support redefining the quota interval that specifies\n' - 'a period of time over for which the quota SHALL apply using the\n' - '`FOR INTERVAL` clause in the `ALTER QUOTA` statement.\n' - '\n' - 'This statement SHALL also support a number and a time period which will be one\n' - 'of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:\n' - '\n' - '`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some real number\n' - 'to define the interval.\n' - '\n' - ), + "[ClickHouse] SHALL support redefining the quota interval that specifies\n" + "a period of time over for which the quota SHALL apply using the\n" + "`FOR INTERVAL` clause in the `ALTER QUOTA` statement.\n" + "\n" + "This statement SHALL also support a number and a time period which will be one\n" + "of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:\n" + "\n" + "`FOR INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some real number\n" + "to define the interval.\n" + "\n" + ), link=None, level=4, - num='5.7.11.5') + num="5.7.11.5", +) RQ_SRS_006_RBAC_Quota_Alter_Interval_Randomized = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.Interval.Randomized', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.Interval.Randomized", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support redefining the quota randomized interval that specifies\n' - 'a period of time over for which the quota SHALL apply using the\n' - '`FOR RANDOMIZED INTERVAL` clause in the `ALTER QUOTA` statement.\n' - '\n' - 'This statement SHALL also support a number and a time period which will be one\n' - 'of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. Thus, the complete syntax SHALL be:\n' - '\n' - '`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some\n' - 'real number to define the interval.\n' - '\n' - ), + "[ClickHouse] SHALL support redefining the quota randomized interval that specifies\n" + "a period of time over for which the quota SHALL apply using the\n" + "`FOR RANDOMIZED INTERVAL` clause in the `ALTER QUOTA` statement.\n" + "\n" + "This statement SHALL also support a number and a time period which will be one\n" + "of `{SECOND | MINUTE | HOUR | DAY | MONTH}`. 
Thus, the complete syntax SHALL be:\n" + "\n" + "`FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY}` where number is some\n" + "real number to define the interval.\n" + "\n" + ), link=None, level=4, - num='5.7.11.6') + num="5.7.11.6", +) RQ_SRS_006_RBAC_Quota_Alter_Queries = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.Queries', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.Queries", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering the limit of number of requests over a period of time\n' - 'using the `QUERIES` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering the limit of number of requests over a period of time\n" + "using the `QUERIES` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.7') + num="5.7.11.7", +) RQ_SRS_006_RBAC_Quota_Alter_Errors = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.Errors', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.Errors", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering the limit of number of queries that threw an exception\n' - 'using the `ERRORS` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering the limit of number of queries that threw an exception\n" + "using the `ERRORS` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.8') + num="5.7.11.8", +) RQ_SRS_006_RBAC_Quota_Alter_ResultRows = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.ResultRows', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.ResultRows", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering the limit of the total number of rows given as the result\n' - 'using the `RESULT ROWS` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering the limit of the total number of rows given as the result\n" + "using the `RESULT ROWS` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.9') + num="5.7.11.9", +) RQ_SRS_006_RBAC_Quota_Alter_ReadRows = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.ReadRows', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.ReadRows", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering the limit of the total number of source rows read from tables\n' - 'for running the query on all remote servers\n' - 'using the `READ ROWS` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering the limit of the total number of source rows read from tables\n" + "for running the query on all remote servers\n" + "using the `READ ROWS` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.10') + num="5.7.11.10", +) RQ_SRS_006_RBAC_Quota_ALter_ResultBytes = Requirement( - name='RQ.SRS-006.RBAC.Quota.ALter.ResultBytes', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.ALter.ResultBytes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering the limit of the total number of bytes that can be returned as the result\n' - 'using the `RESULT BYTES` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering the limit of the total number of bytes that can be returned as the result\n" + "using the 
`RESULT BYTES` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.11') + num="5.7.11.11", +) RQ_SRS_006_RBAC_Quota_Alter_ReadBytes = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.ReadBytes', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.ReadBytes", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering the limit of the total number of source bytes read from tables\n' - 'for running the query on all remote servers\n' - 'using the `READ BYTES` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering the limit of the total number of source bytes read from tables\n" + "for running the query on all remote servers\n" + "using the `READ BYTES` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.12') + num="5.7.11.12", +) RQ_SRS_006_RBAC_Quota_Alter_ExecutionTime = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.ExecutionTime', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.ExecutionTime", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering the limit of the maximum query execution time\n' - 'using the `EXECUTION TIME` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering the limit of the maximum query execution time\n" + "using the `EXECUTION TIME` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.13') + num="5.7.11.13", +) RQ_SRS_006_RBAC_Quota_Alter_NoLimits = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.NoLimits', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.NoLimits", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting the maximum query execution time\n' - 'using the `NO LIMITS` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting the maximum query execution time\n" + "using the `NO LIMITS` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.14') + num="5.7.11.14", +) RQ_SRS_006_RBAC_Quota_Alter_TrackingOnly = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.TrackingOnly', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.TrackingOnly", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support limiting the maximum query execution time\n' - 'using the `TRACKING ONLY` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support limiting the maximum query execution time\n" + "using the `TRACKING ONLY` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.15') + num="5.7.11.15", +) RQ_SRS_006_RBAC_Quota_Alter_KeyedBy = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.KeyedBy', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.KeyedBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering quota to track quota separately for some key\n' - 'following the `KEYED BY` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering quota to track quota separately for some key\n" + "following the `KEYED BY` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.16') + num="5.7.11.16", +) RQ_SRS_006_RBAC_Quota_Alter_KeyedByOptions = Requirement( - 
name='RQ.SRS-006.RBAC.Quota.Alter.KeyedByOptions', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.KeyedByOptions", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering quota to track quota separately for some parameter\n' + "[ClickHouse] SHALL support altering quota to track quota separately for some parameter\n" "using the `KEYED BY 'parameter'` clause in the `ALTER QUOTA` statement.\n" - '\n' + "\n" "'parameter' can be one of:\n" "`{'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}`\n" - '\n' - ), + "\n" + ), link=None, level=4, - num='5.7.11.17') + num="5.7.11.17", +) RQ_SRS_006_RBAC_Quota_Alter_Assignment = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.Assignment', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.Assignment", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support reassigning quota to one or more users\n' - 'or roles using the `TO` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support reassigning quota to one or more users\n" + "or roles using the `TO` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.18') + num="5.7.11.18", +) RQ_SRS_006_RBAC_Quota_Alter_Assignment_None = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.None', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.Assignment.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support reassigning quota to no users or roles using\n' - '`TO NONE` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support reassigning quota to no users or roles using\n" + "`TO NONE` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.19') + num="5.7.11.19", +) RQ_SRS_006_RBAC_Quota_Alter_Assignment_All = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.All', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.Assignment.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support reassigning quota to all current users and roles\n' - 'using `TO ALL` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support reassigning quota to all current users and roles\n" + "using `TO ALL` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.20') + num="5.7.11.20", +) RQ_SRS_006_RBAC_Quota_Alter_Assignment_Except = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.Except', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.Assignment.Except", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using\n' - 'the `EXCEPT` clause in the `ALTER QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support excluding assignment of quota to one or more users or roles using\n" + "the `EXCEPT` clause in the `ALTER QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.11.21') + num="5.7.11.21", +) RQ_SRS_006_RBAC_Quota_Alter_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Quota.Alter.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Alter.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `ALTER QUOTA` 
statement\n' - '\n' - '``` sql\n' - 'ALTER QUOTA [IF EXIST] name\n' - ' {{{QUERIES | ERRORS | RESULT ROWS | READ ROWS | RESULT BYTES | READ BYTES | EXECUTION TIME} number} [, ...] FOR INTERVAL number time_unit} [, ...]\n' - ' [KEYED BY USERNAME | KEYED BY IP | NOT KEYED] [ALLOW CUSTOM KEY | DISALLOW CUSTOM KEY]\n' - ' [TO {user_or_role [,...] | NONE | ALL} [EXCEPT user_or_role [,...]]]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `ALTER QUOTA` statement\n" + "\n" + "``` sql\n" + "ALTER QUOTA [IF EXIST] name\n" + " {{{QUERIES | ERRORS | RESULT ROWS | READ ROWS | RESULT BYTES | READ BYTES | EXECUTION TIME} number} [, ...] FOR INTERVAL number time_unit} [, ...]\n" + " [KEYED BY USERNAME | KEYED BY IP | NOT KEYED] [ALLOW CUSTOM KEY | DISALLOW CUSTOM KEY]\n" + " [TO {user_or_role [,...] | NONE | ALL} [EXCEPT user_or_role [,...]]]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.7.11.22') + num="5.7.11.22", +) RQ_SRS_006_RBAC_Quota_Drop = Requirement( - name='RQ.SRS-006.RBAC.Quota.Drop', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Drop", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support removing one or more quotas using the `DROP QUOTA` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support removing one or more quotas using the `DROP QUOTA` statement.\n" + "\n" + ), link=None, level=4, - num='5.7.12.1') + num="5.7.12.1", +) RQ_SRS_006_RBAC_Quota_Drop_IfExists = Requirement( - name='RQ.SRS-006.RBAC.Quota.Drop.IfExists', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Drop.IfExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP QUOTA` statement\n' - 'to skip raising an exception when the quota does not exist.\n' - 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' - 'raised if the quota does not exist.\n' - '\n' - ), + "[ClickHouse] SHALL support using `IF EXISTS` clause in the `DROP QUOTA` statement\n" + "to skip raising an exception when the quota does not exist.\n" + "If the `IF EXISTS` clause is not specified then an exception SHALL be\n" + "raised if the quota does not exist.\n" + "\n" + ), link=None, level=4, - num='5.7.12.2') + num="5.7.12.2", +) RQ_SRS_006_RBAC_Quota_Drop_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Quota.Drop.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Drop.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP QUOTA` statement\n' - 'to indicate the cluster the quota to be dropped is located on.\n' - '\n' - ), + "[ClickHouse] SHALL support using `ON CLUSTER` clause in the `DROP QUOTA` statement\n" + "to indicate the cluster the quota to be dropped is located on.\n" + "\n" + ), link=None, level=4, - num='5.7.12.3') + num="5.7.12.3", +) RQ_SRS_006_RBAC_Quota_Drop_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Quota.Drop.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.Drop.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `DROP QUOTA` statement\n' - '\n' - '``` sql\n' - 'DROP QUOTA [IF EXISTS] name [,name...]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `DROP QUOTA` statement\n" + "\n" + "``` sql\n" + "DROP QUOTA [IF EXISTS] name [,name...]\n" + "```\n" + "\n" + ), link=None, level=4, - 
num='5.7.12.4') + num="5.7.12.4", +) RQ_SRS_006_RBAC_Quota_ShowQuotas = Requirement( - name='RQ.SRS-006.RBAC.Quota.ShowQuotas', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.ShowQuotas", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing all of the current quotas\n' - 'using the `SHOW QUOTAS` statement with the following syntax\n' - '\n' - ), + "[ClickHouse] SHALL support showing all of the current quotas\n" + "using the `SHOW QUOTAS` statement with the following syntax\n" + "\n" + ), link=None, level=4, - num='5.7.13.1') + num="5.7.13.1", +) RQ_SRS_006_RBAC_Quota_ShowQuotas_IntoOutfile = Requirement( - name='RQ.SRS-006.RBAC.Quota.ShowQuotas.IntoOutfile', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.ShowQuotas.IntoOutfile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `INTO OUTFILE` clause in the `SHOW QUOTAS` statement to define an outfile by some given string literal.\n' - '\n' - ), + "[ClickHouse] SHALL support the `INTO OUTFILE` clause in the `SHOW QUOTAS` statement to define an outfile by some given string literal.\n" + "\n" + ), link=None, level=4, - num='5.7.13.2') + num="5.7.13.2", +) RQ_SRS_006_RBAC_Quota_ShowQuotas_Format = Requirement( - name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Format', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.ShowQuotas.Format", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `FORMAT` clause in the `SHOW QUOTAS` statement to define a format for the output quota list.\n' - '\n' - 'The types of valid formats are many, listed in output column:\n' - 'https://clickhouse.com/docs/en/interfaces/formats/\n' - '\n' - ), + "[ClickHouse] SHALL support the `FORMAT` clause in the `SHOW QUOTAS` statement to define a format for the output quota list.\n" + "\n" + "The types of valid formats are many, listed in output column:\n" + "https://clickhouse.com/docs/en/interfaces/formats/\n" + "\n" + ), link=None, level=4, - num='5.7.13.3') + num="5.7.13.3", +) RQ_SRS_006_RBAC_Quota_ShowQuotas_Settings = Requirement( - name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Settings', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.ShowQuotas.Settings", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `SETTINGS` clause in the `SHOW QUOTAS` statement to define settings in the showing of all quotas.\n' - '\n' - ), + "[ClickHouse] SHALL support the `SETTINGS` clause in the `SHOW QUOTAS` statement to define settings in the showing of all quotas.\n" + "\n" + ), link=None, level=4, - num='5.7.13.4') + num="5.7.13.4", +) RQ_SRS_006_RBAC_Quota_ShowQuotas_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.ShowQuotas.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using the `SHOW QUOTAS` statement\n' - 'with the following syntax\n' - '``` sql\n' - 'SHOW QUOTAS\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support using the `SHOW QUOTAS` statement\n" + "with the following syntax\n" + "``` sql\n" + "SHOW QUOTAS\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.7.13.5') + num="5.7.13.5", +) RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Name = Requirement( - name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Name', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Name", + version="1.0", priority=None, 
group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing the `CREATE QUOTA` statement used to create the quota with some given name\n' - 'using the `SHOW CREATE QUOTA` statement with the following syntax\n' - '\n' - '``` sql\n' - 'SHOW CREATE QUOTA name\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support showing the `CREATE QUOTA` statement used to create the quota with some given name\n" + "using the `SHOW CREATE QUOTA` statement with the following syntax\n" + "\n" + "``` sql\n" + "SHOW CREATE QUOTA name\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.7.14.1') + num="5.7.14.1", +) RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Current = Requirement( - name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Current', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Current", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing the `CREATE QUOTA` statement used to create the CURRENT quota\n' - 'using the `SHOW CREATE QUOTA CURRENT` statement or the shorthand form\n' - '`SHOW CREATE QUOTA`\n' - '\n' - ), + "[ClickHouse] SHALL support showing the `CREATE QUOTA` statement used to create the CURRENT quota\n" + "using the `SHOW CREATE QUOTA CURRENT` statement or the shorthand form\n" + "`SHOW CREATE QUOTA`\n" + "\n" + ), link=None, level=4, - num='5.7.14.2') + num="5.7.14.2", +) RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax when\n' - 'using the `SHOW CREATE QUOTA` statement.\n' - '\n' - '```sql\n' - 'SHOW CREATE QUOTA [name | CURRENT]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax when\n" + "using the `SHOW CREATE QUOTA` statement.\n" + "\n" + "```sql\n" + "SHOW CREATE QUOTA [name | CURRENT]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.7.14.3') + num="5.7.14.3", +) RQ_SRS_006_RBAC_RowPolicy = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creation and manipulation of table **row policies**\n' - 'that can be used to limit access to the table contents for a **user** or a **role**\n' - 'using a specified **condition**.\n' - '\n' - ), + "[ClickHouse] SHALL support creation and manipulation of table **row policies**\n" + "that can be used to limit access to the table contents for a **user** or a **role**\n" + "using a specified **condition**.\n" + "\n" + ), link=None, level=3, - num='5.8.1') + num="5.8.1", +) RQ_SRS_006_RBAC_RowPolicy_Condition = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Condition', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Condition", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support row policy **conditions** that can be any SQL\n' - 'expression that returns a boolean.\n' - '\n' - ), + "[ClickHouse] SHALL support row policy **conditions** that can be any SQL\n" + "expression that returns a boolean.\n" + "\n" + ), link=None, level=3, - num='5.8.2') + num="5.8.2", +) RQ_SRS_006_RBAC_RowPolicy_Restriction = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Restriction', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Restriction", + version="1.0", 
priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL restrict all access to a table when a row policy with a condition is created on that table.\n' - 'All users require a permissive row policy in order to view the table.\n' - '\n' - ), + "[ClickHouse] SHALL restrict all access to a table when a row policy with a condition is created on that table.\n" + "All users require a permissive row policy in order to view the table.\n" + "\n" + ), link=None, level=3, - num='5.8.3') + num="5.8.3", +) RQ_SRS_006_RBAC_RowPolicy_Nesting = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Nesting', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Nesting", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL restrict rows of tables or views created on top of a table with row policies according to those policies.\n' - '\n' - ), + "[ClickHouse] SHALL restrict rows of tables or views created on top of a table with row policies according to those policies.\n" + "\n" + ), link=None, level=3, - num='5.8.4') + num="5.8.4", +) RQ_SRS_006_RBAC_RowPolicy_Create = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support creating row policy using the `CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support creating row policy using the `CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.5.1') + num="5.8.5.1", +) RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.IfNotExists', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.IfNotExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE ROW POLICY` statement\n' - 'to skip raising an exception if a row policy with the same **name** already exists.\n' - 'If the `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n' - 'a row policy with the same **name** already exists.\n' - '\n' - ), + "[ClickHouse] SHALL support `IF NOT EXISTS` clause in the `CREATE ROW POLICY` statement\n" + "to skip raising an exception if a row policy with the same **name** already exists.\n" + "If the `IF NOT EXISTS` clause is not specified then an exception SHALL be raised if\n" + "a row policy with the same **name** already exists.\n" + "\n" + ), link=None, level=4, - num='5.8.5.2') + num="5.8.5.2", +) RQ_SRS_006_RBAC_RowPolicy_Create_Replace = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.Replace', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.Replace", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROW POLICY` statement\n' - 'to replace existing row policy if it already exists.\n' - '\n' - ), + "[ClickHouse] SHALL support `OR REPLACE` clause in the `CREATE ROW POLICY` statement\n" + "to replace existing row policy if it already exists.\n" + "\n" + ), link=None, level=4, - num='5.8.5.3') + num="5.8.5.3", +) RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.OnCluster', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.OnCluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying cluster on 
which to create the role policy\n' - 'using the `ON CLUSTER` clause in the `CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying cluster on which to create the role policy\n" + "using the `ON CLUSTER` clause in the `CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.5.4') + num="5.8.5.4", +) RQ_SRS_006_RBAC_RowPolicy_Create_On = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.On', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.On", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying table on which to create the role policy\n' - 'using the `ON` clause in the `CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying table on which to create the role policy\n" + "using the `ON` clause in the `CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.5.5') + num="5.8.5.5", +) RQ_SRS_006_RBAC_RowPolicy_Create_Access = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.Access', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.Access", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support allowing or restricting access to rows using the\n' - '`AS` clause in the `CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support allowing or restricting access to rows using the\n" + "`AS` clause in the `CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.5.6') + num="5.8.5.6", +) RQ_SRS_006_RBAC_RowPolicy_Create_Access_Permissive = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.Access.Permissive', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.Access.Permissive", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support allowing access to rows using the\n' - '`AS PERMISSIVE` clause in the `CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support allowing access to rows using the\n" + "`AS PERMISSIVE` clause in the `CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.5.7') + num="5.8.5.7", +) RQ_SRS_006_RBAC_RowPolicy_Create_Access_Restrictive = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.Access.Restrictive', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.Access.Restrictive", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support restricting access to rows using the\n' - '`AS RESTRICTIVE` clause in the `CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support restricting access to rows using the\n" + "`AS RESTRICTIVE` clause in the `CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.5.8') + num="5.8.5.8", +) RQ_SRS_006_RBAC_RowPolicy_Create_ForSelect = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.ForSelect', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.ForSelect", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying which rows are affected\n' - 'using the `FOR SELECT` clause in the `CREATE ROW POLICY` statement.\n' - 'REQUIRES CONDITION.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying which rows are affected\n" + "using the `FOR SELECT` clause in the `CREATE ROW POLICY` statement.\n" + "REQUIRES CONDITION.\n" + "\n" + ), link=None, level=4, - num='5.8.5.9') + num="5.8.5.9", +) 
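As an aside, a minimal sketch tying the `CREATE ROW POLICY` clauses above together, following the syntax given in RQ.SRS-006.RBAC.RowPolicy.Create.Syntax below; the policy, table, column, and role names are hypothetical and for illustration only:

```sql
-- hypothetical names: policy1, mydb.mytable, column a, role1
CREATE ROW POLICY IF NOT EXISTS policy1 ON mydb.mytable
    AS PERMISSIVE
    FOR SELECT
    USING a < 10
    TO role1
```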
RQ_SRS_006_RBAC_RowPolicy_Create_Condition = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.Condition', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.Condition", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying a condition that\n' - 'that can be any SQL expression which returns a boolean using the `USING`\n' - 'clause in the `CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying a condition that\n" + "that can be any SQL expression which returns a boolean using the `USING`\n" + "clause in the `CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.5.10') + num="5.8.5.10", +) RQ_SRS_006_RBAC_RowPolicy_Create_Assignment = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.Assignment", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning row policy to one or more users\n' - 'or roles using the `TO` clause in the `CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning row policy to one or more users\n" + "or roles using the `TO` clause in the `CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.5.11') + num="5.8.5.11", +) RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_None = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.None', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning row policy to no users or roles using\n' - 'the `TO NONE` clause in the `CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning row policy to no users or roles using\n" + "the `TO NONE` clause in the `CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.5.12') + num="5.8.5.12", +) RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_All = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.All', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support assigning row policy to all current users and roles\n' - 'using `TO ALL` clause in the `CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support assigning row policy to all current users and roles\n" + "using `TO ALL` clause in the `CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.5.13') + num="5.8.5.13", +) RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.AllExcept', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.AllExcept", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using\n' - 'the `ALL EXCEPT` clause in the `CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using\n" + "the `ALL EXCEPT` clause in the `CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.5.14') + num="5.8.5.14", +) RQ_SRS_006_RBAC_RowPolicy_Create_Syntax = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Create.Syntax', - version='1.0', + 
name="RQ.SRS-006.RBAC.RowPolicy.Create.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `CRETE ROW POLICY` statement\n' - '\n' - '``` sql\n' - 'CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name [ON CLUSTER cluster_name] ON [db.]table\n' - ' [AS {PERMISSIVE | RESTRICTIVE}]\n' - ' [FOR SELECT]\n' - ' [USING condition]\n' - ' [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `CRETE ROW POLICY` statement\n" + "\n" + "``` sql\n" + "CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name [ON CLUSTER cluster_name] ON [db.]table\n" + " [AS {PERMISSIVE | RESTRICTIVE}]\n" + " [FOR SELECT]\n" + " [USING condition]\n" + " [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.8.5.15') + num="5.8.5.15", +) RQ_SRS_006_RBAC_RowPolicy_Alter = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering row policy using the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering row policy using the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.1') + num="5.8.6.1", +) RQ_SRS_006_RBAC_RowPolicy_Alter_IfExists = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.IfExists', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.IfExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `IF EXISTS` clause in the `ALTER ROW POLICY` statement\n' - 'to skip raising an exception if a row policy does not exist.\n' - 'If the `IF EXISTS` clause is not specified then an exception SHALL be raised if\n' - 'a row policy does not exist.\n' - '\n' - ), + "[ClickHouse] SHALL support the `IF EXISTS` clause in the `ALTER ROW POLICY` statement\n" + "to skip raising an exception if a row policy does not exist.\n" + "If the `IF EXISTS` clause is not specified then an exception SHALL be raised if\n" + "a row policy does not exist.\n" + "\n" + ), link=None, level=4, - num='5.8.6.2') + num="5.8.6.2", +) RQ_SRS_006_RBAC_RowPolicy_Alter_ForSelect = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.ForSelect', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.ForSelect", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support modifying rows on which to apply the row policy\n' - 'using the `FOR SELECT` clause in the `ALTER ROW POLICY` statement.\n' - 'REQUIRES FUNCTION CONFIRMATION.\n' - '\n' - ), + "[ClickHouse] SHALL support modifying rows on which to apply the row policy\n" + "using the `FOR SELECT` clause in the `ALTER ROW POLICY` statement.\n" + "REQUIRES FUNCTION CONFIRMATION.\n" + "\n" + ), link=None, level=4, - num='5.8.6.3') + num="5.8.6.3", +) RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.OnCluster', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.OnCluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying cluster on which to alter the row policy\n' - 'using the `ON CLUSTER` clause in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying cluster on which to alter 
the row policy\n" + "using the `ON CLUSTER` clause in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.4') + num="5.8.6.4", +) RQ_SRS_006_RBAC_RowPolicy_Alter_On = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.On', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.On", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying table on which to alter the row policy\n' - 'using the `ON` clause in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying table on which to alter the row policy\n" + "using the `ON` clause in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.5') + num="5.8.6.5", +) RQ_SRS_006_RBAC_RowPolicy_Alter_Rename = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.Rename', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Rename", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support renaming the row policy using the `RENAME` clause\n' - 'in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support renaming the row policy using the `RENAME` clause\n" + "in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.6') + num="5.8.6.6", +) RQ_SRS_006_RBAC_RowPolicy_Alter_Access = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Access", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support altering access to rows using the\n' - '`AS` clause in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support altering access to rows using the\n" + "`AS` clause in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.7') + num="5.8.6.7", +) RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Permissive = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Permissive', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Permissive", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support permitting access to rows using the\n' - '`AS PERMISSIVE` clause in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support permitting access to rows using the\n" + "`AS PERMISSIVE` clause in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.8') + num="5.8.6.8", +) RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Restrictive = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Restrictive', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Restrictive", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support restricting access to rows using the\n' - '`AS RESTRICTIVE` clause in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support restricting access to rows using the\n" + "`AS RESTRICTIVE` clause in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.9') + num="5.8.6.9", +) RQ_SRS_006_RBAC_RowPolicy_Alter_Condition = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.Condition', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Condition", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support re-specifying the row policy condition\n' - 'using the `USING` clause 
in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support re-specifying the row policy condition\n" + "using the `USING` clause in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.10') + num="5.8.6.10", +) RQ_SRS_006_RBAC_RowPolicy_Alter_Condition_None = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.None', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support removing the row policy condition\n' - 'using the `USING NONE` clause in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support removing the row policy condition\n" + "using the `USING NONE` clause in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.11') + num="5.8.6.11", +) RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support reassigning row policy to one or more users\n' - 'or roles using the `TO` clause in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support reassigning row policy to one or more users\n" + "or roles using the `TO` clause in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.12') + num="5.8.6.12", +) RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_None = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.None', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support reassigning row policy to no users or roles using\n' - 'the `TO NONE` clause in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support reassigning row policy to no users or roles using\n" + "the `TO NONE` clause in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.13') + num="5.8.6.13", +) RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_All = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.All', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support reassigning row policy to all current users and roles\n' - 'using the `TO ALL` clause in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support reassigning row policy to all current users and roles\n" + "using the `TO ALL` clause in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.6.14') + num="5.8.6.14", +) RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_AllExcept = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.AllExcept', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.AllExcept", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using\n' - 'the `ALL EXCEPT` clause in the `ALTER ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support excluding assignment of row policy to one or more users or roles using\n" + "the `ALL EXCEPT` clause in the `ALTER ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - 
num='5.8.6.15') + num="5.8.6.15", +) RQ_SRS_006_RBAC_RowPolicy_Alter_Syntax = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Alter.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `ALTER ROW POLICY` statement\n' - '\n' - '``` sql\n' - 'ALTER [ROW] POLICY [IF EXISTS] name [ON CLUSTER cluster_name] ON [database.]table\n' - ' [RENAME TO new_name]\n' - ' [AS {PERMISSIVE | RESTRICTIVE}]\n' - ' [FOR SELECT]\n' - ' [USING {condition | NONE}][,...]\n' - ' [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `ALTER ROW POLICY` statement\n" + "\n" + "``` sql\n" + "ALTER [ROW] POLICY [IF EXISTS] name [ON CLUSTER cluster_name] ON [database.]table\n" + " [RENAME TO new_name]\n" + " [AS {PERMISSIVE | RESTRICTIVE}]\n" + " [FOR SELECT]\n" + " [USING {condition | NONE}][,...]\n" + " [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.8.6.16') + num="5.8.6.16", +) RQ_SRS_006_RBAC_RowPolicy_Drop = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Drop', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Drop", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support removing one or more row policies using the `DROP ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support removing one or more row policies using the `DROP ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.7.1') + num="5.8.7.1", +) RQ_SRS_006_RBAC_RowPolicy_Drop_IfExists = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Drop.IfExists', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Drop.IfExists", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using the `IF EXISTS` clause in the `DROP ROW POLICY` statement\n' - 'to skip raising an exception when the row policy does not exist.\n' - 'If the `IF EXISTS` clause is not specified then an exception SHALL be\n' - 'raised if the row policy does not exist.\n' - '\n' - ), + "[ClickHouse] SHALL support using the `IF EXISTS` clause in the `DROP ROW POLICY` statement\n" + "to skip raising an exception when the row policy does not exist.\n" + "If the `IF EXISTS` clause is not specified then an exception SHALL be\n" + "raised if the row policy does not exist.\n" + "\n" + ), link=None, level=4, - num='5.8.7.2') + num="5.8.7.2", +) RQ_SRS_006_RBAC_RowPolicy_Drop_On = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Drop.On', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Drop.On", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support removing row policy from one or more specified tables\n' - 'using the `ON` clause in the `DROP ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support removing row policy from one or more specified tables\n" + "using the `ON` clause in the `DROP ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.7.3') + num="5.8.7.3", +) RQ_SRS_006_RBAC_RowPolicy_Drop_OnCluster = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Drop.OnCluster', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Drop.OnCluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support removing row policy from specified cluster\n' - 'using the `ON 
CLUSTER` clause in the `DROP ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support removing row policy from specified cluster\n" + "using the `ON CLUSTER` clause in the `DROP ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.7.4') + num="5.8.7.4", +) RQ_SRS_006_RBAC_RowPolicy_Drop_Syntax = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.Drop.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.Drop.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `DROP ROW POLICY` statement.\n' - '\n' - '``` sql\n' - 'DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `DROP ROW POLICY` statement.\n" + "\n" + "``` sql\n" + "DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.8.7.5') + num="5.8.7.5", +) RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing the `CREATE ROW POLICY` statement used to create the row policy\n' - 'using the `SHOW CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support showing the `CREATE ROW POLICY` statement used to create the row policy\n" + "using the `SHOW CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.8.1') + num="5.8.8.1", +) RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_On = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.On', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.On", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing statement used to create row policy on specific table\n' - 'using the `ON` in the `SHOW CREATE ROW POLICY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support showing statement used to create row policy on specific table\n" + "using the `ON` in the `SHOW CREATE ROW POLICY` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.8.2') + num="5.8.8.2", +) RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_Syntax = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for `SHOW CREATE ROW POLICY`.\n' - '\n' - '``` sql\n' - 'SHOW CREATE [ROW] POLICY name ON [database.]table\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for `SHOW CREATE ROW POLICY`.\n" + "\n" + "``` sql\n" + "SHOW CREATE [ROW] POLICY name ON [database.]table\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.8.8.3') + num="5.8.8.3", +) RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing row policies using the `SHOW ROW POLICIES` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support showing row policies using the `SHOW ROW POLICIES` 
statement.\n" + "\n" + ), link=None, level=4, - num='5.8.8.4') + num="5.8.8.4", +) RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_On = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.On', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.On", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support showing row policies on a specific table\n' - 'using the `ON` clause in the `SHOW ROW POLICIES` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support showing row policies on a specific table\n" + "using the `ON` clause in the `SHOW ROW POLICIES` statement.\n" + "\n" + ), link=None, level=4, - num='5.8.8.5') + num="5.8.8.5", +) RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_Syntax = Requirement( - name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for `SHOW ROW POLICIES`.\n' - '\n' - '```sql\n' - 'SHOW [ROW] POLICIES [ON [database.]table]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for `SHOW ROW POLICIES`.\n" + "\n" + "```sql\n" + "SHOW [ROW] POLICIES [ON [database.]table]\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.8.8.6') + num="5.8.8.6", +) RQ_SRS_006_RBAC_SetDefaultRole = Requirement( - name='RQ.SRS-006.RBAC.SetDefaultRole', - version='1.0', + name="RQ.SRS-006.RBAC.SetDefaultRole", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting or changing granted roles to default for one or more\n' - 'users using `SET DEFAULT ROLE` statement which\n' - 'SHALL permanently change the default roles for the user or users if successful.\n' - '\n' - ), + "[ClickHouse] SHALL support setting or changing granted roles to default for one or more\n" + "users using `SET DEFAULT ROLE` statement which\n" + "SHALL permanently change the default roles for the user or users if successful.\n" + "\n" + ), link=None, level=3, - num='5.9.1') + num="5.9.1", +) RQ_SRS_006_RBAC_SetDefaultRole_CurrentUser = Requirement( - name='RQ.SRS-006.RBAC.SetDefaultRole.CurrentUser', - version='1.0', + name="RQ.SRS-006.RBAC.SetDefaultRole.CurrentUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting or changing granted roles to default for\n' - 'the current user using `CURRENT_USER` clause in the `SET DEFAULT ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support setting or changing granted roles to default for\n" + "the current user using `CURRENT_USER` clause in the `SET DEFAULT ROLE` statement.\n" + "\n" + ), link=None, level=3, - num='5.9.2') + num="5.9.2", +) RQ_SRS_006_RBAC_SetDefaultRole_All = Requirement( - name='RQ.SRS-006.RBAC.SetDefaultRole.All', - version='1.0', + name="RQ.SRS-006.RBAC.SetDefaultRole.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting or changing all granted roles to default\n' - 'for one or more users using `ALL` clause in the `SET DEFAULT ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support setting or changing all granted roles to default\n" + "for one or more users using `ALL` clause in the `SET DEFAULT ROLE` statement.\n" + "\n" + ), link=None, level=3, - num='5.9.3') + num="5.9.3", +) RQ_SRS_006_RBAC_SetDefaultRole_AllExcept = Requirement( - 
name='RQ.SRS-006.RBAC.SetDefaultRole.AllExcept', - version='1.0', + name="RQ.SRS-006.RBAC.SetDefaultRole.AllExcept", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support setting or changing all granted roles except those specified\n' - 'to default for one or more users using `ALL EXCEPT` clause in the `SET DEFAULT ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support setting or changing all granted roles except those specified\n" + "to default for one or more users using `ALL EXCEPT` clause in the `SET DEFAULT ROLE` statement.\n" + "\n" + ), link=None, level=3, - num='5.9.4') + num="5.9.4", +) RQ_SRS_006_RBAC_SetDefaultRole_None = Requirement( - name='RQ.SRS-006.RBAC.SetDefaultRole.None', - version='1.0', + name="RQ.SRS-006.RBAC.SetDefaultRole.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support removing all granted roles from default\n' - 'for one or more users using `NONE` clause in the `SET DEFAULT ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support removing all granted roles from default\n" + "for one or more users using `NONE` clause in the `SET DEFAULT ROLE` statement.\n" + "\n" + ), link=None, level=3, - num='5.9.5') + num="5.9.5", +) RQ_SRS_006_RBAC_SetDefaultRole_Syntax = Requirement( - name='RQ.SRS-006.RBAC.SetDefaultRole.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.SetDefaultRole.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `SET DEFAULT ROLE` statement.\n' - '\n' - '```sql\n' - 'SET DEFAULT ROLE\n' - ' {NONE | role [,...] | ALL | ALL EXCEPT role [,...]}\n' - ' TO {user|CURRENT_USER} [,...]\n' - '\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `SET DEFAULT ROLE` statement.\n" + "\n" + "```sql\n" + "SET DEFAULT ROLE\n" + " {NONE | role [,...] 
| ALL | ALL EXCEPT role [,...]}\n" + " TO {user|CURRENT_USER} [,...]\n" + "\n" + "```\n" + "\n" + ), link=None, level=3, - num='5.9.6') + num="5.9.6", +) RQ_SRS_006_RBAC_SetRole = Requirement( - name='RQ.SRS-006.RBAC.SetRole', - version='1.0', + name="RQ.SRS-006.RBAC.SetRole", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support activating role or roles for the current user\n' - 'using `SET ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support activating role or roles for the current user\n" + "using `SET ROLE` statement.\n" + "\n" + ), link=None, level=3, - num='5.10.1') + num="5.10.1", +) RQ_SRS_006_RBAC_SetRole_Default = Requirement( - name='RQ.SRS-006.RBAC.SetRole.Default', - version='1.0', + name="RQ.SRS-006.RBAC.SetRole.Default", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support activating default roles for the current user\n' - 'using `DEFAULT` clause in the `SET ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support activating default roles for the current user\n" + "using `DEFAULT` clause in the `SET ROLE` statement.\n" + "\n" + ), link=None, level=3, - num='5.10.2') + num="5.10.2", +) RQ_SRS_006_RBAC_SetRole_None = Requirement( - name='RQ.SRS-006.RBAC.SetRole.None', - version='1.0', + name="RQ.SRS-006.RBAC.SetRole.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support activating no roles for the current user\n' - 'using `NONE` clause in the `SET ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support activating no roles for the current user\n" + "using `NONE` clause in the `SET ROLE` statement.\n" + "\n" + ), link=None, level=3, - num='5.10.3') + num="5.10.3", +) RQ_SRS_006_RBAC_SetRole_All = Requirement( - name='RQ.SRS-006.RBAC.SetRole.All', - version='1.0', + name="RQ.SRS-006.RBAC.SetRole.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support activating all roles for the current user\n' - 'using `ALL` clause in the `SET ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support activating all roles for the current user\n" + "using `ALL` clause in the `SET ROLE` statement.\n" + "\n" + ), link=None, level=3, - num='5.10.4') + num="5.10.4", +) RQ_SRS_006_RBAC_SetRole_AllExcept = Requirement( - name='RQ.SRS-006.RBAC.SetRole.AllExcept', - version='1.0', + name="RQ.SRS-006.RBAC.SetRole.AllExcept", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support activating all roles except those specified\n' - 'for the current user using `ALL EXCEPT` clause in the `SET ROLE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support activating all roles except those specified\n" + "for the current user using `ALL EXCEPT` clause in the `SET ROLE` statement.\n" + "\n" + ), link=None, level=3, - num='5.10.5') + num="5.10.5", +) RQ_SRS_006_RBAC_SetRole_Syntax = Requirement( - name='RQ.SRS-006.RBAC.SetRole.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.SetRole.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '```sql\n' - 'SET ROLE {DEFAULT | NONE | role [,...] | ALL | ALL EXCEPT role [,...]}\n' - '```\n' - '\n' - ), + "```sql\n" + "SET ROLE {DEFAULT | NONE | role [,...] 
| ALL | ALL EXCEPT role [,...]}\n" + "```\n" + "\n" + ), link=None, level=3, - num='5.10.6') + num="5.10.6", +) RQ_SRS_006_RBAC_Grant_Privilege_To = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.To', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.To", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting privileges to one or more users or roles using `TO` clause\n' - 'in the `GRANT PRIVILEGE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting privileges to one or more users or roles using `TO` clause\n" + "in the `GRANT PRIVILEGE` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.1') + num="5.11.1", +) RQ_SRS_006_RBAC_Grant_Privilege_ToCurrentUser = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.ToCurrentUser', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.ToCurrentUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting privileges to current user using `TO CURRENT_USER` clause\n' - 'in the `GRANT PRIVILEGE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting privileges to current user using `TO CURRENT_USER` clause\n" + "in the `GRANT PRIVILEGE` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.2') + num="5.11.2", +) RQ_SRS_006_RBAC_Grant_Privilege_Select = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.Select', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.Select", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **select** privilege to one or more users or roles\n' - 'for a database or a table using the `GRANT SELECT` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **select** privilege to one or more users or roles\n" + "for a database or a table using the `GRANT SELECT` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.3') + num="5.11.3", +) RQ_SRS_006_RBAC_Grant_Privilege_Insert = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.Insert', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.Insert", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **insert** privilege to one or more users or roles\n' - 'for a database or a table using the `GRANT INSERT` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **insert** privilege to one or more users or roles\n" + "for a database or a table using the `GRANT INSERT` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.4') + num="5.11.4", +) RQ_SRS_006_RBAC_Grant_Privilege_Alter = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.Alter', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.Alter", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **alter** privilege to one or more users or roles\n' - 'for a database or a table using the `GRANT ALTER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **alter** privilege to one or more users or roles\n" + "for a database or a table using the `GRANT ALTER` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.5') + num="5.11.5", +) RQ_SRS_006_RBAC_Grant_Privilege_Create = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.Create', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.Create", + version="1.0", priority=None, group=None, type=None, uid=None, 
description=( - '[ClickHouse] SHALL support granting the **create** privilege to one or more users or roles\n' - 'using the `GRANT CREATE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **create** privilege to one or more users or roles\n" + "using the `GRANT CREATE` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.6') + num="5.11.6", +) RQ_SRS_006_RBAC_Grant_Privilege_Drop = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.Drop', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.Drop", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **drop** privilege to one or more users or roles\n' - 'using the `GRANT DROP` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **drop** privilege to one or more users or roles\n" + "using the `GRANT DROP` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.7') + num="5.11.7", +) RQ_SRS_006_RBAC_Grant_Privilege_Truncate = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.Truncate', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.Truncate", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **truncate** privilege to one or more users or roles\n' - 'for a database or a table using `GRANT TRUNCATE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **truncate** privilege to one or more users or roles\n" + "for a database or a table using `GRANT TRUNCATE` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.8') + num="5.11.8", +) RQ_SRS_006_RBAC_Grant_Privilege_Optimize = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.Optimize', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.Optimize", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **optimize** privilege to one or more users or roles\n' - 'for a database or a table using `GRANT OPTIMIZE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **optimize** privilege to one or more users or roles\n" + "for a database or a table using `GRANT OPTIMIZE` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.9') + num="5.11.9", +) RQ_SRS_006_RBAC_Grant_Privilege_Show = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.Show', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.Show", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **show** privilege to one or more users or roles\n' - 'for a database or a table using `GRANT SHOW` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **show** privilege to one or more users or roles\n" + "for a database or a table using `GRANT SHOW` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.10') + num="5.11.10", +) RQ_SRS_006_RBAC_Grant_Privilege_KillQuery = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.KillQuery', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.KillQuery", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **kill query** privilege to one or more users or roles\n' - 'for a database or a table using `GRANT KILL QUERY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **kill query** privilege to one or more users or roles\n" + "for a database or a table using `GRANT KILL QUERY` statement.\n" + "\n" + ), link=None, 
level=3, - num='5.11.11') + num="5.11.11", +) RQ_SRS_006_RBAC_Grant_Privilege_AccessManagement = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **access management** privileges to one or more users or roles\n' - 'for a database or a table using `GRANT ACCESS MANAGEMENT` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **access management** privileges to one or more users or roles\n" + "for a database or a table using `GRANT ACCESS MANAGEMENT` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.12') + num="5.11.12", +) RQ_SRS_006_RBAC_Grant_Privilege_System = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.System', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.System", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **system** privileges to one or more users or roles\n' - 'for a database or a table using `GRANT SYSTEM` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **system** privileges to one or more users or roles\n" + "for a database or a table using `GRANT SYSTEM` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.13') + num="5.11.13", +) RQ_SRS_006_RBAC_Grant_Privilege_Introspection = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.Introspection', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.Introspection", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **introspection** privileges to one or more users or roles\n' - 'for a database or a table using `GRANT INTROSPECTION` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **introspection** privileges to one or more users or roles\n" + "for a database or a table using `GRANT INTROSPECTION` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.14') + num="5.11.14", +) RQ_SRS_006_RBAC_Grant_Privilege_Sources = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.Sources', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.Sources", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **sources** privileges to one or more users or roles\n' - 'for a database or a table using `GRANT SOURCES` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **sources** privileges to one or more users or roles\n" + "for a database or a table using `GRANT SOURCES` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.15') + num="5.11.15", +) RQ_SRS_006_RBAC_Grant_Privilege_DictGet = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.DictGet', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.DictGet", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **dictGet** privilege to one or more users or roles\n' - 'for a database or a table using `GRANT dictGet` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **dictGet** privilege to one or more users or roles\n" + "for a database or a table using `GRANT dictGet` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.16') + num="5.11.16", +) RQ_SRS_006_RBAC_Grant_Privilege_None = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.None', - 
version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting no privileges to one or more users or roles\n' - 'for a database or a table using `GRANT NONE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting no privileges to one or more users or roles\n" + "for a database or a table using `GRANT NONE` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.17') + num="5.11.17", +) RQ_SRS_006_RBAC_Grant_Privilege_All = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.All', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **all** privileges to one or more users or roles\n' - 'using the `GRANT ALL` or `GRANT ALL PRIVILEGES` statements.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **all** privileges to one or more users or roles\n" + "using the `GRANT ALL` or `GRANT ALL PRIVILEGES` statements.\n" + "\n" + ), link=None, level=3, - num='5.11.18') + num="5.11.18", +) RQ_SRS_006_RBAC_Grant_Privilege_GrantOption = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.GrantOption', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.GrantOption", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the **grant option** privilege to one or more users or roles\n' - 'for a database or a table using the `WITH GRANT OPTION` clause in the `GRANT` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the **grant option** privilege to one or more users or roles\n" + "for a database or a table using the `WITH GRANT OPTION` clause in the `GRANT` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.19') + num="5.11.19", +) RQ_SRS_006_RBAC_Grant_Privilege_On = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.On', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.On", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `ON` clause in the `GRANT` privilege statement\n' - 'which SHALL allow to specify one or more tables to which the privilege SHALL\n' - 'be granted using the following patterns\n' - '\n' - '* `*.*` any table in any database\n' - '* `database.*` any table in the specified database\n' - '* `database.table` specific table in the specified database\n' - '* `*` any table in the current database\n' - '* `table` specific table in the current database\n' - '\n' - ), + "[ClickHouse] SHALL support the `ON` clause in the `GRANT` privilege statement\n" + "which SHALL allow to specify one or more tables to which the privilege SHALL\n" + "be granted using the following patterns\n" + "\n" + "* `*.*` any table in any database\n" + "* `database.*` any table in the specified database\n" + "* `database.table` specific table in the specified database\n" + "* `*` any table in the current database\n" + "* `table` specific table in the current database\n" + "\n" + ), link=None, level=3, - num='5.11.20') + num="5.11.20", +) RQ_SRS_006_RBAC_Grant_Privilege_PrivilegeColumns = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting the privilege **some_privilege** to one or more 
users or roles\n' - 'for a database or a table using the `GRANT some_privilege(column)` statement for one column.\n' - 'Multiple columns will be supported with `GRANT some_privilege(column1, column2...)` statement.\n' - 'The privileges will be granted for only the specified columns.\n' - '\n' - ), + "[ClickHouse] SHALL support granting the privilege **some_privilege** to one or more users or roles\n" + "for a database or a table using the `GRANT some_privilege(column)` statement for one column.\n" + "Multiple columns will be supported with `GRANT some_privilege(column1, column2...)` statement.\n" + "The privileges will be granted for only the specified columns.\n" + "\n" + ), link=None, level=3, - num='5.11.21') + num="5.11.21", +) RQ_SRS_006_RBAC_Grant_Privilege_OnCluster = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.OnCluster', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.OnCluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying cluster on which to grant privileges using the `ON CLUSTER`\n' - 'clause in the `GRANT PRIVILEGE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying cluster on which to grant privileges using the `ON CLUSTER`\n" + "clause in the `GRANT PRIVILEGE` statement.\n" + "\n" + ), link=None, level=3, - num='5.11.22') + num="5.11.22", +) RQ_SRS_006_RBAC_Grant_Privilege_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Grant.Privilege.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Privilege.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `GRANT` statement that\n' - 'grants explicit privileges to a user or a role.\n' - '\n' - '```sql\n' - 'GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...]\n' - ' ON {db.table|db.*|*.*|table|*}\n' - ' TO {user | role | CURRENT_USER} [,...]\n' - ' [WITH GRANT OPTION]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `GRANT` statement that\n" + "grants explicit privileges to a user or a role.\n" + "\n" + "```sql\n" + "GRANT [ON CLUSTER cluster_name] privilege[(column_name [,...])] [,...]\n" + " ON {db.table|db.*|*.*|table|*}\n" + " TO {user | role | CURRENT_USER} [,...]\n" + " [WITH GRANT OPTION]\n" + "```\n" + "\n" + ), link=None, level=3, - num='5.11.23') + num="5.11.23", +) RQ_SRS_006_RBAC_Revoke_Privilege_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking privileges to one or more users or roles\n' - 'for a database or a table on some specific cluster using the `REVOKE ON CLUSTER cluster_name` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking privileges to one or more users or roles\n" + "for a database or a table on some specific cluster using the `REVOKE ON CLUSTER cluster_name` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.1') + num="5.12.1", +) RQ_SRS_006_RBAC_Revoke_Privilege_Select = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Select', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Select", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **select** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE SELECT` 
statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **select** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE SELECT` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.2') + num="5.12.2", +) RQ_SRS_006_RBAC_Revoke_Privilege_Insert = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Insert', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Insert", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **insert** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE INSERT` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **insert** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE INSERT` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.3') + num="5.12.3", +) RQ_SRS_006_RBAC_Revoke_Privilege_Alter = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Alter', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Alter", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **alter** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE ALTER` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **alter** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE ALTER` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.4') + num="5.12.4", +) RQ_SRS_006_RBAC_Revoke_Privilege_Create = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Create', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Create", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **create** privilege to one or more users or roles\n' - 'using the `REVOKE CREATE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **create** privilege to one or more users or roles\n" + "using the `REVOKE CREATE` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.5') + num="5.12.5", +) RQ_SRS_006_RBAC_Revoke_Privilege_Drop = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Drop', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Drop", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **drop** privilege to one or more users or roles\n' - 'using the `REVOKE DROP` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **drop** privilege to one or more users or roles\n" + "using the `REVOKE DROP` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.6') + num="5.12.6", +) RQ_SRS_006_RBAC_Revoke_Privilege_Truncate = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Truncate', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Truncate", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **truncate** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE TRUNCATE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **truncate** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE TRUNCATE` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.7') + num="5.12.7", +) RQ_SRS_006_RBAC_Revoke_Privilege_Optimize = Requirement( - 
name='RQ.SRS-006.RBAC.Revoke.Privilege.Optimize', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Optimize", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **optimize** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE OPTIMIZE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **optimize** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE OPTIMIZE` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.8') + num="5.12.8", +) RQ_SRS_006_RBAC_Revoke_Privilege_Show = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Show', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Show", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **show** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE SHOW` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **show** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE SHOW` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.9') + num="5.12.9", +) RQ_SRS_006_RBAC_Revoke_Privilege_KillQuery = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **kill query** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE KILL QUERY` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **kill query** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE KILL QUERY` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.10') + num="5.12.10", +) RQ_SRS_006_RBAC_Revoke_Privilege_AccessManagement = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **access management** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE ACCESS MANAGEMENT` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **access management** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE ACCESS MANAGEMENT` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.11') + num="5.12.11", +) RQ_SRS_006_RBAC_Revoke_Privilege_System = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.System', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.System", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **system** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE SYSTEM` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **system** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE SYSTEM` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.12') + num="5.12.12", +) RQ_SRS_006_RBAC_Revoke_Privilege_Introspection = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Introspection', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Introspection", + 
version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **introspection** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE INTROSPECTION` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **introspection** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE INTROSPECTION` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.13') + num="5.12.13", +) RQ_SRS_006_RBAC_Revoke_Privilege_Sources = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Sources', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Sources", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **sources** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE SOURCES` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **sources** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE SOURCES` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.14') + num="5.12.14", +) RQ_SRS_006_RBAC_Revoke_Privilege_DictGet = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.DictGet', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.DictGet", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the **dictGet** privilege to one or more users or roles\n' - 'for a database or a table using the `REVOKE dictGet` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the **dictGet** privilege to one or more users or roles\n" + "for a database or a table using the `REVOKE dictGet` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.15') + num="5.12.15", +) RQ_SRS_006_RBAC_Revoke_Privilege_PrivilegeColumns = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.PrivilegeColumns', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.PrivilegeColumns", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking the privilege **some_privilege** to one or more users or roles\n' - 'for a database or a table using the `REVOKE some_privilege(column)` statement for one column.\n' - 'Multiple columns will be supported with `REVOKE some_privilege(column1, column2...)` statement.\n' - 'The privileges will be revoked for only the specified columns.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking the privilege **some_privilege** to one or more users or roles\n" + "for a database or a table using the `REVOKE some_privilege(column)` statement for one column.\n" + "Multiple columns will be supported with `REVOKE some_privilege(column1, column2...)` statement.\n" + "The privileges will be revoked for only the specified columns.\n" + "\n" + ), link=None, level=3, - num='5.12.16') + num="5.12.16", +) RQ_SRS_006_RBAC_Revoke_Privilege_Multiple = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Multiple', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Multiple", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking MULTIPLE **privileges** to one or more users or roles\n' - 'for a database or a table using the `REVOKE privilege1, privilege2...` statement.\n' - '**privileges** refers to any set of Clickhouse defined privilege, whose hierarchy includes\n' - 'SELECT, INSERT, ALTER, CREATE, DROP, 
TRUNCATE, OPTIMIZE, SHOW, KILL QUERY, ACCESS MANAGEMENT,\n' - 'SYSTEM, INTROSPECTION, SOURCES, dictGet and all of their sub-privileges.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking MULTIPLE **privileges** to one or more users or roles\n" + "for a database or a table using the `REVOKE privilege1, privilege2...` statement.\n" + "**privileges** refers to any set of Clickhouse defined privilege, whose hierarchy includes\n" + "SELECT, INSERT, ALTER, CREATE, DROP, TRUNCATE, OPTIMIZE, SHOW, KILL QUERY, ACCESS MANAGEMENT,\n" + "SYSTEM, INTROSPECTION, SOURCES, dictGet and all of their sub-privileges.\n" + "\n" + ), link=None, level=3, - num='5.12.17') + num="5.12.17", +) RQ_SRS_006_RBAC_Revoke_Privilege_All = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.All', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **all** privileges to one or more users or roles\n' - 'for a database or a table using the `REVOKE ALL` or `REVOKE ALL PRIVILEGES` statements.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **all** privileges to one or more users or roles\n" + "for a database or a table using the `REVOKE ALL` or `REVOKE ALL PRIVILEGES` statements.\n" + "\n" + ), link=None, level=3, - num='5.12.18') + num="5.12.18", +) RQ_SRS_006_RBAC_Revoke_Privilege_None = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.None', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **no** privileges to one or more users or roles\n' - 'for a database or a table using the `REVOKE NONE` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **no** privileges to one or more users or roles\n" + "for a database or a table using the `REVOKE NONE` statement.\n" + "\n" + ), link=None, level=3, - num='5.12.19') + num="5.12.19", +) RQ_SRS_006_RBAC_Revoke_Privilege_On = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.On', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.On", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `ON` clause in the `REVOKE` privilege statement\n' - 'which SHALL allow to specify one or more tables to which the privilege SHALL\n' - 'be revoked using the following patterns\n' - '\n' - '* `db.table` specific table in the specified database\n' - '* `db.*` any table in the specified database\n' - '* `*.*` any table in any database\n' - '* `table` specific table in the current database\n' - '* `*` any table in the current database\n' - '\n' - ), + "[ClickHouse] SHALL support the `ON` clause in the `REVOKE` privilege statement\n" + "which SHALL allow to specify one or more tables to which the privilege SHALL\n" + "be revoked using the following patterns\n" + "\n" + "* `db.table` specific table in the specified database\n" + "* `db.*` any table in the specified database\n" + "* `*.*` any table in any database\n" + "* `table` specific table in the current database\n" + "* `*` any table in the current database\n" + "\n" + ), link=None, level=3, - num='5.12.20') + num="5.12.20", +) RQ_SRS_006_RBAC_Revoke_Privilege_From = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.From', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.From", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support 
the `FROM` clause in the `REVOKE` privilege statement\n' - 'which SHALL allow to specify one or more users to which the privilege SHALL\n' - 'be revoked using the following patterns\n' - '\n' - '* `{user | CURRENT_USER} [,...]` some combination of users by name, which may include the current user\n' - '* `ALL` all users\n' - '* `ALL EXCEPT {user | CURRENT_USER} [,...]` the logical reverse of the first pattern\n' - '\n' - ), + "[ClickHouse] SHALL support the `FROM` clause in the `REVOKE` privilege statement\n" + "which SHALL allow to specify one or more users to which the privilege SHALL\n" + "be revoked using the following patterns\n" + "\n" + "* `{user | CURRENT_USER} [,...]` some combination of users by name, which may include the current user\n" + "* `ALL` all users\n" + "* `ALL EXCEPT {user | CURRENT_USER} [,...]` the logical reverse of the first pattern\n" + "\n" + ), link=None, level=3, - num='5.12.21') + num="5.12.21", +) RQ_SRS_006_RBAC_Revoke_Privilege_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Privilege.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Privilege.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `REVOKE` statement that\n' - 'revokes explicit privileges of a user or a role.\n' - '\n' - '```sql\n' - 'REVOKE [ON CLUSTER cluster_name] privilege\n' - ' [(column_name [,...])] [,...]\n' - ' ON {db.table|db.*|*.*|table|*}\n' - ' FROM {user | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user | CURRENT_USER} [,...]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `REVOKE` statement that\n" + "revokes explicit privileges of a user or a role.\n" + "\n" + "```sql\n" + "REVOKE [ON CLUSTER cluster_name] privilege\n" + " [(column_name [,...])] [,...]\n" + " ON {db.table|db.*|*.*|table|*}\n" + " FROM {user | CURRENT_USER} [,...] 
| ALL | ALL EXCEPT {user | CURRENT_USER} [,...]\n" + "```\n" + "\n" + ), link=None, level=3, - num='5.12.22') + num="5.12.22", +) RQ_SRS_006_RBAC_Grant_Role = Requirement( - name='RQ.SRS-006.RBAC.Grant.Role', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Role", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting one or more roles to\n' - 'one or more users or roles using the `GRANT` role statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting one or more roles to\n" + "one or more users or roles using the `GRANT` role statement.\n" + "\n" + ), link=None, level=3, - num='5.13.1') + num="5.13.1", +) RQ_SRS_006_RBAC_Grant_Role_CurrentUser = Requirement( - name='RQ.SRS-006.RBAC.Grant.Role.CurrentUser', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Role.CurrentUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting one or more roles to current user using\n' - '`TO CURRENT_USER` clause in the `GRANT` role statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting one or more roles to current user using\n" + "`TO CURRENT_USER` clause in the `GRANT` role statement.\n" + "\n" + ), link=None, level=3, - num='5.13.2') + num="5.13.2", +) RQ_SRS_006_RBAC_Grant_Role_AdminOption = Requirement( - name='RQ.SRS-006.RBAC.Grant.Role.AdminOption', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Role.AdminOption", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting `admin option` privilege\n' - 'to one or more users or roles using the `WITH ADMIN OPTION` clause\n' - 'in the `GRANT` role statement.\n' - '\n' - ), + "[ClickHouse] SHALL support granting `admin option` privilege\n" + "to one or more users or roles using the `WITH ADMIN OPTION` clause\n" + "in the `GRANT` role statement.\n" + "\n" + ), link=None, level=3, - num='5.13.3') + num="5.13.3", +) RQ_SRS_006_RBAC_Grant_Role_OnCluster = Requirement( - name='RQ.SRS-006.RBAC.Grant.Role.OnCluster', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Role.OnCluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support specifying cluster on which the user is to be granted one or more roles\n' - 'using `ON CLUSTER` clause in the `GRANT` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support specifying cluster on which the user is to be granted one or more roles\n" + "using `ON CLUSTER` clause in the `GRANT` statement.\n" + "\n" + ), link=None, level=3, - num='5.13.4') + num="5.13.4", +) RQ_SRS_006_RBAC_Grant_Role_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Grant.Role.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Grant.Role.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for `GRANT` role statement\n' - '\n' - '``` sql\n' - 'GRANT\n' - ' ON CLUSTER cluster_name\n' - ' role [, role ...]\n' - ' TO {user | role | CURRENT_USER} [,...]\n' - ' [WITH ADMIN OPTION]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for `GRANT` role statement\n" + "\n" + "``` sql\n" + "GRANT\n" + " ON CLUSTER cluster_name\n" + " role [, role ...]\n" + " TO {user | role | CURRENT_USER} [,...]\n" + " [WITH ADMIN OPTION]\n" + "```\n" + "\n" + ), link=None, level=3, - num='5.13.5') + num="5.13.5", +) RQ_SRS_006_RBAC_Revoke_Role = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Role', - version='1.0', + 
name="RQ.SRS-006.RBAC.Revoke.Role", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking one or more roles from\n' - 'one or more users or roles using the `REVOKE` role statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking one or more roles from\n" + "one or more users or roles using the `REVOKE` role statement.\n" + "\n" + ), link=None, level=3, - num='5.14.1') + num="5.14.1", +) RQ_SRS_006_RBAC_Revoke_Role_Keywords = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Role.Keywords', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Role.Keywords", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking one or more roles from\n' - 'special groupings of one or more users or roles with the `ALL`, `ALL EXCEPT`,\n' - 'and `CURRENT_USER` keywords.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking one or more roles from\n" + "special groupings of one or more users or roles with the `ALL`, `ALL EXCEPT`,\n" + "and `CURRENT_USER` keywords.\n" + "\n" + ), link=None, level=3, - num='5.14.2') + num="5.14.2", +) RQ_SRS_006_RBAC_Revoke_Role_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Role.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Role.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking one or more roles from\n' - 'one or more users or roles from one or more clusters\n' - 'using the `REVOKE ON CLUSTER` role statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking one or more roles from\n" + "one or more users or roles from one or more clusters\n" + "using the `REVOKE ON CLUSTER` role statement.\n" + "\n" + ), link=None, level=3, - num='5.14.3') + num="5.14.3", +) RQ_SRS_006_RBAC_Revoke_AdminOption = Requirement( - name='RQ.SRS-006.RBAC.Revoke.AdminOption', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.AdminOption", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking `admin option` privilege\n' - 'in one or more users or roles using the `ADMIN OPTION FOR` clause\n' - 'in the `REVOKE` role statement.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking `admin option` privilege\n" + "in one or more users or roles using the `ADMIN OPTION FOR` clause\n" + "in the `REVOKE` role statement.\n" + "\n" + ), link=None, level=3, - num='5.14.4') + num="5.14.4", +) RQ_SRS_006_RBAC_Revoke_Role_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Revoke.Role.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Revoke.Role.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the following syntax for the `REVOKE` role statement\n' - '\n' - '```sql\n' - 'REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR]\n' - ' role [,...]\n' - ' FROM {user | role | CURRENT_USER} [,...] | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...]\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support the following syntax for the `REVOKE` role statement\n" + "\n" + "```sql\n" + "REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR]\n" + " role [,...]\n" + " FROM {user | role | CURRENT_USER} [,...] 
| ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...]\n" + "```\n" + "\n" + ), link=None, level=3, - num='5.14.5') + num="5.14.5", +) RQ_SRS_006_RBAC_Show_Grants = Requirement( - name='RQ.SRS-006.RBAC.Show.Grants', - version='1.0', + name="RQ.SRS-006.RBAC.Show.Grants", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support listing all the privileges granted to current user and role\n' - 'using the `SHOW GRANTS` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support listing all the privileges granted to current user and role\n" + "using the `SHOW GRANTS` statement.\n" + "\n" + ), link=None, level=3, - num='5.15.1') + num="5.15.1", +) RQ_SRS_006_RBAC_Show_Grants_For = Requirement( - name='RQ.SRS-006.RBAC.Show.Grants.For', - version='1.0', + name="RQ.SRS-006.RBAC.Show.Grants.For", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support listing all the privileges granted to a user or a role\n' - 'using the `FOR` clause in the `SHOW GRANTS` statement.\n' - '\n' - ), + "[ClickHouse] SHALL support listing all the privileges granted to a user or a role\n" + "using the `FOR` clause in the `SHOW GRANTS` statement.\n" + "\n" + ), link=None, level=3, - num='5.15.2') + num="5.15.2", +) RQ_SRS_006_RBAC_Show_Grants_Syntax = Requirement( - name='RQ.SRS-006.RBAC.Show.Grants.Syntax', - version='1.0', + name="RQ.SRS-006.RBAC.Show.Grants.Syntax", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[Clickhouse] SHALL use the following syntax for the `SHOW GRANTS` statement\n' - '\n' - '``` sql\n' - 'SHOW GRANTS [FOR user_or_role]\n' - '```\n' - '\n' - ), + "[Clickhouse] SHALL use the following syntax for the `SHOW GRANTS` statement\n" + "\n" + "``` sql\n" + "SHOW GRANTS [FOR user_or_role]\n" + "```\n" + "\n" + ), link=None, level=3, - num='5.15.3') + num="5.15.3", +) RQ_SRS_006_RBAC_Table_PublicTables = Requirement( - name='RQ.SRS-006.RBAC.Table.PublicTables', - version='1.0', + name="RQ.SRS-006.RBAC.Table.PublicTables", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support that a user without any privileges will be able to access the following tables\n' - '\n' - '* system.one\n' - '* system.numbers\n' - '* system.contributors\n' - '* system.functions\n' - '\n' - ), + "[ClickHouse] SHALL support that a user without any privileges will be able to access the following tables\n" + "\n" + "* system.one\n" + "* system.numbers\n" + "* system.contributors\n" + "* system.functions\n" + "\n" + ), link=None, level=3, - num='5.16.1') + num="5.16.1", +) RQ_SRS_006_RBAC_Table_SensitiveTables = Requirement( - name='RQ.SRS-006.RBAC.Table.SensitiveTables', - version='1.0', + name="RQ.SRS-006.RBAC.Table.SensitiveTables", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL not support a user with no privileges accessing the following `system` tables:\n' - '\n' - '* processes\n' - '* query_log\n' - '* query_thread_log\n' - '* query_views_log\n' - '* clusters\n' - '* events\n' - '* graphite_retentions\n' - '* stack_trace\n' - '* trace_log\n' - '* user_directories\n' - '* zookeeper\n' - '* macros\n' - '\n' - ), + "[ClickHouse] SHALL not support a user with no privileges accessing the following `system` tables:\n" + "\n" + "* processes\n" + "* query_log\n" + "* query_thread_log\n" + "* query_views_log\n" + "* clusters\n" + "* events\n" + "* graphite_retentions\n" + "* stack_trace\n" 
+ "* trace_log\n" + "* user_directories\n" + "* zookeeper\n" + "* macros\n" + "\n" + ), link=None, level=3, - num='5.16.2') + num="5.16.2", +) RQ_SRS_006_RBAC_DistributedTable_Create = Requirement( - name='RQ.SRS-006.RBAC.DistributedTable.Create', - version='1.0', + name="RQ.SRS-006.RBAC.DistributedTable.Create", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully `CREATE` a distributed table if and only if\n' - 'the user has **create table** privilege on the table and **remote** privilege on *.*\n' - '\n' - ), + "[ClickHouse] SHALL successfully `CREATE` a distributed table if and only if\n" + "the user has **create table** privilege on the table and **remote** privilege on *.*\n" + "\n" + ), link=None, level=3, - num='5.17.1') + num="5.17.1", +) RQ_SRS_006_RBAC_DistributedTable_Select = Requirement( - name='RQ.SRS-006.RBAC.DistributedTable.Select', - version='1.0', + name="RQ.SRS-006.RBAC.DistributedTable.Select", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully `SELECT` from a distributed table if and only if\n' - 'the user has **select** privilege on the table and on the remote table specified in the `CREATE` query of the distributed table.\n' - '\n' - 'Does not require **select** privilege for the remote table if the remote table does not exist on the same server as the user.\n' - '\n' - ), + "[ClickHouse] SHALL successfully `SELECT` from a distributed table if and only if\n" + "the user has **select** privilege on the table and on the remote table specified in the `CREATE` query of the distributed table.\n" + "\n" + "Does not require **select** privilege for the remote table if the remote table does not exist on the same server as the user.\n" + "\n" + ), link=None, level=3, - num='5.17.2') + num="5.17.2", +) RQ_SRS_006_RBAC_DistributedTable_Insert = Requirement( - name='RQ.SRS-006.RBAC.DistributedTable.Insert', - version='1.0', + name="RQ.SRS-006.RBAC.DistributedTable.Insert", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully `INSERT` into a distributed table if and only if\n' - 'the user has **insert** privilege on the table and on the remote table specified in the `CREATE` query of the distributed table.\n' - '\n' - 'Does not require **insert** privilege for the remote table if the remote table does not exist on the same server as the user,\n' - 'insert executes into the remote table on a different server.\n' - '\n' - ), + "[ClickHouse] SHALL successfully `INSERT` into a distributed table if and only if\n" + "the user has **insert** privilege on the table and on the remote table specified in the `CREATE` query of the distributed table.\n" + "\n" + "Does not require **insert** privilege for the remote table if the remote table does not exist on the same server as the user,\n" + "insert executes into the remote table on a different server.\n" + "\n" + ), link=None, level=3, - num='5.17.3') + num="5.17.3", +) RQ_SRS_006_RBAC_DistributedTable_SpecialTables = Requirement( - name='RQ.SRS-006.RBAC.DistributedTable.SpecialTables', - version='1.0', + name="RQ.SRS-006.RBAC.DistributedTable.SpecialTables", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute a query using a distributed table that uses one of the special tables if and only if\n' - 'the user has the necessary privileges to interact with that special table, either granted 
directly or through a role.\n' - 'Special tables include:\n' - '* materialized view\n' - '* distributed table\n' - '* source table of a materialized view\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute a query using a distributed table that uses one of the special tables if and only if\n" + "the user has the necessary privileges to interact with that special table, either granted directly or through a role.\n" + "Special tables include:\n" + "* materialized view\n" + "* distributed table\n" + "* source table of a materialized view\n" + "\n" + ), link=None, level=3, - num='5.17.4') + num="5.17.4", +) RQ_SRS_006_RBAC_DistributedTable_LocalUser = Requirement( - name='RQ.SRS-006.RBAC.DistributedTable.LocalUser', - version='1.0', + name="RQ.SRS-006.RBAC.DistributedTable.LocalUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute a query using a distributed table from\n' - 'a user present locally, but not remotely.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute a query using a distributed table from\n" + "a user present locally, but not remotely.\n" + "\n" + ), link=None, level=3, - num='5.17.5') + num="5.17.5", +) RQ_SRS_006_RBAC_DistributedTable_SameUserDifferentNodesDifferentPrivileges = Requirement( - name='RQ.SRS-006.RBAC.DistributedTable.SameUserDifferentNodesDifferentPrivileges', - version='1.0', + name="RQ.SRS-006.RBAC.DistributedTable.SameUserDifferentNodesDifferentPrivileges", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute a query using a distributed table by a user that exists on multiple nodes\n' - 'if and only if the user has the required privileges on the node the query is being executed from.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute a query using a distributed table by a user that exists on multiple nodes\n" + "if and only if the user has the required privileges on the node the query is being executed from.\n" + "\n" + ), link=None, level=3, - num='5.17.6') + num="5.17.6", +) RQ_SRS_006_RBAC_View = Requirement( - name='RQ.SRS-006.RBAC.View', - version='1.0', + name="RQ.SRS-006.RBAC.View", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to **create**, **select** and **drop**\n' - 'privileges for a view for users or roles.\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to **create**, **select** and **drop**\n" + "privileges for a view for users or roles.\n" + "\n" + ), link=None, level=4, - num='5.18.1.1') + num="5.18.1.1", +) RQ_SRS_006_RBAC_View_Create = Requirement( - name='RQ.SRS-006.RBAC.View.Create', - version='1.0', + name="RQ.SRS-006.RBAC.View.Create", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully execute a `CREATE VIEW` command if and only if\n' - 'the user has **create view** privilege either explicitly or through roles.\n' - '\n' - 'If the stored query includes one or more source tables, the user must have **select** privilege\n' - 'on all the source tables either explicitly or through a role.\n' - 'For example,\n' - '```sql\n' - 'CREATE VIEW view AS SELECT * FROM source_table\n' - 'CREATE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n' - 'CREATE VIEW view AS SELECT * FROM table0 JOIN table1 USING column\n' - 'CREATE VIEW view AS 
SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n' - 'CREATE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n' - 'CREATE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL only successfully execute a `CREATE VIEW` command if and only if\n" + "the user has **create view** privilege either explicitly or through roles.\n" + "\n" + "If the stored query includes one or more source tables, the user must have **select** privilege\n" + "on all the source tables either explicitly or through a role.\n" + "For example,\n" + "```sql\n" + "CREATE VIEW view AS SELECT * FROM source_table\n" + "CREATE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n" + "CREATE VIEW view AS SELECT * FROM table0 JOIN table1 USING column\n" + "CREATE VIEW view AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n" + "CREATE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n" + "CREATE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.18.1.2') + num="5.18.1.2", +) RQ_SRS_006_RBAC_View_Select = Requirement( - name='RQ.SRS-006.RBAC.View.Select', - version='1.0', + name="RQ.SRS-006.RBAC.View.Select", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully `SELECT` from a view if and only if\n' - 'the user has **select** privilege for that view either explicitly or through a role.\n' - '\n' - 'If the stored query includes one or more source tables, the user must have **select** privilege\n' - 'on all the source tables either explicitly or through a role.\n' - 'For example,\n' - '```sql\n' - 'CREATE VIEW view AS SELECT * FROM source_table\n' - 'CREATE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n' - 'CREATE VIEW view AS SELECT * FROM table0 JOIN table1 USING column\n' - 'CREATE VIEW view AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n' - 'CREATE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n' - 'CREATE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n' - '\n' - 'SELECT * FROM view\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL only successfully `SELECT` from a view if and only if\n" + "the user has **select** privilege for that view either explicitly or through a role.\n" + "\n" + "If the stored query includes one or more source tables, the user must have **select** privilege\n" + "on all the source tables either explicitly or through a role.\n" + "For example,\n" + "```sql\n" + "CREATE VIEW view AS SELECT * FROM source_table\n" + "CREATE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n" + "CREATE VIEW view AS SELECT * FROM table0 JOIN table1 USING 
column\n" + "CREATE VIEW view AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n" + "CREATE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n" + "CREATE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n" + "\n" + "SELECT * FROM view\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.18.1.3') + num="5.18.1.3", +) RQ_SRS_006_RBAC_View_Drop = Requirement( - name='RQ.SRS-006.RBAC.View.Drop', - version='1.0', + name="RQ.SRS-006.RBAC.View.Drop", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully execute a `DROP VIEW` command if and only if\n' - 'the user has **drop view** privilege on that view either explicitly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL only successfully execute a `DROP VIEW` command if and only if\n" + "the user has **drop view** privilege on that view either explicitly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.18.1.4') + num="5.18.1.4", +) RQ_SRS_006_RBAC_MaterializedView = Requirement( - name='RQ.SRS-006.RBAC.MaterializedView', - version='1.0', + name="RQ.SRS-006.RBAC.MaterializedView", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to **create**, **select**, **alter** and **drop**\n' - 'privileges for a materialized view for users or roles.\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to **create**, **select**, **alter** and **drop**\n" + "privileges for a materialized view for users or roles.\n" + "\n" + ), link=None, level=4, - num='5.18.2.1') + num="5.18.2.1", +) RQ_SRS_006_RBAC_MaterializedView_Create = Requirement( - name='RQ.SRS-006.RBAC.MaterializedView.Create', - version='1.0', + name="RQ.SRS-006.RBAC.MaterializedView.Create", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully execute a `CREATE MATERIALIZED VIEW` command if and only if\n' - 'the user has **create view** privilege either explicitly or through roles.\n' - '\n' - 'If `POPULATE` is specified, the user must have `INSERT` privilege on the view,\n' - 'either explicitly or through roles.\n' - 'For example,\n' - '```sql\n' - 'CREATE MATERIALIZED VIEW view ENGINE = Memory POPULATE AS SELECT * FROM source_table\n' - '```\n' - '\n' - 'If the stored query includes one or more source tables, the user must have **select** privilege\n' - 'on all the source tables either explicitly or through a role.\n' - 'For example,\n' - '```sql\n' - 'CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM source_table\n' - 'CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n' - 'CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 JOIN table1 USING column\n' - 'CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n' - 'CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n' - 'CREATE MATERIALIZED VIEW view0 ENGINE = Memory AS SELECT 
column FROM view1 UNION ALL SELECT column FROM view2\n' - '```\n' - '\n' - 'If the materialized view has a target table explicitly declared in the `TO` clause, the user must have\n' - '**insert** and **select** privilege on the target table.\n' - 'For example,\n' - '```sql\n' - 'CREATE MATERIALIZED VIEW view TO target_table AS SELECT * FROM source_table\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL only successfully execute a `CREATE MATERIALIZED VIEW` command if and only if\n" + "the user has **create view** privilege either explicitly or through roles.\n" + "\n" + "If `POPULATE` is specified, the user must have `INSERT` privilege on the view,\n" + "either explicitly or through roles.\n" + "For example,\n" + "```sql\n" + "CREATE MATERIALIZED VIEW view ENGINE = Memory POPULATE AS SELECT * FROM source_table\n" + "```\n" + "\n" + "If the stored query includes one or more source tables, the user must have **select** privilege\n" + "on all the source tables either explicitly or through a role.\n" + "For example,\n" + "```sql\n" + "CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM source_table\n" + "CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n" + "CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 JOIN table1 USING column\n" + "CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n" + "CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n" + "CREATE MATERIALIZED VIEW view0 ENGINE = Memory AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n" + "```\n" + "\n" + "If the materialized view has a target table explicitly declared in the `TO` clause, the user must have\n" + "**insert** and **select** privilege on the target table.\n" + "For example,\n" + "```sql\n" + "CREATE MATERIALIZED VIEW view TO target_table AS SELECT * FROM source_table\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.18.2.2') + num="5.18.2.2", +) RQ_SRS_006_RBAC_MaterializedView_Select = Requirement( - name='RQ.SRS-006.RBAC.MaterializedView.Select', - version='1.0', + name="RQ.SRS-006.RBAC.MaterializedView.Select", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully `SELECT` from a materialized view if and only if\n' - 'the user has **select** privilege for that view either explicitly or through a role.\n' - '\n' - 'If the stored query includes one or more source tables, the user must have **select** privilege\n' - 'on all the source tables either explicitly or through a role.\n' - 'For example,\n' - '```sql\n' - 'CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM source_table\n' - 'CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n' - 'CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 JOIN table1 USING column\n' - 'CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n' - 'CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT 
column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n' - 'CREATE MATERIALIZED VIEW view0 ENGINE = Memory AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n' - '\n' - 'SELECT * FROM view\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL only successfully `SELECT` from a materialized view if and only if\n" + "the user has **select** privilege for that view either explicitly or through a role.\n" + "\n" + "If the stored query includes one or more source tables, the user must have **select** privilege\n" + "on all the source tables either explicitly or through a role.\n" + "For example,\n" + "```sql\n" + "CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM source_table\n" + "CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n" + "CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 JOIN table1 USING column\n" + "CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n" + "CREATE MATERIALIZED VIEW view ENGINE = Memory AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n" + "CREATE MATERIALIZED VIEW view0 ENGINE = Memory AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n" + "\n" + "SELECT * FROM view\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.18.2.3') + num="5.18.2.3", +) RQ_SRS_006_RBAC_MaterializedView_Select_TargetTable = Requirement( - name='RQ.SRS-006.RBAC.MaterializedView.Select.TargetTable', - version='1.0', + name="RQ.SRS-006.RBAC.MaterializedView.Select.TargetTable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully `SELECT` from the target table, implicit or explicit, of a materialized view if and only if\n' - 'the user has `SELECT` privilege for the table, either explicitly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL only successfully `SELECT` from the target table, implicit or explicit, of a materialized view if and only if\n" + "the user has `SELECT` privilege for the table, either explicitly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.18.2.4') + num="5.18.2.4", +) RQ_SRS_006_RBAC_MaterializedView_Select_SourceTable = Requirement( - name='RQ.SRS-006.RBAC.MaterializedView.Select.SourceTable', - version='1.0', + name="RQ.SRS-006.RBAC.MaterializedView.Select.SourceTable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully `SELECT` from the source table of a materialized view if and only if\n' - 'the user has `SELECT` privilege for the table, either explicitly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL only successfully `SELECT` from the source table of a materialized view if and only if\n" + "the user has `SELECT` privilege for the table, either explicitly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.18.2.5') + num="5.18.2.5", +) RQ_SRS_006_RBAC_MaterializedView_Drop = Requirement( - name='RQ.SRS-006.RBAC.MaterializedView.Drop', - version='1.0', + name="RQ.SRS-006.RBAC.MaterializedView.Drop", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully execute 
a `DROP VIEW` command if and only if\n' - 'the user has **drop view** privilege on that view either explicitly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL only successfully execute a `DROP VIEW` command if and only if\n" + "the user has **drop view** privilege on that view either explicitly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.18.2.6') + num="5.18.2.6", +) RQ_SRS_006_RBAC_MaterializedView_ModifyQuery = Requirement( - name='RQ.SRS-006.RBAC.MaterializedView.ModifyQuery', - version='1.0', + name="RQ.SRS-006.RBAC.MaterializedView.ModifyQuery", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully execute a `MODIFY QUERY` command if and only if\n' - 'the user has **modify query** privilege on that view either explicitly or through a role.\n' - '\n' - 'If the new query includes one or more source tables, the user must have **select** privilege\n' - 'on all the source tables either explicitly or through a role.\n' - 'For example,\n' - '```sql\n' - 'ALTER TABLE view MODIFY QUERY SELECT * FROM source_table\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL only successfully execute a `MODIFY QUERY` command if and only if\n" + "the user has **modify query** privilege on that view either explicitly or through a role.\n" + "\n" + "If the new query includes one or more source tables, the user must have **select** privilege\n" + "on all the source tables either explicitly or through a role.\n" + "For example,\n" + "```sql\n" + "ALTER TABLE view MODIFY QUERY SELECT * FROM source_table\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.18.2.7') + num="5.18.2.7", +) RQ_SRS_006_RBAC_MaterializedView_Insert = Requirement( - name='RQ.SRS-006.RBAC.MaterializedView.Insert', - version='1.0', + name="RQ.SRS-006.RBAC.MaterializedView.Insert", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only succesfully `INSERT` into a materialized view if and only if\n' - 'the user has `INSERT` privilege on the view, either explicitly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL only succesfully `INSERT` into a materialized view if and only if\n" + "the user has `INSERT` privilege on the view, either explicitly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.18.2.8') + num="5.18.2.8", +) RQ_SRS_006_RBAC_MaterializedView_Insert_SourceTable = Requirement( - name='RQ.SRS-006.RBAC.MaterializedView.Insert.SourceTable', - version='1.0', + name="RQ.SRS-006.RBAC.MaterializedView.Insert.SourceTable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only succesfully `INSERT` into a source table of a materialized view if and only if\n' - 'the user has `INSERT` privilege on the source table, either explicitly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL only succesfully `INSERT` into a source table of a materialized view if and only if\n" + "the user has `INSERT` privilege on the source table, either explicitly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.18.2.9') + num="5.18.2.9", +) RQ_SRS_006_RBAC_MaterializedView_Insert_TargetTable = Requirement( - name='RQ.SRS-006.RBAC.MaterializedView.Insert.TargetTable', - version='1.0', + name="RQ.SRS-006.RBAC.MaterializedView.Insert.TargetTable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only succesfully `INSERT` into a target table of a materialized view if and only if\n' - 'the 
user has `INSERT` privelege on the target table, either explicitly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL only succesfully `INSERT` into a target table of a materialized view if and only if\n" + "the user has `INSERT` privelege on the target table, either explicitly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.18.2.10') + num="5.18.2.10", +) RQ_SRS_006_RBAC_LiveView = Requirement( - name='RQ.SRS-006.RBAC.LiveView', - version='1.0', + name="RQ.SRS-006.RBAC.LiveView", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to **create**, **select**, **alter** and **drop**\n' - 'privileges for a live view for users or roles.\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to **create**, **select**, **alter** and **drop**\n" + "privileges for a live view for users or roles.\n" + "\n" + ), link=None, level=4, - num='5.18.3.1') + num="5.18.3.1", +) RQ_SRS_006_RBAC_LiveView_Create = Requirement( - name='RQ.SRS-006.RBAC.LiveView.Create', - version='1.0', + name="RQ.SRS-006.RBAC.LiveView.Create", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully execute a `CREATE LIVE VIEW` command if and only if\n' - 'the user has **create view** privilege either explicitly or through roles.\n' - '\n' - 'If the stored query includes one or more source tables, the user must have **select** privilege\n' - 'on all the source tables either explicitly or through a role.\n' - 'For example,\n' - '```sql\n' - 'CREATE LIVE VIEW view AS SELECT * FROM source_table\n' - 'CREATE LIVE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n' - 'CREATE LIVE VIEW view AS SELECT * FROM table0 JOIN table1 USING column\n' - 'CREATE LIVE VIEW view AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n' - 'CREATE LIVE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n' - 'CREATE LIVE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL only successfully execute a `CREATE LIVE VIEW` command if and only if\n" + "the user has **create view** privilege either explicitly or through roles.\n" + "\n" + "If the stored query includes one or more source tables, the user must have **select** privilege\n" + "on all the source tables either explicitly or through a role.\n" + "For example,\n" + "```sql\n" + "CREATE LIVE VIEW view AS SELECT * FROM source_table\n" + "CREATE LIVE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n" + "CREATE LIVE VIEW view AS SELECT * FROM table0 JOIN table1 USING column\n" + "CREATE LIVE VIEW view AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n" + "CREATE LIVE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n" + "CREATE LIVE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.18.3.2') + num="5.18.3.2", +) RQ_SRS_006_RBAC_LiveView_Select = 
Requirement( - name='RQ.SRS-006.RBAC.LiveView.Select', - version='1.0', + name="RQ.SRS-006.RBAC.LiveView.Select", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully `SELECT` from a live view if and only if\n' - 'the user has **select** privilege for that view either explicitly or through a role.\n' - '\n' - 'If the stored query includes one or more source tables, the user must have **select** privilege\n' - 'on all the source tables either explicitly or through a role.\n' - 'For example,\n' - '```sql\n' - 'CREATE LIVE VIEW view AS SELECT * FROM source_table\n' - 'CREATE LIVE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n' - 'CREATE LIVE VIEW view AS SELECT * FROM table0 JOIN table1 USING column\n' - 'CREATE LIVE VIEW view AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n' - 'CREATE LIVE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n' - 'CREATE LIVE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n' - '\n' - 'SELECT * FROM view\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL only successfully `SELECT` from a live view if and only if\n" + "the user has **select** privilege for that view either explicitly or through a role.\n" + "\n" + "If the stored query includes one or more source tables, the user must have **select** privilege\n" + "on all the source tables either explicitly or through a role.\n" + "For example,\n" + "```sql\n" + "CREATE LIVE VIEW view AS SELECT * FROM source_table\n" + "CREATE LIVE VIEW view AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n" + "CREATE LIVE VIEW view AS SELECT * FROM table0 JOIN table1 USING column\n" + "CREATE LIVE VIEW view AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n" + "CREATE LIVE VIEW view AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n" + "CREATE LIVE VIEW view0 AS SELECT column FROM view1 UNION ALL SELECT column FROM view2\n" + "\n" + "SELECT * FROM view\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.18.3.3') + num="5.18.3.3", +) RQ_SRS_006_RBAC_LiveView_Drop = Requirement( - name='RQ.SRS-006.RBAC.LiveView.Drop', - version='1.0', + name="RQ.SRS-006.RBAC.LiveView.Drop", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully execute a `DROP VIEW` command if and only if\n' - 'the user has **drop view** privilege on that view either explicitly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL only successfully execute a `DROP VIEW` command if and only if\n" + "the user has **drop view** privilege on that view either explicitly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.18.3.4') + num="5.18.3.4", +) RQ_SRS_006_RBAC_LiveView_Refresh = Requirement( - name='RQ.SRS-006.RBAC.LiveView.Refresh', - version='1.0', + name="RQ.SRS-006.RBAC.LiveView.Refresh", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully execute an `ALTER LIVE VIEW REFRESH` command if 
and only if\n' - 'the user has **refresh** privilege on that view either explicitly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL only successfully execute an `ALTER LIVE VIEW REFRESH` command if and only if\n" + "the user has **refresh** privilege on that view either explicitly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.18.3.5') + num="5.18.3.5", +) RQ_SRS_006_RBAC_Select = Requirement( - name='RQ.SRS-006.RBAC.Select', - version='1.0', + name="RQ.SRS-006.RBAC.Select", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL execute `SELECT` if and only if the user\n' - 'has the **select** privilege for the destination table\n' - 'either because of the explicit grant or through one of the roles assigned to the user.\n' - '\n' - ), + "[ClickHouse] SHALL execute `SELECT` if and only if the user\n" + "has the **select** privilege for the destination table\n" + "either because of the explicit grant or through one of the roles assigned to the user.\n" + "\n" + ), link=None, level=3, - num='5.19.1') + num="5.19.1", +) RQ_SRS_006_RBAC_Select_Column = Requirement( - name='RQ.SRS-006.RBAC.Select.Column', - version='1.0', + name="RQ.SRS-006.RBAC.Select.Column", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking **select** privilege\n' - 'for one or more specified columns in a table to one or more **users** or **roles**.\n' - 'Any `SELECT` statements SHALL not to be executed, unless the user\n' - 'has the **select** privilege for the destination column\n' - 'either because of the explicit grant or through one of the roles assigned to the user.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking **select** privilege\n" + "for one or more specified columns in a table to one or more **users** or **roles**.\n" + "Any `SELECT` statements SHALL not to be executed, unless the user\n" + "has the **select** privilege for the destination column\n" + "either because of the explicit grant or through one of the roles assigned to the user.\n" + "\n" + ), link=None, level=3, - num='5.19.2') + num="5.19.2", +) RQ_SRS_006_RBAC_Select_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Select.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Select.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking **select** privilege\n' - 'on a specified cluster to one or more **users** or **roles**.\n' - 'Any `SELECT` statements SHALL succeed only on nodes where\n' - 'the table exists and privilege was granted.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking **select** privilege\n" + "on a specified cluster to one or more **users** or **roles**.\n" + "Any `SELECT` statements SHALL succeed only on nodes where\n" + "the table exists and privilege was granted.\n" + "\n" + ), link=None, level=3, - num='5.19.3') + num="5.19.3", +) RQ_SRS_006_RBAC_Select_TableEngines = Requirement( - name='RQ.SRS-006.RBAC.Select.TableEngines', - version='1.0', + name="RQ.SRS-006.RBAC.Select.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **select** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* MergeTree\n' - '* ReplacingMergeTree\n' - '* SummingMergeTree\n' - '* AggregatingMergeTree\n' - '* CollapsingMergeTree\n' - '* VersionedCollapsingMergeTree\n' - '* 
GraphiteMergeTree\n' - '* ReplicatedMergeTree\n' - '* ReplicatedSummingMergeTree\n' - '* ReplicatedReplacingMergeTree\n' - '* ReplicatedAggregatingMergeTree\n' - '* ReplicatedCollapsingMergeTree\n' - '* ReplicatedVersionedCollapsingMergeTree\n' - '* ReplicatedGraphiteMergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **select** privilege\n" + "on tables created using the following engines\n" + "\n" + "* MergeTree\n" + "* ReplacingMergeTree\n" + "* SummingMergeTree\n" + "* AggregatingMergeTree\n" + "* CollapsingMergeTree\n" + "* VersionedCollapsingMergeTree\n" + "* GraphiteMergeTree\n" + "* ReplicatedMergeTree\n" + "* ReplicatedSummingMergeTree\n" + "* ReplicatedReplacingMergeTree\n" + "* ReplicatedAggregatingMergeTree\n" + "* ReplicatedCollapsingMergeTree\n" + "* ReplicatedVersionedCollapsingMergeTree\n" + "* ReplicatedGraphiteMergeTree\n" + "\n" + ), link=None, level=3, - num='5.19.4') + num="5.19.4", +) RQ_SRS_006_RBAC_Insert = Requirement( - name='RQ.SRS-006.RBAC.Insert', - version='1.0', + name="RQ.SRS-006.RBAC.Insert", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL execute `INSERT INTO` if and only if the user\n' - 'has the **insert** privilege for the destination table\n' - 'either because of the explicit grant or through one of the roles assigned to the user.\n' - '\n' - ), + "[ClickHouse] SHALL execute `INSERT INTO` if and only if the user\n" + "has the **insert** privilege for the destination table\n" + "either because of the explicit grant or through one of the roles assigned to the user.\n" + "\n" + ), link=None, level=3, - num='5.20.1') + num="5.20.1", +) RQ_SRS_006_RBAC_Insert_Column = Requirement( - name='RQ.SRS-006.RBAC.Insert.Column', - version='1.0', + name="RQ.SRS-006.RBAC.Insert.Column", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking **insert** privilege\n' - 'for one or more specified columns in a table to one or more **users** or **roles**.\n' - 'Any `INSERT INTO` statements SHALL not to be executed, unless the user\n' - 'has the **insert** privilege for the destination column\n' - 'either because of the explicit grant or through one of the roles assigned to the user.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking **insert** privilege\n" + "for one or more specified columns in a table to one or more **users** or **roles**.\n" + "Any `INSERT INTO` statements SHALL not to be executed, unless the user\n" + "has the **insert** privilege for the destination column\n" + "either because of the explicit grant or through one of the roles assigned to the user.\n" + "\n" + ), link=None, level=3, - num='5.20.2') + num="5.20.2", +) RQ_SRS_006_RBAC_Insert_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Insert.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Insert.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking **insert** privilege\n' - 'on a specified cluster to one or more **users** or **roles**.\n' - 'Any `INSERT INTO` statements SHALL succeed only on nodes where\n' - 'the table exists and privilege was granted.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking **insert** privilege\n" + "on a specified cluster to one or more **users** or **roles**.\n" + "Any `INSERT INTO` statements SHALL succeed only on nodes where\n" + "the table exists and privilege was granted.\n" + "\n" + ), link=None, 
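The column- and cluster-scoped SELECT/INSERT requirements above correspond to ordinary `GRANT`/`REVOKE` statements. A minimal sketch of the syntax they exercise, assuming a hypothetical database `db`, table `t` with columns `a` and `b`, user `alice`, and cluster `my_cluster`:

```sql
-- Column-level grants: alice may read only column a and insert only into column b of db.t.
GRANT SELECT(a) ON db.t TO alice;
GRANT INSERT(b) ON db.t TO alice;

-- Revocation works at the same granularity.
REVOKE SELECT(a) ON db.t FROM alice;

-- Cluster-scoped grant: issued on every node of my_cluster; SELECT then succeeds
-- only on nodes where db.t exists and the privilege was granted.
GRANT ON CLUSTER my_cluster SELECT ON db.t TO alice;
```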
level=3, - num='5.20.3') + num="5.20.3", +) RQ_SRS_006_RBAC_Insert_TableEngines = Requirement( - name='RQ.SRS-006.RBAC.Insert.TableEngines', - version='1.0', + name="RQ.SRS-006.RBAC.Insert.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **insert** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* MergeTree\n' - '* ReplacingMergeTree\n' - '* SummingMergeTree\n' - '* AggregatingMergeTree\n' - '* CollapsingMergeTree\n' - '* VersionedCollapsingMergeTree\n' - '* GraphiteMergeTree\n' - '* ReplicatedMergeTree\n' - '* ReplicatedSummingMergeTree\n' - '* ReplicatedReplacingMergeTree\n' - '* ReplicatedAggregatingMergeTree\n' - '* ReplicatedCollapsingMergeTree\n' - '* ReplicatedVersionedCollapsingMergeTree\n' - '* ReplicatedGraphiteMergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **insert** privilege\n" + "on tables created using the following engines\n" + "\n" + "* MergeTree\n" + "* ReplacingMergeTree\n" + "* SummingMergeTree\n" + "* AggregatingMergeTree\n" + "* CollapsingMergeTree\n" + "* VersionedCollapsingMergeTree\n" + "* GraphiteMergeTree\n" + "* ReplicatedMergeTree\n" + "* ReplicatedSummingMergeTree\n" + "* ReplicatedReplacingMergeTree\n" + "* ReplicatedAggregatingMergeTree\n" + "* ReplicatedCollapsingMergeTree\n" + "* ReplicatedVersionedCollapsingMergeTree\n" + "* ReplicatedGraphiteMergeTree\n" + "\n" + ), link=None, level=3, - num='5.20.4') + num="5.20.4", +) RQ_SRS_006_RBAC_Privileges_AlterColumn = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterColumn', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterColumn", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter column** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**.\n' - 'Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN` statements SHALL\n' - 'return an error, unless the user has the **alter column** privilege for\n' - 'the destination table either because of the explicit grant or through one of\n' - 'the roles assigned to the user.\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter column** privilege\n" + "for a database or a specific table to one or more **users** or **roles**.\n" + "Any `ALTER TABLE ... 
ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN` statements SHALL\n" + "return an error, unless the user has the **alter column** privilege for\n" + "the destination table either because of the explicit grant or through one of\n" + "the roles assigned to the user.\n" + "\n" + ), link=None, level=4, - num='5.21.1.1') + num="5.21.1.1", +) RQ_SRS_006_RBAC_Privileges_AlterColumn_Grant = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Grant', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterColumn.Grant", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting **alter column** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support granting **alter column** privilege\n" + "for a database or a specific table to one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.1.2') + num="5.21.1.2", +) RQ_SRS_006_RBAC_Privileges_AlterColumn_Revoke = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Revoke', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterColumn.Revoke", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **alter column** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **alter column** privilege\n" + "for a database or a specific table to one or more **users** or **roles**\n" + "\n" + ), link=None, level=4, - num='5.21.1.3') + num="5.21.1.3", +) RQ_SRS_006_RBAC_Privileges_AlterColumn_Column = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Column', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterColumn.Column", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking **alter column** privilege\n' - 'for one or more specified columns in a table to one or more **users** or **roles**.\n' - 'Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN` statements SHALL return an error,\n' - 'unless the user has the **alter column** privilege for the destination column\n' - 'either because of the explicit grant or through one of the roles assigned to the user.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking **alter column** privilege\n" + "for one or more specified columns in a table to one or more **users** or **roles**.\n" + "Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN` statements SHALL return an error,\n" + "unless the user has the **alter column** privilege for the destination column\n" + "either because of the explicit grant or through one of the roles assigned to the user.\n" + "\n" + ), link=None, level=4, - num='5.21.1.4') + num="5.21.1.4", +) RQ_SRS_006_RBAC_Privileges_AlterColumn_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterColumn.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking **alter column** privilege\n' - 'on a specified cluster to one or more **users** or **roles**.\n' - 'Any `ALTER TABLE ... 
ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN`\n' - 'statements SHALL succeed only on nodes where the table exists and privilege was granted.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking **alter column** privilege\n" + "on a specified cluster to one or more **users** or **roles**.\n" + "Any `ALTER TABLE ... ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN`\n" + "statements SHALL succeed only on nodes where the table exists and privilege was granted.\n" + "\n" + ), link=None, level=4, - num='5.21.1.5') + num="5.21.1.5", +) RQ_SRS_006_RBAC_Privileges_AlterColumn_TableEngines = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterColumn.TableEngines', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterColumn.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter column** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* MergeTree\n' - '* ReplacingMergeTree\n' - '* SummingMergeTree\n' - '* AggregatingMergeTree\n' - '* CollapsingMergeTree\n' - '* VersionedCollapsingMergeTree\n' - '* GraphiteMergeTree\n' - '* ReplicatedMergeTree\n' - '* ReplicatedSummingMergeTree\n' - '* ReplicatedReplacingMergeTree\n' - '* ReplicatedAggregatingMergeTree\n' - '* ReplicatedCollapsingMergeTree\n' - '* ReplicatedVersionedCollapsingMergeTree\n' - '* ReplicatedGraphiteMergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter column** privilege\n" + "on tables created using the following engines\n" + "\n" + "* MergeTree\n" + "* ReplacingMergeTree\n" + "* SummingMergeTree\n" + "* AggregatingMergeTree\n" + "* CollapsingMergeTree\n" + "* VersionedCollapsingMergeTree\n" + "* GraphiteMergeTree\n" + "* ReplicatedMergeTree\n" + "* ReplicatedSummingMergeTree\n" + "* ReplicatedReplacingMergeTree\n" + "* ReplicatedAggregatingMergeTree\n" + "* ReplicatedCollapsingMergeTree\n" + "* ReplicatedVersionedCollapsingMergeTree\n" + "* ReplicatedGraphiteMergeTree\n" + "\n" + ), link=None, level=4, - num='5.21.1.6') + num="5.21.1.6", +) RQ_SRS_006_RBAC_Privileges_AlterIndex = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterIndex', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterIndex", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter index** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**.\n' - 'Any `ALTER TABLE ... ORDER BY | ADD|DROP|MATERIALIZE|CLEAR INDEX` statements SHALL\n' - 'return an error, unless the user has the **alter index** privilege for\n' - 'the destination table either because of the explicit grant or through one of\n' - 'the roles assigned to the user.\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter index** privilege\n" + "for a database or a specific table to one or more **users** or **roles**.\n" + "Any `ALTER TABLE ... 
ORDER BY | ADD|DROP|MATERIALIZE|CLEAR INDEX` statements SHALL\n" + "return an error, unless the user has the **alter index** privilege for\n" + "the destination table either because of the explicit grant or through one of\n" + "the roles assigned to the user.\n" + "\n" + ), link=None, level=4, - num='5.21.2.1') + num="5.21.2.1", +) RQ_SRS_006_RBAC_Privileges_AlterIndex_Grant = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterIndex.Grant', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterIndex.Grant", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting **alter index** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support granting **alter index** privilege\n" + "for a database or a specific table to one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.2.2') + num="5.21.2.2", +) RQ_SRS_006_RBAC_Privileges_AlterIndex_Revoke = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterIndex.Revoke', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterIndex.Revoke", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **alter index** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **alter index** privilege\n" + "for a database or a specific table to one or more **users** or **roles**\n" + "\n" + ), link=None, level=4, - num='5.21.2.3') + num="5.21.2.3", +) RQ_SRS_006_RBAC_Privileges_AlterIndex_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterIndex.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterIndex.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking **alter index** privilege\n' - 'on a specified cluster to one or more **users** or **roles**.\n' - 'Any `ALTER TABLE ... ORDER BY | ADD|DROP|MATERIALIZE|CLEAR INDEX`\n' - 'statements SHALL succeed only on nodes where the table exists and privilege was granted.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking **alter index** privilege\n" + "on a specified cluster to one or more **users** or **roles**.\n" + "Any `ALTER TABLE ... 
ORDER BY | ADD|DROP|MATERIALIZE|CLEAR INDEX`\n" + "statements SHALL succeed only on nodes where the table exists and privilege was granted.\n" + "\n" + ), link=None, level=4, - num='5.21.2.4') + num="5.21.2.4", +) RQ_SRS_006_RBAC_Privileges_AlterIndex_TableEngines = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterIndex.TableEngines', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterIndex.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter index** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* MergeTree\n' - '* ReplacingMergeTree\n' - '* SummingMergeTree\n' - '* AggregatingMergeTree\n' - '* CollapsingMergeTree\n' - '* VersionedCollapsingMergeTree\n' - '* GraphiteMergeTree\n' - '* ReplicatedMergeTree\n' - '* ReplicatedSummingMergeTree\n' - '* ReplicatedReplacingMergeTree\n' - '* ReplicatedAggregatingMergeTree\n' - '* ReplicatedCollapsingMergeTree\n' - '* ReplicatedVersionedCollapsingMergeTree\n' - '* ReplicatedGraphiteMergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter index** privilege\n" + "on tables created using the following engines\n" + "\n" + "* MergeTree\n" + "* ReplacingMergeTree\n" + "* SummingMergeTree\n" + "* AggregatingMergeTree\n" + "* CollapsingMergeTree\n" + "* VersionedCollapsingMergeTree\n" + "* GraphiteMergeTree\n" + "* ReplicatedMergeTree\n" + "* ReplicatedSummingMergeTree\n" + "* ReplicatedReplacingMergeTree\n" + "* ReplicatedAggregatingMergeTree\n" + "* ReplicatedCollapsingMergeTree\n" + "* ReplicatedVersionedCollapsingMergeTree\n" + "* ReplicatedGraphiteMergeTree\n" + "\n" + ), link=None, level=4, - num='5.21.2.5') + num="5.21.2.5", +) RQ_SRS_006_RBAC_Privileges_AlterConstraint = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterConstraint', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterConstraint", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter constraint** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**.\n' - 'Any `ALTER TABLE ... ADD|CREATE CONSTRAINT` statements SHALL\n' - 'return an error, unless the user has the **alter constraint** privilege for\n' - 'the destination table either because of the explicit grant or through one of\n' - 'the roles assigned to the user.\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter constraint** privilege\n" + "for a database or a specific table to one or more **users** or **roles**.\n" + "Any `ALTER TABLE ... 
ADD|CREATE CONSTRAINT` statements SHALL\n" + "return an error, unless the user has the **alter constraint** privilege for\n" + "the destination table either because of the explicit grant or through one of\n" + "the roles assigned to the user.\n" + "\n" + ), link=None, level=4, - num='5.21.3.1') + num="5.21.3.1", +) RQ_SRS_006_RBAC_Privileges_AlterConstraint_Grant = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.Grant', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterConstraint.Grant", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting **alter constraint** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support granting **alter constraint** privilege\n" + "for a database or a specific table to one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.3.2') + num="5.21.3.2", +) RQ_SRS_006_RBAC_Privileges_AlterConstraint_Revoke = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.Revoke', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterConstraint.Revoke", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **alter constraint** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **alter constraint** privilege\n" + "for a database or a specific table to one or more **users** or **roles**\n" + "\n" + ), link=None, level=4, - num='5.21.3.3') + num="5.21.3.3", +) RQ_SRS_006_RBAC_Privileges_AlterConstraint_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterConstraint.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking **alter constraint** privilege\n' - 'on a specified cluster to one or more **users** or **roles**.\n' - 'Any `ALTER TABLE ... ADD|DROP CONSTRAINT`\n' - 'statements SHALL succeed only on nodes where the table exists and privilege was granted.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking **alter constraint** privilege\n" + "on a specified cluster to one or more **users** or **roles**.\n" + "Any `ALTER TABLE ... 
ADD|DROP CONSTRAINT`\n" + "statements SHALL succeed only on nodes where the table exists and privilege was granted.\n" + "\n" + ), link=None, level=4, - num='5.21.3.4') + num="5.21.3.4", +) RQ_SRS_006_RBAC_Privileges_AlterConstraint_TableEngines = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.TableEngines', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterConstraint.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter constraint** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* MergeTree\n' - '* ReplacingMergeTree\n' - '* SummingMergeTree\n' - '* AggregatingMergeTree\n' - '* CollapsingMergeTree\n' - '* VersionedCollapsingMergeTree\n' - '* GraphiteMergeTree\n' - '* ReplicatedMergeTree\n' - '* ReplicatedSummingMergeTree\n' - '* ReplicatedReplacingMergeTree\n' - '* ReplicatedAggregatingMergeTree\n' - '* ReplicatedCollapsingMergeTree\n' - '* ReplicatedVersionedCollapsingMergeTree\n' - '* ReplicatedGraphiteMergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter constraint** privilege\n" + "on tables created using the following engines\n" + "\n" + "* MergeTree\n" + "* ReplacingMergeTree\n" + "* SummingMergeTree\n" + "* AggregatingMergeTree\n" + "* CollapsingMergeTree\n" + "* VersionedCollapsingMergeTree\n" + "* GraphiteMergeTree\n" + "* ReplicatedMergeTree\n" + "* ReplicatedSummingMergeTree\n" + "* ReplicatedReplacingMergeTree\n" + "* ReplicatedAggregatingMergeTree\n" + "* ReplicatedCollapsingMergeTree\n" + "* ReplicatedVersionedCollapsingMergeTree\n" + "* ReplicatedGraphiteMergeTree\n" + "\n" + ), link=None, level=4, - num='5.21.3.5') + num="5.21.3.5", +) RQ_SRS_006_RBAC_Privileges_AlterTTL = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterTTL', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterTTL", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter ttl** or **alter materialize ttl** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**.\n' - 'Any `ALTER TABLE ... ALTER TTL | ALTER MATERIALIZE TTL` statements SHALL\n' - 'return an error, unless the user has the **alter ttl** or **alter materialize ttl** privilege for\n' - 'the destination table either because of the explicit grant or through one of\n' - 'the roles assigned to the user.\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter ttl** or **alter materialize ttl** privilege\n" + "for a database or a specific table to one or more **users** or **roles**.\n" + "Any `ALTER TABLE ... 
ALTER TTL | ALTER MATERIALIZE TTL` statements SHALL\n" + "return an error, unless the user has the **alter ttl** or **alter materialize ttl** privilege for\n" + "the destination table either because of the explicit grant or through one of\n" + "the roles assigned to the user.\n" + "\n" + ), link=None, level=4, - num='5.21.4.1') + num="5.21.4.1", +) RQ_SRS_006_RBAC_Privileges_AlterTTL_Grant = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterTTL.Grant', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterTTL.Grant", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting **alter ttl** or **alter materialize ttl** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support granting **alter ttl** or **alter materialize ttl** privilege\n" + "for a database or a specific table to one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.4.2') + num="5.21.4.2", +) RQ_SRS_006_RBAC_Privileges_AlterTTL_Revoke = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterTTL.Revoke', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterTTL.Revoke", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **alter ttl** or **alter materialize ttl** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **alter ttl** or **alter materialize ttl** privilege\n" + "for a database or a specific table to one or more **users** or **roles**\n" + "\n" + ), link=None, level=4, - num='5.21.4.3') + num="5.21.4.3", +) RQ_SRS_006_RBAC_Privileges_AlterTTL_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterTTL.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterTTL.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking **alter ttl** or **alter materialize ttl** privilege\n' - 'on a specified cluster to one or more **users** or **roles**.\n' - 'Any `ALTER TABLE ... ALTER TTL | ALTER MATERIALIZE TTL`\n' - 'statements SHALL succeed only on nodes where the table exists and privilege was granted.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking **alter ttl** or **alter materialize ttl** privilege\n" + "on a specified cluster to one or more **users** or **roles**.\n" + "Any `ALTER TABLE ... 
ALTER TTL | ALTER MATERIALIZE TTL`\n" + "statements SHALL succeed only on nodes where the table exists and privilege was granted.\n" + "\n" + ), link=None, level=4, - num='5.21.4.4') + num="5.21.4.4", +) RQ_SRS_006_RBAC_Privileges_AlterTTL_TableEngines = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterTTL.TableEngines', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterTTL.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter ttl** or **alter materialize ttl** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* MergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter ttl** or **alter materialize ttl** privilege\n" + "on tables created using the following engines\n" + "\n" + "* MergeTree\n" + "\n" + ), link=None, level=4, - num='5.21.4.5') + num="5.21.4.5", +) RQ_SRS_006_RBAC_Privileges_AlterSettings = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterSettings', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterSettings", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter settings** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**.\n' - 'Any `ALTER TABLE ... MODIFY SETTING setting` statements SHALL\n' - 'return an error, unless the user has the **alter settings** privilege for\n' - 'the destination table either because of the explicit grant or through one of\n' - 'the roles assigned to the user. The **alter settings** privilege allows\n' - 'modifying table engine settings. It doesn’t affect settings or server configuration parameters.\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter settings** privilege\n" + "for a database or a specific table to one or more **users** or **roles**.\n" + "Any `ALTER TABLE ... MODIFY SETTING setting` statements SHALL\n" + "return an error, unless the user has the **alter settings** privilege for\n" + "the destination table either because of the explicit grant or through one of\n" + "the roles assigned to the user. The **alter settings** privilege allows\n" + "modifying table engine settings. 
It doesn’t affect settings or server configuration parameters.\n" + "\n" + ), link=None, level=4, - num='5.21.5.1') + num="5.21.5.1", +) RQ_SRS_006_RBAC_Privileges_AlterSettings_Grant = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterSettings.Grant', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterSettings.Grant", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting **alter settings** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support granting **alter settings** privilege\n" + "for a database or a specific table to one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.5.2') + num="5.21.5.2", +) RQ_SRS_006_RBAC_Privileges_AlterSettings_Revoke = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterSettings.Revoke', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterSettings.Revoke", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **alter settings** privilege\n' - 'for a database or a specific table to one or more **users** or **roles**\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **alter settings** privilege\n" + "for a database or a specific table to one or more **users** or **roles**\n" + "\n" + ), link=None, level=4, - num='5.21.5.3') + num="5.21.5.3", +) RQ_SRS_006_RBAC_Privileges_AlterSettings_Cluster = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterSettings.Cluster', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterSettings.Cluster", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking **alter settings** privilege\n' - 'on a specified cluster to one or more **users** or **roles**.\n' - 'Any `ALTER TABLE ... MODIFY SETTING setting`\n' - 'statements SHALL succeed only on nodes where the table exists and privilege was granted.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking **alter settings** privilege\n" + "on a specified cluster to one or more **users** or **roles**.\n" + "Any `ALTER TABLE ... 
MODIFY SETTING setting`\n" + "statements SHALL succeed only on nodes where the table exists and privilege was granted.\n" + "\n" + ), link=None, level=4, - num='5.21.5.4') + num="5.21.5.4", +) RQ_SRS_006_RBAC_Privileges_AlterSettings_TableEngines = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterSettings.TableEngines', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterSettings.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter settings** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* MergeTree\n' - '* ReplacingMergeTree\n' - '* SummingMergeTree\n' - '* AggregatingMergeTree\n' - '* CollapsingMergeTree\n' - '* VersionedCollapsingMergeTree\n' - '* GraphiteMergeTree\n' - '* ReplicatedMergeTree\n' - '* ReplicatedSummingMergeTree\n' - '* ReplicatedReplacingMergeTree\n' - '* ReplicatedAggregatingMergeTree\n' - '* ReplicatedCollapsingMergeTree\n' - '* ReplicatedVersionedCollapsingMergeTree\n' - '* ReplicatedGraphiteMergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter settings** privilege\n" + "on tables created using the following engines\n" + "\n" + "* MergeTree\n" + "* ReplacingMergeTree\n" + "* SummingMergeTree\n" + "* AggregatingMergeTree\n" + "* CollapsingMergeTree\n" + "* VersionedCollapsingMergeTree\n" + "* GraphiteMergeTree\n" + "* ReplicatedMergeTree\n" + "* ReplicatedSummingMergeTree\n" + "* ReplicatedReplacingMergeTree\n" + "* ReplicatedAggregatingMergeTree\n" + "* ReplicatedCollapsingMergeTree\n" + "* ReplicatedVersionedCollapsingMergeTree\n" + "* ReplicatedGraphiteMergeTree\n" + "\n" + ), link=None, level=4, - num='5.21.5.5') + num="5.21.5.5", +) RQ_SRS_006_RBAC_Privileges_AlterUpdate = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterUpdate', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterUpdate", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ALTER UPDATE` statement if and only if the user has **alter update** privilege for that column,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ALTER UPDATE` statement if and only if the user has **alter update** privilege for that column,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.21.6.1') + num="5.21.6.1", +) RQ_SRS_006_RBAC_Privileges_AlterUpdate_Grant = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterUpdate.Grant', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterUpdate.Grant", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting **alter update** privilege on a column level\n' - 'to one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support granting **alter update** privilege on a column level\n" + "to one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.6.2') + num="5.21.6.2", +) RQ_SRS_006_RBAC_Privileges_AlterUpdate_Revoke = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterUpdate.Revoke', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterUpdate.Revoke", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **alter update** privilege on a column level\n' - 'from one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **alter update** 
privilege on a column level\n" + "from one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.6.3') + num="5.21.6.3", +) RQ_SRS_006_RBAC_Privileges_AlterUpdate_TableEngines = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterUpdate.TableEngines', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterUpdate.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter update** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* MergeTree\n' - '* ReplacingMergeTree\n' - '* SummingMergeTree\n' - '* AggregatingMergeTree\n' - '* CollapsingMergeTree\n' - '* VersionedCollapsingMergeTree\n' - '* GraphiteMergeTree\n' - '* ReplicatedMergeTree\n' - '* ReplicatedSummingMergeTree\n' - '* ReplicatedReplacingMergeTree\n' - '* ReplicatedAggregatingMergeTree\n' - '* ReplicatedCollapsingMergeTree\n' - '* ReplicatedVersionedCollapsingMergeTree\n' - '* ReplicatedGraphiteMergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter update** privilege\n" + "on tables created using the following engines\n" + "\n" + "* MergeTree\n" + "* ReplacingMergeTree\n" + "* SummingMergeTree\n" + "* AggregatingMergeTree\n" + "* CollapsingMergeTree\n" + "* VersionedCollapsingMergeTree\n" + "* GraphiteMergeTree\n" + "* ReplicatedMergeTree\n" + "* ReplicatedSummingMergeTree\n" + "* ReplicatedReplacingMergeTree\n" + "* ReplicatedAggregatingMergeTree\n" + "* ReplicatedCollapsingMergeTree\n" + "* ReplicatedVersionedCollapsingMergeTree\n" + "* ReplicatedGraphiteMergeTree\n" + "\n" + ), link=None, level=4, - num='5.21.6.4') + num="5.21.6.4", +) RQ_SRS_006_RBAC_Privileges_AlterDelete = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterDelete', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterDelete", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ALTER DELETE` statement if and only if the user has **alter delete** privilege for that table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ALTER DELETE` statement if and only if the user has **alter delete** privilege for that table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.21.7.1') + num="5.21.7.1", +) RQ_SRS_006_RBAC_Privileges_AlterDelete_Grant = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterDelete.Grant', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterDelete.Grant", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting **alter delete** privilege on a column level\n' - 'to one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support granting **alter delete** privilege on a column level\n" + "to one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.7.2') + num="5.21.7.2", +) RQ_SRS_006_RBAC_Privileges_AlterDelete_Revoke = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterDelete.Revoke', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterDelete.Revoke", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **alter delete** privilege on a column level\n' - 'from one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **alter delete** privilege on a column level\n" + "from one or more **users** or 
**roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.7.3') + num="5.21.7.3", +) RQ_SRS_006_RBAC_Privileges_AlterDelete_TableEngines = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterDelete.TableEngines', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterDelete.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter delete** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* MergeTree\n' - '* ReplacingMergeTree\n' - '* SummingMergeTree\n' - '* AggregatingMergeTree\n' - '* CollapsingMergeTree\n' - '* VersionedCollapsingMergeTree\n' - '* GraphiteMergeTree\n' - '* ReplicatedMergeTree\n' - '* ReplicatedSummingMergeTree\n' - '* ReplicatedReplacingMergeTree\n' - '* ReplicatedAggregatingMergeTree\n' - '* ReplicatedCollapsingMergeTree\n' - '* ReplicatedVersionedCollapsingMergeTree\n' - '* ReplicatedGraphiteMergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter delete** privilege\n" + "on tables created using the following engines\n" + "\n" + "* MergeTree\n" + "* ReplacingMergeTree\n" + "* SummingMergeTree\n" + "* AggregatingMergeTree\n" + "* CollapsingMergeTree\n" + "* VersionedCollapsingMergeTree\n" + "* GraphiteMergeTree\n" + "* ReplicatedMergeTree\n" + "* ReplicatedSummingMergeTree\n" + "* ReplicatedReplacingMergeTree\n" + "* ReplicatedAggregatingMergeTree\n" + "* ReplicatedCollapsingMergeTree\n" + "* ReplicatedVersionedCollapsingMergeTree\n" + "* ReplicatedGraphiteMergeTree\n" + "\n" + ), link=None, level=4, - num='5.21.7.4') + num="5.21.7.4", +) RQ_SRS_006_RBAC_Privileges_AlterFreeze = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterFreeze', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterFreeze", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ALTER FREEZE` statement if and only if the user has **alter freeze** privilege for that table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ALTER FREEZE` statement if and only if the user has **alter freeze** privilege for that table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.21.8.1') + num="5.21.8.1", +) RQ_SRS_006_RBAC_Privileges_AlterFreeze_Grant = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterFreeze.Grant', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterFreeze.Grant", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting **alter freeze** privilege on a column level\n' - 'to one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support granting **alter freeze** privilege on a column level\n" + "to one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.8.2') + num="5.21.8.2", +) RQ_SRS_006_RBAC_Privileges_AlterFreeze_Revoke = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterFreeze.Revoke', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterFreeze.Revoke", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **alter freeze** privilege on a column level\n' - 'from one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **alter freeze** privilege on a column level\n" + "from one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.8.3') 
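The `ALTER` requirements above split the privilege into per-subcommand grants. A minimal sketch of how they combine, assuming a hypothetical table `db.t` (columns `a`, `b`), role `maintainer`, and user `alice`:

```sql
-- Grant the individual ALTER sub-privileges to a role, then assign the role.
GRANT ALTER COLUMN, ALTER INDEX, ALTER CONSTRAINT, ALTER TTL, ALTER SETTINGS ON db.t TO maintainer;
GRANT ALTER UPDATE, ALTER DELETE ON db.t TO maintainer;
GRANT maintainer TO alice;

-- Statements gated by those privileges (executed as alice):
ALTER TABLE db.t ADD COLUMN c UInt32;                            -- ALTER COLUMN
ALTER TABLE db.t MODIFY SETTING merge_with_ttl_timeout = 3600;   -- ALTER SETTINGS
ALTER TABLE db.t UPDATE a = a + 1 WHERE b = 0;                   -- ALTER UPDATE
ALTER TABLE db.t DELETE WHERE b = 0;                             -- ALTER DELETE
```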
+ num="5.21.8.3", +) RQ_SRS_006_RBAC_Privileges_AlterFreeze_TableEngines = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterFreeze.TableEngines', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterFreeze.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter freeze** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* MergeTree\n' - '* ReplacingMergeTree\n' - '* SummingMergeTree\n' - '* AggregatingMergeTree\n' - '* CollapsingMergeTree\n' - '* VersionedCollapsingMergeTree\n' - '* GraphiteMergeTree\n' - '* ReplicatedMergeTree\n' - '* ReplicatedSummingMergeTree\n' - '* ReplicatedReplacingMergeTree\n' - '* ReplicatedAggregatingMergeTree\n' - '* ReplicatedCollapsingMergeTree\n' - '* ReplicatedVersionedCollapsingMergeTree\n' - '* ReplicatedGraphiteMergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter freeze** privilege\n" + "on tables created using the following engines\n" + "\n" + "* MergeTree\n" + "* ReplacingMergeTree\n" + "* SummingMergeTree\n" + "* AggregatingMergeTree\n" + "* CollapsingMergeTree\n" + "* VersionedCollapsingMergeTree\n" + "* GraphiteMergeTree\n" + "* ReplicatedMergeTree\n" + "* ReplicatedSummingMergeTree\n" + "* ReplicatedReplacingMergeTree\n" + "* ReplicatedAggregatingMergeTree\n" + "* ReplicatedCollapsingMergeTree\n" + "* ReplicatedVersionedCollapsingMergeTree\n" + "* ReplicatedGraphiteMergeTree\n" + "\n" + ), link=None, level=4, - num='5.21.8.4') + num="5.21.8.4", +) RQ_SRS_006_RBAC_Privileges_AlterFetch = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterFetch', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterFetch", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ALTER FETCH` statement if and only if the user has **alter fetch** privilege for that table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ALTER FETCH` statement if and only if the user has **alter fetch** privilege for that table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.21.9.1') + num="5.21.9.1", +) RQ_SRS_006_RBAC_Privileges_AlterFetch_Grant = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterFetch.Grant', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterFetch.Grant", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting **alter fetch** privilege on a column level\n' - 'to one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support granting **alter fetch** privilege on a column level\n" + "to one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.9.2') + num="5.21.9.2", +) RQ_SRS_006_RBAC_Privileges_AlterFetch_Revoke = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterFetch.Revoke', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterFetch.Revoke", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **alter fetch** privilege on a column level\n' - 'from one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **alter fetch** privilege on a column level\n" + "from one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.9.3') + num="5.21.9.3", +) RQ_SRS_006_RBAC_Privileges_AlterFetch_TableEngines = 
Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterFetch.TableEngines', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterFetch.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter fetch** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* ReplicatedMergeTree\n' - '* ReplicatedSummingMergeTree\n' - '* ReplicatedReplacingMergeTree\n' - '* ReplicatedAggregatingMergeTree\n' - '* ReplicatedCollapsingMergeTree\n' - '* ReplicatedVersionedCollapsingMergeTree\n' - '* ReplicatedGraphiteMergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter fetch** privilege\n" + "on tables created using the following engines\n" + "\n" + "* ReplicatedMergeTree\n" + "* ReplicatedSummingMergeTree\n" + "* ReplicatedReplacingMergeTree\n" + "* ReplicatedAggregatingMergeTree\n" + "* ReplicatedCollapsingMergeTree\n" + "* ReplicatedVersionedCollapsingMergeTree\n" + "* ReplicatedGraphiteMergeTree\n" + "\n" + ), link=None, level=4, - num='5.21.9.4') + num="5.21.9.4", +) RQ_SRS_006_RBAC_Privileges_AlterMove = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterMove', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterMove", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ALTER MOVE` statement if and only if the user has **alter move**, **select**, and **alter delete** privilege on the source table\n' - 'and **insert** privilege on the target table, either directly or through a role.\n' - 'For example,\n' - '```sql\n' - 'ALTER TABLE source_table MOVE PARTITION 1 TO target_table\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ALTER MOVE` statement if and only if the user has **alter move**, **select**, and **alter delete** privilege on the source table\n" + "and **insert** privilege on the target table, either directly or through a role.\n" + "For example,\n" + "```sql\n" + "ALTER TABLE source_table MOVE PARTITION 1 TO target_table\n" + "```\n" + "\n" + ), link=None, level=4, - num='5.21.10.1') + num="5.21.10.1", +) RQ_SRS_006_RBAC_Privileges_AlterMove_Grant = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterMove.Grant', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterMove.Grant", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting **alter move** privilege on a column level\n' - 'to one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support granting **alter move** privilege on a column level\n" + "to one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.10.2') + num="5.21.10.2", +) RQ_SRS_006_RBAC_Privileges_AlterMove_Revoke = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterMove.Revoke', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterMove.Revoke", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support revoking **alter move** privilege on a column level\n' - 'from one or more **users** or **roles**.\n' - '\n' - ), + "[ClickHouse] SHALL support revoking **alter move** privilege on a column level\n" + "from one or more **users** or **roles**.\n" + "\n" + ), link=None, level=4, - num='5.21.10.3') + num="5.21.10.3", +) RQ_SRS_006_RBAC_Privileges_AlterMove_TableEngines = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterMove.TableEngines', - version='1.0', + 
name="RQ.SRS-006.RBAC.Privileges.AlterMove.TableEngines", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support controlling access to the **alter move** privilege\n' - 'on tables created using the following engines\n' - '\n' - '* MergeTree\n' - '* ReplacingMergeTree\n' - '* SummingMergeTree\n' - '* AggregatingMergeTree\n' - '* CollapsingMergeTree\n' - '* VersionedCollapsingMergeTree\n' - '* GraphiteMergeTree\n' - '* ReplicatedMergeTree\n' - '* ReplicatedSummingMergeTree\n' - '* ReplicatedReplacingMergeTree\n' - '* ReplicatedAggregatingMergeTree\n' - '* ReplicatedCollapsingMergeTree\n' - '* ReplicatedVersionedCollapsingMergeTree\n' - '* ReplicatedGraphiteMergeTree\n' - '\n' - ), + "[ClickHouse] SHALL support controlling access to the **alter move** privilege\n" + "on tables created using the following engines\n" + "\n" + "* MergeTree\n" + "* ReplacingMergeTree\n" + "* SummingMergeTree\n" + "* AggregatingMergeTree\n" + "* CollapsingMergeTree\n" + "* VersionedCollapsingMergeTree\n" + "* GraphiteMergeTree\n" + "* ReplicatedMergeTree\n" + "* ReplicatedSummingMergeTree\n" + "* ReplicatedReplacingMergeTree\n" + "* ReplicatedAggregatingMergeTree\n" + "* ReplicatedCollapsingMergeTree\n" + "* ReplicatedVersionedCollapsingMergeTree\n" + "* ReplicatedGraphiteMergeTree\n" + "\n" + ), link=None, level=4, - num='5.21.10.4') + num="5.21.10.4", +) RQ_SRS_006_RBAC_Privileges_CreateTable = Requirement( - name='RQ.SRS-006.RBAC.Privileges.CreateTable', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.CreateTable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL only successfully execute a `CREATE TABLE` command if and only if\n' - 'the user has **create table** privilege either explicitly or through roles.\n' - '\n' - 'If the stored query includes one or more source tables, the user must have **select** privilege\n' + "[ClickHouse] SHALL only successfully execute a `CREATE TABLE` command if and only if\n" + "the user has **create table** privilege either explicitly or through roles.\n" + "\n" + "If the stored query includes one or more source tables, the user must have **select** privilege\n" "on all the source tables and **insert** for the table they're trying to create either explicitly or through a role.\n" - 'For example,\n' - '```sql\n' - 'CREATE TABLE table AS SELECT * FROM source_table\n' - 'CREATE TABLE table AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n' - 'CREATE TABLE table AS SELECT * FROM table0 JOIN table1 USING column\n' - 'CREATE TABLE table AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n' - 'CREATE TABLE table AS SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n' - 'CREATE TABLE table0 AS SELECT column FROM table1 UNION ALL SELECT column FROM table2\n' - '```\n' - '\n' - ), + "For example,\n" + "```sql\n" + "CREATE TABLE table AS SELECT * FROM source_table\n" + "CREATE TABLE table AS SELECT * FROM table0 WHERE column IN (SELECT column FROM table1 WHERE column IN (SELECT column FROM table2 WHERE expression))\n" + "CREATE TABLE table AS SELECT * FROM table0 JOIN table1 USING column\n" + "CREATE TABLE table AS SELECT * FROM table0 UNION ALL SELECT * FROM table1 UNION ALL SELECT * FROM table2\n" + "CREATE TABLE table AS 
SELECT column FROM table0 JOIN table1 USING column UNION ALL SELECT column FROM table2 WHERE column IN (SELECT column FROM table3 WHERE column IN (SELECT column FROM table4 WHERE expression))\n" + "CREATE TABLE table0 AS SELECT column FROM table1 UNION ALL SELECT column FROM table2\n" + "```\n" + "\n" + ), link=None, level=3, - num='5.22.1') + num="5.22.1", +) RQ_SRS_006_RBAC_Privileges_CreateDatabase = Requirement( - name='RQ.SRS-006.RBAC.Privileges.CreateDatabase', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.CreateDatabase", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `CREATE DATABASE` statement if and only if the user has **create database** privilege on the database,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `CREATE DATABASE` statement if and only if the user has **create database** privilege on the database,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.22.2') + num="5.22.2", +) RQ_SRS_006_RBAC_Privileges_CreateDictionary = Requirement( - name='RQ.SRS-006.RBAC.Privileges.CreateDictionary', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.CreateDictionary", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `CREATE DICTIONARY` statement if and only if the user has **create dictionary** privilege on the dictionary,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `CREATE DICTIONARY` statement if and only if the user has **create dictionary** privilege on the dictionary,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.22.3') + num="5.22.3", +) RQ_SRS_006_RBAC_Privileges_CreateTemporaryTable = Requirement( - name='RQ.SRS-006.RBAC.Privileges.CreateTemporaryTable', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.CreateTemporaryTable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `CREATE TEMPORARY TABLE` statement if and only if the user has **create temporary table** privilege on the table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `CREATE TEMPORARY TABLE` statement if and only if the user has **create temporary table** privilege on the table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.22.4') + num="5.22.4", +) RQ_SRS_006_RBAC_Privileges_AttachDatabase = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AttachDatabase', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AttachDatabase", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ATTACH DATABASE` statement if and only if the user has **create database** privilege on the database,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ATTACH DATABASE` statement if and only if the user has **create database** privilege on the database,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.23.1') + num="5.23.1", +) RQ_SRS_006_RBAC_Privileges_AttachDictionary = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AttachDictionary', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AttachDictionary", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 
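The `CREATE TABLE` requirement above needs three separate grants when the table is created from a stored query. A sketch under hypothetical names (`db.source`, `db.target`, user `alice`):

```sql
GRANT CREATE TABLE ON db.* TO alice;   -- create tables in database db
GRANT SELECT ON db.source TO alice;    -- read the source of the stored query
GRANT INSERT ON db.target TO alice;    -- populate the table being created

-- Executed as alice:
CREATE TABLE db.target ENGINE = MergeTree ORDER BY tuple() AS SELECT * FROM db.source;
```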
'[ClickHouse] SHALL successfully execute `ATTACH DICTIONARY` statement if and only if the user has **create dictionary** privilege on the dictionary,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ATTACH DICTIONARY` statement if and only if the user has **create dictionary** privilege on the dictionary,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.23.2') + num="5.23.2", +) RQ_SRS_006_RBAC_Privileges_AttachTemporaryTable = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AttachTemporaryTable', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AttachTemporaryTable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ATTACH TEMPORARY TABLE` statement if and only if the user has **create temporary table** privilege on the table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ATTACH TEMPORARY TABLE` statement if and only if the user has **create temporary table** privilege on the table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.23.3') + num="5.23.3", +) RQ_SRS_006_RBAC_Privileges_AttachTable = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AttachTable', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AttachTable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ATTACH TABLE` statement if and only if the user has **create table** privilege on the table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ATTACH TABLE` statement if and only if the user has **create table** privilege on the table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.23.4') + num="5.23.4", +) RQ_SRS_006_RBAC_Privileges_DropTable = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DropTable', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DropTable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DROP TABLE` statement if and only if the user has **drop table** privilege on the table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DROP TABLE` statement if and only if the user has **drop table** privilege on the table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.24.1') + num="5.24.1", +) RQ_SRS_006_RBAC_Privileges_DropDatabase = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DropDatabase', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DropDatabase", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DROP DATABASE` statement if and only if the user has **drop database** privilege on the database,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DROP DATABASE` statement if and only if the user has **drop database** privilege on the database,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.24.2') + num="5.24.2", +) RQ_SRS_006_RBAC_Privileges_DropDictionary = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DropDictionary', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DropDictionary", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 
'[ClickHouse] SHALL successfully execute `DROP DICTIONARY` statement if and only if the user has **drop dictionary** privilege on the dictionary,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DROP DICTIONARY` statement if and only if the user has **drop dictionary** privilege on the dictionary,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.24.3') + num="5.24.3", +) RQ_SRS_006_RBAC_Privileges_DetachTable = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DetachTable', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DetachTable", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DETACH TABLE` statement if and only if the user has **drop table** privilege on the table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DETACH TABLE` statement if and only if the user has **drop table** privilege on the table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.25.1') + num="5.25.1", +) RQ_SRS_006_RBAC_Privileges_DetachView = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DetachView', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DetachView", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DETACH VIEW` statement if and only if the user has **drop view** privilege on the view,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DETACH VIEW` statement if and only if the user has **drop view** privilege on the view,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.25.2') + num="5.25.2", +) RQ_SRS_006_RBAC_Privileges_DetachDatabase = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DetachDatabase', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DetachDatabase", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DETACH DATABASE` statement if and only if the user has **drop database** privilege on the database,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DETACH DATABASE` statement if and only if the user has **drop database** privilege on the database,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.25.3') + num="5.25.3", +) RQ_SRS_006_RBAC_Privileges_DetachDictionary = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DetachDictionary', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DetachDictionary", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DETACH DICTIONARY` statement if and only if the user has **drop dictionary** privilege on the dictionary,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DETACH DICTIONARY` statement if and only if the user has **drop dictionary** privilege on the dictionary,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.25.4') + num="5.25.4", +) RQ_SRS_006_RBAC_Privileges_Truncate = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Truncate', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Truncate", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute 
`TRUNCATE TABLE` statement if and only if the user has **truncate table** privilege on the table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `TRUNCATE TABLE` statement if and only if the user has **truncate table** privilege on the table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.26.1') + num="5.26.1", +) RQ_SRS_006_RBAC_Privileges_Optimize = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Optimize', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Optimize", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `OPTIMIZE TABLE` statement if and only if the user has **optimize table** privilege on the table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `OPTIMIZE TABLE` statement if and only if the user has **optimize table** privilege on the table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.27.1') + num="5.27.1", +) RQ_SRS_006_RBAC_Privileges_KillQuery = Requirement( - name='RQ.SRS-006.RBAC.Privileges.KillQuery', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.KillQuery", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `KILL QUERY` statement if and only if the user has **kill query** privilege,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `KILL QUERY` statement if and only if the user has **kill query** privilege,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.28.1') + num="5.28.1", +) RQ_SRS_006_RBAC_Privileges_KillMutation = Requirement( - name='RQ.SRS-006.RBAC.Privileges.KillMutation', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.KillMutation", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `KILL MUTATION` statement if and only if\n' - 'the user has the privilege that created the mutation, either directly or through a role.\n' - 'For example, to `KILL MUTATION` after `ALTER UPDATE` query, the user needs `ALTER UPDATE` privilege.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `KILL MUTATION` statement if and only if\n" + "the user has the privilege that created the mutation, either directly or through a role.\n" + "For example, to `KILL MUTATION` after `ALTER UPDATE` query, the user needs `ALTER UPDATE` privilege.\n" + "\n" + ), link=None, level=3, - num='5.29.1') + num="5.29.1", +) RQ_SRS_006_RBAC_Privileges_KillMutation_AlterUpdate = Requirement( - name='RQ.SRS-006.RBAC.Privileges.KillMutation.AlterUpdate', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.KillMutation.AlterUpdate", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `KILL MUTATION` query on an `ALTER UPDATE` mutation if and only if\n' - 'the user has `ALTER UPDATE` privilege on the table where the mutation was created, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `KILL MUTATION` query on an `ALTER UPDATE` mutation if and only if\n" + "the user has `ALTER UPDATE` privilege on the table where the mutation was created, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.29.2') + num="5.29.2", +) RQ_SRS_006_RBAC_Privileges_KillMutation_AlterDelete = 
Requirement( - name='RQ.SRS-006.RBAC.Privileges.KillMutation.AlterDelete', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.KillMutation.AlterDelete", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `KILL MUTATION` query on an `ALTER DELETE` mutation if and only if\n' - 'the user has `ALTER DELETE` privilege on the table where the mutation was created, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `KILL MUTATION` query on an `ALTER DELETE` mutation if and only if\n" + "the user has `ALTER DELETE` privilege on the table where the mutation was created, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.29.3') + num="5.29.3", +) RQ_SRS_006_RBAC_Privileges_KillMutation_AlterDropColumn = Requirement( - name='RQ.SRS-006.RBAC.Privileges.KillMutation.AlterDropColumn', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.KillMutation.AlterDropColumn", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `KILL MUTATION` query on an `ALTER DROP COLUMN` mutation if and only if\n' - 'the user has `ALTER DROP COLUMN` privilege on the table where the mutation was created, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `KILL MUTATION` query on an `ALTER DROP COLUMN` mutation if and only if\n" + "the user has `ALTER DROP COLUMN` privilege on the table where the mutation was created, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.29.4') + num="5.29.4", +) RQ_SRS_006_RBAC_ShowTables_Privilege = Requirement( - name='RQ.SRS-006.RBAC.ShowTables.Privilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowTables.Privilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL grant **show tables** privilege on a table to a user if that user has recieved any grant,\n' - 'including `SHOW TABLES`, on that table, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL grant **show tables** privilege on a table to a user if that user has recieved any grant,\n" + "including `SHOW TABLES`, on that table, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.1') + num="5.30.1", +) RQ_SRS_006_RBAC_ShowTables_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowTables.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowTables.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW TABLES` statement if and only if the user has **show tables** privilege,\n' - 'or any privilege on the table either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW TABLES` statement if and only if the user has **show tables** privilege,\n" + "or any privilege on the table either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.2') + num="5.30.2", +) RQ_SRS_006_RBAC_ExistsTable_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ExistsTable.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ExistsTable.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `EXISTS table` statement if and only if the user has **show tables** privilege,\n' - 'or any privilege on the table either directly or 
through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `EXISTS table` statement if and only if the user has **show tables** privilege,\n" + "or any privilege on the table either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.3') + num="5.30.3", +) RQ_SRS_006_RBAC_CheckTable_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.CheckTable.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.CheckTable.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `CHECK table` statement if and only if the user has **show tables** privilege,\n' - 'or any privilege on the table either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `CHECK table` statement if and only if the user has **show tables** privilege,\n" + "or any privilege on the table either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.4') + num="5.30.4", +) RQ_SRS_006_RBAC_ShowDatabases_Privilege = Requirement( - name='RQ.SRS-006.RBAC.ShowDatabases.Privilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowDatabases.Privilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL grant **show databases** privilege on a database to a user if that user has recieved any grant,\n' - 'including `SHOW DATABASES`, on that table, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL grant **show databases** privilege on a database to a user if that user has recieved any grant,\n" + "including `SHOW DATABASES`, on that table, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.5') + num="5.30.5", +) RQ_SRS_006_RBAC_ShowDatabases_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowDatabases.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowDatabases.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW DATABASES` statement if and only if the user has **show databases** privilege,\n' - 'or any privilege on the database either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW DATABASES` statement if and only if the user has **show databases** privilege,\n" + "or any privilege on the database either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.6') + num="5.30.6", +) RQ_SRS_006_RBAC_ShowCreateDatabase_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowCreateDatabase.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowCreateDatabase.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW CREATE DATABASE` statement if and only if the user has **show databases** privilege,\n' - 'or any privilege on the database either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW CREATE DATABASE` statement if and only if the user has **show databases** privilege,\n" + "or any privilege on the database either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.7') + num="5.30.7", +) RQ_SRS_006_RBAC_UseDatabase_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.UseDatabase.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.UseDatabase.RequiredPrivilege", + version="1.0", priority=None, 
group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `USE database` statement if and only if the user has **show databases** privilege,\n' - 'or any privilege on the database either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `USE database` statement if and only if the user has **show databases** privilege,\n" + "or any privilege on the database either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.8') + num="5.30.8", +) RQ_SRS_006_RBAC_ShowColumns_Privilege = Requirement( - name='RQ.SRS-006.RBAC.ShowColumns.Privilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowColumns.Privilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking the `SHOW COLUMNS` privilege.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking the `SHOW COLUMNS` privilege.\n" + "\n" + ), link=None, level=3, - num='5.30.9') + num="5.30.9", +) RQ_SRS_006_RBAC_ShowCreateTable_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowCreateTable.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowCreateTable.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW CREATE TABLE` statement if and only if the user has **show columns** privilege on that table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW CREATE TABLE` statement if and only if the user has **show columns** privilege on that table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.10') + num="5.30.10", +) RQ_SRS_006_RBAC_DescribeTable_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.DescribeTable.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.DescribeTable.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DESCRIBE table` statement if and only if the user has **show columns** privilege on that table,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DESCRIBE table` statement if and only if the user has **show columns** privilege on that table,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.11') + num="5.30.11", +) RQ_SRS_006_RBAC_ShowDictionaries_Privilege = Requirement( - name='RQ.SRS-006.RBAC.ShowDictionaries.Privilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowDictionaries.Privilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL grant **show dictionaries** privilege on a dictionary to a user if that user has recieved any grant,\n' - 'including `SHOW DICTIONARIES`, on that dictionary, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL grant **show dictionaries** privilege on a dictionary to a user if that user has recieved any grant,\n" + "including `SHOW DICTIONARIES`, on that dictionary, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.12') + num="5.30.12", +) RQ_SRS_006_RBAC_ShowDictionaries_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowDictionaries.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowDictionaries.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 
'[ClickHouse] SHALL successfully execute `SHOW DICTIONARIES` statement if and only if the user has **show dictionaries** privilege,\n' - 'or any privilege on the dictionary either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW DICTIONARIES` statement if and only if the user has **show dictionaries** privilege,\n" + "or any privilege on the dictionary either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.13') + num="5.30.13", +) RQ_SRS_006_RBAC_ShowCreateDictionary_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowCreateDictionary.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowCreateDictionary.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW CREATE DICTIONARY` statement if and only if the user has **show dictionaries** privilege,\n' - 'or any privilege on the dictionary either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW CREATE DICTIONARY` statement if and only if the user has **show dictionaries** privilege,\n" + "or any privilege on the dictionary either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.14') + num="5.30.14", +) RQ_SRS_006_RBAC_ExistsDictionary_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ExistsDictionary.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ExistsDictionary.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `EXISTS dictionary` statement if and only if the user has **show dictionaries** privilege,\n' - 'or any privilege on the dictionary either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `EXISTS dictionary` statement if and only if the user has **show dictionaries** privilege,\n" + "or any privilege on the dictionary either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.30.15') + num="5.30.15", +) RQ_SRS_006_RBAC_Privileges_CreateUser = Requirement( - name='RQ.SRS-006.RBAC.Privileges.CreateUser', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.CreateUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `CREATE USER` statement if and only if the user has **create user** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `CREATE USER` statement if and only if the user has **create user** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.1') + num="5.31.1", +) RQ_SRS_006_RBAC_Privileges_CreateUser_DefaultRole = Requirement( - name='RQ.SRS-006.RBAC.Privileges.CreateUser.DefaultRole', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.CreateUser.DefaultRole", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `CREATE USER` statement with `DEFAULT ROLE ` clause if and only if\n' - 'the user has **create user** privilege and the role with **admin option**, or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `CREATE USER` statement with `DEFAULT ROLE ` clause if and only if\n" + "the user has **create user** privilege and the role with **admin option**, or either directly or through a role.\n" + "\n" + ), link=None, 
level=3, - num='5.31.2') + num="5.31.2", +) RQ_SRS_006_RBAC_Privileges_AlterUser = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterUser', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ALTER USER` statement if and only if the user has **alter user** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ALTER USER` statement if and only if the user has **alter user** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.3') + num="5.31.3", +) RQ_SRS_006_RBAC_Privileges_DropUser = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DropUser', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DropUser", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DROP USER` statement if and only if the user has **drop user** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DROP USER` statement if and only if the user has **drop user** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.4') + num="5.31.4", +) RQ_SRS_006_RBAC_Privileges_CreateRole = Requirement( - name='RQ.SRS-006.RBAC.Privileges.CreateRole', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.CreateRole", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `CREATE ROLE` statement if and only if the user has **create role** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `CREATE ROLE` statement if and only if the user has **create role** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.5') + num="5.31.5", +) RQ_SRS_006_RBAC_Privileges_AlterRole = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterRole', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterRole", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ALTER ROLE` statement if and only if the user has **alter role** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ALTER ROLE` statement if and only if the user has **alter role** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.6') + num="5.31.6", +) RQ_SRS_006_RBAC_Privileges_DropRole = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DropRole', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DropRole", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DROP ROLE` statement if and only if the user has **drop role** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DROP ROLE` statement if and only if the user has **drop role** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.7') + num="5.31.7", +) RQ_SRS_006_RBAC_Privileges_CreateRowPolicy = Requirement( - name='RQ.SRS-006.RBAC.Privileges.CreateRowPolicy', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.CreateRowPolicy", + version="1.0", priority=None, group=None, type=None, 
uid=None, description=( - '[ClickHouse] SHALL successfully execute `CREATE ROW POLICY` statement if and only if the user has **create row policy** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `CREATE ROW POLICY` statement if and only if the user has **create row policy** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.8') + num="5.31.8", +) RQ_SRS_006_RBAC_Privileges_AlterRowPolicy = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterRowPolicy', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterRowPolicy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ALTER ROW POLICY` statement if and only if the user has **alter row policy** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ALTER ROW POLICY` statement if and only if the user has **alter row policy** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.9') + num="5.31.9", +) RQ_SRS_006_RBAC_Privileges_DropRowPolicy = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DropRowPolicy', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DropRowPolicy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DROP ROW POLICY` statement if and only if the user has **drop row policy** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DROP ROW POLICY` statement if and only if the user has **drop row policy** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.10') + num="5.31.10", +) RQ_SRS_006_RBAC_Privileges_CreateQuota = Requirement( - name='RQ.SRS-006.RBAC.Privileges.CreateQuota', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.CreateQuota", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `CREATE QUOTA` statement if and only if the user has **create quota** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `CREATE QUOTA` statement if and only if the user has **create quota** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.11') + num="5.31.11", +) RQ_SRS_006_RBAC_Privileges_AlterQuota = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterQuota', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterQuota", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ALTER QUOTA` statement if and only if the user has **alter quota** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ALTER QUOTA` statement if and only if the user has **alter quota** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.12') + num="5.31.12", +) RQ_SRS_006_RBAC_Privileges_DropQuota = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DropQuota', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DropQuota", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DROP QUOTA` statement if and only if the user has **drop quota** privilege,\n' - 'or 
either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DROP QUOTA` statement if and only if the user has **drop quota** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.13') + num="5.31.13", +) RQ_SRS_006_RBAC_Privileges_CreateSettingsProfile = Requirement( - name='RQ.SRS-006.RBAC.Privileges.CreateSettingsProfile', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.CreateSettingsProfile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `CREATE SETTINGS PROFILE` statement if and only if the user has **create settings profile** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `CREATE SETTINGS PROFILE` statement if and only if the user has **create settings profile** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.14') + num="5.31.14", +) RQ_SRS_006_RBAC_Privileges_AlterSettingsProfile = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AlterSettingsProfile', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AlterSettingsProfile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `ALTER SETTINGS PROFILE` statement if and only if the user has **alter settings profile** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `ALTER SETTINGS PROFILE` statement if and only if the user has **alter settings profile** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.15') + num="5.31.15", +) RQ_SRS_006_RBAC_Privileges_DropSettingsProfile = Requirement( - name='RQ.SRS-006.RBAC.Privileges.DropSettingsProfile', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.DropSettingsProfile", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `DROP SETTINGS PROFILE` statement if and only if the user has **drop settings profile** privilege,\n' - 'or either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `DROP SETTINGS PROFILE` statement if and only if the user has **drop settings profile** privilege,\n" + "or either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.31.16') + num="5.31.16", +) RQ_SRS_006_RBAC_Privileges_RoleAdmin = Requirement( - name='RQ.SRS-006.RBAC.Privileges.RoleAdmin', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.RoleAdmin", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute any role grant or revoke by a user with `ROLE ADMIN` privilege.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute any role grant or revoke by a user with `ROLE ADMIN` privilege.\n" + "\n" + ), link=None, level=3, - num='5.31.17') + num="5.31.17", +) RQ_SRS_006_RBAC_ShowUsers_Privilege = Requirement( - name='RQ.SRS-006.RBAC.ShowUsers.Privilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowUsers.Privilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SHOW USERS` privilege when\n' - 'the user is granted `SHOW USERS`, `SHOW CREATE USER`, `SHOW ACCESS`, or `ACCESS MANAGEMENT`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SHOW USERS` privilege when\n" + "the user is 
granted `SHOW USERS`, `SHOW CREATE USER`, `SHOW ACCESS`, or `ACCESS MANAGEMENT`.\n" + "\n" + ), link=None, level=4, - num='5.31.18.1') + num="5.31.18.1", +) RQ_SRS_006_RBAC_ShowUsers_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowUsers.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowUsers.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW USERS` statement if and only if the user has **show users** privilege,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW USERS` statement if and only if the user has **show users** privilege,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.31.18.2') + num="5.31.18.2", +) RQ_SRS_006_RBAC_ShowCreateUser_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowCreateUser.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowCreateUser.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW CREATE USER` statement if and only if the user has **show users** privilege,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW CREATE USER` statement if and only if the user has **show users** privilege,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.31.18.3') + num="5.31.18.3", +) RQ_SRS_006_RBAC_ShowRoles_Privilege = Requirement( - name='RQ.SRS-006.RBAC.ShowRoles.Privilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowRoles.Privilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SHOW ROLES` privilege when\n' - 'the user is granted `SHOW ROLES`, `SHOW CREATE ROLE`, `SHOW ACCESS`, or `ACCESS MANAGEMENT`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SHOW ROLES` privilege when\n" + "the user is granted `SHOW ROLES`, `SHOW CREATE ROLE`, `SHOW ACCESS`, or `ACCESS MANAGEMENT`.\n" + "\n" + ), link=None, level=4, - num='5.31.18.4') + num="5.31.18.4", +) RQ_SRS_006_RBAC_ShowRoles_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowRoles.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowRoles.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW ROLES` statement if and only if the user has **show roles** privilege,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW ROLES` statement if and only if the user has **show roles** privilege,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.31.18.5') + num="5.31.18.5", +) RQ_SRS_006_RBAC_ShowCreateRole_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowCreateRole.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowCreateRole.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW CREATE ROLE` statement if and only if the user has **show roles** privilege,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW CREATE ROLE` statement if and only if the user has **show roles** privilege,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=4, - 
num='5.31.18.6') + num="5.31.18.6", +) RQ_SRS_006_RBAC_ShowRowPolicies_Privilege = Requirement( - name='RQ.SRS-006.RBAC.ShowRowPolicies.Privilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowRowPolicies.Privilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SHOW ROW POLICIES` privilege when\n' - 'the user is granted `SHOW ROW POLICIES`, `SHOW POLICIES`, `SHOW CREATE ROW POLICY`,\n' - '`SHOW CREATE POLICY`, `SHOW ACCESS`, or `ACCESS MANAGEMENT`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SHOW ROW POLICIES` privilege when\n" + "the user is granted `SHOW ROW POLICIES`, `SHOW POLICIES`, `SHOW CREATE ROW POLICY`,\n" + "`SHOW CREATE POLICY`, `SHOW ACCESS`, or `ACCESS MANAGEMENT`.\n" + "\n" + ), link=None, level=4, - num='5.31.18.7') + num="5.31.18.7", +) RQ_SRS_006_RBAC_ShowRowPolicies_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowRowPolicies.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowRowPolicies.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW ROW POLICIES` or `SHOW POLICIES` statement if and only if\n' - 'the user has **show row policies** privilege, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW ROW POLICIES` or `SHOW POLICIES` statement if and only if\n" + "the user has **show row policies** privilege, either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.31.18.8') + num="5.31.18.8", +) RQ_SRS_006_RBAC_ShowCreateRowPolicy_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowCreateRowPolicy.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowCreateRowPolicy.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW CREATE ROW POLICY` or `SHOW CREATE POLICY` statement\n' - 'if and only if the user has **show row policies** privilege,either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW CREATE ROW POLICY` or `SHOW CREATE POLICY` statement\n" + "if and only if the user has **show row policies** privilege,either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.31.18.9') + num="5.31.18.9", +) RQ_SRS_006_RBAC_ShowQuotas_Privilege = Requirement( - name='RQ.SRS-006.RBAC.ShowQuotas.Privilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowQuotas.Privilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SHOW QUOTAS` privilege when\n' - 'the user is granted `SHOW QUOTAS`, `SHOW CREATE QUOTA`, `SHOW ACCESS`, or `ACCESS MANAGEMENT`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SHOW QUOTAS` privilege when\n" + "the user is granted `SHOW QUOTAS`, `SHOW CREATE QUOTA`, `SHOW ACCESS`, or `ACCESS MANAGEMENT`.\n" + "\n" + ), link=None, level=4, - num='5.31.18.10') + num="5.31.18.10", +) RQ_SRS_006_RBAC_ShowQuotas_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowQuotas.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowQuotas.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW QUOTAS` statement if and only if the user has **show quotas** privilege,\n' - 'either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL 
successfully execute `SHOW QUOTAS` statement if and only if the user has **show quotas** privilege,\n" + "either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.31.18.11') + num="5.31.18.11", +) RQ_SRS_006_RBAC_ShowCreateQuota_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowCreateQuota.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowCreateQuota.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW CREATE QUOTA` statement if and only if\n' - 'the user has **show quotas** privilege, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW CREATE QUOTA` statement if and only if\n" + "the user has **show quotas** privilege, either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.31.18.12') + num="5.31.18.12", +) RQ_SRS_006_RBAC_ShowSettingsProfiles_Privilege = Requirement( - name='RQ.SRS-006.RBAC.ShowSettingsProfiles.Privilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowSettingsProfiles.Privilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SHOW SETTINGS PROFILES` privilege when\n' - 'the user is granted `SHOW SETTINGS PROFILES`, `SHOW PROFILES`, `SHOW CREATE SETTINGS PROFILE`,\n' - '`SHOW SETTINGS PROFILE`, `SHOW ACCESS`, or `ACCESS MANAGEMENT`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SHOW SETTINGS PROFILES` privilege when\n" + "the user is granted `SHOW SETTINGS PROFILES`, `SHOW PROFILES`, `SHOW CREATE SETTINGS PROFILE`,\n" + "`SHOW SETTINGS PROFILE`, `SHOW ACCESS`, or `ACCESS MANAGEMENT`.\n" + "\n" + ), link=None, level=4, - num='5.31.18.13') + num="5.31.18.13", +) RQ_SRS_006_RBAC_ShowSettingsProfiles_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowSettingsProfiles.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowSettingsProfiles.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW SETTINGS PROFILES` or `SHOW PROFILES` statement\n' - 'if and only if the user has **show settings profiles** privilege, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW SETTINGS PROFILES` or `SHOW PROFILES` statement\n" + "if and only if the user has **show settings profiles** privilege, either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.31.18.14') + num="5.31.18.14", +) RQ_SRS_006_RBAC_ShowCreateSettingsProfile_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.ShowCreateSettingsProfile.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.ShowCreateSettingsProfile.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `SHOW CREATE SETTINGS PROFILE` or `SHOW CREATE PROFILE` statement\n' - 'if and only if the user has **show settings profiles** privilege, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `SHOW CREATE SETTINGS PROFILE` or `SHOW CREATE PROFILE` statement\n" + "if and only if the user has **show settings profiles** privilege, either directly or through a role.\n" + "\n" + ), link=None, level=4, - num='5.31.18.15') + num="5.31.18.15", +) RQ_SRS_006_RBAC_dictGet_Privilege = Requirement( - name='RQ.SRS-006.RBAC.dictGet.Privilege', - version='1.0', + 
name="RQ.SRS-006.RBAC.dictGet.Privilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `dictGet` privilege when\n' - 'the user is granted `dictGet`, `dictHas`, `dictGetHierarchy`, or `dictIsIn`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `dictGet` privilege when\n" + "the user is granted `dictGet`, `dictHas`, `dictGetHierarchy`, or `dictIsIn`.\n" + "\n" + ), link=None, level=3, - num='5.32.1') + num="5.32.1", +) RQ_SRS_006_RBAC_dictGet_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.dictGet.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.dictGet.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `dictGet` statement\n' - 'if and only if the user has **dictGet** privilege on that dictionary, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `dictGet` statement\n" + "if and only if the user has **dictGet** privilege on that dictionary, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.32.2') + num="5.32.2", +) RQ_SRS_006_RBAC_dictGet_Type_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.dictGet.Type.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.dictGet.Type.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `dictGet[TYPE]` statement\n' - 'if and only if the user has **dictGet** privilege on that dictionary, either directly or through a role.\n' - 'Available types:\n' - '\n' - '* Int8\n' - '* Int16\n' - '* Int32\n' - '* Int64\n' - '* UInt8\n' - '* UInt16\n' - '* UInt32\n' - '* UInt64\n' - '* Float32\n' - '* Float64\n' - '* Date\n' - '* DateTime\n' - '* UUID\n' - '* String\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `dictGet[TYPE]` statement\n" + "if and only if the user has **dictGet** privilege on that dictionary, either directly or through a role.\n" + "Available types:\n" + "\n" + "* Int8\n" + "* Int16\n" + "* Int32\n" + "* Int64\n" + "* UInt8\n" + "* UInt16\n" + "* UInt32\n" + "* UInt64\n" + "* Float32\n" + "* Float64\n" + "* Date\n" + "* DateTime\n" + "* UUID\n" + "* String\n" + "\n" + ), link=None, level=3, - num='5.32.3') + num="5.32.3", +) RQ_SRS_006_RBAC_dictGet_OrDefault_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.dictGet.OrDefault.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.dictGet.OrDefault.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `dictGetOrDefault` statement\n' - 'if and only if the user has **dictGet** privilege on that dictionary, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `dictGetOrDefault` statement\n" + "if and only if the user has **dictGet** privilege on that dictionary, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.32.4') + num="5.32.4", +) RQ_SRS_006_RBAC_dictHas_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.dictHas.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.dictHas.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `dictHas` statement\n' - 'if and only if the user has **dictGet** privilege, either directly or through a role.\n' - '\n' - ), + 
"[ClickHouse] SHALL successfully execute `dictHas` statement\n" + "if and only if the user has **dictGet** privilege, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.32.5') + num="5.32.5", +) RQ_SRS_006_RBAC_dictGetHierarchy_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.dictGetHierarchy.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.dictGetHierarchy.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `dictGetHierarchy` statement\n' - 'if and only if the user has **dictGet** privilege, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `dictGetHierarchy` statement\n" + "if and only if the user has **dictGet** privilege, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.32.6') + num="5.32.6", +) RQ_SRS_006_RBAC_dictIsIn_RequiredPrivilege = Requirement( - name='RQ.SRS-006.RBAC.dictIsIn.RequiredPrivilege', - version='1.0', + name="RQ.SRS-006.RBAC.dictIsIn.RequiredPrivilege", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `dictIsIn` statement\n' - 'if and only if the user has **dictGet** privilege, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `dictIsIn` statement\n" + "if and only if the user has **dictGet** privilege, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.32.7') + num="5.32.7", +) RQ_SRS_006_RBAC_Privileges_Introspection = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Introspection', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Introspection", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `INTROSPECTION` privilege when\n' - 'the user is granted `INTROSPECTION` or `INTROSPECTION FUNCTIONS`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `INTROSPECTION` privilege when\n" + "the user is granted `INTROSPECTION` or `INTROSPECTION FUNCTIONS`.\n" + "\n" + ), link=None, level=3, - num='5.33.1') + num="5.33.1", +) RQ_SRS_006_RBAC_Privileges_Introspection_addressToLine = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Introspection.addressToLine', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Introspection.addressToLine", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `addressToLine` statement if and only if\n' - 'the user has **introspection** privilege, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `addressToLine` statement if and only if\n" + "the user has **introspection** privilege, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.33.2') + num="5.33.2", +) RQ_SRS_006_RBAC_Privileges_Introspection_addressToSymbol = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Introspection.addressToSymbol', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Introspection.addressToSymbol", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `addressToSymbol` statement if and only if\n' - 'the user has **introspection** privilege, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `addressToSymbol` statement if and only if\n" + "the user has **introspection** privilege, either directly 
or through a role.\n" + "\n" + ), link=None, level=3, - num='5.33.3') + num="5.33.3", +) RQ_SRS_006_RBAC_Privileges_Introspection_demangle = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Introspection.demangle', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Introspection.demangle", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `demangle` statement if and only if\n' - 'the user has **introspection** privilege, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `demangle` statement if and only if\n" + "the user has **introspection** privilege, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.33.4') + num="5.33.4", +) RQ_SRS_006_RBAC_Privileges_System_Shutdown = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Shutdown', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Shutdown", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM SHUTDOWN` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM SHUTDOWN`, `SHUTDOWN`,or `SYSTEM KILL`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM SHUTDOWN` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM SHUTDOWN`, `SHUTDOWN`,or `SYSTEM KILL`.\n" + "\n" + ), link=None, level=3, - num='5.34.1') + num="5.34.1", +) RQ_SRS_006_RBAC_Privileges_System_DropCache = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.DropCache', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.DropCache", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM DROP CACHE` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM DROP CACHE`, or `DROP CACHE`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM DROP CACHE` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM DROP CACHE`, or `DROP CACHE`.\n" + "\n" + ), link=None, level=3, - num='5.34.2') + num="5.34.2", +) RQ_SRS_006_RBAC_Privileges_System_DropCache_DNS = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.DropCache.DNS', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.DropCache.DNS", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM DROP DNS CACHE` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM DROP CACHE`, `DROP CACHE`, `SYSTEM DROP DNS CACHE`,\n' - '`SYSTEM DROP DNS`, `DROP DNS CACHE`, or `DROP DNS`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM DROP DNS CACHE` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM DROP CACHE`, `DROP CACHE`, `SYSTEM DROP DNS CACHE`,\n" + "`SYSTEM DROP DNS`, `DROP DNS CACHE`, or `DROP DNS`.\n" + "\n" + ), link=None, level=3, - num='5.34.3') + num="5.34.3", +) RQ_SRS_006_RBAC_Privileges_System_DropCache_Mark = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.DropCache.Mark', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.DropCache.Mark", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM DROP MARK CACHE` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM DROP CACHE`, `DROP CACHE`, `SYSTEM DROP MARK CACHE`,\n' - '`SYSTEM DROP MARK`, `DROP MARK CACHE`, or `DROP MARKS`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM DROP MARK CACHE` privilege when\n" + "the user is 
granted `SYSTEM`, `SYSTEM DROP CACHE`, `DROP CACHE`, `SYSTEM DROP MARK CACHE`,\n" + "`SYSTEM DROP MARK`, `DROP MARK CACHE`, or `DROP MARKS`.\n" + "\n" + ), link=None, level=3, - num='5.34.4') + num="5.34.4", +) RQ_SRS_006_RBAC_Privileges_System_DropCache_Uncompressed = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.DropCache.Uncompressed', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.DropCache.Uncompressed", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM DROP UNCOMPRESSED CACHE` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM DROP CACHE`, `DROP CACHE`, `SYSTEM DROP UNCOMPRESSED CACHE`,\n' - '`SYSTEM DROP UNCOMPRESSED`, `DROP UNCOMPRESSED CACHE`, or `DROP UNCOMPRESSED`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM DROP UNCOMPRESSED CACHE` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM DROP CACHE`, `DROP CACHE`, `SYSTEM DROP UNCOMPRESSED CACHE`,\n" + "`SYSTEM DROP UNCOMPRESSED`, `DROP UNCOMPRESSED CACHE`, or `DROP UNCOMPRESSED`.\n" + "\n" + ), link=None, level=3, - num='5.34.5') + num="5.34.5", +) RQ_SRS_006_RBAC_Privileges_System_Reload = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Reload', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Reload", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM RELOAD` privilege when\n' - 'the user is granted `SYSTEM` or `SYSTEM RELOAD`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM RELOAD` privilege when\n" + "the user is granted `SYSTEM` or `SYSTEM RELOAD`.\n" + "\n" + ), link=None, level=3, - num='5.34.6') + num="5.34.6", +) RQ_SRS_006_RBAC_Privileges_System_Reload_Config = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Reload.Config', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Reload.Config", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM RELOAD CONFIG` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM RELOAD`, `SYSTEM RELOAD CONFIG`, or `RELOAD CONFIG`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM RELOAD CONFIG` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM RELOAD`, `SYSTEM RELOAD CONFIG`, or `RELOAD CONFIG`.\n" + "\n" + ), link=None, level=3, - num='5.34.7') + num="5.34.7", +) RQ_SRS_006_RBAC_Privileges_System_Reload_Dictionary = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Reload.Dictionary', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Reload.Dictionary", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM RELOAD DICTIONARY` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM RELOAD`, `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARIES`, or `RELOAD DICTIONARY`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM RELOAD DICTIONARY` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM RELOAD`, `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARIES`, or `RELOAD DICTIONARY`.\n" + "\n" + ), link=None, level=3, - num='5.34.8') + num="5.34.8", +) RQ_SRS_006_RBAC_Privileges_System_Reload_Dictionaries = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Reload.Dictionaries', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Reload.Dictionaries", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 
'[ClickHouse] SHALL successfully grant `SYSTEM RELOAD DICTIONARIES` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM RELOAD`, `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARIES`, or `RELOAD DICTIONARY`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM RELOAD DICTIONARIES` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM RELOAD`, `SYSTEM RELOAD DICTIONARIES`, `RELOAD DICTIONARIES`, or `RELOAD DICTIONARY`.\n" + "\n" + ), link=None, level=3, - num='5.34.9') + num="5.34.9", +) RQ_SRS_006_RBAC_Privileges_System_Reload_EmbeddedDictionaries = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Reload.EmbeddedDictionaries', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Reload.EmbeddedDictionaries", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM RELOAD EMBEDDED DICTIONARIES` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM RELOAD`, `SYSTEM RELOAD DICTIONARY ON *.*`, or `SYSTEM RELOAD EMBEDDED DICTIONARIES`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM RELOAD EMBEDDED DICTIONARIES` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM RELOAD`, `SYSTEM RELOAD DICTIONARY ON *.*`, or `SYSTEM RELOAD EMBEDDED DICTIONARIES`.\n" + "\n" + ), link=None, level=3, - num='5.34.10') + num="5.34.10", +) RQ_SRS_006_RBAC_Privileges_System_Merges = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Merges', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Merges", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM MERGES` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM MERGES`, `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, or `START MERGES`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM MERGES` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM MERGES`, `SYSTEM STOP MERGES`, `SYSTEM START MERGES`, `STOP MERGES`, or `START MERGES`.\n" + "\n" + ), link=None, level=3, - num='5.34.11') + num="5.34.11", +) RQ_SRS_006_RBAC_Privileges_System_TTLMerges = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.TTLMerges', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.TTLMerges", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM TTL MERGES` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM TTL MERGES`, `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, or `START TTL MERGES`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM TTL MERGES` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM TTL MERGES`, `SYSTEM STOP TTL MERGES`, `SYSTEM START TTL MERGES`, `STOP TTL MERGES`, or `START TTL MERGES`.\n" + "\n" + ), link=None, level=3, - num='5.34.12') + num="5.34.12", +) RQ_SRS_006_RBAC_Privileges_System_Fetches = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Fetches', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Fetches", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM FETCHES` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM FETCHES`, `SYSTEM STOP FETCHES`, `SYSTEM START FETCHES`, `STOP FETCHES`, or `START FETCHES`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM FETCHES` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM FETCHES`, `SYSTEM STOP FETCHES`, 
`SYSTEM START FETCHES`, `STOP FETCHES`, or `START FETCHES`.\n" + "\n" + ), link=None, level=3, - num='5.34.13') + num="5.34.13", +) RQ_SRS_006_RBAC_Privileges_System_Moves = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Moves', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Moves", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM MOVES` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM MOVES`, `SYSTEM STOP MOVES`, `SYSTEM START MOVES`, `STOP MOVES`, or `START MOVES`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM MOVES` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM MOVES`, `SYSTEM STOP MOVES`, `SYSTEM START MOVES`, `STOP MOVES`, or `START MOVES`.\n" + "\n" + ), link=None, level=3, - num='5.34.14') + num="5.34.14", +) RQ_SRS_006_RBAC_Privileges_System_Sends = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Sends', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Sends", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM SENDS` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM SENDS`, `SYSTEM STOP SENDS`, `SYSTEM START SENDS`, `STOP SENDS`, or `START SENDS`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM SENDS` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM SENDS`, `SYSTEM STOP SENDS`, `SYSTEM START SENDS`, `STOP SENDS`, or `START SENDS`.\n" + "\n" + ), link=None, level=3, - num='5.34.15') + num="5.34.15", +) RQ_SRS_006_RBAC_Privileges_System_Sends_Distributed = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Sends.Distributed', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Sends.Distributed", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM DISTRIBUTED SENDS` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM DISTRIBUTED SENDS`, `SYSTEM STOP DISTRIBUTED SENDS`,\n' - '`SYSTEM START DISTRIBUTED SENDS`, `STOP DISTRIBUTED SENDS`, or `START DISTRIBUTED SENDS`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM DISTRIBUTED SENDS` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM DISTRIBUTED SENDS`, `SYSTEM STOP DISTRIBUTED SENDS`,\n" + "`SYSTEM START DISTRIBUTED SENDS`, `STOP DISTRIBUTED SENDS`, or `START DISTRIBUTED SENDS`.\n" + "\n" + ), link=None, level=3, - num='5.34.16') + num="5.34.16", +) RQ_SRS_006_RBAC_Privileges_System_Sends_Replicated = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Sends.Replicated', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Sends.Replicated", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM REPLICATED SENDS` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM REPLICATED SENDS`, `SYSTEM STOP REPLICATED SENDS`,\n' - '`SYSTEM START REPLICATED SENDS`, `STOP REPLICATED SENDS`, or `START REPLICATED SENDS`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM REPLICATED SENDS` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM REPLICATED SENDS`, `SYSTEM STOP REPLICATED SENDS`,\n" + "`SYSTEM START REPLICATED SENDS`, `STOP REPLICATED SENDS`, or `START REPLICATED SENDS`.\n" + "\n" + ), link=None, level=3, - num='5.34.17') + num="5.34.17", +) RQ_SRS_006_RBAC_Privileges_System_ReplicationQueues = Requirement( - 
name='RQ.SRS-006.RBAC.Privileges.System.ReplicationQueues', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.ReplicationQueues", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM REPLICATION QUEUES` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM REPLICATION QUEUES`, `SYSTEM STOP REPLICATION QUEUES`,\n' - '`SYSTEM START REPLICATION QUEUES`, `STOP REPLICATION QUEUES`, or `START REPLICATION QUEUES`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM REPLICATION QUEUES` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM REPLICATION QUEUES`, `SYSTEM STOP REPLICATION QUEUES`,\n" + "`SYSTEM START REPLICATION QUEUES`, `STOP REPLICATION QUEUES`, or `START REPLICATION QUEUES`.\n" + "\n" + ), link=None, level=3, - num='5.34.18') + num="5.34.18", +) RQ_SRS_006_RBAC_Privileges_System_SyncReplica = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.SyncReplica', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.SyncReplica", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM SYNC REPLICA` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM SYNC REPLICA`, or `SYNC REPLICA`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM SYNC REPLICA` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM SYNC REPLICA`, or `SYNC REPLICA`.\n" + "\n" + ), link=None, level=3, - num='5.34.19') + num="5.34.19", +) RQ_SRS_006_RBAC_Privileges_System_RestartReplica = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.RestartReplica', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.RestartReplica", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM RESTART REPLICA` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM RESTART REPLICA`, or `RESTART REPLICA`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM RESTART REPLICA` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM RESTART REPLICA`, or `RESTART REPLICA`.\n" + "\n" + ), link=None, level=3, - num='5.34.20') + num="5.34.20", +) RQ_SRS_006_RBAC_Privileges_System_Flush = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Flush', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Flush", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM FLUSH` privilege when\n' - 'the user is granted `SYSTEM` or `SYSTEM FLUSH`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM FLUSH` privilege when\n" + "the user is granted `SYSTEM` or `SYSTEM FLUSH`.\n" + "\n" + ), link=None, level=3, - num='5.34.21') + num="5.34.21", +) RQ_SRS_006_RBAC_Privileges_System_Flush_Distributed = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Flush.Distributed', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Flush.Distributed", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM FLUSH DISTRIBUTED` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM FLUSH DISTRIBUTED`, or `FLUSH DISTRIBUTED`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM FLUSH DISTRIBUTED` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM FLUSH DISTRIBUTED`, or `FLUSH DISTRIBUTED`.\n" + "\n" + ), link=None, level=3, - num='5.34.22') + num="5.34.22", +) 
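For context on how the `Requirement` objects being reformatted in this hunk are consumed: elsewhere in the testflows RBAC suite they are attached to test scenarios through the `Requirements` decorator and then exercised against a running node. The following Python sketch is illustrative only; it assumes the `testflows.core` API and the `rbac.requirements` module layout used by this suite, and the scenario name, user name, and query steps are hypothetical rather than taken from this diff.

    # Minimal sketch (not part of this diff): attaching a requirement such as
    # RQ_SRS_006_RBAC_Privileges_System_Flush_Logs to a testflows scenario.
    from testflows.core import TestScenario, Requirements, Given, Then
    from rbac.requirements import RQ_SRS_006_RBAC_Privileges_System_Flush_Logs


    @TestScenario
    @Requirements(RQ_SRS_006_RBAC_Privileges_System_Flush_Logs("1.0"))
    def flush_logs(self, node=None):
        """Check that a user granted SYSTEM FLUSH LOGS can run the statement."""
        if node is None:
            # ClickHouse node object supplied by the suite's cluster fixture.
            node = self.context.node

        with Given("a user that is granted the SYSTEM FLUSH LOGS privilege"):
            node.query("CREATE USER OR REPLACE user0")
            node.query("GRANT SYSTEM FLUSH LOGS ON *.* TO user0")

        with Then("the user is able to flush logs"):
            node.query("SYSTEM FLUSH LOGS", settings=[("user", "user0")])

Version strings passed to the requirement (here `"1.0"`) are matched against the `version="1.0"` fields shown in the surrounding definitions, which is why the reformatting in this hunk is purely cosmetic and does not change test coverage.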
RQ_SRS_006_RBAC_Privileges_System_Flush_Logs = Requirement( - name='RQ.SRS-006.RBAC.Privileges.System.Flush.Logs', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.System.Flush.Logs", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully grant `SYSTEM FLUSH LOGS` privilege when\n' - 'the user is granted `SYSTEM`, `SYSTEM FLUSH LOGS`, or `FLUSH LOGS`.\n' - '\n' - ), + "[ClickHouse] SHALL successfully grant `SYSTEM FLUSH LOGS` privilege when\n" + "the user is granted `SYSTEM`, `SYSTEM FLUSH LOGS`, or `FLUSH LOGS`.\n" + "\n" + ), link=None, level=3, - num='5.34.23') + num="5.34.23", +) RQ_SRS_006_RBAC_Privileges_Sources = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Sources', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Sources", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking `SOURCES` privilege from\n' - 'the user, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking `SOURCES` privilege from\n" + "the user, either directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.35.1') + num="5.35.1", +) RQ_SRS_006_RBAC_Privileges_Sources_File = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Sources.File', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Sources.File", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the use of `FILE` source by a user if and only if\n' - 'the user has `FILE` or `SOURCES` privileges granted to them directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL support the use of `FILE` source by a user if and only if\n" + "the user has `FILE` or `SOURCES` privileges granted to them directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.35.2') + num="5.35.2", +) RQ_SRS_006_RBAC_Privileges_Sources_URL = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Sources.URL', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Sources.URL", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the use of `URL` source by a user if and only if\n' - 'the user has `URL` or `SOURCES` privileges granted to them directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL support the use of `URL` source by a user if and only if\n" + "the user has `URL` or `SOURCES` privileges granted to them directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.35.3') + num="5.35.3", +) RQ_SRS_006_RBAC_Privileges_Sources_Remote = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Sources.Remote', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Sources.Remote", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the use of `REMOTE` source by a user if and only if\n' - 'the user has `REMOTE` or `SOURCES` privileges granted to them directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL support the use of `REMOTE` source by a user if and only if\n" + "the user has `REMOTE` or `SOURCES` privileges granted to them directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.35.4') + num="5.35.4", +) RQ_SRS_006_RBAC_Privileges_Sources_MySQL = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Sources.MySQL', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Sources.MySQL", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - 
'[ClickHouse] SHALL support the use of `MySQL` source by a user if and only if\n' - 'the user has `MySQL` or `SOURCES` privileges granted to them directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL support the use of `MySQL` source by a user if and only if\n" + "the user has `MySQL` or `SOURCES` privileges granted to them directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.35.5') + num="5.35.5", +) RQ_SRS_006_RBAC_Privileges_Sources_ODBC = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Sources.ODBC', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Sources.ODBC", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the use of `ODBC` source by a user if and only if\n' - 'the user has `ODBC` or `SOURCES` privileges granted to them directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL support the use of `ODBC` source by a user if and only if\n" + "the user has `ODBC` or `SOURCES` privileges granted to them directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.35.6') + num="5.35.6", +) RQ_SRS_006_RBAC_Privileges_Sources_JDBC = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Sources.JDBC', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Sources.JDBC", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the use of `JDBC` source by a user if and only if\n' - 'the user has `JDBC` or `SOURCES` privileges granted to them directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL support the use of `JDBC` source by a user if and only if\n" + "the user has `JDBC` or `SOURCES` privileges granted to them directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.35.7') + num="5.35.7", +) RQ_SRS_006_RBAC_Privileges_Sources_HDFS = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Sources.HDFS', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Sources.HDFS", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the use of `HDFS` source by a user if and only if\n' - 'the user has `HDFS` or `SOURCES` privileges granted to them directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL support the use of `HDFS` source by a user if and only if\n" + "the user has `HDFS` or `SOURCES` privileges granted to them directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.35.8') + num="5.35.8", +) RQ_SRS_006_RBAC_Privileges_Sources_S3 = Requirement( - name='RQ.SRS-006.RBAC.Privileges.Sources.S3', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.Sources.S3", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the use of `S3` source by a user if and only if\n' - 'the user has `S3` or `SOURCES` privileges granted to them directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL support the use of `S3` source by a user if and only if\n" + "the user has `S3` or `SOURCES` privileges granted to them directly or through a role.\n" + "\n" + ), link=None, level=3, - num='5.35.9') + num="5.35.9", +) RQ_SRS_006_RBAC_Privileges_GrantOption = Requirement( - name='RQ.SRS-006.RBAC.Privileges.GrantOption', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.GrantOption", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL successfully execute `GRANT` or `REVOKE` privilege statements by a user if and only if\n' - 'the user has that privilege with 
`GRANT OPTION`, either directly or through a role.\n' - '\n' - ), + "[ClickHouse] SHALL successfully execute `GRANT` or `REVOKE` privilege statements by a user if and only if\n" + "the user has that privilege with `GRANT OPTION`, either directly or through a role.\n" + "\n" + ), link=None, level=2, - num='5.36') + num="5.36", +) RQ_SRS_006_RBAC_Privileges_All = Requirement( - name='RQ.SRS-006.RBAC.Privileges.All', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.All", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking `ALL` privilege\n' - 'using `GRANT ALL ON *.* TO user`.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking `ALL` privilege\n" + "using `GRANT ALL ON *.* TO user`.\n" + "\n" + ), link=None, level=2, - num='5.37') + num="5.37", +) RQ_SRS_006_RBAC_Privileges_RoleAll = Requirement( - name='RQ.SRS-006.RBAC.Privileges.RoleAll', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.RoleAll", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting a role named `ALL` using `GRANT ALL TO user`.\n' - 'This shall only grant the user the privileges that have been granted to the role.\n' - '\n' - ), + "[ClickHouse] SHALL support granting a role named `ALL` using `GRANT ALL TO user`.\n" + "This shall only grant the user the privileges that have been granted to the role.\n" + "\n" + ), link=None, level=2, - num='5.38') + num="5.38", +) RQ_SRS_006_RBAC_Privileges_None = Requirement( - name='RQ.SRS-006.RBAC.Privileges.None', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.None", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support granting or revoking `NONE` privilege\n' - 'using `GRANT NONE TO user` or `GRANT USAGE ON *.* TO user`.\n' - '\n' - ), + "[ClickHouse] SHALL support granting or revoking `NONE` privilege\n" + "using `GRANT NONE TO user` or `GRANT USAGE ON *.* TO user`.\n" + "\n" + ), link=None, level=2, - num='5.39') + num="5.39", +) RQ_SRS_006_RBAC_Privileges_AdminOption = Requirement( - name='RQ.SRS-006.RBAC.Privileges.AdminOption', - version='1.0', + name="RQ.SRS-006.RBAC.Privileges.AdminOption", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support a user granting or revoking a role if and only if\n' - 'the user has that role with `ADMIN OPTION` privilege.\n' - '\n' - ), + "[ClickHouse] SHALL support a user granting or revoking a role if and only if\n" + "the user has that role with `ADMIN OPTION` privilege.\n" + "\n" + ), link=None, level=2, - num='5.40') + num="5.40", +) SRS_006_ClickHouse_Role_Based_Access_Control = Specification( - name='SRS-006 ClickHouse Role Based Access Control', + name="SRS-006 ClickHouse Role Based Access Control", description=None, author=None, date=None, @@ -8871,597 +9380,1391 @@ SRS_006_ClickHouse_Role_Based_Access_Control = Specification( parent=None, children=None, headings=( - Heading(name='Revision History', level=1, num='1'), - Heading(name='Introduction', level=1, num='2'), - Heading(name='Terminology', level=1, num='3'), - Heading(name='Privilege Definitions', level=1, num='4'), - Heading(name='Requirements', level=1, num='5'), - Heading(name='Generic', level=2, num='5.1'), - Heading(name='RQ.SRS-006.RBAC', level=3, num='5.1.1'), - Heading(name='Login', level=2, num='5.2'), - Heading(name='RQ.SRS-006.RBAC.Login', level=3, num='5.2.1'), - 
Heading(name='RQ.SRS-006.RBAC.Login.DefaultUser', level=3, num='5.2.2'), - Heading(name='User', level=2, num='5.3'), - Heading(name='RQ.SRS-006.RBAC.User', level=3, num='5.3.1'), - Heading(name='RQ.SRS-006.RBAC.User.Roles', level=3, num='5.3.2'), - Heading(name='RQ.SRS-006.RBAC.User.Privileges', level=3, num='5.3.3'), - Heading(name='RQ.SRS-006.RBAC.User.Variables', level=3, num='5.3.4'), - Heading(name='RQ.SRS-006.RBAC.User.Variables.Constraints', level=3, num='5.3.5'), - Heading(name='RQ.SRS-006.RBAC.User.SettingsProfile', level=3, num='5.3.6'), - Heading(name='RQ.SRS-006.RBAC.User.Quotas', level=3, num='5.3.7'), - Heading(name='RQ.SRS-006.RBAC.User.RowPolicies', level=3, num='5.3.8'), - Heading(name='RQ.SRS-006.RBAC.User.DefaultRole', level=3, num='5.3.9'), - Heading(name='RQ.SRS-006.RBAC.User.RoleSelection', level=3, num='5.3.10'), - Heading(name='RQ.SRS-006.RBAC.User.ShowCreate', level=3, num='5.3.11'), - Heading(name='RQ.SRS-006.RBAC.User.ShowPrivileges', level=3, num='5.3.12'), - Heading(name='RQ.SRS-006.RBAC.User.Use.DefaultRole', level=3, num='5.3.13'), - Heading(name='RQ.SRS-006.RBAC.User.Use.AllRolesWhenNoDefaultRole', level=3, num='5.3.14'), - Heading(name='Create User', level=3, num='5.3.15'), - Heading(name='RQ.SRS-006.RBAC.User.Create', level=4, num='5.3.15.1'), - Heading(name='RQ.SRS-006.RBAC.User.Create.IfNotExists', level=4, num='5.3.15.2'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Replace', level=4, num='5.3.15.3'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.NoPassword', level=4, num='5.3.15.4'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.NoPassword.Login', level=4, num='5.3.15.5'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.PlainText', level=4, num='5.3.15.6'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.PlainText.Login', level=4, num='5.3.15.7'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Password', level=4, num='5.3.15.8'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Password.Login', level=4, num='5.3.15.9'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash', level=4, num='5.3.15.10'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash.Login', level=4, num='5.3.15.11'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password', level=4, num='5.3.15.12'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password.Login', level=4, num='5.3.15.13'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash', level=4, num='5.3.15.14'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash.Login', level=4, num='5.3.15.15'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Host.Name', level=4, num='5.3.15.16'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Host.Regexp', level=4, num='5.3.15.17'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Host.IP', level=4, num='5.3.15.18'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Host.Any', level=4, num='5.3.15.19'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Host.None', level=4, num='5.3.15.20'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Host.Local', level=4, num='5.3.15.21'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Host.Like', level=4, num='5.3.15.22'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Host.Default', level=4, num='5.3.15.23'), - Heading(name='RQ.SRS-006.RBAC.User.Create.DefaultRole', level=4, num='5.3.15.24'), - Heading(name='RQ.SRS-006.RBAC.User.Create.DefaultRole.None', level=4, num='5.3.15.25'), - Heading(name='RQ.SRS-006.RBAC.User.Create.DefaultRole.All', level=4, 
num='5.3.15.26'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Settings', level=4, num='5.3.15.27'), - Heading(name='RQ.SRS-006.RBAC.User.Create.OnCluster', level=4, num='5.3.15.28'), - Heading(name='RQ.SRS-006.RBAC.User.Create.Syntax', level=4, num='5.3.15.29'), - Heading(name='Alter User', level=3, num='5.3.16'), - Heading(name='RQ.SRS-006.RBAC.User.Alter', level=4, num='5.3.16.1'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.OrderOfEvaluation', level=4, num='5.3.16.2'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.IfExists', level=4, num='5.3.16.3'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Cluster', level=4, num='5.3.16.4'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Rename', level=4, num='5.3.16.5'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Password.PlainText', level=4, num='5.3.16.6'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Password.Sha256Password', level=4, num='5.3.16.7'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Password.DoubleSha1Password', level=4, num='5.3.16.8'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Host.AddDrop', level=4, num='5.3.16.9'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Host.Local', level=4, num='5.3.16.10'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Host.Name', level=4, num='5.3.16.11'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Host.Regexp', level=4, num='5.3.16.12'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Host.IP', level=4, num='5.3.16.13'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Host.Like', level=4, num='5.3.16.14'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Host.Any', level=4, num='5.3.16.15'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Host.None', level=4, num='5.3.16.16'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.DefaultRole', level=4, num='5.3.16.17'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.DefaultRole.All', level=4, num='5.3.16.18'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.DefaultRole.AllExcept', level=4, num='5.3.16.19'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Settings', level=4, num='5.3.16.20'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Settings.Min', level=4, num='5.3.16.21'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Settings.Max', level=4, num='5.3.16.22'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Settings.Profile', level=4, num='5.3.16.23'), - Heading(name='RQ.SRS-006.RBAC.User.Alter.Syntax', level=4, num='5.3.16.24'), - Heading(name='Show Create User', level=3, num='5.3.17'), - Heading(name='RQ.SRS-006.RBAC.User.ShowCreateUser', level=4, num='5.3.17.1'), - Heading(name='RQ.SRS-006.RBAC.User.ShowCreateUser.For', level=4, num='5.3.17.2'), - Heading(name='RQ.SRS-006.RBAC.User.ShowCreateUser.Syntax', level=4, num='5.3.17.3'), - Heading(name='Drop User', level=3, num='5.3.18'), - Heading(name='RQ.SRS-006.RBAC.User.Drop', level=4, num='5.3.18.1'), - Heading(name='RQ.SRS-006.RBAC.User.Drop.IfExists', level=4, num='5.3.18.2'), - Heading(name='RQ.SRS-006.RBAC.User.Drop.OnCluster', level=4, num='5.3.18.3'), - Heading(name='RQ.SRS-006.RBAC.User.Drop.Syntax', level=4, num='5.3.18.4'), - Heading(name='Role', level=2, num='5.4'), - Heading(name='RQ.SRS-006.RBAC.Role', level=3, num='5.4.1'), - Heading(name='RQ.SRS-006.RBAC.Role.Privileges', level=3, num='5.4.2'), - Heading(name='RQ.SRS-006.RBAC.Role.Variables', level=3, num='5.4.3'), - Heading(name='RQ.SRS-006.RBAC.Role.SettingsProfile', level=3, num='5.4.4'), - Heading(name='RQ.SRS-006.RBAC.Role.Quotas', level=3, num='5.4.5'), - Heading(name='RQ.SRS-006.RBAC.Role.RowPolicies', level=3, num='5.4.6'), - Heading(name='Create Role', level=3, num='5.4.7'), - 
Heading(name='RQ.SRS-006.RBAC.Role.Create', level=4, num='5.4.7.1'), - Heading(name='RQ.SRS-006.RBAC.Role.Create.IfNotExists', level=4, num='5.4.7.2'), - Heading(name='RQ.SRS-006.RBAC.Role.Create.Replace', level=4, num='5.4.7.3'), - Heading(name='RQ.SRS-006.RBAC.Role.Create.Settings', level=4, num='5.4.7.4'), - Heading(name='RQ.SRS-006.RBAC.Role.Create.Syntax', level=4, num='5.4.7.5'), - Heading(name='Alter Role', level=3, num='5.4.8'), - Heading(name='RQ.SRS-006.RBAC.Role.Alter', level=4, num='5.4.8.1'), - Heading(name='RQ.SRS-006.RBAC.Role.Alter.IfExists', level=4, num='5.4.8.2'), - Heading(name='RQ.SRS-006.RBAC.Role.Alter.Cluster', level=4, num='5.4.8.3'), - Heading(name='RQ.SRS-006.RBAC.Role.Alter.Rename', level=4, num='5.4.8.4'), - Heading(name='RQ.SRS-006.RBAC.Role.Alter.Settings', level=4, num='5.4.8.5'), - Heading(name='RQ.SRS-006.RBAC.Role.Alter.Syntax', level=4, num='5.4.8.6'), - Heading(name='Drop Role', level=3, num='5.4.9'), - Heading(name='RQ.SRS-006.RBAC.Role.Drop', level=4, num='5.4.9.1'), - Heading(name='RQ.SRS-006.RBAC.Role.Drop.IfExists', level=4, num='5.4.9.2'), - Heading(name='RQ.SRS-006.RBAC.Role.Drop.Cluster', level=4, num='5.4.9.3'), - Heading(name='RQ.SRS-006.RBAC.Role.Drop.Syntax', level=4, num='5.4.9.4'), - Heading(name='Show Create Role', level=3, num='5.4.10'), - Heading(name='RQ.SRS-006.RBAC.Role.ShowCreate', level=4, num='5.4.10.1'), - Heading(name='RQ.SRS-006.RBAC.Role.ShowCreate.Syntax', level=4, num='5.4.10.2'), - Heading(name='Partial Revokes', level=2, num='5.5'), - Heading(name='RQ.SRS-006.RBAC.PartialRevokes', level=3, num='5.5.1'), - Heading(name='RQ.SRS-006.RBAC.PartialRevoke.Syntax', level=3, num='5.5.2'), - Heading(name='Settings Profile', level=2, num='5.6'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile', level=3, num='5.6.1'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Constraints', level=3, num='5.6.2'), - Heading(name='Create Settings Profile', level=3, num='5.6.3'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create', level=4, num='5.6.3.1'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.IfNotExists', level=4, num='5.6.3.2'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.Replace', level=4, num='5.6.3.3'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables', level=4, num='5.6.3.4'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value', level=4, num='5.6.3.5'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints', level=4, num='5.6.3.6'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment', level=4, num='5.6.3.7'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.None', level=4, num='5.6.3.8'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.All', level=4, num='5.6.3.9'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.AllExcept', level=4, num='5.6.3.10'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.Inherit', level=4, num='5.6.3.11'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.OnCluster', level=4, num='5.6.3.12'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Create.Syntax', level=4, num='5.6.3.13'), - Heading(name='Alter Settings Profile', level=3, num='5.6.4'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter', level=4, num='5.6.4.1'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.IfExists', level=4, num='5.6.4.2'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Rename', level=4, num='5.6.4.3'), - 
Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables', level=4, num='5.6.4.4'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value', level=4, num='5.6.4.5'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints', level=4, num='5.6.4.6'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment', level=4, num='5.6.4.7'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.None', level=4, num='5.6.4.8'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.All', level=4, num='5.6.4.9'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.AllExcept', level=4, num='5.6.4.10'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Inherit', level=4, num='5.6.4.11'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.OnCluster', level=4, num='5.6.4.12'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Alter.Syntax', level=4, num='5.6.4.13'), - Heading(name='Drop Settings Profile', level=3, num='5.6.5'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Drop', level=4, num='5.6.5.1'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Drop.IfExists', level=4, num='5.6.5.2'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Drop.OnCluster', level=4, num='5.6.5.3'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.Drop.Syntax', level=4, num='5.6.5.4'), - Heading(name='Show Create Settings Profile', level=3, num='5.6.6'), - Heading(name='RQ.SRS-006.RBAC.SettingsProfile.ShowCreateSettingsProfile', level=4, num='5.6.6.1'), - Heading(name='Quotas', level=2, num='5.7'), - Heading(name='RQ.SRS-006.RBAC.Quotas', level=3, num='5.7.1'), - Heading(name='RQ.SRS-006.RBAC.Quotas.Keyed', level=3, num='5.7.2'), - Heading(name='RQ.SRS-006.RBAC.Quotas.Queries', level=3, num='5.7.3'), - Heading(name='RQ.SRS-006.RBAC.Quotas.Errors', level=3, num='5.7.4'), - Heading(name='RQ.SRS-006.RBAC.Quotas.ResultRows', level=3, num='5.7.5'), - Heading(name='RQ.SRS-006.RBAC.Quotas.ReadRows', level=3, num='5.7.6'), - Heading(name='RQ.SRS-006.RBAC.Quotas.ResultBytes', level=3, num='5.7.7'), - Heading(name='RQ.SRS-006.RBAC.Quotas.ReadBytes', level=3, num='5.7.8'), - Heading(name='RQ.SRS-006.RBAC.Quotas.ExecutionTime', level=3, num='5.7.9'), - Heading(name='Create Quotas', level=3, num='5.7.10'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create', level=4, num='5.7.10.1'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.IfNotExists', level=4, num='5.7.10.2'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.Replace', level=4, num='5.7.10.3'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.Cluster', level=4, num='5.7.10.4'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.Interval', level=4, num='5.7.10.5'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.Interval.Randomized', level=4, num='5.7.10.6'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.Queries', level=4, num='5.7.10.7'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.Errors', level=4, num='5.7.10.8'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.ResultRows', level=4, num='5.7.10.9'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.ReadRows', level=4, num='5.7.10.10'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.ResultBytes', level=4, num='5.7.10.11'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.ReadBytes', level=4, num='5.7.10.12'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.ExecutionTime', level=4, num='5.7.10.13'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.NoLimits', level=4, num='5.7.10.14'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.TrackingOnly', level=4, 
num='5.7.10.15'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.KeyedBy', level=4, num='5.7.10.16'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.KeyedByOptions', level=4, num='5.7.10.17'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.Assignment', level=4, num='5.7.10.18'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.Assignment.None', level=4, num='5.7.10.19'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.Assignment.All', level=4, num='5.7.10.20'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.Assignment.Except', level=4, num='5.7.10.21'), - Heading(name='RQ.SRS-006.RBAC.Quota.Create.Syntax', level=4, num='5.7.10.22'), - Heading(name='Alter Quota', level=3, num='5.7.11'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter', level=4, num='5.7.11.1'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.IfExists', level=4, num='5.7.11.2'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.Rename', level=4, num='5.7.11.3'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.Cluster', level=4, num='5.7.11.4'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.Interval', level=4, num='5.7.11.5'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.Interval.Randomized', level=4, num='5.7.11.6'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.Queries', level=4, num='5.7.11.7'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.Errors', level=4, num='5.7.11.8'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.ResultRows', level=4, num='5.7.11.9'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.ReadRows', level=4, num='5.7.11.10'), - Heading(name='RQ.SRS-006.RBAC.Quota.ALter.ResultBytes', level=4, num='5.7.11.11'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.ReadBytes', level=4, num='5.7.11.12'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.ExecutionTime', level=4, num='5.7.11.13'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.NoLimits', level=4, num='5.7.11.14'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.TrackingOnly', level=4, num='5.7.11.15'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.KeyedBy', level=4, num='5.7.11.16'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.KeyedByOptions', level=4, num='5.7.11.17'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.Assignment', level=4, num='5.7.11.18'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.None', level=4, num='5.7.11.19'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.All', level=4, num='5.7.11.20'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.Assignment.Except', level=4, num='5.7.11.21'), - Heading(name='RQ.SRS-006.RBAC.Quota.Alter.Syntax', level=4, num='5.7.11.22'), - Heading(name='Drop Quota', level=3, num='5.7.12'), - Heading(name='RQ.SRS-006.RBAC.Quota.Drop', level=4, num='5.7.12.1'), - Heading(name='RQ.SRS-006.RBAC.Quota.Drop.IfExists', level=4, num='5.7.12.2'), - Heading(name='RQ.SRS-006.RBAC.Quota.Drop.Cluster', level=4, num='5.7.12.3'), - Heading(name='RQ.SRS-006.RBAC.Quota.Drop.Syntax', level=4, num='5.7.12.4'), - Heading(name='Show Quotas', level=3, num='5.7.13'), - Heading(name='RQ.SRS-006.RBAC.Quota.ShowQuotas', level=4, num='5.7.13.1'), - Heading(name='RQ.SRS-006.RBAC.Quota.ShowQuotas.IntoOutfile', level=4, num='5.7.13.2'), - Heading(name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Format', level=4, num='5.7.13.3'), - Heading(name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Settings', level=4, num='5.7.13.4'), - Heading(name='RQ.SRS-006.RBAC.Quota.ShowQuotas.Syntax', level=4, num='5.7.13.5'), - Heading(name='Show Create Quota', level=3, num='5.7.14'), - Heading(name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Name', level=4, num='5.7.14.1'), - 
Heading(name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Current', level=4, num='5.7.14.2'), - Heading(name='RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Syntax', level=4, num='5.7.14.3'), - Heading(name='Row Policy', level=2, num='5.8'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy', level=3, num='5.8.1'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Condition', level=3, num='5.8.2'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Restriction', level=3, num='5.8.3'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Nesting', level=3, num='5.8.4'), - Heading(name='Create Row Policy', level=3, num='5.8.5'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create', level=4, num='5.8.5.1'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.IfNotExists', level=4, num='5.8.5.2'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.Replace', level=4, num='5.8.5.3'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.OnCluster', level=4, num='5.8.5.4'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.On', level=4, num='5.8.5.5'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.Access', level=4, num='5.8.5.6'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.Access.Permissive', level=4, num='5.8.5.7'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.Access.Restrictive', level=4, num='5.8.5.8'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.ForSelect', level=4, num='5.8.5.9'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.Condition', level=4, num='5.8.5.10'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment', level=4, num='5.8.5.11'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.None', level=4, num='5.8.5.12'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.All', level=4, num='5.8.5.13'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.AllExcept', level=4, num='5.8.5.14'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Create.Syntax', level=4, num='5.8.5.15'), - Heading(name='Alter Row Policy', level=3, num='5.8.6'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter', level=4, num='5.8.6.1'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.IfExists', level=4, num='5.8.6.2'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.ForSelect', level=4, num='5.8.6.3'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.OnCluster', level=4, num='5.8.6.4'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.On', level=4, num='5.8.6.5'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.Rename', level=4, num='5.8.6.6'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access', level=4, num='5.8.6.7'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Permissive', level=4, num='5.8.6.8'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Restrictive', level=4, num='5.8.6.9'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.Condition', level=4, num='5.8.6.10'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.None', level=4, num='5.8.6.11'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment', level=4, num='5.8.6.12'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.None', level=4, num='5.8.6.13'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.All', level=4, num='5.8.6.14'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.AllExcept', level=4, num='5.8.6.15'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Alter.Syntax', level=4, num='5.8.6.16'), - Heading(name='Drop Row Policy', level=3, num='5.8.7'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Drop', level=4, num='5.8.7.1'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Drop.IfExists', level=4, num='5.8.7.2'), 
- Heading(name='RQ.SRS-006.RBAC.RowPolicy.Drop.On', level=4, num='5.8.7.3'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Drop.OnCluster', level=4, num='5.8.7.4'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.Drop.Syntax', level=4, num='5.8.7.5'), - Heading(name='Show Create Row Policy', level=3, num='5.8.8'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy', level=4, num='5.8.8.1'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.On', level=4, num='5.8.8.2'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.Syntax', level=4, num='5.8.8.3'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies', level=4, num='5.8.8.4'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.On', level=4, num='5.8.8.5'), - Heading(name='RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.Syntax', level=4, num='5.8.8.6'), - Heading(name='Set Default Role', level=2, num='5.9'), - Heading(name='RQ.SRS-006.RBAC.SetDefaultRole', level=3, num='5.9.1'), - Heading(name='RQ.SRS-006.RBAC.SetDefaultRole.CurrentUser', level=3, num='5.9.2'), - Heading(name='RQ.SRS-006.RBAC.SetDefaultRole.All', level=3, num='5.9.3'), - Heading(name='RQ.SRS-006.RBAC.SetDefaultRole.AllExcept', level=3, num='5.9.4'), - Heading(name='RQ.SRS-006.RBAC.SetDefaultRole.None', level=3, num='5.9.5'), - Heading(name='RQ.SRS-006.RBAC.SetDefaultRole.Syntax', level=3, num='5.9.6'), - Heading(name='Set Role', level=2, num='5.10'), - Heading(name='RQ.SRS-006.RBAC.SetRole', level=3, num='5.10.1'), - Heading(name='RQ.SRS-006.RBAC.SetRole.Default', level=3, num='5.10.2'), - Heading(name='RQ.SRS-006.RBAC.SetRole.None', level=3, num='5.10.3'), - Heading(name='RQ.SRS-006.RBAC.SetRole.All', level=3, num='5.10.4'), - Heading(name='RQ.SRS-006.RBAC.SetRole.AllExcept', level=3, num='5.10.5'), - Heading(name='RQ.SRS-006.RBAC.SetRole.Syntax', level=3, num='5.10.6'), - Heading(name='Grant', level=2, num='5.11'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.To', level=3, num='5.11.1'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.ToCurrentUser', level=3, num='5.11.2'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.Select', level=3, num='5.11.3'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.Insert', level=3, num='5.11.4'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.Alter', level=3, num='5.11.5'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.Create', level=3, num='5.11.6'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.Drop', level=3, num='5.11.7'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.Truncate', level=3, num='5.11.8'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.Optimize', level=3, num='5.11.9'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.Show', level=3, num='5.11.10'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.KillQuery', level=3, num='5.11.11'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement', level=3, num='5.11.12'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.System', level=3, num='5.11.13'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.Introspection', level=3, num='5.11.14'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.Sources', level=3, num='5.11.15'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.DictGet', level=3, num='5.11.16'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.None', level=3, num='5.11.17'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.All', level=3, num='5.11.18'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.GrantOption', level=3, num='5.11.19'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.On', level=3, num='5.11.20'), - 
Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns', level=3, num='5.11.21'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.OnCluster', level=3, num='5.11.22'), - Heading(name='RQ.SRS-006.RBAC.Grant.Privilege.Syntax', level=3, num='5.11.23'), - Heading(name='Revoke', level=2, num='5.12'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Cluster', level=3, num='5.12.1'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Select', level=3, num='5.12.2'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Insert', level=3, num='5.12.3'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Alter', level=3, num='5.12.4'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Create', level=3, num='5.12.5'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Drop', level=3, num='5.12.6'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Truncate', level=3, num='5.12.7'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Optimize', level=3, num='5.12.8'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Show', level=3, num='5.12.9'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery', level=3, num='5.12.10'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement', level=3, num='5.12.11'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.System', level=3, num='5.12.12'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Introspection', level=3, num='5.12.13'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Sources', level=3, num='5.12.14'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.DictGet', level=3, num='5.12.15'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.PrivilegeColumns', level=3, num='5.12.16'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Multiple', level=3, num='5.12.17'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.All', level=3, num='5.12.18'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.None', level=3, num='5.12.19'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.On', level=3, num='5.12.20'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.From', level=3, num='5.12.21'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Privilege.Syntax', level=3, num='5.12.22'), - Heading(name='Grant Role', level=2, num='5.13'), - Heading(name='RQ.SRS-006.RBAC.Grant.Role', level=3, num='5.13.1'), - Heading(name='RQ.SRS-006.RBAC.Grant.Role.CurrentUser', level=3, num='5.13.2'), - Heading(name='RQ.SRS-006.RBAC.Grant.Role.AdminOption', level=3, num='5.13.3'), - Heading(name='RQ.SRS-006.RBAC.Grant.Role.OnCluster', level=3, num='5.13.4'), - Heading(name='RQ.SRS-006.RBAC.Grant.Role.Syntax', level=3, num='5.13.5'), - Heading(name='Revoke Role', level=2, num='5.14'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Role', level=3, num='5.14.1'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Role.Keywords', level=3, num='5.14.2'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Role.Cluster', level=3, num='5.14.3'), - Heading(name='RQ.SRS-006.RBAC.Revoke.AdminOption', level=3, num='5.14.4'), - Heading(name='RQ.SRS-006.RBAC.Revoke.Role.Syntax', level=3, num='5.14.5'), - Heading(name='Show Grants', level=2, num='5.15'), - Heading(name='RQ.SRS-006.RBAC.Show.Grants', level=3, num='5.15.1'), - Heading(name='RQ.SRS-006.RBAC.Show.Grants.For', level=3, num='5.15.2'), - Heading(name='RQ.SRS-006.RBAC.Show.Grants.Syntax', level=3, num='5.15.3'), - Heading(name='Table Privileges', level=2, num='5.16'), - Heading(name='RQ.SRS-006.RBAC.Table.PublicTables', level=3, num='5.16.1'), - Heading(name='RQ.SRS-006.RBAC.Table.SensitiveTables', level=3, num='5.16.2'), - Heading(name='Distributed Tables', level=2, 
num='5.17'), - Heading(name='RQ.SRS-006.RBAC.DistributedTable.Create', level=3, num='5.17.1'), - Heading(name='RQ.SRS-006.RBAC.DistributedTable.Select', level=3, num='5.17.2'), - Heading(name='RQ.SRS-006.RBAC.DistributedTable.Insert', level=3, num='5.17.3'), - Heading(name='RQ.SRS-006.RBAC.DistributedTable.SpecialTables', level=3, num='5.17.4'), - Heading(name='RQ.SRS-006.RBAC.DistributedTable.LocalUser', level=3, num='5.17.5'), - Heading(name='RQ.SRS-006.RBAC.DistributedTable.SameUserDifferentNodesDifferentPrivileges', level=3, num='5.17.6'), - Heading(name='Views', level=2, num='5.18'), - Heading(name='View', level=3, num='5.18.1'), - Heading(name='RQ.SRS-006.RBAC.View', level=4, num='5.18.1.1'), - Heading(name='RQ.SRS-006.RBAC.View.Create', level=4, num='5.18.1.2'), - Heading(name='RQ.SRS-006.RBAC.View.Select', level=4, num='5.18.1.3'), - Heading(name='RQ.SRS-006.RBAC.View.Drop', level=4, num='5.18.1.4'), - Heading(name='Materialized View', level=3, num='5.18.2'), - Heading(name='RQ.SRS-006.RBAC.MaterializedView', level=4, num='5.18.2.1'), - Heading(name='RQ.SRS-006.RBAC.MaterializedView.Create', level=4, num='5.18.2.2'), - Heading(name='RQ.SRS-006.RBAC.MaterializedView.Select', level=4, num='5.18.2.3'), - Heading(name='RQ.SRS-006.RBAC.MaterializedView.Select.TargetTable', level=4, num='5.18.2.4'), - Heading(name='RQ.SRS-006.RBAC.MaterializedView.Select.SourceTable', level=4, num='5.18.2.5'), - Heading(name='RQ.SRS-006.RBAC.MaterializedView.Drop', level=4, num='5.18.2.6'), - Heading(name='RQ.SRS-006.RBAC.MaterializedView.ModifyQuery', level=4, num='5.18.2.7'), - Heading(name='RQ.SRS-006.RBAC.MaterializedView.Insert', level=4, num='5.18.2.8'), - Heading(name='RQ.SRS-006.RBAC.MaterializedView.Insert.SourceTable', level=4, num='5.18.2.9'), - Heading(name='RQ.SRS-006.RBAC.MaterializedView.Insert.TargetTable', level=4, num='5.18.2.10'), - Heading(name='Live View', level=3, num='5.18.3'), - Heading(name='RQ.SRS-006.RBAC.LiveView', level=4, num='5.18.3.1'), - Heading(name='RQ.SRS-006.RBAC.LiveView.Create', level=4, num='5.18.3.2'), - Heading(name='RQ.SRS-006.RBAC.LiveView.Select', level=4, num='5.18.3.3'), - Heading(name='RQ.SRS-006.RBAC.LiveView.Drop', level=4, num='5.18.3.4'), - Heading(name='RQ.SRS-006.RBAC.LiveView.Refresh', level=4, num='5.18.3.5'), - Heading(name='Select', level=2, num='5.19'), - Heading(name='RQ.SRS-006.RBAC.Select', level=3, num='5.19.1'), - Heading(name='RQ.SRS-006.RBAC.Select.Column', level=3, num='5.19.2'), - Heading(name='RQ.SRS-006.RBAC.Select.Cluster', level=3, num='5.19.3'), - Heading(name='RQ.SRS-006.RBAC.Select.TableEngines', level=3, num='5.19.4'), - Heading(name='Insert', level=2, num='5.20'), - Heading(name='RQ.SRS-006.RBAC.Insert', level=3, num='5.20.1'), - Heading(name='RQ.SRS-006.RBAC.Insert.Column', level=3, num='5.20.2'), - Heading(name='RQ.SRS-006.RBAC.Insert.Cluster', level=3, num='5.20.3'), - Heading(name='RQ.SRS-006.RBAC.Insert.TableEngines', level=3, num='5.20.4'), - Heading(name='Alter', level=2, num='5.21'), - Heading(name='Alter Column', level=3, num='5.21.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterColumn', level=4, num='5.21.1.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Grant', level=4, num='5.21.1.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Revoke', level=4, num='5.21.1.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Column', level=4, num='5.21.1.4'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterColumn.Cluster', level=4, num='5.21.1.5'), - 
Heading(name='RQ.SRS-006.RBAC.Privileges.AlterColumn.TableEngines', level=4, num='5.21.1.6'), - Heading(name='Alter Index', level=3, num='5.21.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterIndex', level=4, num='5.21.2.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterIndex.Grant', level=4, num='5.21.2.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterIndex.Revoke', level=4, num='5.21.2.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterIndex.Cluster', level=4, num='5.21.2.4'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterIndex.TableEngines', level=4, num='5.21.2.5'), - Heading(name='Alter Constraint', level=3, num='5.21.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterConstraint', level=4, num='5.21.3.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.Grant', level=4, num='5.21.3.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.Revoke', level=4, num='5.21.3.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.Cluster', level=4, num='5.21.3.4'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterConstraint.TableEngines', level=4, num='5.21.3.5'), - Heading(name='Alter TTL', level=3, num='5.21.4'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterTTL', level=4, num='5.21.4.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterTTL.Grant', level=4, num='5.21.4.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterTTL.Revoke', level=4, num='5.21.4.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterTTL.Cluster', level=4, num='5.21.4.4'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterTTL.TableEngines', level=4, num='5.21.4.5'), - Heading(name='Alter Settings', level=3, num='5.21.5'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterSettings', level=4, num='5.21.5.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterSettings.Grant', level=4, num='5.21.5.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterSettings.Revoke', level=4, num='5.21.5.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterSettings.Cluster', level=4, num='5.21.5.4'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterSettings.TableEngines', level=4, num='5.21.5.5'), - Heading(name='Alter Update', level=3, num='5.21.6'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterUpdate', level=4, num='5.21.6.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterUpdate.Grant', level=4, num='5.21.6.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterUpdate.Revoke', level=4, num='5.21.6.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterUpdate.TableEngines', level=4, num='5.21.6.4'), - Heading(name='Alter Delete', level=3, num='5.21.7'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterDelete', level=4, num='5.21.7.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterDelete.Grant', level=4, num='5.21.7.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterDelete.Revoke', level=4, num='5.21.7.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterDelete.TableEngines', level=4, num='5.21.7.4'), - Heading(name='Alter Freeze Partition', level=3, num='5.21.8'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterFreeze', level=4, num='5.21.8.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterFreeze.Grant', level=4, num='5.21.8.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterFreeze.Revoke', level=4, num='5.21.8.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterFreeze.TableEngines', level=4, num='5.21.8.4'), - Heading(name='Alter Fetch Partition', level=3, num='5.21.9'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterFetch', level=4, num='5.21.9.1'), - 
Heading(name='RQ.SRS-006.RBAC.Privileges.AlterFetch.Grant', level=4, num='5.21.9.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterFetch.Revoke', level=4, num='5.21.9.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterFetch.TableEngines', level=4, num='5.21.9.4'), - Heading(name='Alter Move Partition', level=3, num='5.21.10'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterMove', level=4, num='5.21.10.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterMove.Grant', level=4, num='5.21.10.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterMove.Revoke', level=4, num='5.21.10.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterMove.TableEngines', level=4, num='5.21.10.4'), - Heading(name='Create', level=2, num='5.22'), - Heading(name='RQ.SRS-006.RBAC.Privileges.CreateTable', level=3, num='5.22.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.CreateDatabase', level=3, num='5.22.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.CreateDictionary', level=3, num='5.22.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.CreateTemporaryTable', level=3, num='5.22.4'), - Heading(name='Attach', level=2, num='5.23'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AttachDatabase', level=3, num='5.23.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AttachDictionary', level=3, num='5.23.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AttachTemporaryTable', level=3, num='5.23.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AttachTable', level=3, num='5.23.4'), - Heading(name='Drop', level=2, num='5.24'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DropTable', level=3, num='5.24.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DropDatabase', level=3, num='5.24.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DropDictionary', level=3, num='5.24.3'), - Heading(name='Detach', level=2, num='5.25'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DetachTable', level=3, num='5.25.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DetachView', level=3, num='5.25.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DetachDatabase', level=3, num='5.25.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DetachDictionary', level=3, num='5.25.4'), - Heading(name='Truncate', level=2, num='5.26'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Truncate', level=3, num='5.26.1'), - Heading(name='Optimize', level=2, num='5.27'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Optimize', level=3, num='5.27.1'), - Heading(name='Kill Query', level=2, num='5.28'), - Heading(name='RQ.SRS-006.RBAC.Privileges.KillQuery', level=3, num='5.28.1'), - Heading(name='Kill Mutation', level=2, num='5.29'), - Heading(name='RQ.SRS-006.RBAC.Privileges.KillMutation', level=3, num='5.29.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.KillMutation.AlterUpdate', level=3, num='5.29.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.KillMutation.AlterDelete', level=3, num='5.29.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.KillMutation.AlterDropColumn', level=3, num='5.29.4'), - Heading(name='Show', level=2, num='5.30'), - Heading(name='RQ.SRS-006.RBAC.ShowTables.Privilege', level=3, num='5.30.1'), - Heading(name='RQ.SRS-006.RBAC.ShowTables.RequiredPrivilege', level=3, num='5.30.2'), - Heading(name='RQ.SRS-006.RBAC.ExistsTable.RequiredPrivilege', level=3, num='5.30.3'), - Heading(name='RQ.SRS-006.RBAC.CheckTable.RequiredPrivilege', level=3, num='5.30.4'), - Heading(name='RQ.SRS-006.RBAC.ShowDatabases.Privilege', level=3, num='5.30.5'), - Heading(name='RQ.SRS-006.RBAC.ShowDatabases.RequiredPrivilege', level=3, num='5.30.6'), - 
Heading(name='RQ.SRS-006.RBAC.ShowCreateDatabase.RequiredPrivilege', level=3, num='5.30.7'), - Heading(name='RQ.SRS-006.RBAC.UseDatabase.RequiredPrivilege', level=3, num='5.30.8'), - Heading(name='RQ.SRS-006.RBAC.ShowColumns.Privilege', level=3, num='5.30.9'), - Heading(name='RQ.SRS-006.RBAC.ShowCreateTable.RequiredPrivilege', level=3, num='5.30.10'), - Heading(name='RQ.SRS-006.RBAC.DescribeTable.RequiredPrivilege', level=3, num='5.30.11'), - Heading(name='RQ.SRS-006.RBAC.ShowDictionaries.Privilege', level=3, num='5.30.12'), - Heading(name='RQ.SRS-006.RBAC.ShowDictionaries.RequiredPrivilege', level=3, num='5.30.13'), - Heading(name='RQ.SRS-006.RBAC.ShowCreateDictionary.RequiredPrivilege', level=3, num='5.30.14'), - Heading(name='RQ.SRS-006.RBAC.ExistsDictionary.RequiredPrivilege', level=3, num='5.30.15'), - Heading(name='Access Management', level=2, num='5.31'), - Heading(name='RQ.SRS-006.RBAC.Privileges.CreateUser', level=3, num='5.31.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.CreateUser.DefaultRole', level=3, num='5.31.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterUser', level=3, num='5.31.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DropUser', level=3, num='5.31.4'), - Heading(name='RQ.SRS-006.RBAC.Privileges.CreateRole', level=3, num='5.31.5'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterRole', level=3, num='5.31.6'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DropRole', level=3, num='5.31.7'), - Heading(name='RQ.SRS-006.RBAC.Privileges.CreateRowPolicy', level=3, num='5.31.8'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterRowPolicy', level=3, num='5.31.9'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DropRowPolicy', level=3, num='5.31.10'), - Heading(name='RQ.SRS-006.RBAC.Privileges.CreateQuota', level=3, num='5.31.11'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterQuota', level=3, num='5.31.12'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DropQuota', level=3, num='5.31.13'), - Heading(name='RQ.SRS-006.RBAC.Privileges.CreateSettingsProfile', level=3, num='5.31.14'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AlterSettingsProfile', level=3, num='5.31.15'), - Heading(name='RQ.SRS-006.RBAC.Privileges.DropSettingsProfile', level=3, num='5.31.16'), - Heading(name='RQ.SRS-006.RBAC.Privileges.RoleAdmin', level=3, num='5.31.17'), - Heading(name='Show Access', level=3, num='5.31.18'), - Heading(name='RQ.SRS-006.RBAC.ShowUsers.Privilege', level=4, num='5.31.18.1'), - Heading(name='RQ.SRS-006.RBAC.ShowUsers.RequiredPrivilege', level=4, num='5.31.18.2'), - Heading(name='RQ.SRS-006.RBAC.ShowCreateUser.RequiredPrivilege', level=4, num='5.31.18.3'), - Heading(name='RQ.SRS-006.RBAC.ShowRoles.Privilege', level=4, num='5.31.18.4'), - Heading(name='RQ.SRS-006.RBAC.ShowRoles.RequiredPrivilege', level=4, num='5.31.18.5'), - Heading(name='RQ.SRS-006.RBAC.ShowCreateRole.RequiredPrivilege', level=4, num='5.31.18.6'), - Heading(name='RQ.SRS-006.RBAC.ShowRowPolicies.Privilege', level=4, num='5.31.18.7'), - Heading(name='RQ.SRS-006.RBAC.ShowRowPolicies.RequiredPrivilege', level=4, num='5.31.18.8'), - Heading(name='RQ.SRS-006.RBAC.ShowCreateRowPolicy.RequiredPrivilege', level=4, num='5.31.18.9'), - Heading(name='RQ.SRS-006.RBAC.ShowQuotas.Privilege', level=4, num='5.31.18.10'), - Heading(name='RQ.SRS-006.RBAC.ShowQuotas.RequiredPrivilege', level=4, num='5.31.18.11'), - Heading(name='RQ.SRS-006.RBAC.ShowCreateQuota.RequiredPrivilege', level=4, num='5.31.18.12'), - Heading(name='RQ.SRS-006.RBAC.ShowSettingsProfiles.Privilege', level=4, num='5.31.18.13'), - 
Heading(name='RQ.SRS-006.RBAC.ShowSettingsProfiles.RequiredPrivilege', level=4, num='5.31.18.14'), - Heading(name='RQ.SRS-006.RBAC.ShowCreateSettingsProfile.RequiredPrivilege', level=4, num='5.31.18.15'), - Heading(name='dictGet', level=2, num='5.32'), - Heading(name='RQ.SRS-006.RBAC.dictGet.Privilege', level=3, num='5.32.1'), - Heading(name='RQ.SRS-006.RBAC.dictGet.RequiredPrivilege', level=3, num='5.32.2'), - Heading(name='RQ.SRS-006.RBAC.dictGet.Type.RequiredPrivilege', level=3, num='5.32.3'), - Heading(name='RQ.SRS-006.RBAC.dictGet.OrDefault.RequiredPrivilege', level=3, num='5.32.4'), - Heading(name='RQ.SRS-006.RBAC.dictHas.RequiredPrivilege', level=3, num='5.32.5'), - Heading(name='RQ.SRS-006.RBAC.dictGetHierarchy.RequiredPrivilege', level=3, num='5.32.6'), - Heading(name='RQ.SRS-006.RBAC.dictIsIn.RequiredPrivilege', level=3, num='5.32.7'), - Heading(name='Introspection', level=2, num='5.33'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Introspection', level=3, num='5.33.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Introspection.addressToLine', level=3, num='5.33.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Introspection.addressToSymbol', level=3, num='5.33.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Introspection.demangle', level=3, num='5.33.4'), - Heading(name='System', level=2, num='5.34'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Shutdown', level=3, num='5.34.1'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.DropCache', level=3, num='5.34.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.DropCache.DNS', level=3, num='5.34.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.DropCache.Mark', level=3, num='5.34.4'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.DropCache.Uncompressed', level=3, num='5.34.5'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Reload', level=3, num='5.34.6'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Reload.Config', level=3, num='5.34.7'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Reload.Dictionary', level=3, num='5.34.8'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Reload.Dictionaries', level=3, num='5.34.9'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Reload.EmbeddedDictionaries', level=3, num='5.34.10'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Merges', level=3, num='5.34.11'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.TTLMerges', level=3, num='5.34.12'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Fetches', level=3, num='5.34.13'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Moves', level=3, num='5.34.14'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Sends', level=3, num='5.34.15'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Sends.Distributed', level=3, num='5.34.16'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Sends.Replicated', level=3, num='5.34.17'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.ReplicationQueues', level=3, num='5.34.18'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.SyncReplica', level=3, num='5.34.19'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.RestartReplica', level=3, num='5.34.20'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Flush', level=3, num='5.34.21'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Flush.Distributed', level=3, num='5.34.22'), - Heading(name='RQ.SRS-006.RBAC.Privileges.System.Flush.Logs', level=3, num='5.34.23'), - Heading(name='Sources', level=2, num='5.35'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Sources', level=3, num='5.35.1'), - 
Heading(name='RQ.SRS-006.RBAC.Privileges.Sources.File', level=3, num='5.35.2'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Sources.URL', level=3, num='5.35.3'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Sources.Remote', level=3, num='5.35.4'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Sources.MySQL', level=3, num='5.35.5'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Sources.ODBC', level=3, num='5.35.6'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Sources.JDBC', level=3, num='5.35.7'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Sources.HDFS', level=3, num='5.35.8'), - Heading(name='RQ.SRS-006.RBAC.Privileges.Sources.S3', level=3, num='5.35.9'), - Heading(name='RQ.SRS-006.RBAC.Privileges.GrantOption', level=2, num='5.36'), - Heading(name='RQ.SRS-006.RBAC.Privileges.All', level=2, num='5.37'), - Heading(name='RQ.SRS-006.RBAC.Privileges.RoleAll', level=2, num='5.38'), - Heading(name='RQ.SRS-006.RBAC.Privileges.None', level=2, num='5.39'), - Heading(name='RQ.SRS-006.RBAC.Privileges.AdminOption', level=2, num='5.40'), - Heading(name='References', level=1, num='6'), + Heading(name="Revision History", level=1, num="1"), + Heading(name="Introduction", level=1, num="2"), + Heading(name="Terminology", level=1, num="3"), + Heading(name="Privilege Definitions", level=1, num="4"), + Heading(name="Requirements", level=1, num="5"), + Heading(name="Generic", level=2, num="5.1"), + Heading(name="RQ.SRS-006.RBAC", level=3, num="5.1.1"), + Heading(name="Login", level=2, num="5.2"), + Heading(name="RQ.SRS-006.RBAC.Login", level=3, num="5.2.1"), + Heading(name="RQ.SRS-006.RBAC.Login.DefaultUser", level=3, num="5.2.2"), + Heading(name="User", level=2, num="5.3"), + Heading(name="RQ.SRS-006.RBAC.User", level=3, num="5.3.1"), + Heading(name="RQ.SRS-006.RBAC.User.Roles", level=3, num="5.3.2"), + Heading(name="RQ.SRS-006.RBAC.User.Privileges", level=3, num="5.3.3"), + Heading(name="RQ.SRS-006.RBAC.User.Variables", level=3, num="5.3.4"), + Heading( + name="RQ.SRS-006.RBAC.User.Variables.Constraints", level=3, num="5.3.5" ), + Heading(name="RQ.SRS-006.RBAC.User.SettingsProfile", level=3, num="5.3.6"), + Heading(name="RQ.SRS-006.RBAC.User.Quotas", level=3, num="5.3.7"), + Heading(name="RQ.SRS-006.RBAC.User.RowPolicies", level=3, num="5.3.8"), + Heading(name="RQ.SRS-006.RBAC.User.DefaultRole", level=3, num="5.3.9"), + Heading(name="RQ.SRS-006.RBAC.User.RoleSelection", level=3, num="5.3.10"), + Heading(name="RQ.SRS-006.RBAC.User.ShowCreate", level=3, num="5.3.11"), + Heading(name="RQ.SRS-006.RBAC.User.ShowPrivileges", level=3, num="5.3.12"), + Heading(name="RQ.SRS-006.RBAC.User.Use.DefaultRole", level=3, num="5.3.13"), + Heading( + name="RQ.SRS-006.RBAC.User.Use.AllRolesWhenNoDefaultRole", + level=3, + num="5.3.14", + ), + Heading(name="Create User", level=3, num="5.3.15"), + Heading(name="RQ.SRS-006.RBAC.User.Create", level=4, num="5.3.15.1"), + Heading( + name="RQ.SRS-006.RBAC.User.Create.IfNotExists", level=4, num="5.3.15.2" + ), + Heading(name="RQ.SRS-006.RBAC.User.Create.Replace", level=4, num="5.3.15.3"), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.NoPassword", + level=4, + num="5.3.15.4", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.NoPassword.Login", + level=4, + num="5.3.15.5", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.PlainText", + level=4, + num="5.3.15.6", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.PlainText.Login", + level=4, + num="5.3.15.7", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.Sha256Password", + level=4, + 
num="5.3.15.8", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.Sha256Password.Login", + level=4, + num="5.3.15.9", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash", + level=4, + num="5.3.15.10", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.Sha256Hash.Login", + level=4, + num="5.3.15.11", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password", + level=4, + num="5.3.15.12", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Password.Login", + level=4, + num="5.3.15.13", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash", + level=4, + num="5.3.15.14", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Password.DoubleSha1Hash.Login", + level=4, + num="5.3.15.15", + ), + Heading(name="RQ.SRS-006.RBAC.User.Create.Host.Name", level=4, num="5.3.15.16"), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Host.Regexp", level=4, num="5.3.15.17" + ), + Heading(name="RQ.SRS-006.RBAC.User.Create.Host.IP", level=4, num="5.3.15.18"), + Heading(name="RQ.SRS-006.RBAC.User.Create.Host.Any", level=4, num="5.3.15.19"), + Heading(name="RQ.SRS-006.RBAC.User.Create.Host.None", level=4, num="5.3.15.20"), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Host.Local", level=4, num="5.3.15.21" + ), + Heading(name="RQ.SRS-006.RBAC.User.Create.Host.Like", level=4, num="5.3.15.22"), + Heading( + name="RQ.SRS-006.RBAC.User.Create.Host.Default", level=4, num="5.3.15.23" + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.DefaultRole", level=4, num="5.3.15.24" + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.DefaultRole.None", + level=4, + num="5.3.15.25", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Create.DefaultRole.All", level=4, num="5.3.15.26" + ), + Heading(name="RQ.SRS-006.RBAC.User.Create.Settings", level=4, num="5.3.15.27"), + Heading(name="RQ.SRS-006.RBAC.User.Create.OnCluster", level=4, num="5.3.15.28"), + Heading(name="RQ.SRS-006.RBAC.User.Create.Syntax", level=4, num="5.3.15.29"), + Heading(name="Alter User", level=3, num="5.3.16"), + Heading(name="RQ.SRS-006.RBAC.User.Alter", level=4, num="5.3.16.1"), + Heading( + name="RQ.SRS-006.RBAC.User.Alter.OrderOfEvaluation", level=4, num="5.3.16.2" + ), + Heading(name="RQ.SRS-006.RBAC.User.Alter.IfExists", level=4, num="5.3.16.3"), + Heading(name="RQ.SRS-006.RBAC.User.Alter.Cluster", level=4, num="5.3.16.4"), + Heading(name="RQ.SRS-006.RBAC.User.Alter.Rename", level=4, num="5.3.16.5"), + Heading( + name="RQ.SRS-006.RBAC.User.Alter.Password.PlainText", + level=4, + num="5.3.16.6", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Alter.Password.Sha256Password", + level=4, + num="5.3.16.7", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Alter.Password.DoubleSha1Password", + level=4, + num="5.3.16.8", + ), + Heading( + name="RQ.SRS-006.RBAC.User.Alter.Host.AddDrop", level=4, num="5.3.16.9" + ), + Heading(name="RQ.SRS-006.RBAC.User.Alter.Host.Local", level=4, num="5.3.16.10"), + Heading(name="RQ.SRS-006.RBAC.User.Alter.Host.Name", level=4, num="5.3.16.11"), + Heading( + name="RQ.SRS-006.RBAC.User.Alter.Host.Regexp", level=4, num="5.3.16.12" + ), + Heading(name="RQ.SRS-006.RBAC.User.Alter.Host.IP", level=4, num="5.3.16.13"), + Heading(name="RQ.SRS-006.RBAC.User.Alter.Host.Like", level=4, num="5.3.16.14"), + Heading(name="RQ.SRS-006.RBAC.User.Alter.Host.Any", level=4, num="5.3.16.15"), + Heading(name="RQ.SRS-006.RBAC.User.Alter.Host.None", level=4, num="5.3.16.16"), + Heading( + name="RQ.SRS-006.RBAC.User.Alter.DefaultRole", level=4, num="5.3.16.17" + ), + 
Heading( + name="RQ.SRS-006.RBAC.User.Alter.DefaultRole.All", level=4, num="5.3.16.18" + ), + Heading( + name="RQ.SRS-006.RBAC.User.Alter.DefaultRole.AllExcept", + level=4, + num="5.3.16.19", + ), + Heading(name="RQ.SRS-006.RBAC.User.Alter.Settings", level=4, num="5.3.16.20"), + Heading( + name="RQ.SRS-006.RBAC.User.Alter.Settings.Min", level=4, num="5.3.16.21" + ), + Heading( + name="RQ.SRS-006.RBAC.User.Alter.Settings.Max", level=4, num="5.3.16.22" + ), + Heading( + name="RQ.SRS-006.RBAC.User.Alter.Settings.Profile", level=4, num="5.3.16.23" + ), + Heading(name="RQ.SRS-006.RBAC.User.Alter.Syntax", level=4, num="5.3.16.24"), + Heading(name="Show Create User", level=3, num="5.3.17"), + Heading(name="RQ.SRS-006.RBAC.User.ShowCreateUser", level=4, num="5.3.17.1"), + Heading( + name="RQ.SRS-006.RBAC.User.ShowCreateUser.For", level=4, num="5.3.17.2" + ), + Heading( + name="RQ.SRS-006.RBAC.User.ShowCreateUser.Syntax", level=4, num="5.3.17.3" + ), + Heading(name="Drop User", level=3, num="5.3.18"), + Heading(name="RQ.SRS-006.RBAC.User.Drop", level=4, num="5.3.18.1"), + Heading(name="RQ.SRS-006.RBAC.User.Drop.IfExists", level=4, num="5.3.18.2"), + Heading(name="RQ.SRS-006.RBAC.User.Drop.OnCluster", level=4, num="5.3.18.3"), + Heading(name="RQ.SRS-006.RBAC.User.Drop.Syntax", level=4, num="5.3.18.4"), + Heading(name="Role", level=2, num="5.4"), + Heading(name="RQ.SRS-006.RBAC.Role", level=3, num="5.4.1"), + Heading(name="RQ.SRS-006.RBAC.Role.Privileges", level=3, num="5.4.2"), + Heading(name="RQ.SRS-006.RBAC.Role.Variables", level=3, num="5.4.3"), + Heading(name="RQ.SRS-006.RBAC.Role.SettingsProfile", level=3, num="5.4.4"), + Heading(name="RQ.SRS-006.RBAC.Role.Quotas", level=3, num="5.4.5"), + Heading(name="RQ.SRS-006.RBAC.Role.RowPolicies", level=3, num="5.4.6"), + Heading(name="Create Role", level=3, num="5.4.7"), + Heading(name="RQ.SRS-006.RBAC.Role.Create", level=4, num="5.4.7.1"), + Heading(name="RQ.SRS-006.RBAC.Role.Create.IfNotExists", level=4, num="5.4.7.2"), + Heading(name="RQ.SRS-006.RBAC.Role.Create.Replace", level=4, num="5.4.7.3"), + Heading(name="RQ.SRS-006.RBAC.Role.Create.Settings", level=4, num="5.4.7.4"), + Heading(name="RQ.SRS-006.RBAC.Role.Create.Syntax", level=4, num="5.4.7.5"), + Heading(name="Alter Role", level=3, num="5.4.8"), + Heading(name="RQ.SRS-006.RBAC.Role.Alter", level=4, num="5.4.8.1"), + Heading(name="RQ.SRS-006.RBAC.Role.Alter.IfExists", level=4, num="5.4.8.2"), + Heading(name="RQ.SRS-006.RBAC.Role.Alter.Cluster", level=4, num="5.4.8.3"), + Heading(name="RQ.SRS-006.RBAC.Role.Alter.Rename", level=4, num="5.4.8.4"), + Heading(name="RQ.SRS-006.RBAC.Role.Alter.Settings", level=4, num="5.4.8.5"), + Heading(name="RQ.SRS-006.RBAC.Role.Alter.Syntax", level=4, num="5.4.8.6"), + Heading(name="Drop Role", level=3, num="5.4.9"), + Heading(name="RQ.SRS-006.RBAC.Role.Drop", level=4, num="5.4.9.1"), + Heading(name="RQ.SRS-006.RBAC.Role.Drop.IfExists", level=4, num="5.4.9.2"), + Heading(name="RQ.SRS-006.RBAC.Role.Drop.Cluster", level=4, num="5.4.9.3"), + Heading(name="RQ.SRS-006.RBAC.Role.Drop.Syntax", level=4, num="5.4.9.4"), + Heading(name="Show Create Role", level=3, num="5.4.10"), + Heading(name="RQ.SRS-006.RBAC.Role.ShowCreate", level=4, num="5.4.10.1"), + Heading(name="RQ.SRS-006.RBAC.Role.ShowCreate.Syntax", level=4, num="5.4.10.2"), + Heading(name="Partial Revokes", level=2, num="5.5"), + Heading(name="RQ.SRS-006.RBAC.PartialRevokes", level=3, num="5.5.1"), + Heading(name="RQ.SRS-006.RBAC.PartialRevoke.Syntax", level=3, num="5.5.2"), + Heading(name="Settings Profile", 
level=2, num="5.6"), + Heading(name="RQ.SRS-006.RBAC.SettingsProfile", level=3, num="5.6.1"), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Constraints", level=3, num="5.6.2" + ), + Heading(name="Create Settings Profile", level=3, num="5.6.3"), + Heading(name="RQ.SRS-006.RBAC.SettingsProfile.Create", level=4, num="5.6.3.1"), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.IfNotExists", + level=4, + num="5.6.3.2", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Replace", + level=4, + num="5.6.3.3", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Variables", + level=4, + num="5.6.3.4", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Value", + level=4, + num="5.6.3.5", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Variables.Constraints", + level=4, + num="5.6.3.6", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment", + level=4, + num="5.6.3.7", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.None", + level=4, + num="5.6.3.8", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.All", + level=4, + num="5.6.3.9", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Assignment.AllExcept", + level=4, + num="5.6.3.10", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Inherit", + level=4, + num="5.6.3.11", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.OnCluster", + level=4, + num="5.6.3.12", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Create.Syntax", + level=4, + num="5.6.3.13", + ), + Heading(name="Alter Settings Profile", level=3, num="5.6.4"), + Heading(name="RQ.SRS-006.RBAC.SettingsProfile.Alter", level=4, num="5.6.4.1"), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.IfExists", + level=4, + num="5.6.4.2", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Rename", level=4, num="5.6.4.3" + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables", + level=4, + num="5.6.4.4", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Value", + level=4, + num="5.6.4.5", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Variables.Constraints", + level=4, + num="5.6.4.6", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment", + level=4, + num="5.6.4.7", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.None", + level=4, + num="5.6.4.8", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.All", + level=4, + num="5.6.4.9", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.AllExcept", + level=4, + num="5.6.4.10", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.Inherit", + level=4, + num="5.6.4.11", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Assignment.OnCluster", + level=4, + num="5.6.4.12", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Alter.Syntax", level=4, num="5.6.4.13" + ), + Heading(name="Drop Settings Profile", level=3, num="5.6.5"), + Heading(name="RQ.SRS-006.RBAC.SettingsProfile.Drop", level=4, num="5.6.5.1"), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Drop.IfExists", level=4, num="5.6.5.2" + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Drop.OnCluster", + level=4, + num="5.6.5.3", + ), + Heading( + name="RQ.SRS-006.RBAC.SettingsProfile.Drop.Syntax", level=4, num="5.6.5.4" + ), + Heading(name="Show Create Settings Profile", level=3, num="5.6.6"), + Heading( + 
name="RQ.SRS-006.RBAC.SettingsProfile.ShowCreateSettingsProfile", + level=4, + num="5.6.6.1", + ), + Heading(name="Quotas", level=2, num="5.7"), + Heading(name="RQ.SRS-006.RBAC.Quotas", level=3, num="5.7.1"), + Heading(name="RQ.SRS-006.RBAC.Quotas.Keyed", level=3, num="5.7.2"), + Heading(name="RQ.SRS-006.RBAC.Quotas.Queries", level=3, num="5.7.3"), + Heading(name="RQ.SRS-006.RBAC.Quotas.Errors", level=3, num="5.7.4"), + Heading(name="RQ.SRS-006.RBAC.Quotas.ResultRows", level=3, num="5.7.5"), + Heading(name="RQ.SRS-006.RBAC.Quotas.ReadRows", level=3, num="5.7.6"), + Heading(name="RQ.SRS-006.RBAC.Quotas.ResultBytes", level=3, num="5.7.7"), + Heading(name="RQ.SRS-006.RBAC.Quotas.ReadBytes", level=3, num="5.7.8"), + Heading(name="RQ.SRS-006.RBAC.Quotas.ExecutionTime", level=3, num="5.7.9"), + Heading(name="Create Quotas", level=3, num="5.7.10"), + Heading(name="RQ.SRS-006.RBAC.Quota.Create", level=4, num="5.7.10.1"), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.IfNotExists", level=4, num="5.7.10.2" + ), + Heading(name="RQ.SRS-006.RBAC.Quota.Create.Replace", level=4, num="5.7.10.3"), + Heading(name="RQ.SRS-006.RBAC.Quota.Create.Cluster", level=4, num="5.7.10.4"), + Heading(name="RQ.SRS-006.RBAC.Quota.Create.Interval", level=4, num="5.7.10.5"), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.Interval.Randomized", + level=4, + num="5.7.10.6", + ), + Heading(name="RQ.SRS-006.RBAC.Quota.Create.Queries", level=4, num="5.7.10.7"), + Heading(name="RQ.SRS-006.RBAC.Quota.Create.Errors", level=4, num="5.7.10.8"), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.ResultRows", level=4, num="5.7.10.9" + ), + Heading(name="RQ.SRS-006.RBAC.Quota.Create.ReadRows", level=4, num="5.7.10.10"), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.ResultBytes", level=4, num="5.7.10.11" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.ReadBytes", level=4, num="5.7.10.12" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.ExecutionTime", level=4, num="5.7.10.13" + ), + Heading(name="RQ.SRS-006.RBAC.Quota.Create.NoLimits", level=4, num="5.7.10.14"), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.TrackingOnly", level=4, num="5.7.10.15" + ), + Heading(name="RQ.SRS-006.RBAC.Quota.Create.KeyedBy", level=4, num="5.7.10.16"), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.KeyedByOptions", level=4, num="5.7.10.17" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.Assignment", level=4, num="5.7.10.18" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.Assignment.None", + level=4, + num="5.7.10.19", + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.Assignment.All", level=4, num="5.7.10.20" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.Create.Assignment.Except", + level=4, + num="5.7.10.21", + ), + Heading(name="RQ.SRS-006.RBAC.Quota.Create.Syntax", level=4, num="5.7.10.22"), + Heading(name="Alter Quota", level=3, num="5.7.11"), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter", level=4, num="5.7.11.1"), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter.IfExists", level=4, num="5.7.11.2"), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter.Rename", level=4, num="5.7.11.3"), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter.Cluster", level=4, num="5.7.11.4"), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter.Interval", level=4, num="5.7.11.5"), + Heading( + name="RQ.SRS-006.RBAC.Quota.Alter.Interval.Randomized", + level=4, + num="5.7.11.6", + ), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter.Queries", level=4, num="5.7.11.7"), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter.Errors", level=4, num="5.7.11.8"), + 
Heading(name="RQ.SRS-006.RBAC.Quota.Alter.ResultRows", level=4, num="5.7.11.9"), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter.ReadRows", level=4, num="5.7.11.10"), + Heading( + name="RQ.SRS-006.RBAC.Quota.ALter.ResultBytes", level=4, num="5.7.11.11" + ), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter.ReadBytes", level=4, num="5.7.11.12"), + Heading( + name="RQ.SRS-006.RBAC.Quota.Alter.ExecutionTime", level=4, num="5.7.11.13" + ), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter.NoLimits", level=4, num="5.7.11.14"), + Heading( + name="RQ.SRS-006.RBAC.Quota.Alter.TrackingOnly", level=4, num="5.7.11.15" + ), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter.KeyedBy", level=4, num="5.7.11.16"), + Heading( + name="RQ.SRS-006.RBAC.Quota.Alter.KeyedByOptions", level=4, num="5.7.11.17" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.Alter.Assignment", level=4, num="5.7.11.18" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.Alter.Assignment.None", level=4, num="5.7.11.19" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.Alter.Assignment.All", level=4, num="5.7.11.20" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.Alter.Assignment.Except", + level=4, + num="5.7.11.21", + ), + Heading(name="RQ.SRS-006.RBAC.Quota.Alter.Syntax", level=4, num="5.7.11.22"), + Heading(name="Drop Quota", level=3, num="5.7.12"), + Heading(name="RQ.SRS-006.RBAC.Quota.Drop", level=4, num="5.7.12.1"), + Heading(name="RQ.SRS-006.RBAC.Quota.Drop.IfExists", level=4, num="5.7.12.2"), + Heading(name="RQ.SRS-006.RBAC.Quota.Drop.Cluster", level=4, num="5.7.12.3"), + Heading(name="RQ.SRS-006.RBAC.Quota.Drop.Syntax", level=4, num="5.7.12.4"), + Heading(name="Show Quotas", level=3, num="5.7.13"), + Heading(name="RQ.SRS-006.RBAC.Quota.ShowQuotas", level=4, num="5.7.13.1"), + Heading( + name="RQ.SRS-006.RBAC.Quota.ShowQuotas.IntoOutfile", level=4, num="5.7.13.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.ShowQuotas.Format", level=4, num="5.7.13.3" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.ShowQuotas.Settings", level=4, num="5.7.13.4" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.ShowQuotas.Syntax", level=4, num="5.7.13.5" + ), + Heading(name="Show Create Quota", level=3, num="5.7.14"), + Heading( + name="RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Name", level=4, num="5.7.14.1" + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Current", + level=4, + num="5.7.14.2", + ), + Heading( + name="RQ.SRS-006.RBAC.Quota.ShowCreateQuota.Syntax", level=4, num="5.7.14.3" + ), + Heading(name="Row Policy", level=2, num="5.8"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy", level=3, num="5.8.1"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Condition", level=3, num="5.8.2"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Restriction", level=3, num="5.8.3"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Nesting", level=3, num="5.8.4"), + Heading(name="Create Row Policy", level=3, num="5.8.5"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Create", level=4, num="5.8.5.1"), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.IfNotExists", level=4, num="5.8.5.2" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.Replace", level=4, num="5.8.5.3" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.OnCluster", level=4, num="5.8.5.4" + ), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Create.On", level=4, num="5.8.5.5"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Create.Access", level=4, num="5.8.5.6"), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.Access.Permissive", + level=4, + num="5.8.5.7", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.Access.Restrictive", + 
level=4, + num="5.8.5.8", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.ForSelect", level=4, num="5.8.5.9" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.Condition", level=4, num="5.8.5.10" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.Assignment", level=4, num="5.8.5.11" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.None", + level=4, + num="5.8.5.12", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.All", + level=4, + num="5.8.5.13", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.Assignment.AllExcept", + level=4, + num="5.8.5.14", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Create.Syntax", level=4, num="5.8.5.15" + ), + Heading(name="Alter Row Policy", level=3, num="5.8.6"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Alter", level=4, num="5.8.6.1"), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Alter.IfExists", level=4, num="5.8.6.2" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Alter.ForSelect", level=4, num="5.8.6.3" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Alter.OnCluster", level=4, num="5.8.6.4" + ), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Alter.On", level=4, num="5.8.6.5"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Alter.Rename", level=4, num="5.8.6.6"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Alter.Access", level=4, num="5.8.6.7"), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Permissive", + level=4, + num="5.8.6.8", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Access.Restrictive", + level=4, + num="5.8.6.9", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Condition", level=4, num="5.8.6.10" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Condition.None", + level=4, + num="5.8.6.11", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment", level=4, num="5.8.6.12" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.None", + level=4, + num="5.8.6.13", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.All", + level=4, + num="5.8.6.14", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Alter.Assignment.AllExcept", + level=4, + num="5.8.6.15", + ), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Alter.Syntax", level=4, num="5.8.6.16"), + Heading(name="Drop Row Policy", level=3, num="5.8.7"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Drop", level=4, num="5.8.7.1"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Drop.IfExists", level=4, num="5.8.7.2"), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Drop.On", level=4, num="5.8.7.3"), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.Drop.OnCluster", level=4, num="5.8.7.4" + ), + Heading(name="RQ.SRS-006.RBAC.RowPolicy.Drop.Syntax", level=4, num="5.8.7.5"), + Heading(name="Show Create Row Policy", level=3, num="5.8.8"), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy", level=4, num="5.8.8.1" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.On", + level=4, + num="5.8.8.2", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.ShowCreateRowPolicy.Syntax", + level=4, + num="5.8.8.3", + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies", level=4, num="5.8.8.4" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.On", level=4, num="5.8.8.5" + ), + Heading( + name="RQ.SRS-006.RBAC.RowPolicy.ShowRowPolicies.Syntax", + level=4, + num="5.8.8.6", + ), + Heading(name="Set Default Role", level=2, num="5.9"), + Heading(name="RQ.SRS-006.RBAC.SetDefaultRole", level=3, num="5.9.1"), + Heading( + 
name="RQ.SRS-006.RBAC.SetDefaultRole.CurrentUser", level=3, num="5.9.2" + ), + Heading(name="RQ.SRS-006.RBAC.SetDefaultRole.All", level=3, num="5.9.3"), + Heading(name="RQ.SRS-006.RBAC.SetDefaultRole.AllExcept", level=3, num="5.9.4"), + Heading(name="RQ.SRS-006.RBAC.SetDefaultRole.None", level=3, num="5.9.5"), + Heading(name="RQ.SRS-006.RBAC.SetDefaultRole.Syntax", level=3, num="5.9.6"), + Heading(name="Set Role", level=2, num="5.10"), + Heading(name="RQ.SRS-006.RBAC.SetRole", level=3, num="5.10.1"), + Heading(name="RQ.SRS-006.RBAC.SetRole.Default", level=3, num="5.10.2"), + Heading(name="RQ.SRS-006.RBAC.SetRole.None", level=3, num="5.10.3"), + Heading(name="RQ.SRS-006.RBAC.SetRole.All", level=3, num="5.10.4"), + Heading(name="RQ.SRS-006.RBAC.SetRole.AllExcept", level=3, num="5.10.5"), + Heading(name="RQ.SRS-006.RBAC.SetRole.Syntax", level=3, num="5.10.6"), + Heading(name="Grant", level=2, num="5.11"), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.To", level=3, num="5.11.1"), + Heading( + name="RQ.SRS-006.RBAC.Grant.Privilege.ToCurrentUser", level=3, num="5.11.2" + ), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.Select", level=3, num="5.11.3"), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.Insert", level=3, num="5.11.4"), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.Alter", level=3, num="5.11.5"), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.Create", level=3, num="5.11.6"), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.Drop", level=3, num="5.11.7"), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.Truncate", level=3, num="5.11.8"), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.Optimize", level=3, num="5.11.9"), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.Show", level=3, num="5.11.10"), + Heading( + name="RQ.SRS-006.RBAC.Grant.Privilege.KillQuery", level=3, num="5.11.11" + ), + Heading( + name="RQ.SRS-006.RBAC.Grant.Privilege.AccessManagement", + level=3, + num="5.11.12", + ), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.System", level=3, num="5.11.13"), + Heading( + name="RQ.SRS-006.RBAC.Grant.Privilege.Introspection", level=3, num="5.11.14" + ), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.Sources", level=3, num="5.11.15"), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.DictGet", level=3, num="5.11.16"), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.None", level=3, num="5.11.17"), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.All", level=3, num="5.11.18"), + Heading( + name="RQ.SRS-006.RBAC.Grant.Privilege.GrantOption", level=3, num="5.11.19" + ), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.On", level=3, num="5.11.20"), + Heading( + name="RQ.SRS-006.RBAC.Grant.Privilege.PrivilegeColumns", + level=3, + num="5.11.21", + ), + Heading( + name="RQ.SRS-006.RBAC.Grant.Privilege.OnCluster", level=3, num="5.11.22" + ), + Heading(name="RQ.SRS-006.RBAC.Grant.Privilege.Syntax", level=3, num="5.11.23"), + Heading(name="Revoke", level=2, num="5.12"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.Cluster", level=3, num="5.12.1"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.Select", level=3, num="5.12.2"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.Insert", level=3, num="5.12.3"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.Alter", level=3, num="5.12.4"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.Create", level=3, num="5.12.5"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.Drop", level=3, num="5.12.6"), + Heading( + name="RQ.SRS-006.RBAC.Revoke.Privilege.Truncate", level=3, num="5.12.7" + ), + Heading( + 
name="RQ.SRS-006.RBAC.Revoke.Privilege.Optimize", level=3, num="5.12.8" + ), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.Show", level=3, num="5.12.9"), + Heading( + name="RQ.SRS-006.RBAC.Revoke.Privilege.KillQuery", level=3, num="5.12.10" + ), + Heading( + name="RQ.SRS-006.RBAC.Revoke.Privilege.AccessManagement", + level=3, + num="5.12.11", + ), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.System", level=3, num="5.12.12"), + Heading( + name="RQ.SRS-006.RBAC.Revoke.Privilege.Introspection", + level=3, + num="5.12.13", + ), + Heading( + name="RQ.SRS-006.RBAC.Revoke.Privilege.Sources", level=3, num="5.12.14" + ), + Heading( + name="RQ.SRS-006.RBAC.Revoke.Privilege.DictGet", level=3, num="5.12.15" + ), + Heading( + name="RQ.SRS-006.RBAC.Revoke.Privilege.PrivilegeColumns", + level=3, + num="5.12.16", + ), + Heading( + name="RQ.SRS-006.RBAC.Revoke.Privilege.Multiple", level=3, num="5.12.17" + ), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.All", level=3, num="5.12.18"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.None", level=3, num="5.12.19"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.On", level=3, num="5.12.20"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.From", level=3, num="5.12.21"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Privilege.Syntax", level=3, num="5.12.22"), + Heading(name="Grant Role", level=2, num="5.13"), + Heading(name="RQ.SRS-006.RBAC.Grant.Role", level=3, num="5.13.1"), + Heading(name="RQ.SRS-006.RBAC.Grant.Role.CurrentUser", level=3, num="5.13.2"), + Heading(name="RQ.SRS-006.RBAC.Grant.Role.AdminOption", level=3, num="5.13.3"), + Heading(name="RQ.SRS-006.RBAC.Grant.Role.OnCluster", level=3, num="5.13.4"), + Heading(name="RQ.SRS-006.RBAC.Grant.Role.Syntax", level=3, num="5.13.5"), + Heading(name="Revoke Role", level=2, num="5.14"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Role", level=3, num="5.14.1"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Role.Keywords", level=3, num="5.14.2"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Role.Cluster", level=3, num="5.14.3"), + Heading(name="RQ.SRS-006.RBAC.Revoke.AdminOption", level=3, num="5.14.4"), + Heading(name="RQ.SRS-006.RBAC.Revoke.Role.Syntax", level=3, num="5.14.5"), + Heading(name="Show Grants", level=2, num="5.15"), + Heading(name="RQ.SRS-006.RBAC.Show.Grants", level=3, num="5.15.1"), + Heading(name="RQ.SRS-006.RBAC.Show.Grants.For", level=3, num="5.15.2"), + Heading(name="RQ.SRS-006.RBAC.Show.Grants.Syntax", level=3, num="5.15.3"), + Heading(name="Table Privileges", level=2, num="5.16"), + Heading(name="RQ.SRS-006.RBAC.Table.PublicTables", level=3, num="5.16.1"), + Heading(name="RQ.SRS-006.RBAC.Table.SensitiveTables", level=3, num="5.16.2"), + Heading(name="Distributed Tables", level=2, num="5.17"), + Heading(name="RQ.SRS-006.RBAC.DistributedTable.Create", level=3, num="5.17.1"), + Heading(name="RQ.SRS-006.RBAC.DistributedTable.Select", level=3, num="5.17.2"), + Heading(name="RQ.SRS-006.RBAC.DistributedTable.Insert", level=3, num="5.17.3"), + Heading( + name="RQ.SRS-006.RBAC.DistributedTable.SpecialTables", level=3, num="5.17.4" + ), + Heading( + name="RQ.SRS-006.RBAC.DistributedTable.LocalUser", level=3, num="5.17.5" + ), + Heading( + name="RQ.SRS-006.RBAC.DistributedTable.SameUserDifferentNodesDifferentPrivileges", + level=3, + num="5.17.6", + ), + Heading(name="Views", level=2, num="5.18"), + Heading(name="View", level=3, num="5.18.1"), + Heading(name="RQ.SRS-006.RBAC.View", level=4, num="5.18.1.1"), + Heading(name="RQ.SRS-006.RBAC.View.Create", level=4, num="5.18.1.2"), + 
Heading(name="RQ.SRS-006.RBAC.View.Select", level=4, num="5.18.1.3"), + Heading(name="RQ.SRS-006.RBAC.View.Drop", level=4, num="5.18.1.4"), + Heading(name="Materialized View", level=3, num="5.18.2"), + Heading(name="RQ.SRS-006.RBAC.MaterializedView", level=4, num="5.18.2.1"), + Heading( + name="RQ.SRS-006.RBAC.MaterializedView.Create", level=4, num="5.18.2.2" + ), + Heading( + name="RQ.SRS-006.RBAC.MaterializedView.Select", level=4, num="5.18.2.3" + ), + Heading( + name="RQ.SRS-006.RBAC.MaterializedView.Select.TargetTable", + level=4, + num="5.18.2.4", + ), + Heading( + name="RQ.SRS-006.RBAC.MaterializedView.Select.SourceTable", + level=4, + num="5.18.2.5", + ), + Heading(name="RQ.SRS-006.RBAC.MaterializedView.Drop", level=4, num="5.18.2.6"), + Heading( + name="RQ.SRS-006.RBAC.MaterializedView.ModifyQuery", level=4, num="5.18.2.7" + ), + Heading( + name="RQ.SRS-006.RBAC.MaterializedView.Insert", level=4, num="5.18.2.8" + ), + Heading( + name="RQ.SRS-006.RBAC.MaterializedView.Insert.SourceTable", + level=4, + num="5.18.2.9", + ), + Heading( + name="RQ.SRS-006.RBAC.MaterializedView.Insert.TargetTable", + level=4, + num="5.18.2.10", + ), + Heading(name="Live View", level=3, num="5.18.3"), + Heading(name="RQ.SRS-006.RBAC.LiveView", level=4, num="5.18.3.1"), + Heading(name="RQ.SRS-006.RBAC.LiveView.Create", level=4, num="5.18.3.2"), + Heading(name="RQ.SRS-006.RBAC.LiveView.Select", level=4, num="5.18.3.3"), + Heading(name="RQ.SRS-006.RBAC.LiveView.Drop", level=4, num="5.18.3.4"), + Heading(name="RQ.SRS-006.RBAC.LiveView.Refresh", level=4, num="5.18.3.5"), + Heading(name="Select", level=2, num="5.19"), + Heading(name="RQ.SRS-006.RBAC.Select", level=3, num="5.19.1"), + Heading(name="RQ.SRS-006.RBAC.Select.Column", level=3, num="5.19.2"), + Heading(name="RQ.SRS-006.RBAC.Select.Cluster", level=3, num="5.19.3"), + Heading(name="RQ.SRS-006.RBAC.Select.TableEngines", level=3, num="5.19.4"), + Heading(name="Insert", level=2, num="5.20"), + Heading(name="RQ.SRS-006.RBAC.Insert", level=3, num="5.20.1"), + Heading(name="RQ.SRS-006.RBAC.Insert.Column", level=3, num="5.20.2"), + Heading(name="RQ.SRS-006.RBAC.Insert.Cluster", level=3, num="5.20.3"), + Heading(name="RQ.SRS-006.RBAC.Insert.TableEngines", level=3, num="5.20.4"), + Heading(name="Alter", level=2, num="5.21"), + Heading(name="Alter Column", level=3, num="5.21.1"), + Heading(name="RQ.SRS-006.RBAC.Privileges.AlterColumn", level=4, num="5.21.1.1"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterColumn.Grant", level=4, num="5.21.1.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterColumn.Revoke", + level=4, + num="5.21.1.3", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterColumn.Column", + level=4, + num="5.21.1.4", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterColumn.Cluster", + level=4, + num="5.21.1.5", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterColumn.TableEngines", + level=4, + num="5.21.1.6", + ), + Heading(name="Alter Index", level=3, num="5.21.2"), + Heading(name="RQ.SRS-006.RBAC.Privileges.AlterIndex", level=4, num="5.21.2.1"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterIndex.Grant", level=4, num="5.21.2.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterIndex.Revoke", level=4, num="5.21.2.3" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterIndex.Cluster", + level=4, + num="5.21.2.4", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterIndex.TableEngines", + level=4, + num="5.21.2.5", + ), + Heading(name="Alter Constraint", level=3, num="5.21.3"), + Heading( + 
name="RQ.SRS-006.RBAC.Privileges.AlterConstraint", level=4, num="5.21.3.1" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterConstraint.Grant", + level=4, + num="5.21.3.2", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterConstraint.Revoke", + level=4, + num="5.21.3.3", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterConstraint.Cluster", + level=4, + num="5.21.3.4", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterConstraint.TableEngines", + level=4, + num="5.21.3.5", + ), + Heading(name="Alter TTL", level=3, num="5.21.4"), + Heading(name="RQ.SRS-006.RBAC.Privileges.AlterTTL", level=4, num="5.21.4.1"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterTTL.Grant", level=4, num="5.21.4.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterTTL.Revoke", level=4, num="5.21.4.3" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterTTL.Cluster", level=4, num="5.21.4.4" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterTTL.TableEngines", + level=4, + num="5.21.4.5", + ), + Heading(name="Alter Settings", level=3, num="5.21.5"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterSettings", level=4, num="5.21.5.1" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterSettings.Grant", + level=4, + num="5.21.5.2", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterSettings.Revoke", + level=4, + num="5.21.5.3", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterSettings.Cluster", + level=4, + num="5.21.5.4", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterSettings.TableEngines", + level=4, + num="5.21.5.5", + ), + Heading(name="Alter Update", level=3, num="5.21.6"), + Heading(name="RQ.SRS-006.RBAC.Privileges.AlterUpdate", level=4, num="5.21.6.1"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterUpdate.Grant", level=4, num="5.21.6.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterUpdate.Revoke", + level=4, + num="5.21.6.3", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterUpdate.TableEngines", + level=4, + num="5.21.6.4", + ), + Heading(name="Alter Delete", level=3, num="5.21.7"), + Heading(name="RQ.SRS-006.RBAC.Privileges.AlterDelete", level=4, num="5.21.7.1"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterDelete.Grant", level=4, num="5.21.7.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterDelete.Revoke", + level=4, + num="5.21.7.3", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterDelete.TableEngines", + level=4, + num="5.21.7.4", + ), + Heading(name="Alter Freeze Partition", level=3, num="5.21.8"), + Heading(name="RQ.SRS-006.RBAC.Privileges.AlterFreeze", level=4, num="5.21.8.1"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterFreeze.Grant", level=4, num="5.21.8.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterFreeze.Revoke", + level=4, + num="5.21.8.3", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterFreeze.TableEngines", + level=4, + num="5.21.8.4", + ), + Heading(name="Alter Fetch Partition", level=3, num="5.21.9"), + Heading(name="RQ.SRS-006.RBAC.Privileges.AlterFetch", level=4, num="5.21.9.1"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterFetch.Grant", level=4, num="5.21.9.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterFetch.Revoke", level=4, num="5.21.9.3" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterFetch.TableEngines", + level=4, + num="5.21.9.4", + ), + Heading(name="Alter Move Partition", level=3, num="5.21.10"), + Heading(name="RQ.SRS-006.RBAC.Privileges.AlterMove", level=4, num="5.21.10.1"), + Heading( + 
name="RQ.SRS-006.RBAC.Privileges.AlterMove.Grant", level=4, num="5.21.10.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterMove.Revoke", level=4, num="5.21.10.3" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterMove.TableEngines", + level=4, + num="5.21.10.4", + ), + Heading(name="Create", level=2, num="5.22"), + Heading(name="RQ.SRS-006.RBAC.Privileges.CreateTable", level=3, num="5.22.1"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.CreateDatabase", level=3, num="5.22.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.CreateDictionary", level=3, num="5.22.3" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.CreateTemporaryTable", + level=3, + num="5.22.4", + ), + Heading(name="Attach", level=2, num="5.23"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AttachDatabase", level=3, num="5.23.1" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AttachDictionary", level=3, num="5.23.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AttachTemporaryTable", + level=3, + num="5.23.3", + ), + Heading(name="RQ.SRS-006.RBAC.Privileges.AttachTable", level=3, num="5.23.4"), + Heading(name="Drop", level=2, num="5.24"), + Heading(name="RQ.SRS-006.RBAC.Privileges.DropTable", level=3, num="5.24.1"), + Heading(name="RQ.SRS-006.RBAC.Privileges.DropDatabase", level=3, num="5.24.2"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.DropDictionary", level=3, num="5.24.3" + ), + Heading(name="Detach", level=2, num="5.25"), + Heading(name="RQ.SRS-006.RBAC.Privileges.DetachTable", level=3, num="5.25.1"), + Heading(name="RQ.SRS-006.RBAC.Privileges.DetachView", level=3, num="5.25.2"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.DetachDatabase", level=3, num="5.25.3" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.DetachDictionary", level=3, num="5.25.4" + ), + Heading(name="Truncate", level=2, num="5.26"), + Heading(name="RQ.SRS-006.RBAC.Privileges.Truncate", level=3, num="5.26.1"), + Heading(name="Optimize", level=2, num="5.27"), + Heading(name="RQ.SRS-006.RBAC.Privileges.Optimize", level=3, num="5.27.1"), + Heading(name="Kill Query", level=2, num="5.28"), + Heading(name="RQ.SRS-006.RBAC.Privileges.KillQuery", level=3, num="5.28.1"), + Heading(name="Kill Mutation", level=2, num="5.29"), + Heading(name="RQ.SRS-006.RBAC.Privileges.KillMutation", level=3, num="5.29.1"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.KillMutation.AlterUpdate", + level=3, + num="5.29.2", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.KillMutation.AlterDelete", + level=3, + num="5.29.3", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.KillMutation.AlterDropColumn", + level=3, + num="5.29.4", + ), + Heading(name="Show", level=2, num="5.30"), + Heading(name="RQ.SRS-006.RBAC.ShowTables.Privilege", level=3, num="5.30.1"), + Heading( + name="RQ.SRS-006.RBAC.ShowTables.RequiredPrivilege", level=3, num="5.30.2" + ), + Heading( + name="RQ.SRS-006.RBAC.ExistsTable.RequiredPrivilege", level=3, num="5.30.3" + ), + Heading( + name="RQ.SRS-006.RBAC.CheckTable.RequiredPrivilege", level=3, num="5.30.4" + ), + Heading(name="RQ.SRS-006.RBAC.ShowDatabases.Privilege", level=3, num="5.30.5"), + Heading( + name="RQ.SRS-006.RBAC.ShowDatabases.RequiredPrivilege", + level=3, + num="5.30.6", + ), + Heading( + name="RQ.SRS-006.RBAC.ShowCreateDatabase.RequiredPrivilege", + level=3, + num="5.30.7", + ), + Heading( + name="RQ.SRS-006.RBAC.UseDatabase.RequiredPrivilege", level=3, num="5.30.8" + ), + Heading(name="RQ.SRS-006.RBAC.ShowColumns.Privilege", level=3, num="5.30.9"), + Heading( + 
name="RQ.SRS-006.RBAC.ShowCreateTable.RequiredPrivilege", + level=3, + num="5.30.10", + ), + Heading( + name="RQ.SRS-006.RBAC.DescribeTable.RequiredPrivilege", + level=3, + num="5.30.11", + ), + Heading( + name="RQ.SRS-006.RBAC.ShowDictionaries.Privilege", level=3, num="5.30.12" + ), + Heading( + name="RQ.SRS-006.RBAC.ShowDictionaries.RequiredPrivilege", + level=3, + num="5.30.13", + ), + Heading( + name="RQ.SRS-006.RBAC.ShowCreateDictionary.RequiredPrivilege", + level=3, + num="5.30.14", + ), + Heading( + name="RQ.SRS-006.RBAC.ExistsDictionary.RequiredPrivilege", + level=3, + num="5.30.15", + ), + Heading(name="Access Management", level=2, num="5.31"), + Heading(name="RQ.SRS-006.RBAC.Privileges.CreateUser", level=3, num="5.31.1"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.CreateUser.DefaultRole", + level=3, + num="5.31.2", + ), + Heading(name="RQ.SRS-006.RBAC.Privileges.AlterUser", level=3, num="5.31.3"), + Heading(name="RQ.SRS-006.RBAC.Privileges.DropUser", level=3, num="5.31.4"), + Heading(name="RQ.SRS-006.RBAC.Privileges.CreateRole", level=3, num="5.31.5"), + Heading(name="RQ.SRS-006.RBAC.Privileges.AlterRole", level=3, num="5.31.6"), + Heading(name="RQ.SRS-006.RBAC.Privileges.DropRole", level=3, num="5.31.7"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.CreateRowPolicy", level=3, num="5.31.8" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterRowPolicy", level=3, num="5.31.9" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.DropRowPolicy", level=3, num="5.31.10" + ), + Heading(name="RQ.SRS-006.RBAC.Privileges.CreateQuota", level=3, num="5.31.11"), + Heading(name="RQ.SRS-006.RBAC.Privileges.AlterQuota", level=3, num="5.31.12"), + Heading(name="RQ.SRS-006.RBAC.Privileges.DropQuota", level=3, num="5.31.13"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.CreateSettingsProfile", + level=3, + num="5.31.14", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.AlterSettingsProfile", + level=3, + num="5.31.15", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.DropSettingsProfile", + level=3, + num="5.31.16", + ), + Heading(name="RQ.SRS-006.RBAC.Privileges.RoleAdmin", level=3, num="5.31.17"), + Heading(name="Show Access", level=3, num="5.31.18"), + Heading(name="RQ.SRS-006.RBAC.ShowUsers.Privilege", level=4, num="5.31.18.1"), + Heading( + name="RQ.SRS-006.RBAC.ShowUsers.RequiredPrivilege", level=4, num="5.31.18.2" + ), + Heading( + name="RQ.SRS-006.RBAC.ShowCreateUser.RequiredPrivilege", + level=4, + num="5.31.18.3", + ), + Heading(name="RQ.SRS-006.RBAC.ShowRoles.Privilege", level=4, num="5.31.18.4"), + Heading( + name="RQ.SRS-006.RBAC.ShowRoles.RequiredPrivilege", level=4, num="5.31.18.5" + ), + Heading( + name="RQ.SRS-006.RBAC.ShowCreateRole.RequiredPrivilege", + level=4, + num="5.31.18.6", + ), + Heading( + name="RQ.SRS-006.RBAC.ShowRowPolicies.Privilege", level=4, num="5.31.18.7" + ), + Heading( + name="RQ.SRS-006.RBAC.ShowRowPolicies.RequiredPrivilege", + level=4, + num="5.31.18.8", + ), + Heading( + name="RQ.SRS-006.RBAC.ShowCreateRowPolicy.RequiredPrivilege", + level=4, + num="5.31.18.9", + ), + Heading(name="RQ.SRS-006.RBAC.ShowQuotas.Privilege", level=4, num="5.31.18.10"), + Heading( + name="RQ.SRS-006.RBAC.ShowQuotas.RequiredPrivilege", + level=4, + num="5.31.18.11", + ), + Heading( + name="RQ.SRS-006.RBAC.ShowCreateQuota.RequiredPrivilege", + level=4, + num="5.31.18.12", + ), + Heading( + name="RQ.SRS-006.RBAC.ShowSettingsProfiles.Privilege", + level=4, + num="5.31.18.13", + ), + Heading( + name="RQ.SRS-006.RBAC.ShowSettingsProfiles.RequiredPrivilege", + level=4, + 
num="5.31.18.14", + ), + Heading( + name="RQ.SRS-006.RBAC.ShowCreateSettingsProfile.RequiredPrivilege", + level=4, + num="5.31.18.15", + ), + Heading(name="dictGet", level=2, num="5.32"), + Heading(name="RQ.SRS-006.RBAC.dictGet.Privilege", level=3, num="5.32.1"), + Heading( + name="RQ.SRS-006.RBAC.dictGet.RequiredPrivilege", level=3, num="5.32.2" + ), + Heading( + name="RQ.SRS-006.RBAC.dictGet.Type.RequiredPrivilege", level=3, num="5.32.3" + ), + Heading( + name="RQ.SRS-006.RBAC.dictGet.OrDefault.RequiredPrivilege", + level=3, + num="5.32.4", + ), + Heading( + name="RQ.SRS-006.RBAC.dictHas.RequiredPrivilege", level=3, num="5.32.5" + ), + Heading( + name="RQ.SRS-006.RBAC.dictGetHierarchy.RequiredPrivilege", + level=3, + num="5.32.6", + ), + Heading( + name="RQ.SRS-006.RBAC.dictIsIn.RequiredPrivilege", level=3, num="5.32.7" + ), + Heading(name="Introspection", level=2, num="5.33"), + Heading(name="RQ.SRS-006.RBAC.Privileges.Introspection", level=3, num="5.33.1"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.Introspection.addressToLine", + level=3, + num="5.33.2", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.Introspection.addressToSymbol", + level=3, + num="5.33.3", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.Introspection.demangle", + level=3, + num="5.33.4", + ), + Heading(name="System", level=2, num="5.34"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.Shutdown", level=3, num="5.34.1" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.DropCache", level=3, num="5.34.2" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.DropCache.DNS", + level=3, + num="5.34.3", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.DropCache.Mark", + level=3, + num="5.34.4", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.DropCache.Uncompressed", + level=3, + num="5.34.5", + ), + Heading(name="RQ.SRS-006.RBAC.Privileges.System.Reload", level=3, num="5.34.6"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.Reload.Config", + level=3, + num="5.34.7", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.Reload.Dictionary", + level=3, + num="5.34.8", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.Reload.Dictionaries", + level=3, + num="5.34.9", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.Reload.EmbeddedDictionaries", + level=3, + num="5.34.10", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.Merges", level=3, num="5.34.11" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.TTLMerges", level=3, num="5.34.12" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.Fetches", level=3, num="5.34.13" + ), + Heading(name="RQ.SRS-006.RBAC.Privileges.System.Moves", level=3, num="5.34.14"), + Heading(name="RQ.SRS-006.RBAC.Privileges.System.Sends", level=3, num="5.34.15"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.Sends.Distributed", + level=3, + num="5.34.16", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.Sends.Replicated", + level=3, + num="5.34.17", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.ReplicationQueues", + level=3, + num="5.34.18", + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.SyncReplica", level=3, num="5.34.19" + ), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.RestartReplica", + level=3, + num="5.34.20", + ), + Heading(name="RQ.SRS-006.RBAC.Privileges.System.Flush", level=3, num="5.34.21"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.System.Flush.Distributed", + level=3, + num="5.34.22", + ), + Heading( + 
name="RQ.SRS-006.RBAC.Privileges.System.Flush.Logs", level=3, num="5.34.23" + ), + Heading(name="Sources", level=2, num="5.35"), + Heading(name="RQ.SRS-006.RBAC.Privileges.Sources", level=3, num="5.35.1"), + Heading(name="RQ.SRS-006.RBAC.Privileges.Sources.File", level=3, num="5.35.2"), + Heading(name="RQ.SRS-006.RBAC.Privileges.Sources.URL", level=3, num="5.35.3"), + Heading( + name="RQ.SRS-006.RBAC.Privileges.Sources.Remote", level=3, num="5.35.4" + ), + Heading(name="RQ.SRS-006.RBAC.Privileges.Sources.MySQL", level=3, num="5.35.5"), + Heading(name="RQ.SRS-006.RBAC.Privileges.Sources.ODBC", level=3, num="5.35.6"), + Heading(name="RQ.SRS-006.RBAC.Privileges.Sources.JDBC", level=3, num="5.35.7"), + Heading(name="RQ.SRS-006.RBAC.Privileges.Sources.HDFS", level=3, num="5.35.8"), + Heading(name="RQ.SRS-006.RBAC.Privileges.Sources.S3", level=3, num="5.35.9"), + Heading(name="RQ.SRS-006.RBAC.Privileges.GrantOption", level=2, num="5.36"), + Heading(name="RQ.SRS-006.RBAC.Privileges.All", level=2, num="5.37"), + Heading(name="RQ.SRS-006.RBAC.Privileges.RoleAll", level=2, num="5.38"), + Heading(name="RQ.SRS-006.RBAC.Privileges.None", level=2, num="5.39"), + Heading(name="RQ.SRS-006.RBAC.Privileges.AdminOption", level=2, num="5.40"), + Heading(name="References", level=1, num="6"), + ), requirements=( RQ_SRS_006_RBAC, RQ_SRS_006_RBAC_Login, @@ -9977,8 +11280,8 @@ SRS_006_ClickHouse_Role_Based_Access_Control = Specification( RQ_SRS_006_RBAC_Privileges_RoleAll, RQ_SRS_006_RBAC_Privileges_None, RQ_SRS_006_RBAC_Privileges_AdminOption, - ), - content=''' + ), + content=""" # SRS-006 ClickHouse Role Based Access Control # Software Requirements Specification @@ -14498,4 +15801,5 @@ the user has that role with `ADMIN OPTION` privilege. [Git]: https://git-scm.com/ [MySQL]: https://dev.mysql.com/doc/refman/8.0/en/account-management-statements.html [PostgreSQL]: https://www.postgresql.org/docs/12/user-manag.html -''') +""", +) diff --git a/tests/testflows/rbac/tests/privileges/admin_option.py b/tests/testflows/rbac/tests/privileges/admin_option.py index f6115839bf5..467eab0ef4d 100644 --- a/tests/testflows/rbac/tests/privileges/admin_option.py +++ b/tests/testflows/rbac/tests/privileges/admin_option.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to grant role with `ADMIN OPTION` privilege granted directly. - """ + """Check that a user is able to grant role with `ADMIN OPTION` privilege granted directly.""" user_name = f"user_{getuid()}" @@ -19,10 +19,10 @@ def privileges_granted_directly(self, node=None): Suite(test=grant_role)(grant_target_name=user_name, user_name=user_name) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to grant role with `ADMIN OPTION` privilege granted through a role. - """ + """Check that a user is able to grant role with `ADMIN OPTION` privilege granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -37,10 +37,10 @@ def privileges_granted_via_role(self, node=None): Suite(test=grant_role)(grant_target_name=role_name, user_name=user_name) + @TestSuite def grant_role(self, grant_target_name, user_name, node=None): - """Check that user is able to execute to grant roles if and only if they have role with `ADMIN OPTION`. 
- """ + """Check that user is able to execute to grant roles if and only if they have role with `ADMIN OPTION`.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -59,8 +59,12 @@ def grant_role(self, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't grant a role"): - node.query(f"GRANT {grant_role_name} TO {target_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"GRANT {grant_role_name} TO {target_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("Grant role with privilege"): grant_role_name = f"grant_role_{getuid()}" @@ -69,10 +73,15 @@ def grant_role(self, grant_target_name, user_name, node=None): with user(node, target_user_name), role(node, grant_role_name): with When(f"I grant ADMIN OPTION"): - node.query(f"GRANT {grant_role_name} TO {grant_target_name} WITH ADMIN OPTION") + node.query( + f"GRANT {grant_role_name} TO {grant_target_name} WITH ADMIN OPTION" + ) with Then("I check the user can grant a role"): - node.query(f"GRANT {grant_role_name} TO {target_user_name}", settings = [("user", f"{user_name}")]) + node.query( + f"GRANT {grant_role_name} TO {target_user_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("Grant role on cluster"): grant_role_name = f"grant_role_{getuid()}" @@ -86,14 +95,21 @@ def grant_role(self, grant_target_name, user_name, node=None): node.query(f"CREATE USER {target_user_name} ON CLUSTER sharded_cluster") with When("I grant ADMIN OPTION privilege"): - node.query(f"GRANT {grant_role_name} TO {grant_target_name} WITH ADMIN OPTION") + node.query( + f"GRANT {grant_role_name} TO {grant_target_name} WITH ADMIN OPTION" + ) with Then("I check the user can grant a role"): - node.query(f"GRANT {grant_role_name} TO {target_user_name} ON CLUSTER sharded_cluster", settings = [("user", f"{user_name}")]) + node.query( + f"GRANT {grant_role_name} TO {target_user_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP ROLE IF EXISTS {grant_role_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP ROLE IF EXISTS {grant_role_name} ON CLUSTER sharded_cluster" + ) with Scenario("Grant role with revoked privilege"): grant_role_name = f"grant_role_{getuid()}" @@ -102,24 +118,30 @@ def grant_role(self, grant_target_name, user_name, node=None): with user(node, target_user_name), role(node, grant_role_name): with When(f"I grant ADMIN OPTION"): - node.query(f"GRANT {grant_role_name} TO {grant_target_name} WITH ADMIN OPTION") + node.query( + f"GRANT {grant_role_name} TO {grant_target_name} WITH ADMIN OPTION" + ) with And(f"I revoke ADMIN OPTION"): node.query(f"REVOKE {grant_role_name} FROM {grant_target_name}") with Then("I check the user cannot grant a role"): - node.query(f"GRANT {grant_role_name} TO {target_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"GRANT {grant_role_name} TO {target_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("admin option") @Requirements( RQ_SRS_006_RBAC_Privileges_AdminOption("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of ADMIN OPTION. 
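The scenarios above exercise the rule that a user can pass a role on to others only while holding it WITH ADMIN OPTION. As an illustrative sketch only (not part of the patch; the user and role names are hypothetical, and node.query with a settings user mirrors the helper already used in these tests), the same flow outside the test harness would look roughly like:

# Illustrative sketch, not part of the patch: hypothetical names, same helper API as above.
node.query("CREATE USER alice")
node.query("CREATE USER bob")
node.query("CREATE ROLE readers")
node.query("GRANT readers TO alice WITH ADMIN OPTION")
# While alice holds the role WITH ADMIN OPTION, she can re-grant it to another user:
node.query("GRANT readers TO bob", settings=[("user", "alice")])
node.query("REVOKE readers FROM alice")
# After the revoke, the same re-grant is expected to fail with "not enough privileges",
# which is what the "Grant role with revoked privilege" scenario checks.
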
- """ + """Check the RBAC functionality of ADMIN OPTION.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/all_role.py b/tests/testflows/rbac/tests/privileges/all_role.py index 629848a2746..a246237cb3e 100644 --- a/tests/testflows/rbac/tests/privileges/all_role.py +++ b/tests/testflows/rbac/tests/privileges/all_role.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestScenario def privilege_check(self, node=None): - '''Check that a role named ALL only grants privileges that it already has. - ''' + """Check that a role named ALL only grants privileges that it already has.""" user_name = f"user_{getuid()}" @@ -21,8 +21,9 @@ def privilege_check(self, node=None): node.query(f"GRANT ALL TO {user_name}") with Then("I check the user doesn't have any privileges"): - output = node.query("SHOW TABLES", settings=[("user",user_name)]).output - assert output == '', error() + output = node.query("SHOW TABLES", settings=[("user", user_name)]).output + assert output == "", error() + @TestFeature @Name("all role") @@ -30,8 +31,7 @@ def privilege_check(self, node=None): RQ_SRS_006_RBAC_Privileges_RoleAll("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of the role 'ALL'. - """ + """Check the RBAC functionality of the role 'ALL'.""" self.context.node = self.context.cluster.node(node) Scenario(run=privilege_check, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_column.py b/tests/testflows/rbac/tests/privileges/alter/alter_column.py index 2be20d4e667..05ce47c8852 100755 --- a/tests/testflows/rbac/tests/privileges/alter/alter_column.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_column.py @@ -9,26 +9,27 @@ import rbac.helper.errors as errors from rbac.helper.tables import table_types subprivileges = { - "ADD COLUMN" : 1 << 0, - "CLEAR COLUMN" : 1 << 1, - "MODIFY COLUMN" : 1 << 2, + "ADD COLUMN": 1 << 0, + "CLEAR COLUMN": 1 << 1, + "MODIFY COLUMN": 1 << 2, "RENAME COLUMN": 1 << 3, "COMMENT COLUMN": 1 << 4, "DROP COLUMN": 1 << 5, } aliases = { - "ADD COLUMN" : ["ALTER ADD COLUMN", "ADD COLUMN"], + "ADD COLUMN": ["ALTER ADD COLUMN", "ADD COLUMN"], "CLEAR COLUMN": ["ALTER CLEAR COLUMN", "CLEAR COLUMN"], - "MODIFY COLUMN" : ["ALTER MODIFY COLUMN", "MODIFY COLUMN"], - "RENAME COLUMN" : ["ALTER RENAME COLUMN", "RENAME COLUMN"], + "MODIFY COLUMN": ["ALTER MODIFY COLUMN", "MODIFY COLUMN"], + "RENAME COLUMN": ["ALTER RENAME COLUMN", "RENAME COLUMN"], "COMMENT COLUMN": ["ALTER COMMENT COLUMN", "COMMENT COLUMN"], "DROP COLUMN": ["ALTER DROP COLUMN", "DROP COLUMN"], - "ALTER COLUMN" : ["ALTER COLUMN", "ALL"], #super-privilege + "ALTER COLUMN": ["ALTER COLUMN", "ALL"], # super-privilege } # extra permutation is for 'ALTER COLUMN' super-privilege -permutation_count = (1 << len(subprivileges)) +permutation_count = 1 << len(subprivileges) + def permutations(table_type): """Uses stress flag and table type, returns list of all permutations to run @@ -45,8 +46,13 @@ def permutations(table_type): # "COMMENT COLUMN", "DROP COLUMN", "NONE", "DROP, RENAME, CLEAR", all, and # "ALTER COLUMN" # ] - return [1 << index for index in range(len(subprivileges))] + \ - [0, int('101010', 2), permutation_count-1, permutation_count] + return [1 << index for index in range(len(subprivileges))] + [ + 0, + int("101010", 2), + permutation_count 
- 1, + permutation_count, + ] + def alter_column_privileges(grants: int): """Takes in an integer, and returns the corresponding set of tests to grant and @@ -59,33 +65,47 @@ def alter_column_privileges(grants: int): # extra iteration for ALTER COLUMN if grants >= permutation_count: - privileges.append(aliases["ALTER COLUMN"][grants-permutation_count]) - elif grants==0: # No privileges + privileges.append(aliases["ALTER COLUMN"][grants - permutation_count]) + elif grants == 0: # No privileges privileges.append("NONE") else: - if (grants & subprivileges["ADD COLUMN"]): - privileges.append(aliases["ADD COLUMN"][grants % len(aliases["ADD COLUMN"])]) - if (grants & subprivileges["CLEAR COLUMN"]): - privileges.append(aliases["CLEAR COLUMN"][grants % len(aliases["CLEAR COLUMN"])]) - if (grants & subprivileges["MODIFY COLUMN"]): - privileges.append(aliases["MODIFY COLUMN"][grants % len(aliases["MODIFY COLUMN"])]) - if (grants & subprivileges["RENAME COLUMN"]): - privileges.append(aliases["RENAME COLUMN"][grants % len(aliases["RENAME COLUMN"])]) - if (grants & subprivileges["COMMENT COLUMN"]): - privileges.append(aliases["COMMENT COLUMN"][grants % len(aliases["COMMENT COLUMN"])]) - if (grants & subprivileges["DROP COLUMN"]): - privileges.append(aliases["DROP COLUMN"][grants % len(aliases["DROP COLUMN"])]) + if grants & subprivileges["ADD COLUMN"]: + privileges.append( + aliases["ADD COLUMN"][grants % len(aliases["ADD COLUMN"])] + ) + if grants & subprivileges["CLEAR COLUMN"]: + privileges.append( + aliases["CLEAR COLUMN"][grants % len(aliases["CLEAR COLUMN"])] + ) + if grants & subprivileges["MODIFY COLUMN"]: + privileges.append( + aliases["MODIFY COLUMN"][grants % len(aliases["MODIFY COLUMN"])] + ) + if grants & subprivileges["RENAME COLUMN"]: + privileges.append( + aliases["RENAME COLUMN"][grants % len(aliases["RENAME COLUMN"])] + ) + if grants & subprivileges["COMMENT COLUMN"]: + privileges.append( + aliases["COMMENT COLUMN"][grants % len(aliases["COMMENT COLUMN"])] + ) + if grants & subprivileges["DROP COLUMN"]: + privileges.append( + aliases["DROP COLUMN"][grants % len(aliases["DROP COLUMN"])] + ) note(f"Testing privileges: {privileges}") - return ', '.join(privileges) + return ", ".join(privileges) + def on_columns(privileges, columns): """For column-based tests. Takes in string output of alter_column_privileges() and adds columns for those privileges. 
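# Illustrative, self-contained sketch (an assumption, not part of the patch above): how the
# bitmask permutation scheme in this file maps one integer to the comma-separated privilege
# string that gets granted, and how on_columns() scopes it to columns. Names here are
# hypothetical stand-ins; the real helpers also rotate between the "ALTER "-prefixed and
# bare alias spellings via `grants % len(aliases[...])`, which this sketch omits.
subprivileges = {"ADD COLUMN": 1 << 0, "CLEAR COLUMN": 1 << 1, "MODIFY COLUMN": 1 << 2}
permutation_count = 1 << len(subprivileges)  # 8; values >= this pick a super-privilege alias
super_aliases = ["ALTER COLUMN", "ALL"]

def privileges_for(grants: int) -> str:
    """Map one integer permutation to the privilege string to grant."""
    if grants >= permutation_count:
        return super_aliases[grants - permutation_count]
    if grants == 0:
        return "NONE"
    return ", ".join(name for name, bit in subprivileges.items() if grants & bit)

def on_columns(privileges: str, columns: str) -> str:
    """Scope every privilege in the string to the given columns, e.g. ADD COLUMN(t1,t3)."""
    return ", ".join(f"{p.strip()}({columns})" for p in privileges.split(","))

assert privileges_for(0) == "NONE"
assert privileges_for(0b101) == "ADD COLUMN, MODIFY COLUMN"
assert privileges_for(permutation_count + 1) == "ALL"
assert on_columns("ADD COLUMN, CLEAR COLUMN", "t1,t3") == "ADD COLUMN(t1,t3), CLEAR COLUMN(t1,t3)"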
""" - privileges = privileges.split(',') + privileges = privileges.split(",") privileges = [privilege + f"({columns})" for privilege in privileges] - return ', '.join(privileges) + return ", ".join(privileges) + def alter_column_privilege_handler(grants, table, user, node, columns=None): """For all 6 subprivileges, if the privilege is granted: run test to ensure correct behavior, @@ -97,69 +117,87 @@ def alter_column_privilege_handler(grants, table, user, node, columns=None): note(f"GRANTS: {grants}") # testing ALTER COLUMN is the same as testing all subprivileges - if grants > permutation_count-1: - grants = permutation_count-1 + if grants > permutation_count - 1: + grants = permutation_count - 1 # if 'columns' is not passed then one iteration with column = None columns = columns.split(",") if columns != None else [None] for column in columns: # will always run 6 tests per column depending on granted privileges - if (grants & subprivileges["ADD COLUMN"]): + if grants & subprivileges["ADD COLUMN"]: with When("I check add column when privilege is granted"): check_add_column_when_privilege_is_granted(table, user, node, column) else: with When("I check add column when privilege is not granted"): - check_add_column_when_privilege_is_not_granted(table, user, node, column) - if (grants & subprivileges["CLEAR COLUMN"]): + check_add_column_when_privilege_is_not_granted( + table, user, node, column + ) + if grants & subprivileges["CLEAR COLUMN"]: with When("I check clear column when privilege is granted"): check_clear_column_when_privilege_is_granted(table, user, node, column) else: with When("I check clear column when privilege is not granted"): - check_clear_column_when_privilege_is_not_granted(table, user, node, column) - if (grants & subprivileges["MODIFY COLUMN"]): + check_clear_column_when_privilege_is_not_granted( + table, user, node, column + ) + if grants & subprivileges["MODIFY COLUMN"]: with When("I check modify column when privilege is granted"): check_modify_column_when_privilege_is_granted(table, user, node, column) else: with When("I check modify column when privilege is not granted"): - check_modify_column_when_privilege_is_not_granted(table, user, node, column) - if (grants & subprivileges["RENAME COLUMN"]): + check_modify_column_when_privilege_is_not_granted( + table, user, node, column + ) + if grants & subprivileges["RENAME COLUMN"]: with When("I check rename column when privilege is granted"): check_rename_column_when_privilege_is_granted(table, user, node, column) else: with When("I check rename column when privilege is not granted"): - check_rename_column_when_privilege_is_not_granted(table, user, node, column) - if (grants & subprivileges["COMMENT COLUMN"]): + check_rename_column_when_privilege_is_not_granted( + table, user, node, column + ) + if grants & subprivileges["COMMENT COLUMN"]: with When("I check comment column when privilege is granted"): - check_comment_column_when_privilege_is_granted(table, user, node, column) + check_comment_column_when_privilege_is_granted( + table, user, node, column + ) else: with When("I check comment column when privilege is not granted"): - check_comment_column_when_privilege_is_not_granted(table, user, node, column) - if (grants & subprivileges["DROP COLUMN"]): + check_comment_column_when_privilege_is_not_granted( + table, user, node, column + ) + if grants & subprivileges["DROP COLUMN"]: with When("I check drop column when privilege is granted"): check_drop_column_when_privilege_is_granted(table, user, node, column) else: with When("I 
check drop column when privilege is not granted"): - check_drop_column_when_privilege_is_not_granted(table, user, node, column) + check_drop_column_when_privilege_is_not_granted( + table, user, node, column + ) + def check_add_column_when_privilege_is_granted(table, user, node, column=None): """Ensures ADD COLUMN runs as expected when the privilege is granted to the specified user. """ if column is None: - column = 'add' + column = "add" with Given(f"I add column '{column}'"): - node.query(f"ALTER TABLE {table} ADD COLUMN {column} String", - settings = [("user", user)]) + node.query( + f"ALTER TABLE {table} ADD COLUMN {column} String", settings=[("user", user)] + ) with Then("I insert data to tree"): - node.query(f"INSERT INTO {table} ({column}) VALUES ('3.4')") #String + node.query(f"INSERT INTO {table} ({column}) VALUES ('3.4')") # String with Then("I verify that the column was successfully added"): - column_data = node.query(f"SELECT {column} FROM {table} FORMAT JSONEachRow").output - column_data_list = column_data.split('\n') - output_rows = [{f"{column}":"3.4"}, {f"{column}":""}] + column_data = node.query( + f"SELECT {column} FROM {table} FORMAT JSONEachRow" + ).output + column_data_list = column_data.split("\n") + output_rows = [{f"{column}": "3.4"}, {f"{column}": ""}] for row in column_data_list: assert json.loads(row) in output_rows, error() @@ -167,12 +205,13 @@ def check_add_column_when_privilege_is_granted(table, user, node, column=None): with Finally(f"I drop column '{column}'"): node.query(f"ALTER TABLE {table} DROP COLUMN {column}") + def check_clear_column_when_privilege_is_granted(table, user, node, column=None): """Ensures CLEAR COLUMN runs as expected when the privilege is granted to the specified user. """ if column is None: - column = 'clear' + column = "clear" with Given(f"I add the column {column}"): node.query(f"ALTER TABLE {table} ADD COLUMN {column} String") @@ -181,25 +220,29 @@ def check_clear_column_when_privilege_is_granted(table, user, node, column=None) node.query(f"INSERT INTO {table} ({column}) VALUES ('ready to be cleared')") with When(f"I clear column '{column}'"): - node.query(f"ALTER TABLE {table} CLEAR COLUMN {column}", - settings = [("user", user)]) + node.query( + f"ALTER TABLE {table} CLEAR COLUMN {column}", settings=[("user", user)] + ) with Then("I verify that the column was successfully cleared"): - column_data = node.query(f"SELECT {column} FROM {table} FORMAT JSONEachRow").output - column_data_list = column_data.split('\n') + column_data = node.query( + f"SELECT {column} FROM {table} FORMAT JSONEachRow" + ).output + column_data_list = column_data.split("\n") for row in column_data_list: - assert json.loads(row) == {f"{column}":""}, error() + assert json.loads(row) == {f"{column}": ""}, error() with Finally(f"I drop column '{column}'"): node.query(f"ALTER TABLE {table} DROP COLUMN {column}") + def check_modify_column_when_privilege_is_granted(table, user, node, column=None): """Ensures MODIFY COLUMN runs as expected when the privilege is granted to the specified user. 
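# Illustrative sketch (an assumption, not part of the patch): the FORMAT JSONEachRow
# parsing pattern the verification steps above rely on — node.query(...).output is one
# JSON object per line, which the checks split and load row by row. The sample output
# string below is hypothetical.
import json

raw_output = '{"add":"3.4"}\n{"add":""}'  # stand-in for node.query("... FORMAT JSONEachRow").output
rows = [json.loads(line) for line in raw_output.split("\n") if line]
assert {"add": "3.4"} in rows and {"add": ""} in rows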
""" if column is None: - column = 'modify' + column = "modify" with Given(f"I add the column {column}"): node.query(f"ALTER TABLE {table} ADD COLUMN {column} String DEFAULT '0'") @@ -208,25 +251,36 @@ def check_modify_column_when_privilege_is_granted(table, user, node, column=None node.query(f"INSERT INTO {table} ({column}) VALUES ('3.4')") with When(f"I modify column '{column}' to type Float"): - node.query(f"ALTER TABLE {table} MODIFY COLUMN {column} Float64", - settings = [("user", user)]) + node.query( + f"ALTER TABLE {table} MODIFY COLUMN {column} Float64", + settings=[("user", user)], + ) with And("I run optimize table to ensure above UPDATE command is done"): node.query(f"OPTIMIZE TABLE {table} FINAL", timeout=900) with Then("I verify that the column type was modified"): - with When(f"I try to insert a String (old type) to column {column}, throws exception"): - exitcode, message = errors.cannot_parse_string_as_float('hello') - node.query(f"INSERT INTO {table} ({column}) VALUES ('hello')", - exitcode=exitcode, message=message) + with When( + f"I try to insert a String (old type) to column {column}, throws exception" + ): + exitcode, message = errors.cannot_parse_string_as_float("hello") + node.query( + f"INSERT INTO {table} ({column}) VALUES ('hello')", + exitcode=exitcode, + message=message, + ) - with And(f"I try to insert float data (correct type) to column {column}, will accept"): + with And( + f"I try to insert float data (correct type) to column {column}, will accept" + ): node.query(f"INSERT INTO {table} ({column}) VALUES (30.01)") with And("I verify that the date was inserted correctly"): - column_data = node.query(f"SELECT {column} FROM {table} FORMAT JSONEachRow").output - column_data_list = column_data.split('\n') - output_rows = [{f"{column}":30.01}, {f"{column}":3.4}, {f"{column}":0}] + column_data = node.query( + f"SELECT {column} FROM {table} FORMAT JSONEachRow" + ).output + column_data_list = column_data.split("\n") + output_rows = [{f"{column}": 30.01}, {f"{column}": 3.4}, {f"{column}": 0}] for row in column_data_list: assert json.loads(row) in output_rows, error() @@ -234,12 +288,13 @@ def check_modify_column_when_privilege_is_granted(table, user, node, column=None with Finally(f"I drop column '{column}'"): node.query(f"ALTER TABLE {table} DROP COLUMN {column}") + def check_rename_column_when_privilege_is_granted(table, user, node, column=None): """Ensures RENAME COLUMN runs as expected when the privilege is granted to the specified user. 
""" if column is None: - column = 'rename' + column = "rename" new_column = f"{column}_new" @@ -248,31 +303,43 @@ def check_rename_column_when_privilege_is_granted(table, user, node, column=None with And("I get the initial contents of the column"): # could be either str or float depending on MODIFY COLUMN - initial_column_data = node.query(f"SELECT {column} FROM {table} ORDER BY {column}" - " FORMAT JSONEachRow").output + initial_column_data = node.query( + f"SELECT {column} FROM {table} ORDER BY {column}" " FORMAT JSONEachRow" + ).output with When(f"I rename column '{column}' to '{new_column}'"): - node.query(f"ALTER TABLE {table} RENAME COLUMN {column} TO {new_column}", - settings = [("user", user)]) + node.query( + f"ALTER TABLE {table} RENAME COLUMN {column} TO {new_column}", + settings=[("user", user)], + ) with Then("I verify that the column was successfully renamed"): with When("I verify that the original column does not exist"): exitcode, message = errors.missing_columns(column) - node.query(f"SELECT {column} FROM {table} FORMAT JSONEachRow", - exitcode=exitcode, message=message) + node.query( + f"SELECT {column} FROM {table} FORMAT JSONEachRow", + exitcode=exitcode, + message=message, + ) - with And("I verify that the new column does exist as expected, with same values"): - new_column_data = node.query(f"SELECT {new_column} FROM {table} ORDER BY" - f" {new_column} FORMAT JSONEachRow").output + with And( + "I verify that the new column does exist as expected, with same values" + ): + new_column_data = node.query( + f"SELECT {new_column} FROM {table} ORDER BY" + f" {new_column} FORMAT JSONEachRow" + ).output - if initial_column_data == '': + if initial_column_data == "": assert initial_column_data == new_column_data, error() else: - new_column_data_list = new_column_data.split('\n') - initial_column_data_list = initial_column_data.split('\n') + new_column_data_list = new_column_data.split("\n") + initial_column_data_list = initial_column_data.split("\n") for new, initial in zip(new_column_data_list, initial_column_data_list): - assert json.loads(new)[new_column] == json.loads(initial)[column], error() + assert ( + json.loads(new)[new_column] == json.loads(initial)[column] + ), error() with Finally(f"I use default user to undo rename"): node.query(f"ALTER TABLE {table} RENAME COLUMN {new_column} TO {column}") @@ -280,28 +347,31 @@ def check_rename_column_when_privilege_is_granted(table, user, node, column=None with Finally(f"I drop column '{column}'"): node.query(f"ALTER TABLE {table} DROP COLUMN {column}") -def check_comment_column_when_privilege_is_granted(table, user, node, column='x'): + +def check_comment_column_when_privilege_is_granted(table, user, node, column="x"): """Ensures COMMENT COLUMN runs as expected when the privilege is granted to the specified user. 
""" if column is None: - column = 'comment' + column = "comment" with Given(f"I add the column {column}"): node.query(f"ALTER TABLE {table} ADD COLUMN {column} String") with And(f"I alter {column} with comment"): - node.query(f"ALTER TABLE {table} COMMENT COLUMN {column} 'This is a comment.'", - settings = [("user", user)]) + node.query( + f"ALTER TABLE {table} COMMENT COLUMN {column} 'This is a comment.'", + settings=[("user", user)], + ) with Then(f"I verify that the specified comment is present for {column}"): table_data = node.query(f"DESCRIBE TABLE {table} FORMAT JSONEachRow").output - table_data_list = table_data.split('\n') + table_data_list = table_data.split("\n") for row in table_data_list: row = json.loads(row) - if row['name'] == column: - assert row['comment'] == "This is a comment.", error() + if row["name"] == column: + assert row["comment"] == "This is a comment.", error() with Finally(f"I drop column '{column}'"): node.query(f"ALTER TABLE {table} DROP COLUMN {column}") @@ -313,6 +383,7 @@ def check_comment_column_when_privilege_is_granted(table, user, node, column='x' error() + def check_drop_column_when_privilege_is_granted(table, user, node, column=None): """Ensures DROP COLUMN runs as expected when the privilege is granted to the specified user. @@ -324,136 +395,200 @@ def check_drop_column_when_privilege_is_granted(table, user, node, column=None): else: exitcode, message = errors.wrong_column_name("fake_column") - node.query(f"ALTER TABLE {table} DROP COLUMN fake_column", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} DROP COLUMN fake_column", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) if column is None: - column = 'drop' + column = "drop" with Given(f"I add the column {column}"): node.query(f"ALTER TABLE {table} ADD COLUMN {column} String") with Then(f"I drop column {column} which exists"): - node.query(f"ALTER TABLE {table} DROP COLUMN {column}", - settings = [("user", user)]) + node.query( + f"ALTER TABLE {table} DROP COLUMN {column}", settings=[("user", user)] + ) with And(f"I verify that {column} has been dropped"): exitcode, message = errors.wrong_column_name(column) - node.query(f"ALTER TABLE {table} DROP COLUMN {column}", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} DROP COLUMN {column}", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_add_column_when_privilege_is_not_granted(table, user, node, column=None): """Ensures ADD COLUMN errors as expected without the required privilege for the specified user. 
""" if column is None: - column = 'add' + column = "add" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} ADD COLUMN {column} String", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} ADD COLUMN {column} String", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) with Then("I try to ADD COLUMN"): - node.query(f"ALTER TABLE {table} ADD COLUMN {column} String", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} ADD COLUMN {column} String", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_clear_column_when_privilege_is_not_granted(table, user, node, column=None): """Ensures CLEAR COLUMN errors as expected without the required privilege for the specified user. """ if column is None: - column = 'clear' + column = "clear" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} CLEAR COLUMN {column}", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} CLEAR COLUMN {column}", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) with And(f"I grant NONE to the user"): node.query(f"GRANT NONE TO {user}") with Then("I try to CLEAR COLUMN"): - node.query(f"ALTER TABLE {table} CLEAR COLUMN {column}", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} CLEAR COLUMN {column}", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_modify_column_when_privilege_is_not_granted(table, user, node, column=None): """Ensures MODIFY COLUMN errors as expected without the required privilege for the specified user. """ if column is None: - column = 'modify' + column = "modify" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} MODIFY COLUMN {column} String", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} MODIFY COLUMN {column} String", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) with And(f"I grant NONE to the user"): node.query(f"GRANT NONE TO {user}") with Then("I try to MODIFY COLUMN"): - node.query(f"ALTER TABLE {table} MODIFY COLUMN {column} String", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} MODIFY COLUMN {column} String", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_rename_column_when_privilege_is_not_granted(table, user, node, column=None): """Ensures RENAME COLUMN errors as expected without the required privilege for the specified user. 
""" if column is None: - column = 'rename' + column = "rename" new_column = f"{column}_new" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} RENAME COLUMN {column} TO {new_column}", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} RENAME COLUMN {column} TO {new_column}", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) with And(f"I grant NONE to the user"): node.query(f"GRANT NONE TO {user}") with Then("I try to RENAME COLUMN"): - node.query(f"ALTER TABLE {table} RENAME COLUMN {column} TO {new_column}", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} RENAME COLUMN {column} TO {new_column}", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_comment_column_when_privilege_is_not_granted(table, user, node, column=None): """Ensures COMMENT COLUMN errors as expected without the required privilege for the specified user. """ if column is None: - column = 'comment' + column = "comment" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} COMMENT COLUMN {column} 'This is a comment.'", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} COMMENT COLUMN {column} 'This is a comment.'", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) with And(f"I grant NONE to the user"): node.query(f"GRANT NONE TO {user}") with When("I try to COMMENT COLUMN"): - node.query(f"ALTER TABLE {table} COMMENT COLUMN {column} 'This is a comment.'", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} COMMENT COLUMN {column} 'This is a comment.'", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_drop_column_when_privilege_is_not_granted(table, user, node, column=None): """Ensures DROP COLUMN errors as expected without the required privilege for the specified user. 
""" if column is None: - column = 'drop' + column = "drop" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} DROP COLUMN {column}", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} DROP COLUMN {column}", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) with And(f"I grant NONE to the user"): node.query(f"GRANT NONE TO {user}") with Then("I try to DROP COLUMN"): - node.query(f"ALTER TABLE {table} DROP COLUMN {column}", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} DROP COLUMN {column}", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + @TestScenario def user_with_some_privileges(self, permutation, table_type, node=None): @@ -475,6 +610,7 @@ def user_with_some_privileges(self, permutation, table_type, node=None): with Then(f"I try to ALTER COLUMN"): alter_column_privilege_handler(permutation, table_name, user_name, node) + @TestScenario @Requirements( RQ_SRS_006_RBAC_Privileges_AlterColumn_Revoke("1.0"), @@ -502,27 +638,41 @@ def user_with_revoked_privileges(self, permutation, table_type, node=None): # No privileges granted alter_column_privilege_handler(0, table_name, user_name, node) + @TestScenario -@Examples("grant_columns revoke_columns alter_columns_fail", [ - ("t1", "t1", "t2"), - ("t1,t3", "t1", "t2"), - ("t1,t3,t4", "t1,t3,t4", "t2"), -]) +@Examples( + "grant_columns revoke_columns alter_columns_fail", + [ + ("t1", "t1", "t2"), + ("t1,t3", "t1", "t2"), + ("t1,t3,t4", "t1,t3,t4", "t2"), + ], +) def user_with_privileges_on_columns(self, table_type, permutation, node=None): """Passes in examples to user_column_privileges() below to test granting of sub-privileges on columns """ - examples=Examples("grant_columns revoke_columns alter_columns_fail table_type permutation", - [tuple(list(row)+[table_type, permutation]) for row in self.examples]) + examples = Examples( + "grant_columns revoke_columns alter_columns_fail table_type permutation", + [tuple(list(row) + [table_type, permutation]) for row in self.examples], + ) Scenario(test=user_column_privileges, examples=examples)() + @TestOutline @Requirements( RQ_SRS_006_RBAC_Privileges_AlterColumn_Column("1.0"), ) -def user_column_privileges(self, grant_columns, revoke_columns, alter_columns_fail, table_type, - permutation, node=None): +def user_column_privileges( + self, + grant_columns, + revoke_columns, + alter_columns_fail, + table_type, + permutation, + node=None, +): """Check that user is able to alter on granted columns and unable to alter on not granted or revoked columns. 
""" @@ -538,22 +688,33 @@ def user_column_privileges(self, grant_columns, revoke_columns, alter_columns_fa with When(f"granted={privileges_on_columns}"): with table(node, table_name, table_type), user(node, user_name): with When(f"I grant subprivileges"): - node.query(f"GRANT {privileges_on_columns} ON {table_name} TO {user_name}") + node.query( + f"GRANT {privileges_on_columns} ON {table_name} TO {user_name}" + ) if alter_columns_fail is not None: with When(f"I try to alter on not granted columns, fails"): # Permutation 0: no privileges for any permutation on these columns - alter_column_privilege_handler(0, table_name, user_name, node, columns=alter_columns_fail) + alter_column_privilege_handler( + 0, table_name, user_name, node, columns=alter_columns_fail + ) with Then(f"I try to ALTER COLUMN"): - alter_column_privilege_handler(permutation, table_name, user_name, node, columns=grant_columns) + alter_column_privilege_handler( + permutation, table_name, user_name, node, columns=grant_columns + ) if revoke_columns is not None: with When(f"I revoke alter column privilege for columns"): - node.query(f"REVOKE {privileges_on_columns} ON {table_name} FROM {user_name}") + node.query( + f"REVOKE {privileges_on_columns} ON {table_name} FROM {user_name}" + ) with And("I try to alter revoked columns"): - alter_column_privilege_handler(0, table_name, user_name, node, columns=alter_columns_fail) + alter_column_privilege_handler( + 0, table_name, user_name, node, columns=alter_columns_fail + ) + @TestScenario @Requirements( @@ -572,7 +733,9 @@ def role_with_some_privileges(self, permutation, table_type, node=None): role_name = f"role_{getuid()}" with When(f"granted={privileges}"): - with table(node, table_name, table_type), user(node, user_name), role(node, role_name): + with table(node, table_name, table_type), user(node, user_name), role( + node, role_name + ): with Given("I grant the alter column privilege to a role"): node.query(f"GRANT {privileges} ON {table_name} TO {role_name}") @@ -582,6 +745,7 @@ def role_with_some_privileges(self, permutation, table_type, node=None): with Then(f"I try to ALTER COLUMN"): alter_column_privilege_handler(permutation, table_name, user_name, node) + @TestScenario def user_with_revoked_role(self, permutation, table_type, node=None): """Check that user with a role that has alter column privilege on a table is unable to @@ -596,7 +760,9 @@ def user_with_revoked_role(self, permutation, table_type, node=None): role_name = f"role_{getuid()}" with When(f"granted={privileges}"): - with table(node, table_name, table_type), user(node, user_name), role(node, role_name): + with table(node, table_name, table_type), user(node, user_name), role( + node, role_name + ): with When("I grant privileges to a role"): node.query(f"GRANT {privileges} ON {table_name} TO {role_name}") @@ -610,27 +776,41 @@ def user_with_revoked_role(self, permutation, table_type, node=None): # Permutation 0: no privileges for any permutation on these columns alter_column_privilege_handler(0, table_name, user_name, node) + @TestScenario -@Examples("grant_columns revoke_columns alter_columns_fail", [ - ("t1", "t1", "t2"), - ("t1,t3", "t1", "t2"), - ("t1,t3,t4", "t1,t3,t4", "t2"), -]) +@Examples( + "grant_columns revoke_columns alter_columns_fail", + [ + ("t1", "t1", "t2"), + ("t1,t3", "t1", "t2"), + ("t1,t3,t4", "t1,t3,t4", "t2"), + ], +) def role_with_privileges_on_columns(self, table_type, permutation, node=None): """Passes in examples to role_column_privileges() below to test granting of subprivileges on 
columns """ - examples=Examples("grant_columns revoke_columns alter_columns_fail table_type permutation", - [tuple(list(row)+[table_type, permutation]) for row in self.examples]) + examples = Examples( + "grant_columns revoke_columns alter_columns_fail table_type permutation", + [tuple(list(row) + [table_type, permutation]) for row in self.examples], + ) Scenario(test=user_column_privileges, examples=examples)() + @TestOutline @Requirements( RQ_SRS_006_RBAC_Privileges_AlterColumn_Column("1.0"), ) -def role_column_privileges(self, grant_columns, revoke_columns, alter_columns_fail, table_type, - permutation, node=None): +def role_column_privileges( + self, + grant_columns, + revoke_columns, + alter_columns_fail, + table_type, + permutation, + node=None, +): """Check that user is able to alter column from granted columns and unable to alter column from not granted or revoked columns. """ @@ -645,9 +825,13 @@ def role_column_privileges(self, grant_columns, revoke_columns, alter_columns_fa privileges_on_columns = on_columns(privileges, grant_columns) with When(f"granted={privileges_on_columns}"): - with table(node, table_name, table_type), user(node, user_name), role(node, role_name): + with table(node, table_name, table_type), user(node, user_name), role( + node, role_name + ): with When(f"I grant subprivileges"): - node.query(f"GRANT {privileges_on_columns} ON {table_name} TO {role_name}") + node.query( + f"GRANT {privileges_on_columns} ON {table_name} TO {role_name}" + ) with And("I grant the role to a user"): node.query(f"GRANT {role_name} TO {user_name}") @@ -655,17 +839,26 @@ def role_column_privileges(self, grant_columns, revoke_columns, alter_columns_fa if alter_columns_fail is not None: with When(f"I try to alter on not granted columns, fails"): # Permutation 0: no privileges for any permutation on these columns - alter_column_privilege_handler(0, table_name, user_name, node, columns=alter_columns_fail) + alter_column_privilege_handler( + 0, table_name, user_name, node, columns=alter_columns_fail + ) with Then(f"I try to ALTER COLUMN"): - alter_column_privilege_handler(permutation, table_name, user_name, node, columns=grant_columns) + alter_column_privilege_handler( + permutation, table_name, user_name, node, columns=grant_columns + ) if revoke_columns is not None: with When(f"I revoke alter column privilege for columns"): - node.query(f"REVOKE {privileges_on_columns} ON {table_name} FROM {role_name}") + node.query( + f"REVOKE {privileges_on_columns} ON {table_name} FROM {role_name}" + ) with And("I try to alter failed columns"): - alter_column_privilege_handler(0, table_name, user_name, node, columns=revoke_columns) + alter_column_privilege_handler( + 0, table_name, user_name, node, columns=revoke_columns + ) + @TestScenario @Requirements( @@ -686,48 +879,58 @@ def user_with_privileges_on_cluster(self, permutation, table_type, node=None): with table(node, table_name, table_type): try: with Given("I have a user on a cluster"): - node.query(f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster" + ) with When("I grant alter column privileges on a cluster"): - node.query(f"GRANT ON CLUSTER sharded_cluster {privileges} ON {table_name} TO {user_name}") + node.query( + f"GRANT ON CLUSTER sharded_cluster {privileges} ON {table_name} TO {user_name}" + ) with Then(f"I try to ALTER COLUMN"): - alter_column_privilege_handler(permutation, table_name, user_name, node) + alter_column_privilege_handler( + 
permutation, table_name, user_name, node + ) finally: with Finally("I drop the user on a cluster"): node.query(f"DROP USER {user_name} ON CLUSTER sharded_cluster") + @TestSuite def scenario_parallelization(self, table_type, permutation): args = {"table_type": table_type, "permutation": permutation} with Pool(7) as pool: try: for scenario in loads(current_module(), Scenario): - Scenario(test=scenario, setup=instrument_clickhouse_server_log, parallel=True, executor=pool)(**args) + Scenario( + test=scenario, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + )(**args) finally: join() + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AlterColumn("1.0"), RQ_SRS_006_RBAC_Privileges_AlterColumn_TableEngines("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("alter column") def feature(self, stress=None, node="clickhouse1"): - """Runs test suites above which check correctness over scenarios and permutations. - """ + """Runs test suites above which check correctness over scenarios and permutations.""" self.context.node = self.context.cluster.node(node) if stress is not None: self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example if table_type != "MergeTree" and not self.context.stress: continue @@ -738,6 +941,11 @@ def feature(self, stress=None, node="clickhouse1"): for permutation in permutations(table_type): privileges = alter_column_privileges(permutation) args = {"table_type": table_type, "permutation": permutation} - Suite(test=scenario_parallelization, name=privileges, parallel=True, executor=pool)(**args) + Suite( + test=scenario_parallelization, + name=privileges, + parallel=True, + executor=pool, + )(**args) finally: join() diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_constraint.py b/tests/testflows/rbac/tests/privileges/alter/alter_constraint.py index c24109c8052..d1156aadc9f 100755 --- a/tests/testflows/rbac/tests/privileges/alter/alter_constraint.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_constraint.py @@ -9,18 +9,19 @@ import rbac.helper.errors as errors from rbac.helper.tables import table_types subprivileges = { - "ADD CONSTRAINT" : 1 << 0, - "DROP CONSTRAINT" : 1 << 1, + "ADD CONSTRAINT": 1 << 0, + "DROP CONSTRAINT": 1 << 1, } aliases = { - "ADD CONSTRAINT" : ["ALTER ADD CONSTRAINT", "ADD CONSTRAINT"], + "ADD CONSTRAINT": ["ALTER ADD CONSTRAINT", "ADD CONSTRAINT"], "DROP CONSTRAINT": ["ALTER DROP CONSTRAINT", "DROP CONSTRAINT"], - "ALTER CONSTRAINT": ["ALTER CONSTRAINT", "CONSTRAINT", "ALL"] # super-privilege + "ALTER CONSTRAINT": ["ALTER CONSTRAINT", "CONSTRAINT", "ALL"], # super-privilege } # Extra permutation is for 'ALTER CONSTRAINT' super-privilege -permutation_count = (1 << len(subprivileges)) +permutation_count = 1 << len(subprivileges) + def permutations(): """Returns list of all permutations to run. @@ -28,6 +29,7 @@ def permutations(): """ return [*range(permutation_count + len(aliases["ALTER CONSTRAINT"]))] + def alter_constraint_privileges(grants: int): """Takes in an integer, and returns the corresponding set of tests to grant and not grant using the binary string. 
Each integer corresponds to a unique permutation @@ -38,62 +40,75 @@ def alter_constraint_privileges(grants: int): # Extra iteration for ALTER CONSTRAINT if grants >= permutation_count: - privileges.append(aliases["ALTER CONSTRAINT"][grants-permutation_count]) - elif grants==0: # No privileges + privileges.append(aliases["ALTER CONSTRAINT"][grants - permutation_count]) + elif grants == 0: # No privileges privileges.append("NONE") else: - if (grants & subprivileges["ADD CONSTRAINT"]): - privileges.append(aliases["ADD CONSTRAINT"][grants % len(aliases["ADD CONSTRAINT"])]) - if (grants & subprivileges["DROP CONSTRAINT"]): - privileges.append(aliases["DROP CONSTRAINT"][grants % len(aliases["DROP CONSTRAINT"])]) + if grants & subprivileges["ADD CONSTRAINT"]: + privileges.append( + aliases["ADD CONSTRAINT"][grants % len(aliases["ADD CONSTRAINT"])] + ) + if grants & subprivileges["DROP CONSTRAINT"]: + privileges.append( + aliases["DROP CONSTRAINT"][grants % len(aliases["DROP CONSTRAINT"])] + ) note(f"Testing these privileges: {privileges}") - return ', '.join(privileges) + return ", ".join(privileges) + def alter_constraint_privilege_handler(grants, table, user, node): """For all 2 subprivileges, if the privilege is granted: run test to ensure correct behavior, and if the privilege is not granted, run test to ensure correct behavior there as well """ # Testing ALTER CONSTRAINT and CONSTRAINT is the same as testing all subprivileges - if grants > permutation_count-1: - grants = permutation_count-1 + if grants > permutation_count - 1: + grants = permutation_count - 1 - if (grants & subprivileges["ADD CONSTRAINT"]): + if grants & subprivileges["ADD CONSTRAINT"]: with When("I check add constraint when privilege is granted"): check_add_constraint_when_privilege_is_granted(table, user, node) else: with When("I check add constraint when privilege is not granted"): check_add_constraint_when_privilege_is_not_granted(table, user, node) - if (grants & subprivileges["DROP CONSTRAINT"]): + if grants & subprivileges["DROP CONSTRAINT"]: with When("I check drop constraint when privilege is granted"): check_drop_constraint_when_privilege_is_granted(table, user, node) else: with When("I check drop constraint when privilege is not granted"): check_drop_constraint_when_privilege_is_not_granted(table, user, node) + def check_add_constraint_when_privilege_is_granted(table, user, node): - """Ensures ADD CONSTRAINT runs as expected when the privilege is granted to the specified user - """ + """Ensures ADD CONSTRAINT runs as expected when the privilege is granted to the specified user""" constraint = "add" with Given(f"I add constraint '{constraint}'"): - node.query(f"ALTER TABLE {table} ADD CONSTRAINT {constraint} CHECK x>5", - settings = [("user", user)]) + node.query( + f"ALTER TABLE {table} ADD CONSTRAINT {constraint} CHECK x>5", + settings=[("user", user)], + ) with Then("I verify that the constraint is in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert f"CONSTRAINT {constraint} CHECK x > 5" in output['statement'], error() + output = json.loads( + node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output + ) + assert f"CONSTRAINT {constraint} CHECK x > 5" in output["statement"], error() with Finally(f"I drop constraint {constraint}"): node.query(f"ALTER TABLE {table} DROP constraint {constraint}") + def check_drop_constraint_when_privilege_is_granted(table, user, node): - """Ensures DROP CONSTRAINT runs as expected when the privilege is granted 
to the specified user - """ + """Ensures DROP CONSTRAINT runs as expected when the privilege is granted to the specified user""" with But("I try to drop nonexistent constraint, throws exception"): exitcode, message = errors.wrong_constraint_name("fake_constraint") - node.query(f"ALTER TABLE {table} DROP CONSTRAINT fake_constraint", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} DROP CONSTRAINT fake_constraint", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) constraint = "drop" @@ -101,32 +116,47 @@ def check_drop_constraint_when_privilege_is_granted(table, user, node): node.query(f"ALTER TABLE {table} ADD CONSTRAINT {constraint} CHECK x>5") with Then(f"I drop constraint {constraint} which exists"): - node.query(f"ALTER TABLE {table} DROP CONSTRAINT {constraint}", - settings = [("user", user)]) + node.query( + f"ALTER TABLE {table} DROP CONSTRAINT {constraint}", + settings=[("user", user)], + ) with Then("I verify that the constraint is not in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert f"CONSTRAINT {constraint} CHECK x > 5" not in output['statement'], error() + output = json.loads( + node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output + ) + assert ( + f"CONSTRAINT {constraint} CHECK x > 5" not in output["statement"] + ), error() + def check_add_constraint_when_privilege_is_not_granted(table, user, node): - """Ensures ADD CONSTRAINT errors as expected without the required privilege for the specified user - """ + """Ensures ADD CONSTRAINT errors as expected without the required privilege for the specified user""" constraint = "add" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} ADD CONSTRAINT {constraint} CHECK x>5", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} ADD CONSTRAINT {constraint} CHECK x>5", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_drop_constraint_when_privilege_is_not_granted(table, user, node): - """Ensures DROP CONSTRAINT errors as expected without the required privilege for the specified user - """ + """Ensures DROP CONSTRAINT errors as expected without the required privilege for the specified user""" constraint = "drop" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} DROP CONSTRAINT {constraint}", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} DROP CONSTRAINT {constraint}", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + @TestScenario def user_with_some_privileges(self, table_type, node=None): @@ -148,7 +178,10 @@ def user_with_some_privileges(self, table_type, node=None): node.query(f"GRANT {privileges} ON {table_name} TO {user_name}") with Then(f"I try to ALTER CONSTRAINT"): - alter_constraint_privilege_handler(permutation, table_name, user_name, node) + alter_constraint_privilege_handler( + permutation, table_name, user_name, node + ) + @TestScenario @Requirements( @@ -179,6 +212,7 @@ def user_with_revoked_privileges(self, table_type, node=None): # Permutation 0: no privileges alter_constraint_privilege_handler(0, table_name, user_name, node) + @TestScenario @Requirements( 
RQ_SRS_006_RBAC_Privileges_AlterConstraint_Grant("1.0"), @@ -198,7 +232,9 @@ def role_with_some_privileges(self, table_type, node=None): privileges = alter_constraint_privileges(permutation) with When(f"granted={privileges}"): - with table(node, table_name, table_type), user(node, user_name), role(node, role_name): + with table(node, table_name, table_type), user(node, user_name), role( + node, role_name + ): with Given("I grant the ALTER CONSTRAINT privilege to a role"): node.query(f"GRANT {privileges} ON {table_name} TO {role_name}") @@ -206,7 +242,10 @@ def role_with_some_privileges(self, table_type, node=None): node.query(f"GRANT {role_name} TO {user_name}") with Then(f"I try to ALTER CONSTRAINT"): - alter_constraint_privilege_handler(permutation, table_name, user_name, node) + alter_constraint_privilege_handler( + permutation, table_name, user_name, node + ) + @TestScenario def user_with_revoked_role(self, table_type, node=None): @@ -224,7 +263,9 @@ def user_with_revoked_role(self, table_type, node=None): privileges = alter_constraint_privileges(permutation) with When(f"granted={privileges}"): - with table(node, table_name, table_type), user(node, user_name), role(node, role_name): + with table(node, table_name, table_type), user(node, user_name), role( + node, role_name + ): with When("I grant privileges to a role"): node.query(f"GRANT {privileges} ON {table_name} TO {role_name}") @@ -238,6 +279,7 @@ def user_with_revoked_role(self, table_type, node=None): # Permutation 0: no privileges for any permutation alter_constraint_privilege_handler(0, table_name, user_name, node) + @TestScenario @Requirements( RQ_SRS_006_RBAC_Privileges_AlterConstraint_Cluster("1.0"), @@ -259,27 +301,32 @@ def user_with_privileges_on_cluster(self, table_type, node=None): with table(node, table_name, table_type): try: with Given("I have a user on a cluster"): - node.query(f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster" + ) with When("I grant ALTER CONSTRAINT privileges on a cluster"): - node.query(f"GRANT ON CLUSTER sharded_cluster {privileges} ON {table_name} TO {user_name}") + node.query( + f"GRANT ON CLUSTER sharded_cluster {privileges} ON {table_name} TO {user_name}" + ) with Then(f"I try to ALTER CONSTRAINT"): - alter_constraint_privilege_handler(permutation, table_name, user_name, node) + alter_constraint_privilege_handler( + permutation, table_name, user_name, node + ) finally: with Finally("I drop the user on a cluster"): node.query(f"DROP USER {user_name} ON CLUSTER sharded_cluster") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AlterConstraint("1.0"), RQ_SRS_006_RBAC_Privileges_AlterConstraint_TableEngines("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("alter constraint") def feature(self, stress=None, node="clickhouse1"): self.context.node = self.context.cluster.node(node) @@ -288,17 +335,22 @@ def feature(self, stress=None, node="clickhouse1"): self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example if table_type != "MergeTree" and not self.context.stress: continue - args = {"table_type" : table_type} + args = {"table_type": table_type} with Example(str(example)): with Pool(5) as pool: try: for scenario in 
loads(current_module(), Scenario): - Scenario(test=scenario, setup=instrument_clickhouse_server_log, parallel=True, executor=pool)(**args) + Scenario( + test=scenario, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + )(**args) finally: join() diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_delete.py b/tests/testflows/rbac/tests/privileges/alter/alter_delete.py index 93d520f91bd..89e55e57ad0 100644 --- a/tests/testflows/rbac/tests/privileges/alter/alter_delete.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_delete.py @@ -7,6 +7,7 @@ import rbac.helper.errors as errors aliases = {"ALTER DELETE", "DELETE", "ALL"} + @TestSuite def privilege_granted_directly_or_via_role(self, table_type, privilege, node=None): """Check that user is only able to execute ALTER DELETE when they have required privilege, @@ -21,8 +22,16 @@ def privilege_granted_directly_or_via_role(self, table_type, privilege, node=Non with Suite("user with direct privilege", setup=instrument_clickhouse_server_log): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute ALTER DELETE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, table_type=table_type, privilege=privilege, node=node) + with When( + f"I run checks that {user_name} is only able to execute ALTER DELETE with required privileges" + ): + privilege_check( + grant_target_name=user_name, + user_name=user_name, + table_type=table_type, + privilege=privilege, + node=node, + ) with Suite("user with privilege via role", setup=instrument_clickhouse_server_log): with user(node, user_name), role(node, role_name): @@ -30,12 +39,20 @@ def privilege_granted_directly_or_via_role(self, table_type, privilege, node=Non with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute ALTER DELETE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, table_type=table_type, privilege=privilege, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute ALTER DELETE with required privileges" + ): + privilege_check( + grant_target_name=role_name, + user_name=user_name, + table_type=table_type, + privilege=privilege, + node=node, + ) + def privilege_check(grant_target_name, user_name, table_type, privilege, node=None): - """Run scenarios to check the user's access with different privileges. 
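# Illustrative sketch (an assumption, not part of the patch): the grant / revoke cycle
# that privilege_check() repeats for every alias of the privilege. `run` is a hypothetical
# stand-in for node.query(); only the SQL strings mirror the scenarios above.
def check_alter_delete(run, table, user, grant_target, privilege="ALTER DELETE"):
    # without the privilege (only USAGE) the statement must be rejected
    run(f"GRANT USAGE ON *.* TO {grant_target}")
    run(f"ALTER TABLE {table} DELETE WHERE 1", user=user, expect_error=True)
    # with the privilege granted it must succeed
    run(f"GRANT {privilege} ON {table} TO {grant_target}")
    run(f"ALTER TABLE {table} DELETE WHERE 1", user=user)
    # after REVOKE it must be rejected again
    run(f"REVOKE {privilege} ON {table} FROM {grant_target}")
    run(f"ALTER TABLE {table} DELETE WHERE 1", user=user, expect_error=True)

if __name__ == "__main__":
    # dry-run the sketch with a printer instead of a real ClickHouse node
    check_alter_delete(lambda sql, **kw: print(sql, kw), "default.tbl", "user0", "user0")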
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege", setup=instrument_clickhouse_server_log): @@ -50,8 +67,12 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to delete columns without privilege"): - node.query(f"ALTER TABLE {table_name} DELETE WHERE 1", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table_name} DELETE WHERE 1", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege", setup=instrument_clickhouse_server_log): table_name = f"merge_tree_{getuid()}" @@ -62,9 +83,14 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT {privilege} ON {table_name} TO {grant_target_name}") with Then("I attempt to delete columns"): - node.query(f"ALTER TABLE {table_name} DELETE WHERE 1", settings = [("user", user_name)]) + node.query( + f"ALTER TABLE {table_name} DELETE WHERE 1", + settings=[("user", user_name)], + ) - with Scenario("user with revoked privilege", setup=instrument_clickhouse_server_log): + with Scenario( + "user with revoked privilege", setup=instrument_clickhouse_server_log + ): table_name = f"merge_tree_{getuid()}" with table(node, table_name, table_type): @@ -73,25 +99,29 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT {privilege} ON {table_name} TO {grant_target_name}") with And("I revoke the delete privilege"): - node.query(f"REVOKE {privilege} ON {table_name} FROM {grant_target_name}") + node.query( + f"REVOKE {privilege} ON {table_name} FROM {grant_target_name}" + ) with Then("I attempt to delete columns"): - node.query(f"ALTER TABLE {table_name} DELETE WHERE 1", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table_name} DELETE WHERE 1", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AlterDelete("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("alter delete") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of ALTER DELETE. 
- """ + """Check the RBAC functionality of ALTER DELETE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -100,7 +130,7 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example if table_type != "MergeTree" and not self.context.stress: continue @@ -108,4 +138,6 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): with Example(str(example)): for alias in aliases: with Suite(alias, test=privilege_granted_directly_or_via_role): - privilege_granted_directly_or_via_role(table_type=table_type, privilege=alias) + privilege_granted_directly_or_via_role( + table_type=table_type, privilege=alias + ) diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_fetch.py b/tests/testflows/rbac/tests/privileges/alter/alter_fetch.py index b4ff0b65fd4..5a30231b9d5 100644 --- a/tests/testflows/rbac/tests/privileges/alter/alter_fetch.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_fetch.py @@ -7,10 +7,10 @@ import rbac.helper.errors as errors aliases = {"ALTER FETCH PARTITION", "FETCH PARTITION", "ALL"} + @TestSuite def privilege_granted_directly_or_via_role(self, table_type, privilege, node=None): - """Check that user is only able to execute ALTER FETCH PARTITION when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute ALTER FETCH PARTITION when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -20,8 +20,16 @@ def privilege_granted_directly_or_via_role(self, table_type, privilege, node=Non with Suite("user with direct privilege", setup=instrument_clickhouse_server_log): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute ALTER FETCH PARTITION with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, table_type=table_type, privilege=privilege, node=node) + with When( + f"I run checks that {user_name} is only able to execute ALTER FETCH PARTITION with required privileges" + ): + privilege_check( + grant_target_name=user_name, + user_name=user_name, + table_type=table_type, + privilege=privilege, + node=node, + ) with Suite("user with privilege via role", setup=instrument_clickhouse_server_log): with user(node, user_name), role(node, role_name): @@ -29,12 +37,20 @@ def privilege_granted_directly_or_via_role(self, table_type, privilege, node=Non with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute ALTER FETCH PARTITION with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, table_type=table_type, privilege=privilege, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute ALTER FETCH PARTITION with required privileges" + ): + privilege_check( + grant_target_name=role_name, + user_name=user_name, + table_type=table_type, + privilege=privilege, + node=node, + ) + def privilege_check(grant_target_name, user_name, table_type, privilege, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege", setup=instrument_clickhouse_server_log): @@ -49,8 +65,12 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to fetch a partition without privilege"): - node.query(f"ALTER TABLE {table_name} FETCH PARTITION 1 FROM '/clickhouse/'", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table_name} FETCH PARTITION 1 FROM '/clickhouse/'", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege", setup=instrument_clickhouse_server_log): table_name = f"merge_tree_{getuid()}" @@ -60,10 +80,16 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT {privilege} ON {table_name} TO {grant_target_name}") with Then("I attempt to fetch a partition"): - node.query(f"ALTER TABLE {table_name} FETCH PARTITION 1 FROM '/clickhouse/'", settings = [("user", user_name)], - exitcode=231, message="DB::Exception: No node") + node.query( + f"ALTER TABLE {table_name} FETCH PARTITION 1 FROM '/clickhouse/'", + settings=[("user", user_name)], + exitcode=231, + message="DB::Exception: No node", + ) - with Scenario("user with revoked privilege", setup=instrument_clickhouse_server_log): + with Scenario( + "user with revoked privilege", setup=instrument_clickhouse_server_log + ): table_name = f"merge_tree_{getuid()}" with table(node, table_name, table_type): @@ -71,38 +97,47 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT {privilege} ON {table_name} TO {grant_target_name}") with And("I revoke the fetch privilege"): - node.query(f"REVOKE {privilege} ON {table_name} FROM {grant_target_name}") + node.query( + f"REVOKE {privilege} ON {table_name} FROM {grant_target_name}" + ) with Then("I attempt to fetch a partition"): - node.query(f"ALTER TABLE {table_name} FETCH PARTITION 1 FROM '/clickhouse/'", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table_name} FETCH PARTITION 1 FROM '/clickhouse/'", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AlterFetch("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), +) +@Examples( + "table_type", + [ + ("ReplicatedMergeTree-sharded_cluster",), + ("ReplicatedMergeTree-one_shard_cluster",), + ("ReplicatedReplacingMergeTree-sharded_cluster",), + ("ReplicatedReplacingMergeTree-one_shard_cluster",), + ("ReplicatedSummingMergeTree-sharded_cluster",), + ("ReplicatedSummingMergeTree-one_shard_cluster",), + ("ReplicatedAggregatingMergeTree-sharded_cluster",), + ("ReplicatedAggregatingMergeTree-one_shard_cluster",), + ("ReplicatedCollapsingMergeTree-sharded_cluster",), + ("ReplicatedCollapsingMergeTree-one_shard_cluster",), + ("ReplicatedVersionedCollapsingMergeTree-sharded_cluster",), + ("ReplicatedVersionedCollapsingMergeTree-one_shard_cluster",), + ("ReplicatedGraphiteMergeTree-sharded_cluster",), + ("ReplicatedGraphiteMergeTree-one_shard_cluster",), + ], ) -@Examples("table_type",[ - ("ReplicatedMergeTree-sharded_cluster",), - ("ReplicatedMergeTree-one_shard_cluster",), - 
("ReplicatedReplacingMergeTree-sharded_cluster",), - ("ReplicatedReplacingMergeTree-one_shard_cluster",), - ("ReplicatedSummingMergeTree-sharded_cluster",), - ("ReplicatedSummingMergeTree-one_shard_cluster",), - ("ReplicatedAggregatingMergeTree-sharded_cluster",), - ("ReplicatedAggregatingMergeTree-one_shard_cluster",), - ("ReplicatedCollapsingMergeTree-sharded_cluster",), - ("ReplicatedCollapsingMergeTree-one_shard_cluster",), - ("ReplicatedVersionedCollapsingMergeTree-sharded_cluster",), - ("ReplicatedVersionedCollapsingMergeTree-one_shard_cluster",), - ("ReplicatedGraphiteMergeTree-sharded_cluster",), - ("ReplicatedGraphiteMergeTree-one_shard_cluster",) -]) @Name("alter fetch") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of ALTER FETCH. - """ + """Check the RBAC functionality of ALTER FETCH.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -111,12 +146,17 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example - if table_type != "ReplicatedMergeTree-sharded_cluster" and not self.context.stress: + if ( + table_type != "ReplicatedMergeTree-sharded_cluster" + and not self.context.stress + ): continue with Example(str(example)): for alias in aliases: with Suite(alias, test=privilege_granted_directly_or_via_role): - privilege_granted_directly_or_via_role(table_type=table_type, privilege=alias) + privilege_granted_directly_or_via_role( + table_type=table_type, privilege=alias + ) diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_freeze.py b/tests/testflows/rbac/tests/privileges/alter/alter_freeze.py index 775e2be270d..0f0e8ee6ee5 100644 --- a/tests/testflows/rbac/tests/privileges/alter/alter_freeze.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_freeze.py @@ -7,10 +7,10 @@ import rbac.helper.errors as errors aliases = {"ALTER FREEZE PARTITION", "FREEZE PARTITION", "ALL"} + @TestSuite def privilege_granted_directly_or_via_role(self, table_type, privilege, node=None): - """Check that user is only able to execute ALTER FREEZE PARTITION when they have required privilege, either directly or via role. 
- """ + """Check that user is only able to execute ALTER FREEZE PARTITION when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -19,19 +19,37 @@ def privilege_granted_directly_or_via_role(self, table_type, privilege, node=Non with Scenario("user with direct privilege", setup=instrument_clickhouse_server_log): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute ALTER FREEZE PARTITION with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, table_type=table_type, privilege=privilege, node=node) + with When( + f"I run checks that {user_name} is only able to execute ALTER FREEZE PARTITION with required privileges" + ): + privilege_check( + grant_target_name=user_name, + user_name=user_name, + table_type=table_type, + privilege=privilege, + node=node, + ) - with Scenario("user with privilege via role", setup=instrument_clickhouse_server_log): + with Scenario( + "user with privilege via role", setup=instrument_clickhouse_server_log + ): with user(node, user_name), role(node, role_name): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute ALTER FREEZE PARTITION with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, table_type=table_type, privilege=privilege, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute ALTER FREEZE PARTITION with required privileges" + ): + privilege_check( + grant_target_name=role_name, + user_name=user_name, + table_type=table_type, + privilege=privilege, + node=node, + ) + def privilege_check(grant_target_name, user_name, table_type, privilege, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege", setup=instrument_clickhouse_server_log): @@ -46,8 +64,12 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to freeze partitions without privilege"): - node.query(f"ALTER TABLE {table_name} FREEZE", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table_name} FREEZE", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege", setup=instrument_clickhouse_server_log): table_name = f"merge_tree_{getuid()}" @@ -58,9 +80,13 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT {privilege} ON {table_name} TO {grant_target_name}") with Then("I attempt to freeze partitions"): - node.query(f"ALTER TABLE {table_name} FREEZE", settings = [("user", user_name)]) + node.query( + f"ALTER TABLE {table_name} FREEZE", settings=[("user", user_name)] + ) - with Scenario("user with revoked privilege", setup=instrument_clickhouse_server_log): + with Scenario( + "user with revoked privilege", setup=instrument_clickhouse_server_log + ): table_name = f"merge_tree_{getuid()}" with table(node, table_name, table_type): @@ -68,25 +94,29 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No with When("I grant the freeze privilege"): node.query(f"GRANT {privilege} ON {table_name} TO {grant_target_name}") with And("I revoke the freeze privilege"): - node.query(f"REVOKE {privilege} ON {table_name} FROM {grant_target_name}") + node.query( + f"REVOKE {privilege} ON {table_name} FROM {grant_target_name}" + ) with Then("I attempt to freeze partitions"): - node.query(f"ALTER TABLE {table_name} FREEZE", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table_name} FREEZE", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AlterFreeze("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("alter freeze") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of ALTER FREEZE. 
- """ + """Check the RBAC functionality of ALTER FREEZE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -95,7 +125,7 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example if table_type != "MergeTree" and not self.context.stress: continue @@ -103,4 +133,6 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): with Example(str(example)): for alias in aliases: with Suite(alias, test=privilege_granted_directly_or_via_role): - privilege_granted_directly_or_via_role(table_type=table_type, privilege=alias) + privilege_granted_directly_or_via_role( + table_type=table_type, privilege=alias + ) diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_index.py b/tests/testflows/rbac/tests/privileges/alter/alter_index.py index 9bb1d72a004..d3190948eb7 100755 --- a/tests/testflows/rbac/tests/privileges/alter/alter_index.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_index.py @@ -11,26 +11,27 @@ import rbac.helper.errors as errors from rbac.helper.tables import table_types subprivileges = { - "ORDER BY" : 1 << 0, + "ORDER BY": 1 << 0, "SAMPLE BY": 1 << 1, - "ADD INDEX" : 1 << 2, - "MATERIALIZE INDEX" : 1 << 3, + "ADD INDEX": 1 << 2, + "MATERIALIZE INDEX": 1 << 3, "CLEAR INDEX": 1 << 4, "DROP INDEX": 1 << 5, } aliases = { - "ORDER BY" : ["ALTER ORDER BY", "ALTER MODIFY ORDER BY", "MODIFY ORDER BY"], + "ORDER BY": ["ALTER ORDER BY", "ALTER MODIFY ORDER BY", "MODIFY ORDER BY"], "SAMPLE BY": ["ALTER SAMPLE BY", "ALTER MODIFY SAMPLE BY", "MODIFY SAMPLE BY"], - "ADD INDEX" : ["ALTER ADD INDEX", "ADD INDEX"], - "MATERIALIZE INDEX" : ["ALTER MATERIALIZE INDEX", "MATERIALIZE INDEX"], + "ADD INDEX": ["ALTER ADD INDEX", "ADD INDEX"], + "MATERIALIZE INDEX": ["ALTER MATERIALIZE INDEX", "MATERIALIZE INDEX"], "CLEAR INDEX": ["ALTER CLEAR INDEX", "CLEAR INDEX"], "DROP INDEX": ["ALTER DROP INDEX", "DROP INDEX"], - "ALTER INDEX": ["ALTER INDEX", "INDEX", "ALL"] # super-privilege + "ALTER INDEX": ["ALTER INDEX", "INDEX", "ALL"], # super-privilege } # Extra permutation is for 'ALTER INDEX' super-privilege -permutation_count = (1 << len(subprivileges)) +permutation_count = 1 << len(subprivileges) + def permutations(table_type): """Uses stress flag and table type, returns list of all permutations to run @@ -44,8 +45,14 @@ def permutations(table_type): # *Selected permutations currently stand as [1,2,4,8,16,32,0,42,63,64,65]. 
# Testing ["ORDER BY", "SAMPLE BY", "ADD INDEX", "MATERIALIZE INDEX", "CLEAR INDEX", # "DROP INDEX", "NONE", {"DROP, MATERIALIZE, SAMPLE BY"}, all, "ALTER INDEX", and "INDEX"] - return [1 << index for index in range(len(subprivileges))] + \ - [0, int('101010', 2), permutation_count-1, permutation_count, permutation_count+1] + return [1 << index for index in range(len(subprivileges))] + [ + 0, + int("101010", 2), + permutation_count - 1, + permutation_count, + permutation_count + 1, + ] + def alter_index_privileges(grants: int): """Takes in an integer, and returns the corresponding set of tests to grant and @@ -57,85 +64,94 @@ def alter_index_privileges(grants: int): # Extra iteration for ALTER INDEX if grants >= permutation_count: - privileges.append(aliases["ALTER INDEX"][grants-permutation_count]) - elif grants==0: # No privileges + privileges.append(aliases["ALTER INDEX"][grants - permutation_count]) + elif grants == 0: # No privileges privileges.append("NONE") else: - if (grants & subprivileges["ORDER BY"]): + if grants & subprivileges["ORDER BY"]: privileges.append(aliases["ORDER BY"][grants % len(aliases["ORDER BY"])]) - if (grants & subprivileges["SAMPLE BY"]): + if grants & subprivileges["SAMPLE BY"]: privileges.append(aliases["SAMPLE BY"][grants % len(aliases["SAMPLE BY"])]) - if (grants & subprivileges["ADD INDEX"]): + if grants & subprivileges["ADD INDEX"]: privileges.append(aliases["ADD INDEX"][grants % len(aliases["ADD INDEX"])]) - if (grants & subprivileges["MATERIALIZE INDEX"]): - privileges.append(aliases["MATERIALIZE INDEX"][grants % len(aliases["MATERIALIZE INDEX"])]) - if (grants & subprivileges["CLEAR INDEX"]): - privileges.append(aliases["CLEAR INDEX"][grants % len(aliases["CLEAR INDEX"])]) - if (grants & subprivileges["DROP INDEX"]): - privileges.append(aliases["DROP INDEX"][grants % len(aliases["DROP INDEX"])]) + if grants & subprivileges["MATERIALIZE INDEX"]: + privileges.append( + aliases["MATERIALIZE INDEX"][grants % len(aliases["MATERIALIZE INDEX"])] + ) + if grants & subprivileges["CLEAR INDEX"]: + privileges.append( + aliases["CLEAR INDEX"][grants % len(aliases["CLEAR INDEX"])] + ) + if grants & subprivileges["DROP INDEX"]: + privileges.append( + aliases["DROP INDEX"][grants % len(aliases["DROP INDEX"])] + ) note(f"Testing these privileges: {privileges}") - return ', '.join(privileges) + return ", ".join(privileges) + def alter_index_privilege_handler(grants, table, user, node): """For all 5 subprivileges, if the privilege is granted: run test to ensure correct behavior, and if the privilege is not granted, run test to ensure correct behavior there as well. 
""" # Testing ALTER INDEX and INDEX is the same as testing all subprivileges - if grants > permutation_count-1: - grants = permutation_count-1 + if grants > permutation_count - 1: + grants = permutation_count - 1 - if (grants & subprivileges["ORDER BY"]): + if grants & subprivileges["ORDER BY"]: with When("I check order by when privilege is granted"): check_order_by_when_privilege_is_granted(table, user, node) else: with When("I check order by when privilege is not granted"): check_order_by_when_privilege_is_not_granted(table, user, node) - if (grants & subprivileges["SAMPLE BY"]): + if grants & subprivileges["SAMPLE BY"]: with When("I check sample by when privilege is granted"): check_sample_by_when_privilege_is_granted(table, user, node) else: with When("I check sample by when privilege is not granted"): check_sample_by_when_privilege_is_not_granted(table, user, node) - if (grants & subprivileges["ADD INDEX"]): + if grants & subprivileges["ADD INDEX"]: with When("I check add index when privilege is granted"): check_add_index_when_privilege_is_granted(table, user, node) else: with When("I check add index when privilege is not granted"): check_add_index_when_privilege_is_not_granted(table, user, node) - if (grants & subprivileges["MATERIALIZE INDEX"]): + if grants & subprivileges["MATERIALIZE INDEX"]: with When("I check materialize index when privilege is granted"): check_materialize_index_when_privilege_is_granted(table, user, node) else: with When("I check materialize index when privilege is not granted"): check_materialize_index_when_privilege_is_not_granted(table, user, node) - if (grants & subprivileges["CLEAR INDEX"]): + if grants & subprivileges["CLEAR INDEX"]: with When("I check clear index when privilege is granted"): check_clear_index_when_privilege_is_granted(table, user, node) else: with When("I check clear index when privilege is not granted"): check_clear_index_when_privilege_is_not_granted(table, user, node) - if (grants & subprivileges["DROP INDEX"]): + if grants & subprivileges["DROP INDEX"]: with When("I check drop index when privilege is granted"): check_drop_index_when_privilege_is_granted(table, user, node) else: with When("I check drop index when privilege is not granted"): check_drop_index_when_privilege_is_not_granted(table, user, node) + def check_order_by_when_privilege_is_granted(table, user, node): - """Ensures ORDER BY runs as expected when the privilege is granted to the specified user - """ + """Ensures ORDER BY runs as expected when the privilege is granted to the specified user""" column = "order" with Given("I run sanity check"): - node.query(f"ALTER TABLE {table} MODIFY ORDER BY b", settings = [("user", user)]) + node.query(f"ALTER TABLE {table} MODIFY ORDER BY b", settings=[("user", user)]) with And("I add new column and modify order using that column"): - node.query(f"ALTER TABLE {table} ADD COLUMN {column} UInt32, MODIFY ORDER BY (b, {column})") + node.query( + f"ALTER TABLE {table} ADD COLUMN {column} UInt32, MODIFY ORDER BY (b, {column})" + ) with When(f"I insert random data into the ordered-by column {column}"): - data = random.sample(range(1,1000),100) - values = ', '.join(f'({datum})' for datum in data) + data = random.sample(range(1, 1000), 100) + values = ", ".join(f"({datum})" for datum in data) node.query(f"INSERT INTO {table}({column}) VALUES {values}") with Then("I synchronize with optimize table"): @@ -144,154 +160,216 @@ def check_order_by_when_privilege_is_granted(table, user, node): with And("I verify that the added data is ordered in 
the table"): data.sort() note(data) - column_data = node.query(f"SELECT {column} FROM {table} FORMAT JSONEachRow").output - column_data = column_data.split('\n') + column_data = node.query( + f"SELECT {column} FROM {table} FORMAT JSONEachRow" + ).output + column_data = column_data.split("\n") for row, datum in zip(column_data[:10], data[:10]): - assert json.loads(row) == {column:datum}, error() + assert json.loads(row) == {column: datum}, error() with And("I verify that the sorting key is present in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert f"ORDER BY (b, {column})" in output['statement'], error() + output = json.loads( + node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output + ) + assert f"ORDER BY (b, {column})" in output["statement"], error() with But(f"I cannot drop the required column {column}"): exitcode, message = errors.missing_columns(column) - node.query(f"ALTER TABLE {table} DROP COLUMN {column}", - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} DROP COLUMN {column}", + exitcode=exitcode, + message=message, + ) + def check_sample_by_when_privilege_is_granted(table, user, node): - """Ensures SAMPLE BY runs as expected when the privilege is granted to the specified user - """ - column = 'sample' + """Ensures SAMPLE BY runs as expected when the privilege is granted to the specified user""" + column = "sample" with When(f"I add sample by clause"): - node.query(f"ALTER TABLE {table} MODIFY SAMPLE BY b", - settings = [("user", user)]) + node.query(f"ALTER TABLE {table} MODIFY SAMPLE BY b", settings=[("user", user)]) with Then("I verify that the sample is in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert f"SAMPLE BY b" in output['statement'], error() + output = json.loads( + node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output + ) + assert f"SAMPLE BY b" in output["statement"], error() + def check_add_index_when_privilege_is_granted(table, user, node): - """Ensures ADD INDEX runs as expected when the privilege is granted to the specified user - """ + """Ensures ADD INDEX runs as expected when the privilege is granted to the specified user""" index = "add" - with Given(f"I add index '{index}'"): # Column x: String - node.query(f"ALTER TABLE {table} ADD INDEX {index}(x) TYPE set(0) GRANULARITY 1", - settings = [("user", user)]) + with Given(f"I add index '{index}'"): # Column x: String + node.query( + f"ALTER TABLE {table} ADD INDEX {index}(x) TYPE set(0) GRANULARITY 1", + settings=[("user", user)], + ) with Then("I verify that the index is in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert f"INDEX {index} x TYPE set(0) GRANULARITY 1" in output['statement'], error() + output = json.loads( + node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output + ) + assert ( + f"INDEX {index} x TYPE set(0) GRANULARITY 1" in output["statement"] + ), error() with Finally(f"I drop index {index}"): node.query(f"ALTER TABLE {table} DROP INDEX {index}") + def check_materialize_index_when_privilege_is_granted(table, user, node): - """Ensures MATERIALIZE INDEX runs as expected when the privilege is granted to the specified user - """ + """Ensures MATERIALIZE INDEX runs as expected when the privilege is granted to the specified user""" index = "materialize" with Given(f"I add index '{index}'"): - node.query(f"ALTER TABLE {table} ADD INDEX {index}(x) 
TYPE set(0) GRANULARITY 1") + node.query( + f"ALTER TABLE {table} ADD INDEX {index}(x) TYPE set(0) GRANULARITY 1" + ) with When(f"I materialize index '{index}'"): - node.query(f"ALTER TABLE {table} MATERIALIZE INDEX {index} IN PARTITION 1 SETTINGS mutations_sync = 2", - settings = [("user", user)]) + node.query( + f"ALTER TABLE {table} MATERIALIZE INDEX {index} IN PARTITION 1 SETTINGS mutations_sync = 2", + settings=[("user", user)], + ) with Then("I verify that the index is in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert f"INDEX {index} x TYPE set(0) GRANULARITY 1" in output['statement'], error() + output = json.loads( + node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output + ) + assert ( + f"INDEX {index} x TYPE set(0) GRANULARITY 1" in output["statement"] + ), error() with Finally(f"I drop index {index}"): node.query(f"ALTER TABLE {table} DROP INDEX {index}") + def check_clear_index_when_privilege_is_granted(table, user, node): - """Ensures CLEAR INDEX runs as expected when the privilege is granted to the specified user - """ + """Ensures CLEAR INDEX runs as expected when the privilege is granted to the specified user""" index = "clear" - with Given(f"I add index '{index}'"): # Column x: String - node.query(f"ALTER TABLE {table} ADD INDEX {index}(x) TYPE set(0) GRANULARITY 1") + with Given(f"I add index '{index}'"): # Column x: String + node.query( + f"ALTER TABLE {table} ADD INDEX {index}(x) TYPE set(0) GRANULARITY 1" + ) with When(f"I clear index {index}"): node.query(f"ALTER TABLE {table} CLEAR INDEX {index} IN PARTITION 1") with Then("I verify that the index is in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert f"INDEX {index} x TYPE set(0) GRANULARITY 1" in output['statement'], error() + output = json.loads( + node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output + ) + assert ( + f"INDEX {index} x TYPE set(0) GRANULARITY 1" in output["statement"] + ), error() with Finally(f"I drop index {index}"): node.query(f"ALTER TABLE {table} DROP INDEX {index}") + def check_drop_index_when_privilege_is_granted(table, user, node): - """Ensures DROP INDEX runs as expected when the privilege is granted to the specified user - """ + """Ensures DROP INDEX runs as expected when the privilege is granted to the specified user""" with When("I try to drop nonexistent index, throws exception"): exitcode, message = errors.wrong_index_name("fake_index") - node.query(f"ALTER TABLE {table} DROP INDEX fake_index", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} DROP INDEX fake_index", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) index = "drop" with Given(f"I add the index"): - node.query(f"ALTER TABLE {table} ADD INDEX {index}(x) TYPE set(0) GRANULARITY 1") + node.query( + f"ALTER TABLE {table} ADD INDEX {index}(x) TYPE set(0) GRANULARITY 1" + ) with Then(f"I drop index {index} which exists"): - node.query(f"ALTER TABLE {table} DROP INDEX {index}", - settings = [("user", user)]) + node.query(f"ALTER TABLE {table} DROP INDEX {index}", settings=[("user", user)]) with And("I verify that the index is not in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert f"INDEX {index} x TYPE set(0) GRANULARITY 1" not in output['statement'], error() + output = json.loads( + node.query(f"SHOW CREATE TABLE {table} FORMAT 
JSONEachRow").output + ) + assert ( + f"INDEX {index} x TYPE set(0) GRANULARITY 1" not in output["statement"] + ), error() + def check_order_by_when_privilege_is_not_granted(table, user, node): - """Ensures ORDER BY errors as expected without the required privilege for the specified user - """ + """Ensures ORDER BY errors as expected without the required privilege for the specified user""" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} MODIFY ORDER BY b", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} MODIFY ORDER BY b", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_sample_by_when_privilege_is_not_granted(table, user, node): - """Ensures SAMPLE BY errors as expected without the required privilege for the specified user - """ + """Ensures SAMPLE BY errors as expected without the required privilege for the specified user""" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} MODIFY SAMPLE BY b", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} MODIFY SAMPLE BY b", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_add_index_when_privilege_is_not_granted(table, user, node): - """Ensures ADD INDEX errors as expected without the required privilege for the specified user - """ + """Ensures ADD INDEX errors as expected without the required privilege for the specified user""" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} ADD INDEX index1 b * length(x) TYPE set(1000) GRANULARITY 4", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} ADD INDEX index1 b * length(x) TYPE set(1000) GRANULARITY 4", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_materialize_index_when_privilege_is_not_granted(table, user, node): - """Ensures MATERIALIZE INDEX errors as expected without the required privilege for the specified user - """ + """Ensures MATERIALIZE INDEX errors as expected without the required privilege for the specified user""" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} MATERIALIZE INDEX index1", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} MATERIALIZE INDEX index1", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_clear_index_when_privilege_is_not_granted(table, user, node): - """Ensures CLEAR INDEX errors as expected without the required privilege for the specified user - """ + """Ensures CLEAR INDEX errors as expected without the required privilege for the specified user""" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} CLEAR INDEX index1 IN PARTITION 1", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} CLEAR INDEX index1 IN PARTITION 1", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def 
check_drop_index_when_privilege_is_not_granted(table, user, node): - """Ensures DROP INDEX errors as expected without the required privilege for the specified user - """ + """Ensures DROP INDEX errors as expected without the required privilege for the specified user""" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} DROP INDEX index1", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} DROP INDEX index1", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + @TestScenario def user_with_some_privileges(self, table_type, node=None): @@ -313,7 +391,10 @@ def user_with_some_privileges(self, table_type, node=None): node.query(f"GRANT {privileges} ON {table_name} TO {user_name}") with Then(f"I try to ALTER INDEX with given privileges"): - alter_index_privilege_handler(permutation, table_name, user_name, node) + alter_index_privilege_handler( + permutation, table_name, user_name, node + ) + @TestScenario @Requirements( @@ -344,6 +425,7 @@ def user_with_revoked_privileges(self, table_type, node=None): # Permutation 0: no privileges alter_index_privilege_handler(0, table_name, user_name, node) + @TestScenario @Requirements( RQ_SRS_006_RBAC_Privileges_AlterIndex_Grant("1.0"), @@ -363,7 +445,9 @@ def role_with_some_privileges(self, table_type, node=None): privileges = alter_index_privileges(permutation) with When(f"granted={privileges}"): - with table(node, table_name, table_type), user(node, user_name), role(node, role_name): + with table(node, table_name, table_type), user(node, user_name), role( + node, role_name + ): with Given("I grant the ALTER INDEX privilege to a role"): node.query(f"GRANT {privileges} ON {table_name} TO {role_name}") @@ -371,7 +455,10 @@ def role_with_some_privileges(self, table_type, node=None): node.query(f"GRANT {role_name} TO {user_name}") with Then(f"I try to ALTER INDEX with given privileges"): - alter_index_privilege_handler(permutation, table_name, user_name, node) + alter_index_privilege_handler( + permutation, table_name, user_name, node + ) + @TestScenario def user_with_revoked_role(self, table_type, node=None): @@ -389,7 +476,9 @@ def user_with_revoked_role(self, table_type, node=None): privileges = alter_index_privileges(permutation) with When(f"granted={privileges}"): - with table(node, table_name, table_type), user(node, user_name), role(node, role_name): + with table(node, table_name, table_type), user(node, user_name), role( + node, role_name + ): with When("I grant privileges to a role"): node.query(f"GRANT {privileges} ON {table_name} TO {role_name}") @@ -403,6 +492,7 @@ def user_with_revoked_role(self, table_type, node=None): # Permutation 0: no privileges for any permutation on these columns alter_index_privilege_handler(0, table_name, user_name, node) + @TestScenario @Requirements( RQ_SRS_006_RBAC_Privileges_AlterIndex_Cluster("1.0"), @@ -424,27 +514,34 @@ def user_with_privileges_on_cluster(self, table_type, node=None): with table(node, table_name, table_type): try: with Given("I have a user on a cluster"): - node.query(f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster" + ) - with When("I grant ALTER INDEX privileges needed for iteration on a cluster"): - node.query(f"GRANT ON CLUSTER sharded_cluster {privileges} ON {table_name} TO {user_name}") + with When( + "I grant ALTER INDEX 
privileges needed for iteration on a cluster" + ): + node.query( + f"GRANT ON CLUSTER sharded_cluster {privileges} ON {table_name} TO {user_name}" + ) with Then(f"I try to ALTER INDEX with given privileges"): - alter_index_privilege_handler(permutation, table_name, user_name, node) + alter_index_privilege_handler( + permutation, table_name, user_name, node + ) finally: with Finally("I drop the user on cluster"): node.query(f"DROP USER {user_name} ON CLUSTER sharded_cluster") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AlterIndex("1.0"), RQ_SRS_006_RBAC_Privileges_AlterIndex_TableEngines("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("alter index") def feature(self, stress=None, parallel=None, node="clickhouse1"): self.context.node = self.context.cluster.node(node) @@ -455,17 +552,22 @@ def feature(self, stress=None, parallel=None, node="clickhouse1"): self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example if table_type != "MergeTree" and not self.context.stress: continue - args = {"table_type" : table_type} + args = {"table_type": table_type} with Example(str(example)): with Pool(5) as pool: try: for scenario in loads(current_module(), Scenario): - Scenario(test=scenario, setup=instrument_clickhouse_server_log, parallel=True, executor=pool)(**args) + Scenario( + test=scenario, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + )(**args) finally: join() diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_move.py b/tests/testflows/rbac/tests/privileges/alter/alter_move.py index a8094716fe4..8d2fc79c0d4 100644 --- a/tests/testflows/rbac/tests/privileges/alter/alter_move.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_move.py @@ -5,12 +5,18 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors -aliases = {"ALTER MOVE PARTITION", "ALTER MOVE PART", "MOVE PARTITION", "MOVE PART", "ALL"} +aliases = { + "ALTER MOVE PARTITION", + "ALTER MOVE PART", + "MOVE PARTITION", + "MOVE PART", + "ALL", +} + @TestSuite def privilege_granted_directly_or_via_role(self, table_type, privilege, node=None): - """Check that user is only able to execute ALTER MOVE PARTITION when they have required privilege, either directly or via role. 
- """ + """Check that user is only able to execute ALTER MOVE PARTITION when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -20,8 +26,16 @@ def privilege_granted_directly_or_via_role(self, table_type, privilege, node=Non with Suite("user with direct privilege", setup=instrument_clickhouse_server_log): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute ALTER MOVE PARTITION with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, table_type=table_type, privilege=privilege, node=node) + with When( + f"I run checks that {user_name} is only able to execute ALTER MOVE PARTITION with required privileges" + ): + privilege_check( + grant_target_name=user_name, + user_name=user_name, + table_type=table_type, + privilege=privilege, + node=node, + ) with Suite("user with privilege via role", setup=instrument_clickhouse_server_log): with user(node, user_name), role(node, role_name): @@ -29,12 +43,20 @@ def privilege_granted_directly_or_via_role(self, table_type, privilege, node=Non with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute ALTER MOVE PARTITION with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, table_type=table_type, privilege=privilege, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute ALTER MOVE PARTITION with required privileges" + ): + privilege_check( + grant_target_name=role_name, + user_name=user_name, + table_type=table_type, + privilege=privilege, + node=node, + ) + def privilege_check(grant_target_name, user_name, table_type, privilege, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege", setup=instrument_clickhouse_server_log): @@ -50,121 +72,199 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to move partition without privilege"): - node.query(f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {target_table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {target_table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) - with Scenario("user without ALTER MOVE PARTITION privilege", setup=instrument_clickhouse_server_log): + with Scenario( + "user without ALTER MOVE PARTITION privilege", + setup=instrument_clickhouse_server_log, + ): source_table_name = f"source_merge_tree_{getuid()}" target_table_name = f"target_merge_tree_{getuid()}" with table(node, f"{source_table_name},{target_table_name}", table_type): - with When(f"I grant SELECT and ALTER DELETE privileges on {source_table_name} to {grant_target_name}"): - node.query(f"GRANT SELECT, ALTER DELETE ON {source_table_name} TO {grant_target_name}") + with When( + f"I grant SELECT and ALTER DELETE privileges on {source_table_name} to {grant_target_name}" + ): + node.query( + f"GRANT SELECT, ALTER DELETE ON {source_table_name} TO {grant_target_name}" + ) with And(f"I grant INSERT on {target_table_name} to {grant_target_name}"): - node.query(f"GRANT INSERT ON {target_table_name} TO {grant_target_name}") + node.query( + f"GRANT INSERT ON {target_table_name} TO {grant_target_name}" + ) with Then("I attempt to move partitions without ALTER MOVE privilege"): - node.query(f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {target_table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {target_table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) - with Scenario("user with ALTER MOVE PARTITION privilege", setup=instrument_clickhouse_server_log): + with Scenario( + "user with ALTER MOVE PARTITION privilege", + setup=instrument_clickhouse_server_log, + ): source_table_name = f"source_merge_tree_{getuid()}" target_table_name = f"target_merge_tree_{getuid()}" with table(node, f"{source_table_name},{target_table_name}", table_type): - with When(f"I grant SELECT, ALTER DELETE, and ALTER MOVE PARTITION privileges on {source_table_name} to {grant_target_name}"): - node.query(f"GRANT SELECT, ALTER DELETE, {privilege} ON {source_table_name} TO {grant_target_name}") + with When( + f"I grant SELECT, ALTER DELETE, and ALTER MOVE PARTITION privileges on {source_table_name} to {grant_target_name}" + ): + node.query( + f"GRANT SELECT, ALTER DELETE, {privilege} ON {source_table_name} TO {grant_target_name}" + ) with And(f"I grant INSERT on {target_table_name} to {grant_target_name}"): - node.query(f"GRANT INSERT ON {target_table_name} TO {grant_target_name}") + node.query( + f"GRANT INSERT ON {target_table_name} TO {grant_target_name}" + ) with Then("I attempt to move partitions with ALTER MOVE privilege"): - node.query(f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {target_table_name}", settings = [("user", user_name)]) + node.query( + 
f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {target_table_name}", + settings=[("user", user_name)], + ) - with Scenario("user with revoked ALTER MOVE PARTITION privilege", setup=instrument_clickhouse_server_log): + with Scenario( + "user with revoked ALTER MOVE PARTITION privilege", + setup=instrument_clickhouse_server_log, + ): source_table_name = f"source_merge_tree_{getuid()}" target_table_name = f"target_merge_tree_{getuid()}" with table(node, f"{source_table_name},{target_table_name}", table_type): - with When(f"I grant SELECT, ALTER DELETE, and ALTER MOVE PARTITION privileges on {source_table_name} to {grant_target_name}"): - node.query(f"GRANT SELECT, ALTER DELETE, {privilege} ON {source_table_name} TO {grant_target_name}") + with When( + f"I grant SELECT, ALTER DELETE, and ALTER MOVE PARTITION privileges on {source_table_name} to {grant_target_name}" + ): + node.query( + f"GRANT SELECT, ALTER DELETE, {privilege} ON {source_table_name} TO {grant_target_name}" + ) with And(f"I grant INSERT on {target_table_name} to {grant_target_name}"): - node.query(f"GRANT INSERT ON {target_table_name} TO {grant_target_name}") + node.query( + f"GRANT INSERT ON {target_table_name} TO {grant_target_name}" + ) with And("I revoke ALTER MOVE PARTITION privilege"): - node.query(f"REVOKE {privilege} ON {source_table_name} FROM {grant_target_name}") + node.query( + f"REVOKE {privilege} ON {source_table_name} FROM {grant_target_name}" + ) with Then("I attempt to move partition"): - node.query(f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {target_table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {target_table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) - with Scenario("move partition to source table of a materialized view", setup=instrument_clickhouse_server_log): + with Scenario( + "move partition to source table of a materialized view", + setup=instrument_clickhouse_server_log, + ): source_table_name = f"source_merge_tree_{getuid()}" mat_view_name = f"mat_view_{getuid()}" mat_view_source_table_name = f"mat_view_source_merge_tree_{getuid()}" - with table(node, f"{source_table_name},{mat_view_source_table_name}", table_type): + with table( + node, f"{source_table_name},{mat_view_source_table_name}", table_type + ): try: with Given("I have a materialized view"): - node.query(f"CREATE MATERIALIZED VIEW {mat_view_name} ENGINE = {table_type} PARTITION BY y ORDER BY d AS SELECT * FROM {mat_view_source_table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {mat_view_name} ENGINE = {table_type} PARTITION BY y ORDER BY d AS SELECT * FROM {mat_view_source_table_name}" + ) - with When(f"I grant SELECT, ALTER DELETE, and ALTER MOVE PARTITION privileges on {source_table_name} to {grant_target_name}"): - node.query(f"GRANT SELECT, ALTER DELETE, {privilege} ON {source_table_name} TO {grant_target_name}") + with When( + f"I grant SELECT, ALTER DELETE, and ALTER MOVE PARTITION privileges on {source_table_name} to {grant_target_name}" + ): + node.query( + f"GRANT SELECT, ALTER DELETE, {privilege} ON {source_table_name} TO {grant_target_name}" + ) - with And(f"I grant INSERT on {mat_view_source_table_name} to {grant_target_name}"): - node.query(f"GRANT INSERT ON {mat_view_source_table_name} TO {grant_target_name}") + with And( + f"I grant INSERT on {mat_view_source_table_name} to {grant_target_name}" + ): + node.query( + f"GRANT INSERT ON 
{mat_view_source_table_name} TO {grant_target_name}" + ) with Then("I attempt to move partitions with ALTER MOVE privilege"): - node.query(f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {mat_view_source_table_name}", settings = [("user", user_name)]) + node.query( + f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {mat_view_source_table_name}", + settings=[("user", user_name)], + ) finally: with Finally("I drop the materialized view"): node.query(f"DROP VIEW IF EXISTS {mat_view_name}") - with Scenario("move partition to implicit target table of a materialized view", setup=instrument_clickhouse_server_log): + with Scenario( + "move partition to implicit target table of a materialized view", + setup=instrument_clickhouse_server_log, + ): source_table_name = f"source_merge_tree_{getuid()}" mat_view_name = f"mat_view_{getuid()}" mat_view_source_table_name = f"mat_view_source_merge_tree_{getuid()}" - implicit_table_name = f"\\\".inner.{mat_view_name}\\\"" + implicit_table_name = f'\\".inner.{mat_view_name}\\"' - with table(node, f"{source_table_name},{mat_view_source_table_name}", table_type): + with table( + node, f"{source_table_name},{mat_view_source_table_name}", table_type + ): try: with Given("I have a materialized view"): - node.query(f"CREATE MATERIALIZED VIEW {mat_view_name} ENGINE = {table_type} PARTITION BY y ORDER BY d AS SELECT * FROM {mat_view_source_table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {mat_view_name} ENGINE = {table_type} PARTITION BY y ORDER BY d AS SELECT * FROM {mat_view_source_table_name}" + ) - with When(f"I grant SELECT, ALTER DELETE, and ALTER MOVE PARTITION privileges on {source_table_name} to {grant_target_name}"): - node.query(f"GRANT SELECT, ALTER DELETE, {privilege} ON {source_table_name} TO {grant_target_name}") + with When( + f"I grant SELECT, ALTER DELETE, and ALTER MOVE PARTITION privileges on {source_table_name} to {grant_target_name}" + ): + node.query( + f"GRANT SELECT, ALTER DELETE, {privilege} ON {source_table_name} TO {grant_target_name}" + ) - with And(f"I grant INSERT on {implicit_table_name} to {grant_target_name}"): - node.query(f"GRANT INSERT ON {implicit_table_name} TO {grant_target_name}") + with And( + f"I grant INSERT on {implicit_table_name} to {grant_target_name}" + ): + node.query( + f"GRANT INSERT ON {implicit_table_name} TO {grant_target_name}" + ) with Then("I attempt to move partitions with ALTER MOVE privilege"): - node.query(f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {implicit_table_name}", settings = [("user", user_name)]) + node.query( + f"ALTER TABLE {source_table_name} MOVE PARTITION 1 TO TABLE {implicit_table_name}", + settings=[("user", user_name)], + ) finally: with Finally("I drop the materialized view"): node.query(f"DROP VIEW IF EXISTS {mat_view_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AlterMove("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("alter move") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of ALTER MOVE. 
- """ + """Check the RBAC functionality of ALTER MOVE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -173,7 +273,7 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example if table_type != "MergeTree" and not self.context.stress: continue @@ -181,4 +281,6 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): with Example(str(example)): for alias in aliases: with Suite(alias, test=privilege_granted_directly_or_via_role): - privilege_granted_directly_or_via_role(table_type=table_type, privilege=alias) + privilege_granted_directly_or_via_role( + table_type=table_type, privilege=alias + ) diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_quota.py b/tests/testflows/rbac/tests/privileges/alter/alter_quota.py index faad7c001f4..ae8326a0eab 100644 --- a/tests/testflows/rbac/tests/privileges/alter/alter_quota.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_quota.py @@ -7,6 +7,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @contextmanager def quota(node, name): try: @@ -19,10 +20,10 @@ def quota(node, name): with Finally("I drop the quota"): node.query(f"DROP QUOTA IF EXISTS {name}") + @TestSuite def alter_quota_granted_directly(self, node=None): - """Check that a user is able to execute `ALTER QUOTA` with privileges are granted directly. - """ + """Check that a user is able to execute `ALTER QUOTA` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -31,15 +32,22 @@ def alter_quota_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=alter_quota, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in alter_quota.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=alter_quota, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in alter_quota.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def alter_quota_granted_via_role(self, node=None): - """Check that a user is able to execute `ALTER QUOTA` with privileges are granted through a role. 
- """ + """Check that a user is able to execute `ALTER QUOTA` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -52,20 +60,30 @@ def alter_quota_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=alter_quota, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in alter_quota.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=alter_quota, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in alter_quota.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("ALTER QUOTA",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("ALTER QUOTA",), + ], +) def alter_quota(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `ALTER QUOTA` when they have the necessary privilege. - """ + """Check that user is only able to execute `ALTER QUOTA` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -83,8 +101,12 @@ def alter_quota(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't alter a quota"): - node.query(f"ALTER QUOTA {alter_quota_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER QUOTA {alter_quota_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("ALTER QUOTA with privilege"): alter_quota_name = f"alter_quota_{getuid()}" @@ -95,25 +117,34 @@ def alter_quota(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can alter a user"): - node.query(f"ALTER QUOTA {alter_quota_name}", settings = [("user", f"{user_name}")]) + node.query( + f"ALTER QUOTA {alter_quota_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("ALTER QUOTA on cluster"): alter_quota_name = f"alter_quota_{getuid()}" try: with Given("I have a quota on a cluster"): - node.query(f"CREATE QUOTA {alter_quota_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE QUOTA {alter_quota_name} ON CLUSTER sharded_cluster" + ) with When(f"I grant {privilege}"): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can alter a quota"): - node.query(f"ALTER QUOTA {alter_quota_name} ON CLUSTER sharded_cluster", - settings = [("user", f"{user_name}")]) + node.query( + f"ALTER QUOTA {alter_quota_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the quota"): - node.query(f"DROP QUOTA IF EXISTS {alter_quota_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP QUOTA IF EXISTS {alter_quota_name} ON CLUSTER sharded_cluster" + ) with Scenario("ALTER QUOTA with revoked privilege"): alter_quota_name = f"alter_quota_{getuid()}" @@ -127,19 +158,23 @@ def alter_quota(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user can't alter a quota"): - node.query(f"ALTER QUOTA 
{alter_quota_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER QUOTA {alter_quota_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("alter quota") @Requirements( RQ_SRS_006_RBAC_Privileges_AlterQuota("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of ALTER QUOTA. - """ + """Check the RBAC functionality of ALTER QUOTA.""" self.context.node = self.context.cluster.node(node) Suite(run=alter_quota_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_role.py b/tests/testflows/rbac/tests/privileges/alter/alter_role.py index 49e8baa191b..4be7123a969 100644 --- a/tests/testflows/rbac/tests/privileges/alter/alter_role.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_role.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `ALTER ROLE` with privileges are granted directly. - """ + """Check that a user is able to execute `ALTER ROLE` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=alter_role, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in alter_role.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=alter_role, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in alter_role.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `ALTER ROLE` with privileges are granted through a role. - """ + """Check that a user is able to execute `ALTER ROLE` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,20 +45,30 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=alter_role, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in alter_role.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=alter_role, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in alter_role.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("ALTER ROLE",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("ALTER ROLE",), + ], +) def alter_role(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `ALTER ROLE` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `ALTER ROLE` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -69,8 +86,12 @@ def alter_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't alter a role"): - node.query(f"ALTER ROLE {alter_role_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER ROLE {alter_role_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("ALTER ROLE with privilege"): alter_role_name = f"alter_role_{getuid()}" @@ -81,7 +102,9 @@ def alter_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can alter a role"): - node.query(f"ALTER ROLE {alter_role_name}", settings = [("user", f"{user_name}")]) + node.query( + f"ALTER ROLE {alter_role_name}", settings=[("user", f"{user_name}")] + ) with Scenario("ALTER ROLE on cluster"): alter_role_name = f"alter_role_{getuid()}" @@ -94,11 +117,16 @@ def alter_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can alter a role"): - node.query(f"ALTER ROLE {alter_role_name} ON CLUSTER sharded_cluster", settings = [("user", f"{user_name}")]) + node.query( + f"ALTER ROLE {alter_role_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP ROLE IF EXISTS {alter_role_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP ROLE IF EXISTS {alter_role_name} ON CLUSTER sharded_cluster" + ) with Scenario("ALTER ROLE with revoked privilege"): alter_role_name = f"alter_role_{getuid()}" @@ -111,19 +139,23 @@ def alter_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot alter a role"): - node.query(f"ALTER ROLE {alter_role_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER ROLE {alter_role_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("alter role") @Requirements( RQ_SRS_006_RBAC_Privileges_AlterRole("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of ALTER ROLE. - """ + """Check the RBAC functionality of ALTER ROLE.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_row_policy.py b/tests/testflows/rbac/tests/privileges/alter/alter_row_policy.py index a0d1e4271bc..36a83051d5a 100644 --- a/tests/testflows/rbac/tests/privileges/alter/alter_row_policy.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_row_policy.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `ALTER ROW POLICY` with privileges are granted directly. 
- """ + """Check that a user is able to execute `ALTER ROW POLICY` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=alter_row_policy, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in alter_row_policy.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=alter_row_policy, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in alter_row_policy.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `ALTER ROW POLICY` with privileges are granted through a role. - """ + """Check that a user is able to execute `ALTER ROW POLICY` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,21 +45,31 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=alter_row_policy, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in alter_row_policy.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=alter_row_policy, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in alter_row_policy.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("ALTER ROW POLICY",), - ("ALTER POLICY",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("ALTER ROW POLICY",), + ("ALTER POLICY",), + ], +) def alter_row_policy(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `ALTER ROW POLICY` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `ALTER ROW POLICY` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -73,12 +90,18 @@ def alter_row_policy(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't alter a row policy"): - node.query(f"ALTER ROW POLICY {alter_row_policy_name} ON {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER ROW POLICY {alter_row_policy_name} ON {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the row policy"): - node.query(f"DROP ROW POLICY IF EXISTS {alter_row_policy_name} ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {alter_row_policy_name} ON {table_name}" + ) with Scenario("ALTER ROW POLICY with privilege"): alter_row_policy_name = f"alter_row_policy_{getuid()}" @@ -92,11 +115,16 @@ def alter_row_policy(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can alter a row policy"): - node.query(f"ALTER ROW POLICY {alter_row_policy_name} ON {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"ALTER ROW POLICY {alter_row_policy_name} ON {table_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the row policy"): - node.query(f"DROP ROW POLICY IF EXISTS {alter_row_policy_name} ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {alter_row_policy_name} ON {table_name}" + ) with Scenario("ALTER ROW POLICY on cluster"): alter_row_policy_name = f"alter_row_policy_{getuid()}" @@ -104,17 +132,24 @@ def alter_row_policy(self, privilege, grant_target_name, user_name, node=None): try: with Given("I have a row policy on a cluster"): - node.query(f"CREATE ROW POLICY {alter_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"CREATE ROW POLICY {alter_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with When(f"I grant {privilege}"): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can alter a row policy"): - node.query(f"ALTER ROW POLICY {alter_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"ALTER ROW POLICY {alter_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP ROW POLICY IF EXISTS {alter_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {alter_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with Scenario("ALTER ROW POLICY with revoked privilege"): alter_row_policy_name = f"alter_row_policy_{getuid()}" @@ -131,16 +166,21 @@ def alter_row_policy(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot alter row policy"): - node.query(f"ALTER ROW POLICY {alter_row_policy_name} ON {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER ROW POLICY {alter_row_policy_name} ON {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: - with Finally("I drop the row 
policy"): - node.query(f"DROP ROW POLICY IF EXISTS {alter_row_policy_name} ON {table_name}") + with Finally("I drop the row policy"): + node.query( + f"DROP ROW POLICY IF EXISTS {alter_row_policy_name} ON {table_name}" + ) + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Restriction("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Restriction("1.0")) def no_grants(self, node=None): """Check that user is unable to select from a table without a row policy after a row policy has been altered to have a condition. @@ -162,22 +202,22 @@ def no_grants(self, node=None): with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() with When("I alter the row policy to have a condition"): node.query(f"ALTER POLICY {pol_name} ON {table_name} FOR SELECT USING 1") with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Permissive("1.0"), ) def permissive(self, node=None): - """Check that user is able to see from a table when they have a PERMISSIVE policy. - """ + """Check that user is able to see from a table when they have a PERMISSIVE policy.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -194,19 +234,19 @@ def permissive(self, node=None): node.query(f"INSERT INTO {table_name} (y) VALUES (1), (2)") with When("I alter a row policy to be permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default" + ) with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Restrictive("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Restrictive("1.0")) def restrictive(self, node=None): - """Check that user is able to see values they have a RESTRICTIVE policy for. 
- """ + """Check that user is able to see values they have a RESTRICTIVE policy for.""" table_name = f"table_{getuid()}" perm_pol_name = f"perm_pol_{getuid()}" @@ -224,25 +264,29 @@ def restrictive(self, node=None): row_policy(name=perm_pol_name, table=table_name) with And("I alter a row policy to be permissive"): - node.query(f"ALTER ROW POLICY {perm_pol_name} ON {table_name} FOR SELECT USING y=1 OR y=2 TO default") + node.query( + f"ALTER ROW POLICY {perm_pol_name} ON {table_name} FOR SELECT USING y=1 OR y=2 TO default" + ) with And("I alter a row policy to be restrictive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} AS RESTRICTIVE FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} AS RESTRICTIVE FOR SELECT USING y=1 TO default" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1), (2)") with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Alter_ForSelect("1.0"), ) def for_select(self, node=None): - """Check that user is able to see values allowed by the row policy condition in the FOR SELECT clause. - """ + """Check that user is able to see values allowed by the row policy condition in the FOR SELECT clause.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -259,19 +303,19 @@ def for_select(self, node=None): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with Given("I alter therow policy to use FOR SELECT"): - node.query(f"Alter ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1 TO default") + node.query( + f"Alter ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1 TO default" + ) with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Alter_Condition("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Alter_Condition("1.0")) def condition(self, node=None): - """Check that user is able to see values allowed by the row policy condition. - """ + """Check that user is able to see values allowed by the row policy condition.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -288,19 +332,19 @@ def condition(self, node=None): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with When("I alter a row policy to be permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default" + ) with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Alter_Condition_None("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Alter_Condition_None("1.0")) def remove_condition(self, node=None): - """Check that user is able to see the table after row policy condition has been removed. 
- """ + """Check that user is able to see the table after row policy condition has been removed.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -314,25 +358,27 @@ def remove_condition(self, node=None): row_policy(name=pol_name, table=table_name) with And("The row policy has a condition"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with When("I alter a row policy to not have a condition"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING NONE") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING NONE" + ) with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Alter_IfExists("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Alter_IfExists("1.0")) def if_exists(self, node=None): - """Check that a row policy altered using IF EXISTS restricts rows as expected. - """ + """Check that a row policy altered using IF EXISTS restricts rows as expected.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -349,19 +395,19 @@ def if_exists(self, node=None): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with When("I have alter a row policy to be permissive using IF EXISTS clause"): - node.query(f"ALTER ROW POLICY IF EXISTS {pol_name} ON {table_name} FOR SELECT USING 1 TO default") + node.query( + f"ALTER ROW POLICY IF EXISTS {pol_name} ON {table_name} FOR SELECT USING 1 TO default" + ) with Then("I select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Alter_Rename("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Alter_Rename("1.0")) def rename(self, node=None): - """Check that a row policy altered using RENAME restricts rows as expected. - """ + """Check that a row policy altered using RENAME restricts rows as expected.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -379,26 +425,28 @@ def rename(self, node=None): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with And("The row policy is permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default" + ) with When("I have alter a row policy by renaming it"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} RENAME TO {pol_new_name}") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} RENAME TO {pol_new_name}" + ) with Then("I select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_new_name} ON {table_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster("1.0")) def on_cluster(self, node=None): - """Check that a row policy altered using ON CLUSTER applies to the nodes of the cluster correctly. 
- """ + """Check that a row policy altered using ON CLUSTER applies to the nodes of the cluster correctly.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -409,10 +457,14 @@ def on_cluster(self, node=None): try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory" + ) with And("I have a row policy on a cluster on that table"): - node.query(f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("The table has some values on the first node"): node.query(f"INSERT INTO {table_name} (x) VALUES (1)") @@ -421,27 +473,31 @@ def on_cluster(self, node=None): node2.query(f"INSERT INTO {table_name} (x) VALUES (1)") with When("I alter the row policy to have a condition"): - node.query(f"ALTER ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name} FOR SELECT USING 1") + node.query( + f"ALTER ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name} FOR SELECT USING 1" + ) with Then("I select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() with And("I select from another node on the cluster"): output = node2.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() finally: with Finally("I drop the row policy", flags=TE): - node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I drop the table", flags=TE): node.query(f"DROP TABLE {table_name} ON CLUSTER sharded_cluster") + @TestScenario def diff_policies_on_diff_nodes(self, node=None): - """Check that a row policy altered on a node, does not effect row policy on a different node. 
- """ + """Check that a row policy altered on a node, does not effect row policy on a different node.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -452,10 +508,14 @@ def diff_policies_on_diff_nodes(self, node=None): try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory" + ) with And("I have a row policy on the cluster"): - node.query(f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("The table has some values on the first node"): node.query(f"INSERT INTO {table_name} (x) VALUES (1)") @@ -464,30 +524,34 @@ def diff_policies_on_diff_nodes(self, node=None): node2.query(f"INSERT INTO {table_name} (x) VALUES (1)") with When("I alter the row policy on the first node"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1" + ) with Then("I select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() with And("I select from another node on the cluster"): output = node2.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() finally: with Finally("I drop the row policy", flags=TE): - node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I drop the table", flags=TE): node.query(f"DROP TABLE {table_name} ON CLUSTER sharded_cluster") + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0"), ) def assignment(self, node=None): - """Check that user is able to see rows from a table when they have PERMISSIVE policy assigned to them. - """ + """Check that user is able to see rows from a table when they have PERMISSIVE policy assigned to them.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -501,7 +565,9 @@ def assignment(self, node=None): row_policy(name=pol_name, table=table_name) with And("The row policy is permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") @@ -511,15 +577,15 @@ def assignment(self, node=None): with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_None("1.0"), ) def assignment_none(self, node=None): - """Check that no one is affected when a row policy is altered to be assigned to NONE. 
- """ + """Check that no one is affected when a row policy is altered to be assigned to NONE.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -533,7 +599,9 @@ def assignment_none(self, node=None): row_policy(name=pol_name, table=table_name) with And("The row policy is permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") @@ -543,15 +611,15 @@ def assignment_none(self, node=None): with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_All("1.0"), ) def assignment_all(self, node=None): - """Check that everyone is effected with a row policy is altered to be assigned to ALL. - """ + """Check that everyone is effected with a row policy is altered to be assigned to ALL.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -565,7 +633,9 @@ def assignment_all(self, node=None): row_policy(name=pol_name, table=table_name) with And("The row policy is permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") @@ -575,15 +645,15 @@ def assignment_all(self, node=None): with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_AllExcept("1.0"), ) def assignment_all_except(self, node=None): - """Check that everyone is except the specified user is effect by a row policy is altered to be assigned to ALL EXCEPT. - """ + """Check that everyone is except the specified user is effect by a row policy is altered to be assigned to ALL EXCEPT.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -597,22 +667,25 @@ def assignment_all_except(self, node=None): row_policy(name=pol_name, table=table_name) with And("The row policy is permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with When("I alter a row policy to be assigned to ALL EXCEPT default"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} TO ALL EXCEPT default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} TO ALL EXCEPT default" + ) with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def nested_view(self, node=None): """Check that if a user has a row policy on a table and a view is altered to use a condition on that table, the user is only able to access the rows specified by the assigned policies. 
@@ -638,20 +711,21 @@ def nested_view(self, node=None): node.query(f"CREATE VIEW {view_name} AS SELECT * FROM {table_name}") with When("I alter the row policy to be permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default" + ) with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def nested_live_view_before_policy(self, node=None): """Check that if a live view exists on a table and then a row policy is created, the user is only able to select rows specified by the assigned policies from the view. @@ -666,8 +740,12 @@ def nested_live_view_before_policy(self, node=None): with table(node, table_name): try: - with Given("I add allow_experimental_live_view to the default query settings"): - default_query_settings = getsattr(current().context, "default_query_settings", []) + with Given( + "I add allow_experimental_live_view to the default query settings" + ): + default_query_settings = getsattr( + current().context, "default_query_settings", [] + ) default_query_settings.append(("allow_experimental_live_view", 1)) with And("I have a row policy"): @@ -677,30 +755,40 @@ def nested_live_view_before_policy(self, node=None): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with And("There exists a live view on the table"): - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}") + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}" + ) with When("I alter the row policy to be permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default" + ) with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the live view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") - with And("I remove allow_experimental_live_view from the default query settings", flags=TE): + with And( + "I remove allow_experimental_live_view from the default query settings", + flags=TE, + ): if default_query_settings: try: - default_query_settings.pop(default_query_settings.index(("allow_experimental_live_view", 1))) + default_query_settings.pop( + default_query_settings.index( + ("allow_experimental_live_view", 1) + ) + ) except ValueError: pass + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def nested_live_view_after_policy(self, node=None): """Check that if a user has a row policy on a table and a materialized view is created on that table, the user is only able to select rows specified by the assigned policies from the view. 
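(Editor's aside, not part of the diff: both live-view scenarios in this file bracket their work with the `allow_experimental_live_view` toggle visible in the surrounding hunks. Pulled out of the diff context it reduces to roughly the pair of helpers below; the two function names are invented for illustration, while `getsattr` and `current` are the testflows helpers these files already use.)

# Illustrative only: the live-view scenarios inline this logic rather than calling helpers.
def enable_live_view_setting(context):
    """Add allow_experimental_live_view=1 to the shared default query settings."""
    default_query_settings = getsattr(context, "default_query_settings", [])
    default_query_settings.append(("allow_experimental_live_view", 1))
    return default_query_settings


def disable_live_view_setting(default_query_settings):
    """Remove the setting again; ignore the case where it was never appended."""
    if default_query_settings:
        try:
            default_query_settings.remove(("allow_experimental_live_view", 1))
        except ValueError:
            pass

A scenario would call `enable_live_view_setting(current().context)` in its Given step and the matching disable call in its Finally block, which mirrors the inlined setup and teardown code in these hunks.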
@@ -715,8 +803,12 @@ def nested_live_view_after_policy(self, node=None): with table(node, table_name): try: - with Given("I add allow_experimental_live_view to the default query settings"): - default_query_settings = getsattr(current().context, "default_query_settings", []) + with Given( + "I add allow_experimental_live_view to the default query settings" + ): + default_query_settings = getsattr( + current().context, "default_query_settings", [] + ) default_query_settings.append(("allow_experimental_live_view", 1)) with And("I have a row policy"): @@ -726,30 +818,40 @@ def nested_live_view_after_policy(self, node=None): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with When("I alter the row policy to be permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default" + ) with And("I create a live view on the table"): - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}") + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}" + ) with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the live view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") - with And("I remove allow_experimental_live_view from the default query settings", flags=TE): + with And( + "I remove allow_experimental_live_view from the default query settings", + flags=TE, + ): if default_query_settings: try: - default_query_settings.pop(default_query_settings.index(("allow_experimental_live_view", 1))) + default_query_settings.pop( + default_query_settings.index( + ("allow_experimental_live_view", 1) + ) + ) except ValueError: pass + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def nested_mat_view_before_policy(self, node=None): """Check that if a materialized view exists on a table and then a row policy is created, the user is only able to select rows specified by the assigned policies from the view. 
@@ -768,26 +870,29 @@ def nested_mat_view_before_policy(self, node=None): row_policy(name=pol_name, table=table_name) with And("There exists a mat view on the table"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with When("I alter the row policy"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default" + ) with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the materialized view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def nested_mat_view_after_policy(self, node=None): """Check that if a user has a row policy on a table and a materialized view is created on that table, the user is only able to select rows specified by the assigned policies from the view. @@ -806,26 +911,29 @@ def nested_mat_view_after_policy(self, node=None): row_policy(name=pol_name, table=table_name) with And("I alter the row policy"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default" + ) with When("I create a mat view on the table"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the materialized view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def populate_mat_view(self, node=None): """Check that if a user has a row policy on a table and a materialized view is created using POPULATE from that table, the user can only select the rows from the materialized view specified in the row policy. 
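(Editor's aside, not part of the diff: the next hunk touches `populate_mat_view`, where the row policy is in place before the materialized view is created with POPULATE and the assertion is that only the permitted row shows up in the view. A condensed, illustrative version of that flow, again reusing the helpers these files import and with an invented scenario name, could look like the sketch below.)

# Illustrative sketch only -- mirrors the populate_mat_view scenario reformatted below.
@TestScenario
def populate_mat_view_sketch(self, node=None):
    """Sketch: a POPULATE'd materialized view only shows rows the row policy allows."""
    table_name = f"table_{getuid()}"
    view_name = f"view_{getuid()}"
    pol_name = f"pol_{getuid()}"

    if node is None:
        node = self.context.node

    with table(node, table_name):
        try:
            with Given("I have a row policy that only allows y=1 for default"):
                row_policy(name=pol_name, table=table_name)
                node.query(
                    f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default"
                )

            with And("the table has two rows"):
                node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)")

            with When("I create a materialized view populated from the table"):
                node.query(
                    f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT * FROM {table_name}"
                )

            with Then("only the permitted row is visible in the view"):
                output = node.query(f"SELECT * FROM {view_name}").output
                assert "1" in output and "2" not in output, error()
        finally:
            with Finally("I drop the materialized view"):
                node.query(f"DROP VIEW IF EXISTS {view_name}")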
@@ -844,26 +952,29 @@ def populate_mat_view(self, node=None): row_policy(name=pol_name, table=table_name) with And("I alter a row policy on the table"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with When("I create a mat view populated by the table"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT * FROM {table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT * FROM {table_name}" + ) with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the materialized view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def dist_table(self, node=None): """Check that if a user has a row policy on a table and a distributed table is created on that table, the user is only able to access the rows specified by the assigned policies. @@ -879,27 +990,37 @@ def dist_table(self, node=None): try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory" + ) with And("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I have a distributed table"): - node.query(f"CREATE TABLE {dist_table_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())") + node.query( + f"CREATE TABLE {dist_table_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())" + ) with And("The table has some values on the first node"): node.query(f"INSERT INTO {table_name} (x) VALUES (1)") with When("I alter the row policy to be permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} ON CLUSTER sharded_cluster FOR SELECT USING 1") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} ON CLUSTER sharded_cluster FOR SELECT USING 1" + ) with Then("I select from the distributed table"): output = node.query(f"SELECT * FROM {dist_table_name}").output - assert '' == output, error() + assert "" == output, error() finally: with Finally("I drop the row policy", flags=TE): - node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER sharded_cluster") @@ -907,13 +1028,11 @@ def dist_table(self, node=None): with And("I drop the distributed table", flags=TE): node.query(f"DROP TABLE IF EXISTS {dist_table_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def dist_table_diff_policies_on_diff_nodes(self, node=None): - """Check that user is only able to select 
from the distributed table what is allowed by the row policies on each node. - """ + """Check that user is only able to select from the distributed table what is allowed by the row policies on each node.""" table_name = f"table_{getuid()}" dist_table_name = f"dist_table_{getuid()}" @@ -925,13 +1044,19 @@ def dist_table_diff_policies_on_diff_nodes(self, node=None): try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory" + ) with And("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I have a distributed table"): - node.query(f"CREATE TABLE {dist_table_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())") + node.query( + f"CREATE TABLE {dist_table_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())" + ) with And("The table has some values on the first node"): node.query(f"INSERT INTO {table_name} (x) VALUES (1)") @@ -940,15 +1065,19 @@ def dist_table_diff_policies_on_diff_nodes(self, node=None): node2.query(f"INSERT INTO {table_name} (x) VALUES (2)") with When("I alter the row policy to be permissive on the first node"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1" + ) with Then("I select from the distributed table"): output = node.query(f"SELECT * FROM {dist_table_name}").output - assert '1' not in output and '2' in output, error() + assert "1" not in output and "2" in output, error() finally: with Finally("I drop the row policy", flags=TE): - node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name} ON CLUSTER sharded_cluster" + ) with And("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER sharded_cluster") @@ -956,10 +1085,9 @@ def dist_table_diff_policies_on_diff_nodes(self, node=None): with And("I drop the distributed table", flags=TE): node.query(f"DROP TABLE IF EXISTS {dist_table_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def dist_table_on_dist_table(self, node=None): """Check that if a user has a row policy on a table and a distributed table is created on that table, and another distributed table is created on top of that, @@ -976,40 +1104,55 @@ def dist_table_on_dist_table(self, node=None): try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory" + ) with And("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I have a distributed table on a cluster"): - node.query(f"CREATE TABLE {dist_table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())") + node.query( + f"CREATE TABLE {dist_table_name} ON CLUSTER 
sharded_cluster (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())" + ) with And("I have a distributed table on the other distributed table"): - node.query(f"CREATE TABLE {dist_table_2_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {dist_table_name}, rand())") + node.query( + f"CREATE TABLE {dist_table_2_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {dist_table_name}, rand())" + ) with And("The table has some values on the first node"): node.query(f"INSERT INTO {table_name} (x) VALUES (1)") with When("I alter the row policy to be permissive on the first node"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1" + ) with Then("I select from the second distributed table"): output = node.query(f"SELECT * FROM {dist_table_2_name}").output - assert '' == output, error() + assert "" == output, error() finally: with Finally("I drop the row policy", flags=TE): - node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER sharded_cluster") with And("I drop the distributed table", flags=TE): - node.query(f"DROP TABLE IF EXISTS {dist_table_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP TABLE IF EXISTS {dist_table_name} ON CLUSTER sharded_cluster" + ) with And("I drop the outer distributed table", flags=TE): node.query(f"DROP TABLE IF EXISTS {dist_table_2_name}") + @TestScenario def policy_before_table(self, node=None): """Check that if the policy is created and altered before the table, @@ -1025,7 +1168,9 @@ def policy_before_table(self, node=None): row_policy(name=pol_name, table=table_name) with And("I alter the row policy"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default" + ) with table(node, table_name): with When("The table has some values"): @@ -1033,7 +1178,8 @@ def policy_before_table(self, node=None): with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() + @TestScenario @Requirements( @@ -1056,20 +1202,26 @@ def dict(self, node=None): row_policy(name=pol_name, table=table_name) with And("I have a table"): - node.query(f"CREATE TABLE {table_name} (key UInt64, val UInt64 DEFAULT 5) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} (key UInt64, val UInt64 DEFAULT 5) ENGINE = Memory" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (key) VALUES (1),(2)") with And("I create a dict on the table"): - node.query(f"CREATE DICTIONARY {dict_name} (key UInt64 DEFAULT 0, val UInt64 DEFAULT 5) PRIMARY KEY key SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE {table_name} PASSWORD '' DB 'default')) LIFETIME(MIN 0 MAX 0) LAYOUT(FLAT())") + node.query( + f"CREATE DICTIONARY {dict_name} (key UInt64 DEFAULT 0, val UInt64 DEFAULT 5) PRIMARY KEY key SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE {table_name} PASSWORD '' DB 'default')) LIFETIME(MIN 0 MAX 0) LAYOUT(FLAT())" + ) with When("I alter the row policy to be 
permissive"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING key=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING key=1 TO default" + ) with Then("I try to select from the dict"): output = node.query(f"SELECT * FROM {dict_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the materialized view", flags=TE): @@ -1084,11 +1236,10 @@ def dict(self, node=None): @Requirements( RQ_SRS_006_RBAC_Privileges_AlterRowPolicy("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of ALTER ROW POLICY. - """ + """Check the RBAC functionality of ALTER ROW POLICY.""" self.context.node = self.context.cluster.node(node) self.context.node2 = self.context.cluster.node("clickhouse2") @@ -1116,7 +1267,10 @@ def feature(self, node="clickhouse1"): Scenario(run=populate_mat_view, setup=instrument_clickhouse_server_log) Scenario(run=dist_table, setup=instrument_clickhouse_server_log) Scenario(run=dist_table_on_dist_table, setup=instrument_clickhouse_server_log) - Scenario(run=dist_table_diff_policies_on_diff_nodes, setup=instrument_clickhouse_server_log) + Scenario( + run=dist_table_diff_policies_on_diff_nodes, + setup=instrument_clickhouse_server_log, + ) Scenario(run=diff_policies_on_diff_nodes, setup=instrument_clickhouse_server_log) Scenario(run=policy_before_table, setup=instrument_clickhouse_server_log) Scenario(run=dict, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_settings.py b/tests/testflows/rbac/tests/privileges/alter/alter_settings.py index a1a2b824a11..f9d81342b84 100755 --- a/tests/testflows/rbac/tests/privileges/alter/alter_settings.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_settings.py @@ -8,26 +8,41 @@ from rbac.helper.common import * import rbac.helper.errors as errors from rbac.helper.tables import table_types -aliases = {"ALTER SETTINGS", "ALTER SETTING", "ALTER MODIFY SETTING", "MODIFY SETTING", "ALL"} +aliases = { + "ALTER SETTINGS", + "ALTER SETTING", + "ALTER MODIFY SETTING", + "MODIFY SETTING", + "ALL", +} + def check_alter_settings_when_privilege_is_granted(table, user, node): - """Ensures ADD SETTINGS runs as expected when the privilege is granted to the specified user - """ + """Ensures ADD SETTINGS runs as expected when the privilege is granted to the specified user""" with Given("I check that the modified setting is not already in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert "merge_with_ttl_timeout = 5" not in output['statement'], error() + output = json.loads( + node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output + ) + assert "merge_with_ttl_timeout = 5" not in output["statement"], error() with And(f"I modify settings"): - node.query(f"ALTER TABLE {table} MODIFY SETTING merge_with_ttl_timeout=5", - settings=[("user", user)]) + node.query( + f"ALTER TABLE {table} MODIFY SETTING merge_with_ttl_timeout=5", + settings=[("user", user)], + ) with Then("I verify that the setting is in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert "SETTINGS index_granularity = 8192, merge_with_ttl_timeout = 5" in output['statement'], error() + output = json.loads( + 
node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output + ) + assert ( + "SETTINGS index_granularity = 8192, merge_with_ttl_timeout = 5" + in output["statement"] + ), error() + def check_alter_settings_when_privilege_is_not_granted(table, user, node): - """Ensures CLEAR SETTINGS runs as expected when the privilege is granted to the specified user - """ + """Ensures CLEAR SETTINGS runs as expected when the privilege is granted to the specified user""" with When("I grant the user NONE privilege"): node.query(f"GRANT NONE TO {user}") @@ -36,8 +51,13 @@ def check_alter_settings_when_privilege_is_not_granted(table, user, node): with Then("I try to use ALTER SETTING, has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} MODIFY SETTING merge_with_ttl_timeout=5", - settings=[("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} MODIFY SETTING merge_with_ttl_timeout=5", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + @TestScenario def user_with_privileges(self, privilege, table_type, node=None): @@ -80,7 +100,10 @@ def user_with_revoked_privileges(self, privilege, table_type, node=None): node.query(f"REVOKE {privilege} ON {table_name} FROM {user_name}") with When(f"I try to ALTER SETTINGS"): - check_alter_settings_when_privilege_is_not_granted(table_name, user_name, node) + check_alter_settings_when_privilege_is_not_granted( + table_name, user_name, node + ) + @TestScenario @Requirements( @@ -97,7 +120,9 @@ def role_with_some_privileges(self, privilege, table_type, node=None): user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" - with table(node, table_name, table_type), user(node, user_name), role(node, role_name): + with table(node, table_name, table_type), user(node, user_name), role( + node, role_name + ): with Given("I grant the alter settings privilege to a role"): node.query(f"GRANT {privilege} ON {table_name} TO {role_name}") @@ -107,6 +132,7 @@ def role_with_some_privileges(self, privilege, table_type, node=None): with Then(f"I try to ALTER SETTINGS"): check_alter_settings_when_privilege_is_granted(table_name, user_name, node) + @TestScenario def user_with_revoked_role(self, privilege, table_type, node=None): """Check that user with a role that has alter settings privilege on a table is unable to @@ -119,7 +145,9 @@ def user_with_revoked_role(self, privilege, table_type, node=None): user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" - with table(node, table_name, table_type), user(node, user_name), role(node, role_name): + with table(node, table_name, table_type), user(node, user_name), role( + node, role_name + ): with When("I grant privileges to a role"): node.query(f"GRANT {privilege} ON {table_name} TO {role_name}") @@ -130,7 +158,10 @@ def user_with_revoked_role(self, privilege, table_type, node=None): node.query(f"REVOKE {role_name} FROM {user_name}") with And("I alter settings on the table"): - check_alter_settings_when_privilege_is_not_granted(table_name, user_name, node) + check_alter_settings_when_privilege_is_not_granted( + table_name, user_name, node + ) + @TestScenario @Requirements( @@ -150,27 +181,37 @@ def user_with_privileges_on_cluster(self, privilege, table_type, node=None): with table(node, table_name, table_type): try: with Given("I have a user on a cluster"): - node.query(f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE USER OR REPLACE {user_name} ON CLUSTER 
sharded_cluster" + ) with When("I grant alter settings privileges on a cluster"): - node.query(f"GRANT ON CLUSTER sharded_cluster ALTER SETTINGS ON {table_name} TO {user_name}") + node.query( + f"GRANT ON CLUSTER sharded_cluster ALTER SETTINGS ON {table_name} TO {user_name}" + ) with Then(f"I try to ALTER SETTINGS"): - check_alter_settings_when_privilege_is_granted(table_name, user_name, node) + check_alter_settings_when_privilege_is_granted( + table_name, user_name, node + ) with When("I revoke alter settings privileges on a cluster"): - node.query(f"REVOKE ON CLUSTER sharded_cluster ALTER SETTINGS ON {table_name} FROM {user_name}") + node.query( + f"REVOKE ON CLUSTER sharded_cluster ALTER SETTINGS ON {table_name} FROM {user_name}" + ) with Then(f"I try to ALTER SETTINGS"): - check_alter_settings_when_privilege_is_not_granted(table_name, user_name, node) + check_alter_settings_when_privilege_is_not_granted( + table_name, user_name, node + ) finally: with Finally("I drop the user on a cluster"): node.query(f"DROP USER {user_name} ON CLUSTER sharded_cluster") + @TestSuite def scenario_parallelization(self, table_type, privilege): - """Runs all scenarios in parallel for a given privilege. - """ + """Runs all scenarios in parallel for a given privilege.""" args = {"table_type": table_type, "privilege": privilege} with Pool(4) as pool: @@ -180,27 +221,25 @@ def scenario_parallelization(self, table_type, privilege): finally: join() + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AlterSettings("1.0"), RQ_SRS_006_RBAC_Privileges_AlterSettings_TableEngines("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("alter settings") def feature(self, stress=None, node="clickhouse1"): - """Runs test suites above which check correctness over scenarios and permutations - """ + """Runs test suites above which check correctness over scenarios and permutations""" self.context.node = self.context.cluster.node(node) if stress is not None: self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example if table_type != "MergeTree" and not self.context.stress: continue @@ -210,6 +249,12 @@ def feature(self, stress=None, node="clickhouse1"): try: for alias in aliases: args = {"table_type": table_type, "privilege": alias} - Suite(test=scenario_parallelization, name=alias, setup=instrument_clickhouse_server_log, parallel=True, executor=pool)(**args) + Suite( + test=scenario_parallelization, + name=alias, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + )(**args) finally: join() diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_settings_profile.py b/tests/testflows/rbac/tests/privileges/alter/alter_settings_profile.py index cd4648305f7..406ce704cff 100644 --- a/tests/testflows/rbac/tests/privileges/alter/alter_settings_profile.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_settings_profile.py @@ -7,6 +7,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @contextmanager def settings_profile(node, name): try: @@ -19,10 +20,10 @@ def settings_profile(node, name): with Finally("I drop the settings_profile"): node.query(f"DROP SETTINGS PROFILE IF EXISTS {name}") + @TestSuite def alter_settings_profile_granted_directly(self, node=None): - 
"""Check that a user is able to execute `ALTER SETTINGS PROFILE` with privileges are granted directly. - """ + """Check that a user is able to execute `ALTER SETTINGS PROFILE` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -31,15 +32,22 @@ def alter_settings_profile_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=alter_settings_profile, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in alter_settings_profile.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=alter_settings_profile, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in alter_settings_profile.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def alter_settings_profile_granted_via_role(self, node=None): - """Check that a user is able to execute `ALTER SETTINGS PROFILE` with privileges are granted through a role. - """ + """Check that a user is able to execute `ALTER SETTINGS PROFILE` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -52,21 +60,31 @@ def alter_settings_profile_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=alter_settings_profile, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in alter_settings_profile.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=alter_settings_profile, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in alter_settings_profile.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("ALTER SETTINGS PROFILE",), - ("ALTER PROFILE",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("ALTER SETTINGS PROFILE",), + ("ALTER PROFILE",), + ], +) def alter_settings_profile(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `ALTER SETTINGS PROFILE` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `ALTER SETTINGS PROFILE` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -84,8 +102,12 @@ def alter_settings_profile(self, privilege, grant_target_name, user_name, node=N node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't alter a settings_profile"): - node.query(f"ALTER SETTINGS PROFILE {alter_settings_profile_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER SETTINGS PROFILE {alter_settings_profile_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("ALTER SETTINGS PROFILE with privilege"): alter_settings_profile_name = f"alter_settings_profile_{getuid()}" @@ -96,25 +118,34 @@ def alter_settings_profile(self, privilege, grant_target_name, user_name, node=N node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can alter a user"): - node.query(f"ALTER SETTINGS PROFILE {alter_settings_profile_name}", settings = [("user", f"{user_name}")]) + node.query( + f"ALTER SETTINGS PROFILE {alter_settings_profile_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("ALTER SETTINGS PROFILE on cluster"): alter_settings_profile_name = f"alter_settings_profile_{getuid()}" try: with Given("I have a settings_profile on a cluster"): - node.query(f"CREATE SETTINGS PROFILE {alter_settings_profile_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE SETTINGS PROFILE {alter_settings_profile_name} ON CLUSTER sharded_cluster" + ) with When(f"I grant {privilege}"): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can alter a settings_profile"): - node.query(f"ALTER SETTINGS PROFILE {alter_settings_profile_name} ON CLUSTER sharded_cluster", - settings = [("user", f"{user_name}")]) + node.query( + f"ALTER SETTINGS PROFILE {alter_settings_profile_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the settings_profile"): - node.query(f"DROP SETTINGS PROFILE IF EXISTS {alter_settings_profile_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP SETTINGS PROFILE IF EXISTS {alter_settings_profile_name} ON CLUSTER sharded_cluster" + ) with Scenario("ALTER SETTINGS PROFILE with revoked privilege"): alter_settings_profile_name = f"alter_settings_profile_{getuid()}" @@ -128,20 +159,30 @@ def alter_settings_profile(self, privilege, grant_target_name, user_name, node=N node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user can't alter a settings_profile"): - node.query(f"ALTER SETTINGS PROFILE {alter_settings_profile_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER SETTINGS PROFILE {alter_settings_profile_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("alter settings profile") @Requirements( RQ_SRS_006_RBAC_Privileges_AlterSettingsProfile("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of ALTER SETTINGS PROFILE. 
- """ + """Check the RBAC functionality of ALTER SETTINGS PROFILE.""" self.context.node = self.context.cluster.node(node) - Suite(run=alter_settings_profile_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=alter_settings_profile_granted_via_role, setup=instrument_clickhouse_server_log) + Suite( + run=alter_settings_profile_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=alter_settings_profile_granted_via_role, + setup=instrument_clickhouse_server_log, + ) diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_ttl.py b/tests/testflows/rbac/tests/privileges/alter/alter_ttl.py index 419cf880f30..50742c26eeb 100755 --- a/tests/testflows/rbac/tests/privileges/alter/alter_ttl.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_ttl.py @@ -9,16 +9,17 @@ import rbac.helper.errors as errors from rbac.helper.tables import table_types subprivileges = { - "TTL" : 1 << 0, - "MATERIALIZE TTL" : 1 << 1, + "TTL": 1 << 0, + "MATERIALIZE TTL": 1 << 1, } aliases = { - "TTL" : ["ALTER TTL", "ALTER MODIFY TTL", "MODIFY TTL"], + "TTL": ["ALTER TTL", "ALTER MODIFY TTL", "MODIFY TTL"], "MATERIALIZE TTL": ["ALTER MATERIALIZE TTL", "MATERIALIZE TTL", "ALL"], } -permutation_count = (1 << len(subprivileges)) +permutation_count = 1 << len(subprivileges) + def permutations(): """Returns list of all permutations to run. @@ -26,6 +27,7 @@ def permutations(): """ return [*range(permutation_count)] + def alter_ttl_privileges(grants: int): """Takes in an integer, and returns the corresponding set of tests to grant and not grant using the binary string. Each integer corresponds to a unique permutation @@ -34,75 +36,93 @@ def alter_ttl_privileges(grants: int): note(grants) privileges = [] - if grants==0: # No privileges + if grants == 0: # No privileges privileges.append("NONE") else: - if (grants & subprivileges["TTL"]): + if grants & subprivileges["TTL"]: privileges.append(f"ALTER TTL") - if (grants & subprivileges["MATERIALIZE TTL"]): + if grants & subprivileges["MATERIALIZE TTL"]: privileges.append(f"ALTER MATERIALIZE TTL") note(f"Testing these privileges: {privileges}") - return ', '.join(privileges) + return ", ".join(privileges) + def alter_ttl_privilege_handler(grants, table, user, node): """For all 2 subprivileges, if the privilege is granted: run test to ensure correct behavior, and if the privilege is not granted, run test to ensure correct behavior there as well """ - if (grants & subprivileges["TTL"]): + if grants & subprivileges["TTL"]: with When("I check ttl when privilege is granted"): check_ttl_when_privilege_is_granted(table, user, node) else: with When("I check ttl when privilege is not granted"): check_ttl_when_privilege_is_not_granted(table, user, node) - if (grants & subprivileges["MATERIALIZE TTL"]): + if grants & subprivileges["MATERIALIZE TTL"]: with When("I check materialize ttl when privilege is granted"): check_materialize_ttl_when_privilege_is_granted(table, user, node) else: with When("I check materialize ttl when privilege is not granted"): check_materialize_ttl_when_privilege_is_not_granted(table, user, node) + def check_ttl_when_privilege_is_granted(table, user, node): - """Ensures ALTER TTL runs as expected when the privilege is granted to the specified user - """ + """Ensures ALTER TTL runs as expected when the privilege is granted to the specified user""" with Given(f"I modify TTL"): - node.query(f"ALTER TABLE {table} MODIFY TTL d + INTERVAL 1 DAY;", - settings = [("user", user)]) + node.query( + f"ALTER TABLE {table} MODIFY 
TTL d + INTERVAL 1 DAY;", + settings=[("user", user)], + ) with Then("I verify that the TTL clause is in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert "TTL d + toIntervalDay(1)" in output['statement'], error() + output = json.loads( + node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output + ) + assert "TTL d + toIntervalDay(1)" in output["statement"], error() + def check_materialize_ttl_when_privilege_is_granted(table, user, node): - """Ensures MATERIALIZE TTL runs as expected when the privilege is granted to the specified user - """ + """Ensures MATERIALIZE TTL runs as expected when the privilege is granted to the specified user""" with Given("I modify TTL so it exists"): node.query(f"ALTER TABLE {table} MODIFY TTL d + INTERVAL 1 MONTH;") with Then("I materialize the TTL"): - node.query(f"ALTER TABLE {table} MATERIALIZE TTL IN PARTITION 2", - settings = [("user", user)]) + node.query( + f"ALTER TABLE {table} MATERIALIZE TTL IN PARTITION 2", + settings=[("user", user)], + ) with Then("I verify that the TTL clause is in the table"): - output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output) - assert "TTL d + toIntervalMonth(1)" in output['statement'], error() + output = json.loads( + node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output + ) + assert "TTL d + toIntervalMonth(1)" in output["statement"], error() + def check_ttl_when_privilege_is_not_granted(table, user, node): - """Ensures ALTER TTL errors as expected without the required privilege for the specified user - """ + """Ensures ALTER TTL errors as expected without the required privilege for the specified user""" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} MODIFY TTL d + INTERVAL 1 DAY;", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} MODIFY TTL d + INTERVAL 1 DAY;", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + def check_materialize_ttl_when_privilege_is_not_granted(table, user, node): - """Ensures MATERIALIZE TTL errors as expected without the required privilege for the specified user - """ + """Ensures MATERIALIZE TTL errors as expected without the required privilege for the specified user""" with When("I try to use privilege that has not been granted"): exitcode, message = errors.not_enough_privileges(user) - node.query(f"ALTER TABLE {table} MATERIALIZE TTL IN PARTITION 4", - settings = [("user", user)], exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table} MATERIALIZE TTL IN PARTITION 4", + settings=[("user", user)], + exitcode=exitcode, + message=message, + ) + @TestScenario def user_with_some_privileges(self, table_type, node=None): @@ -124,7 +144,10 @@ def user_with_some_privileges(self, table_type, node=None): node.query(f"GRANT {privileges} ON {table_name} TO {user_name}") with Then(f"I try to ALTER TTL"): - alter_ttl_privilege_handler(permutation, table_name, user_name, node) + alter_ttl_privilege_handler( + permutation, table_name, user_name, node + ) + @TestScenario @Requirements( @@ -155,6 +178,7 @@ def user_with_revoked_privileges(self, table_type, node=None): # Permutation 0: no privileges alter_ttl_privilege_handler(0, table_name, user_name, node) + @TestScenario @Requirements( RQ_SRS_006_RBAC_Privileges_AlterTTL_Grant("1.0"), @@ -174,7 +198,9 @@ def role_with_some_privileges(self, 
table_type, node=None): privileges = alter_ttl_privileges(permutation) with When(f"granted={privileges}"): - with table(node, table_name, table_type), user(node, user_name), role(node, role_name): + with table(node, table_name, table_type), user(node, user_name), role( + node, role_name + ): with Given("I grant the ALTER TTL privilege to a role"): node.query(f"GRANT {privileges} ON {table_name} TO {role_name}") @@ -182,7 +208,10 @@ def role_with_some_privileges(self, table_type, node=None): node.query(f"GRANT {role_name} TO {user_name}") with Then(f"I try to ALTER TTL"): - alter_ttl_privilege_handler(permutation, table_name, user_name, node) + alter_ttl_privilege_handler( + permutation, table_name, user_name, node + ) + @TestScenario def user_with_revoked_role(self, table_type, node=None): @@ -200,7 +229,9 @@ def user_with_revoked_role(self, table_type, node=None): privileges = alter_ttl_privileges(permutation) with When(f"granted={privileges}"): - with table(node, table_name, table_type), user(node, user_name), role(node, role_name): + with table(node, table_name, table_type), user(node, user_name), role( + node, role_name + ): with When("I grant privileges to a role"): node.query(f"GRANT {privileges} ON {table_name} TO {role_name}") @@ -214,6 +245,7 @@ def user_with_revoked_role(self, table_type, node=None): # Permutation 0: no privileges for any permutation alter_ttl_privilege_handler(0, table_name, user_name, node) + @TestScenario @Requirements( RQ_SRS_006_RBAC_Privileges_AlterTTL_Cluster("1.0"), @@ -235,27 +267,32 @@ def user_with_privileges_on_cluster(self, table_type, node=None): with table(node, table_name, table_type): try: with Given("I have a user on a cluster"): - node.query(f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster" + ) with When("I grant ALTER TTL privileges on a cluster"): - node.query(f"GRANT ON CLUSTER sharded_cluster {privileges} ON {table_name} TO {user_name}") + node.query( + f"GRANT ON CLUSTER sharded_cluster {privileges} ON {table_name} TO {user_name}" + ) with Then(f"I try to ALTER TTL"): - alter_ttl_privilege_handler(permutation, table_name, user_name, node) + alter_ttl_privilege_handler( + permutation, table_name, user_name, node + ) finally: with Finally("I drop the user on a cluster"): node.query(f"DROP USER {user_name} ON CLUSTER sharded_cluster") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AlterTTL("1.0"), RQ_SRS_006_RBAC_Privileges_AlterTTL_TableEngines("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("alter ttl") def feature(self, stress=None, node="clickhouse1"): self.context.node = self.context.cluster.node(node) @@ -264,17 +301,22 @@ def feature(self, stress=None, node="clickhouse1"): self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example if table_type != "MergeTree" and not self.context.stress: continue - args = {"table_type" : table_type} + args = {"table_type": table_type} with Example(str(example)): with Pool(5) as pool: try: for scenario in loads(current_module(), Scenario): - Scenario(test=scenario, setup=instrument_clickhouse_server_log, parallel=True, executor=pool)(**args) + Scenario( + test=scenario, + setup=instrument_clickhouse_server_log, + parallel=True, 
+ executor=pool, + )(**args) finally: join() diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_update.py b/tests/testflows/rbac/tests/privileges/alter/alter_update.py index 9f3f4e19041..1e0850a59bb 100644 --- a/tests/testflows/rbac/tests/privileges/alter/alter_update.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_update.py @@ -7,10 +7,10 @@ import rbac.helper.errors as errors aliases = {"ALTER UPDATE", "UPDATE", "ALL"} + @TestSuite def privilege_granted_directly_or_via_role(self, table_type, privilege, node=None): - """Check that user is only able to execute ALTER UPDATE when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute ALTER UPDATE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -19,19 +19,35 @@ def privilege_granted_directly_or_via_role(self, table_type, privilege, node=Non with Suite("user with direct privilege", setup=instrument_clickhouse_server_log): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute ALTER UPDATE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, table_type=table_type, privilege=privilege, node=node) + with When( + f"I run checks that {user_name} is only able to execute ALTER UPDATE with required privileges" + ): + privilege_check( + grant_target_name=user_name, + user_name=user_name, + table_type=table_type, + privilege=privilege, + node=node, + ) with Suite("user with privilege via role", setup=instrument_clickhouse_server_log): with user(node, user_name), role(node, role_name): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute ALTER UPDATE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, table_type=table_type, privilege=privilege, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute ALTER UPDATE with required privileges" + ): + privilege_check( + grant_target_name=role_name, + user_name=user_name, + table_type=table_type, + privilege=privilege, + node=node, + ) + def privilege_check(grant_target_name, user_name, table_type, privilege, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege", setup=instrument_clickhouse_server_log): @@ -46,8 +62,12 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to update a column without privilege"): - node.query(f"ALTER TABLE {table_name} UPDATE a = x WHERE 1", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table_name} UPDATE a = x WHERE 1", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege", setup=instrument_clickhouse_server_log): table_name = f"merge_tree_{getuid()}" @@ -58,9 +78,14 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT {privilege} ON {table_name} TO {grant_target_name}") with Then("I attempt to update a column"): - node.query(f"ALTER TABLE {table_name} UPDATE a = x WHERE 1", settings = [("user", user_name)]) + node.query( + f"ALTER TABLE {table_name} UPDATE a = x WHERE 1", + settings=[("user", user_name)], + ) - with Scenario("user with revoked privilege", setup=instrument_clickhouse_server_log): + with Scenario( + "user with revoked privilege", setup=instrument_clickhouse_server_log + ): table_name = f"merge_tree_{getuid()}" with table(node, table_name, table_type): @@ -69,26 +94,30 @@ def privilege_check(grant_target_name, user_name, table_type, privilege, node=No node.query(f"GRANT {privilege} ON {table_name} TO {grant_target_name}") with And("I revoke the update privilege"): - node.query(f"REVOKE {privilege} ON {table_name} FROM {grant_target_name}") + node.query( + f"REVOKE {privilege} ON {table_name} FROM {grant_target_name}" + ) with Then("I attempt to update a column"): - node.query(f"ALTER TABLE {table_name} UPDATE a = x WHERE 1", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {table_name} UPDATE a = x WHERE 1", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AlterUpdate("1.0"), RQ_SRS_006_RBAC_Privileges_AlterUpdate_TableEngines("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("alter update") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of ALTER UPDATE. 
- """ + """Check the RBAC functionality of ALTER UPDATE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -97,7 +126,7 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example if table_type != "MergeTree" and not self.context.stress: continue @@ -105,4 +134,6 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): with Example(str(example)): for alias in aliases: with Suite(alias, test=privilege_granted_directly_or_via_role): - privilege_granted_directly_or_via_role(table_type=table_type, privilege=alias) + privilege_granted_directly_or_via_role( + table_type=table_type, privilege=alias + ) diff --git a/tests/testflows/rbac/tests/privileges/alter/alter_user.py b/tests/testflows/rbac/tests/privileges/alter/alter_user.py index bcf3014c9be..d4522ee29e8 100644 --- a/tests/testflows/rbac/tests/privileges/alter/alter_user.py +++ b/tests/testflows/rbac/tests/privileges/alter/alter_user.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def alter_user_granted_directly(self, node=None): - """Check that a user is able to execute `ALTER USER` with privileges are granted directly. - """ + """Check that a user is able to execute `ALTER USER` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def alter_user_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=alter_user, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in alter_user.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=alter_user, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in alter_user.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def alter_user_granted_via_role(self, node=None): - """Check that a user is able to execute `ALTER USER` with privileges are granted through a role. - """ + """Check that a user is able to execute `ALTER USER` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,20 +45,30 @@ def alter_user_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=alter_user, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in alter_user.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=alter_user, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in alter_user.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("ALTER USER",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("ALTER USER",), + ], +) def alter_user(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `ALTER USER` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `ALTER USER` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -69,8 +86,12 @@ def alter_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't alter a user"): - node.query(f"ALTER USER {alter_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER USER {alter_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("ALTER USER with privilege"): alter_user_name = f"alter_user_{getuid()}" @@ -79,7 +100,9 @@ def alter_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can alter a user"): - node.query(f"ALTER USER {alter_user_name}", settings = [("user", f"{user_name}")]) + node.query( + f"ALTER USER {alter_user_name}", settings=[("user", f"{user_name}")] + ) with Scenario("ALTER USER on cluster"): alter_user_name = f"alter_user_{getuid()}" @@ -91,12 +114,16 @@ def alter_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can alter a user"): - node.query(f"ALTER USER {alter_user_name} ON CLUSTER sharded_cluster", - settings = [("user", f"{user_name}")]) + node.query( + f"ALTER USER {alter_user_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP USER IF EXISTS {alter_user_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP USER IF EXISTS {alter_user_name} ON CLUSTER sharded_cluster" + ) with Scenario("ALTER USER with revoked privilege"): alter_user_name = f"alter_user_{getuid()}" @@ -108,19 +135,23 @@ def alter_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user can't alter a user"): - node.query(f"ALTER USER {alter_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"ALTER USER {alter_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("alter user") @Requirements( RQ_SRS_006_RBAC_Privileges_AlterUser("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of ALTER USER. - """ + """Check the RBAC functionality of ALTER USER.""" self.context.node = self.context.cluster.node(node) Suite(run=alter_user_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/attach/attach_database.py b/tests/testflows/rbac/tests/privileges/attach/attach_database.py index 3fecbe2571f..5e5009d1c2f 100644 --- a/tests/testflows/rbac/tests/privileges/attach/attach_database.py +++ b/tests/testflows/rbac/tests/privileges/attach/attach_database.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute ATTACH DATABASE when they have required privilege, either directly or via role. 
- """ + """Check that user is only able to execute ATTACH DATABASE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute CREATE DATABASE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute CREATE DATABASE with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute CREATE DATABASE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute CREATE DATABASE with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. - """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -43,8 +51,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to attach a database without privilege"): - node.query(f"ATTACH DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): @@ -55,11 +67,17 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant create database privilege"): - node.query(f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}") + node.query( + f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}" + ) with Then("I attempt to attach a database"): - node.query(f"ATTACH DATABASE {db_name}", settings = [("user", user_name)], - exitcode=80, message="DB::Exception: Received from localhost:9000. DB::Exception: Database engine must be specified for ATTACH DATABASE query") + node.query( + f"ATTACH DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=80, + message="DB::Exception: Received from localhost:9000. 
DB::Exception: Database engine must be specified for ATTACH DATABASE query", + ) finally: with Finally("I drop the database"): @@ -70,14 +88,22 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant the create database privilege"): - node.query(f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}") + node.query( + f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}" + ) with And("I revoke the create database privilege"): - node.query(f"REVOKE CREATE DATABASE ON {db_name}.* FROM {grant_target_name}") + node.query( + f"REVOKE CREATE DATABASE ON {db_name}.* FROM {grant_target_name}" + ) with Then("I attempt to attach a database"): - node.query(f"ATTACH DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): @@ -88,14 +114,20 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant the create database privilege"): - node.query(f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}") + node.query( + f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}" + ) with And("I revoke ALL privilege"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to attach a database"): - node.query(f"ATTACH DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): @@ -109,23 +141,27 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to attach a database"): - node.query(f"ATTACH DATABASE {db_name}", settings = [("user", user_name)], - exitcode=80, message="DB::Exception: Received from localhost:9000. DB::Exception: Database engine must be specified for ATTACH DATABASE query") + node.query( + f"ATTACH DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=80, + message="DB::Exception: Received from localhost:9000. DB::Exception: Database engine must be specified for ATTACH DATABASE query", + ) finally: with Finally("I drop the database"): node.query(f"DROP DATABASE IF EXISTS {db_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AttachDatabase("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("attach database") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of ATTACH DATABASE. 
- """ + """Check the RBAC functionality of ATTACH DATABASE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -133,5 +169,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/attach/attach_dictionary.py b/tests/testflows/rbac/tests/privileges/attach/attach_dictionary.py index fbebdc0003d..678863aee2a 100644 --- a/tests/testflows/rbac/tests/privileges/attach/attach_dictionary.py +++ b/tests/testflows/rbac/tests/privileges/attach/attach_dictionary.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute ATTACH DICTIONARY when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute ATTACH DICTIONARY when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute CREATE DICTIONARY with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute CREATE DICTIONARY with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute CREATE DICTIONARY with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute CREATE DICTIONARY with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -43,8 +51,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to attach a dictionary without privilege"): - node.query(f"ATTACH DICTIONARY {dict_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH DICTIONARY {dict_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the dictionary"): @@ -55,11 +67,17 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant create dictionary privilege"): - node.query(f"GRANT CREATE DICTIONARY ON {dict_name} TO {grant_target_name}") + node.query( + f"GRANT CREATE DICTIONARY ON {dict_name} TO {grant_target_name}" + ) with Then("I attempt to attach a dictionary"): - node.query(f"ATTACH DICTIONARY {dict_name}", settings = [("user", user_name)], - exitcode=134, message=f"DB::Exception: Table `{dict_name}` doesn't exist.") + node.query( + f"ATTACH DICTIONARY {dict_name}", + settings=[("user", user_name)], + exitcode=134, + message=f"DB::Exception: Table `{dict_name}` doesn't exist.", + ) finally: with Finally("I drop the dictionary"): @@ -70,14 +88,22 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant the create dictionary privilege"): - node.query(f"GRANT CREATE DICTIONARY ON {dict_name} TO {grant_target_name}") + node.query( + f"GRANT CREATE DICTIONARY ON {dict_name} TO {grant_target_name}" + ) with And("I revoke the create dictionary privilege"): - node.query(f"REVOKE CREATE DICTIONARY ON {dict_name} FROM {grant_target_name}") + node.query( + f"REVOKE CREATE DICTIONARY ON {dict_name} FROM {grant_target_name}" + ) with Then("I attempt to attach a dictionary"): - node.query(f"ATTACH DICTIONARY {dict_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH DICTIONARY {dict_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the dictionary"): @@ -88,14 +114,20 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant the create database privilege"): - node.query(f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}") + node.query( + f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}" + ) with And("I revoke ALL privilege"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to attach a database"): - node.query(f"ATTACH DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): @@ -109,23 +141,27 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to attach a dictionary"): - node.query(f"ATTACH DICTIONARY {dict_name}", settings = [("user", user_name)], - exitcode=134, message=f"DB::Exception: Table `{dict_name}` doesn't exist.") + node.query( + f"ATTACH DICTIONARY {dict_name}", + settings=[("user", user_name)], + exitcode=134, + message=f"DB::Exception: Table `{dict_name}` doesn't exist.", + ) finally: with Finally("I drop 
the dictionary"): node.query(f"DROP DICTIONARY IF EXISTS {dict_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AttachDictionary("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("attach dictionary") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of ATTACH DICTIONARY. - """ + """Check the RBAC functionality of ATTACH DICTIONARY.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -133,5 +169,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/attach/attach_table.py b/tests/testflows/rbac/tests/privileges/attach/attach_table.py index 411140506ea..1bbb51c75e1 100644 --- a/tests/testflows/rbac/tests/privileges/attach/attach_table.py +++ b/tests/testflows/rbac/tests/privileges/attach/attach_table.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute ATTACH TABLE when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute ATTACH TABLE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute CREATE TABLE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute CREATE TABLE with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute CREATE TABLE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute CREATE TABLE with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -43,8 +51,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to attach a table without privilege"): - node.query(f"ATTACH TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the table"): @@ -58,8 +70,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT CREATE TABLE ON *.* TO {grant_target_name}") with Then("I attempt to attach a table"): - node.query(f"ATTACH TABLE {table_name}", settings = [("user", user_name)], - exitcode=134, message=f"DB::Exception: Table `{table_name}` doesn't exist.") + node.query( + f"ATTACH TABLE {table_name}", + settings=[("user", user_name)], + exitcode=134, + message=f"DB::Exception: Table `{table_name}` doesn't exist.", + ) finally: with Finally("I drop the table"): @@ -76,8 +92,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"REVOKE CREATE TABLE ON *.* FROM {grant_target_name}") with Then("I attempt to attach a table"): - node.query(f"ATTACH TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the table"): @@ -94,8 +114,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to attach a table"): - node.query(f"ATTACH TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the table"): @@ -109,23 +133,27 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to attach a table"): - node.query(f"ATTACH TABLE {table_name}", settings = [("user", user_name)], - exitcode=134, message=f"DB::Exception: Table `{table_name}` doesn't exist.") + node.query( + f"ATTACH TABLE {table_name}", + settings=[("user", user_name)], + exitcode=134, + message=f"DB::Exception: Table `{table_name}` doesn't exist.", + ) finally: with Finally("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AttachTable("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("attach table") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of ATTACH TABLE. 
- """ + """Check the RBAC functionality of ATTACH TABLE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -133,5 +161,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/attach/attach_temp_table.py b/tests/testflows/rbac/tests/privileges/attach/attach_temp_table.py index 2662a24d5a2..0a2cb6087a3 100644 --- a/tests/testflows/rbac/tests/privileges/attach/attach_temp_table.py +++ b/tests/testflows/rbac/tests/privileges/attach/attach_temp_table.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute ATTACH TEMPORARY TABLE when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute ATTACH TEMPORARY TABLE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute CREATE TEMPORARY TABLE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute CREATE TEMPORARY TABLE with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute CREATE TEMPORARY TABLE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute CREATE TEMPORARY TABLE with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -43,8 +51,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with When("I attempt to attach a temporary table without privilege"): - node.query(f"ATTACH TEMPORARY TABLE {temp_table_name} (x Int8)", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH TEMPORARY TABLE {temp_table_name} (x Int8)", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the temporary table"): @@ -55,10 +67,15 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant create temporary table privilege"): - node.query(f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}") + node.query( + f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}" + ) with Then("I attempt to attach a temporary table"): - node.query(f"ATTACH TEMPORARY TABLE {temp_table_name} (x Int8)", settings = [("user", user_name)]) + node.query( + f"ATTACH TEMPORARY TABLE {temp_table_name} (x Int8)", + settings=[("user", user_name)], + ) finally: with Finally("I drop the temporary table"): @@ -69,14 +86,22 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant the create temporary table privilege"): - node.query(f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}") + node.query( + f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}" + ) with And("I revoke the create temporary table privilege"): - node.query(f"REVOKE CREATE TEMPORARY TABLE ON *.* FROM {grant_target_name}") + node.query( + f"REVOKE CREATE TEMPORARY TABLE ON *.* FROM {grant_target_name}" + ) with Then("I attempt to attach a temporary table"): - node.query(f"ATTACH TEMPORARY TABLE {temp_table_name} (x Int8)", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH TEMPORARY TABLE {temp_table_name} (x Int8)", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the temporary table"): @@ -87,14 +112,20 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant the create temporary table privilege"): - node.query(f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}") + node.query( + f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}" + ) with And("I revoke ALL privilege"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to attach a temporary table"): - node.query(f"ATTACH TEMPORARY TABLE {temp_table_name} (x Int8)", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"ATTACH TEMPORARY TABLE {temp_table_name} (x Int8)", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the temporary table"): @@ -108,22 +139,25 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to attach a temporary table"): - node.query(f"ATTACH TEMPORARY TABLE {temp_table_name} (x Int8)", settings = [("user", user_name)]) + node.query( + f"ATTACH TEMPORARY TABLE {temp_table_name} (x Int8)", + settings=[("user", user_name)], + ) finally: with Finally("I drop the temporary table"): node.query(f"DROP TEMPORARY TABLE IF 
EXISTS {temp_table_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_AttachTemporaryTable("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("attach temporary table") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of ATTACH TEMPORARY TABLE. - """ + """Check the RBAC functionality of ATTACH TEMPORARY TABLE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -131,5 +165,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/create/create_database.py b/tests/testflows/rbac/tests/privileges/create/create_database.py index 8367d49e050..8a8b71b19a3 100644 --- a/tests/testflows/rbac/tests/privileges/create/create_database.py +++ b/tests/testflows/rbac/tests/privileges/create/create_database.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute CREATE DATABASE when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute CREATE DATABASE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute CREATE DATABASE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute CREATE DATABASE with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute CREATE DATABASE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute CREATE DATABASE with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -43,8 +51,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to create a database without privilege"): - node.query(f"CREATE DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): @@ -55,10 +67,12 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant create database privilege"): - node.query(f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}") + node.query( + f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}" + ) with Then("I attempt to create a database"): - node.query(f"CREATE DATABASE {db_name}", settings = [("user", user_name)]) + node.query(f"CREATE DATABASE {db_name}", settings=[("user", user_name)]) finally: with Finally("I drop the database"): @@ -69,14 +83,22 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant the create database privilege"): - node.query(f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}") + node.query( + f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}" + ) with And("I revoke the create database privilege"): - node.query(f"REVOKE CREATE DATABASE ON {db_name}.* FROM {grant_target_name}") + node.query( + f"REVOKE CREATE DATABASE ON {db_name}.* FROM {grant_target_name}" + ) with Then("I attempt to create a database"): - node.query(f"CREATE DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): @@ -87,14 +109,20 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant the create database privilege"): - node.query(f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}") + node.query( + f"GRANT CREATE DATABASE ON {db_name}.* TO {grant_target_name}" + ) with And("I revoke ALL privilege"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to create a database"): - node.query(f"CREATE DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): @@ -108,22 +136,22 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to create a database"): - node.query(f"CREATE DATABASE {db_name}", settings = [("user", user_name)]) + node.query(f"CREATE DATABASE {db_name}", settings=[("user", user_name)]) finally: with Finally("I drop the database"): node.query(f"DROP DATABASE IF EXISTS {db_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_CreateDatabase("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("create database") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of 
CREATE DATABASE. - """ + """Check the RBAC functionality of CREATE DATABASE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -131,5 +159,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/create/create_dictionary.py b/tests/testflows/rbac/tests/privileges/create/create_dictionary.py index 73734f5d556..76125aa0239 100644 --- a/tests/testflows/rbac/tests/privileges/create/create_dictionary.py +++ b/tests/testflows/rbac/tests/privileges/create/create_dictionary.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute CREATE DICTIONARY when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute CREATE DICTIONARY when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute CREATE DICTIONARY with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute CREATE DICTIONARY with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute CREATE DICTIONARY with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute CREATE DICTIONARY with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -43,8 +51,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to create a dictionary without privilege"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the dictionary"): @@ -55,10 +67,15 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant create dictionary privilege"): - node.query(f"GRANT CREATE DICTIONARY ON {dict_name} TO {grant_target_name}") + node.query( + f"GRANT CREATE DICTIONARY ON {dict_name} TO {grant_target_name}" + ) with Then("I attempt to create a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)", settings = [("user", user_name)]) + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)", + settings=[("user", user_name)], + ) finally: with Finally("I drop the dictionary"): @@ -69,33 +86,46 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant the create dictionary privilege"): - node.query(f"GRANT CREATE DICTIONARY ON {dict_name} TO {grant_target_name}") + node.query( + f"GRANT CREATE DICTIONARY ON {dict_name} TO {grant_target_name}" + ) with And("I revoke the create dictionary privilege"): - node.query(f"REVOKE CREATE DICTIONARY ON {dict_name} FROM {grant_target_name}") + node.query( + f"REVOKE CREATE DICTIONARY ON {dict_name} FROM {grant_target_name}" + ) with Then("I attempt to create a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the dictionary"): node.query(f"DROP DICTIONARY IF EXISTS {dict_name}") - with Scenario("user with revoked ALL privilege"): dict_name = f"dict_{getuid()}" try: with When("I grant the create dictionary privilege"): - node.query(f"GRANT CREATE DICTIONARY ON {dict_name} TO {grant_target_name}") + node.query( + f"GRANT CREATE DICTIONARY ON {dict_name} TO {grant_target_name}" + ) with And("I revoke ALL privilege"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to create a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the 
dictionary"): @@ -109,22 +139,25 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to create a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)", settings = [("user", user_name)]) + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)", + settings=[("user", user_name)], + ) finally: with Finally("I drop the dictionary"): node.query(f"DROP DICTIONARY IF EXISTS {dict_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_CreateDictionary("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("create dictionary") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of CREATE DICTIONARY. - """ + """Check the RBAC functionality of CREATE DICTIONARY.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -132,5 +165,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/create/create_quota.py b/tests/testflows/rbac/tests/privileges/create/create_quota.py index d6e50ea904e..207bb786c89 100644 --- a/tests/testflows/rbac/tests/privileges/create/create_quota.py +++ b/tests/testflows/rbac/tests/privileges/create/create_quota.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `CREATE QUOTA` with privileges are granted directly. - """ + """Check that a user is able to execute `CREATE QUOTA` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=create_quota, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in create_quota.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=create_quota, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in create_quota.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `CREATE QUOTA` with privileges are granted through a role. 
- """ + """Check that a user is able to execute `CREATE QUOTA` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,20 +45,30 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=create_quota, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in create_quota.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=create_quota, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in create_quota.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("CREATE QUOTA",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("CREATE QUOTA",), + ], +) def create_quota(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `CREATE QUOTA` when they have the necessary privilege. - """ + """Check that user is only able to execute `CREATE QUOTA` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -68,8 +85,12 @@ def create_quota(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't create a quota"): - node.query(f"CREATE QUOTA {create_quota_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE QUOTA {create_quota_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the quota"): @@ -83,7 +104,10 @@ def create_quota(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can create a quota"): - node.query(f"CREATE QUOTA {create_quota_name}", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE QUOTA {create_quota_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the quota"): @@ -97,11 +121,16 @@ def create_quota(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can create a quota"): - node.query(f"CREATE QUOTA {create_quota_name} ON CLUSTER sharded_cluster", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE QUOTA {create_quota_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the quota"): - node.query(f"DROP QUOTA IF EXISTS {create_quota_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP QUOTA IF EXISTS {create_quota_name} ON CLUSTER sharded_cluster" + ) with Scenario("CREATE QUOTA with revoked privilege"): create_quota_name = f"create_quota_{getuid()}" @@ -114,23 +143,27 @@ def create_quota(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot create a quota"): - node.query(f"CREATE QUOTA {create_quota_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE QUOTA {create_quota_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + 
) finally: with Finally("I drop the quota"): node.query(f"DROP QUOTA IF EXISTS {create_quota_name}") + @TestFeature @Name("create quota") @Requirements( RQ_SRS_006_RBAC_Privileges_CreateQuota("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of CREATE QUOTA. - """ + """Check the RBAC functionality of CREATE QUOTA.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/create/create_role.py b/tests/testflows/rbac/tests/privileges/create/create_role.py index c442036b625..403b02c8c63 100644 --- a/tests/testflows/rbac/tests/privileges/create/create_role.py +++ b/tests/testflows/rbac/tests/privileges/create/create_role.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `CREATE ROLE` with privileges are granted directly. - """ + """Check that a user is able to execute `CREATE ROLE` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=create_role, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in create_role.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=create_role, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in create_role.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `CREATE ROLE` with privileges are granted through a role. - """ + """Check that a user is able to execute `CREATE ROLE` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,20 +45,30 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=create_role, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in create_role.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=create_role, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in create_role.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("CREATE ROLE",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("CREATE ROLE",), + ], +) def create_role(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `CREATE ROLE` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `CREATE ROLE` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -68,8 +85,12 @@ def create_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't create a role"): - node.query(f"CREATE ROLE {create_role_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE ROLE {create_role_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the role"): @@ -83,7 +104,10 @@ def create_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can create a role"): - node.query(f"CREATE ROLE {create_role_name}", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE ROLE {create_role_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the role"): @@ -97,11 +121,16 @@ def create_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can create a role"): - node.query(f"CREATE ROLE {create_role_name} ON CLUSTER sharded_cluster", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE ROLE {create_role_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the role"): - node.query(f"DROP ROLE IF EXISTS {create_role_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP ROLE IF EXISTS {create_role_name} ON CLUSTER sharded_cluster" + ) with Scenario("CREATE ROLE with revoked privilege"): create_role_name = f"create_role_{getuid()}" @@ -114,23 +143,27 @@ def create_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot create a role"): - node.query(f"CREATE ROLE {create_role_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE ROLE {create_role_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the role"): node.query(f"DROP ROLE IF EXISTS {create_role_name}") + @TestFeature @Name("create role") @Requirements( RQ_SRS_006_RBAC_Privileges_CreateRole("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of CREATE ROLE. - """ + """Check the RBAC functionality of CREATE ROLE.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/create/create_row_policy.py b/tests/testflows/rbac/tests/privileges/create/create_row_policy.py index 8e670333492..81b9d093e7e 100644 --- a/tests/testflows/rbac/tests/privileges/create/create_row_policy.py +++ b/tests/testflows/rbac/tests/privileges/create/create_row_policy.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `CREATE ROW POLICY` with privileges are granted directly. 
- """ + """Check that a user is able to execute `CREATE ROW POLICY` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=create_row_policy, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in create_row_policy.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=create_row_policy, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in create_row_policy.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `CREATE ROW POLICY` with privileges are granted through a role. - """ + """Check that a user is able to execute `CREATE ROW POLICY` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,21 +45,31 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=create_row_policy, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in create_row_policy.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=create_row_policy, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in create_row_policy.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("CREATE ROW POLICY",), - ("CREATE POLICY",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("CREATE ROW POLICY",), + ("CREATE POLICY",), + ], +) def create_row_policy(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `CREATE ROW POLICY` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `CREATE ROW POLICY` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -70,12 +87,18 @@ def create_row_policy(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't create a row policy"): - node.query(f"CREATE ROW POLICY {create_row_policy_name} ON {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE ROW POLICY {create_row_policy_name} ON {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the row policy"): - node.query(f"DROP ROW POLICY IF EXISTS {create_row_policy_name} ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {create_row_policy_name} ON {table_name}" + ) with Scenario("CREATE ROW POLICY with privilege"): create_row_policy_name = f"create_row_policy_{getuid()}" @@ -86,11 +109,16 @@ def create_row_policy(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can create a row policy"): - node.query(f"CREATE ROW POLICY {create_row_policy_name} ON {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE ROW POLICY {create_row_policy_name} ON {table_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the row policy"): - node.query(f"DROP ROW POLICY IF EXISTS {create_row_policy_name} ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {create_row_policy_name} ON {table_name}" + ) with Scenario("CREATE ROW POLICY on cluster"): create_row_policy_name = f"create_row_policy_{getuid()}" @@ -101,11 +129,16 @@ def create_row_policy(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can create a row policy"): - node.query(f"CREATE ROW POLICY {create_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE ROW POLICY {create_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the row policy"): - node.query(f"DROP ROW POLICY IF EXISTS {create_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {create_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with Scenario("CREATE ROW POLICY with revoked privilege"): create_row_policy_name = f"create_row_policy_{getuid()}" @@ -119,17 +152,22 @@ def create_row_policy(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot create a row policy"): - node.query(f"CREATE ROW POLICY {create_row_policy_name} ON {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE ROW POLICY {create_row_policy_name} ON {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the row policy"): - node.query(f"DROP ROW POLICY IF EXISTS {create_row_policy_name} ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {create_row_policy_name} ON {table_name}" + ) + @TestScenario -@Requirements( - 
RQ_SRS_006_RBAC_RowPolicy_Restriction("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Restriction("1.0")) def no_grants(self, node=None): """Check that user is unable to select from a table without a row policy after a row policy with a condition has been created on that table. @@ -151,26 +189,28 @@ def no_grants(self, node=None): with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() with When("I create a row policy with a condition"): - node.query(f"CREATE ROW POLICY OR REPLACE {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"CREATE ROW POLICY OR REPLACE {pol_name} ON {table_name} FOR SELECT USING 1" + ) with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Create_Access_Permissive("1.0"), ) def permissive(self, node=None): - """Check that user is able to see from a table when they have a PERMISSIVE policy. - """ + """Check that user is able to see from a table when they have a PERMISSIVE policy.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -181,26 +221,26 @@ def permissive(self, node=None): with table(node, table_name): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1), (2)") with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Create_Access_Restrictive("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Create_Access_Restrictive("1.0")) def restrictive(self, node=None): - """Check that user is able to see values they have a RESTRICTIVE policy for. 
- """ + """Check that user is able to see values they have a RESTRICTIVE policy for.""" table_name = f"table_{getuid()}" perm_pol_name = f"perm_pol_{getuid()}" @@ -212,17 +252,21 @@ def restrictive(self, node=None): with table(node, table_name): try: with Given("I have a permissive row policy"): - node.query(f"CREATE ROW POLICY {perm_pol_name} ON {table_name} FOR SELECT USING y=1 OR y=2 TO default") + node.query( + f"CREATE ROW POLICY {perm_pol_name} ON {table_name} FOR SELECT USING y=1 OR y=2 TO default" + ) with And("I have a restrictive row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS RESTRICTIVE FOR SELECT USING y=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS RESTRICTIVE FOR SELECT USING y=1 TO default" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1), (2)") with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the restrictive row policy", flags=TE): @@ -231,13 +275,13 @@ def restrictive(self, node=None): with And("I drop the permissive row policy", flags=TE): node.query(f"DROP ROW POLICY IF EXISTS {perm_pol_name} ON {table_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Create_ForSelect("1.0"), ) def for_select(self, node=None): - """Check that user is able to see values allowed by the row policy condition in the FOR SELECT clause. - """ + """Check that user is able to see values allowed by the row policy condition in the FOR SELECT clause.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -248,26 +292,26 @@ def for_select(self, node=None): with table(node, table_name): try: with Given("I have a restrictive row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1 TO default" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Create_Condition("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Create_Condition("1.0")) def condition(self, node=None): - """Check that user is able to see values allowed by the row policy condition. 
- """ + """Check that user is able to see values allowed by the row policy condition.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -278,26 +322,26 @@ def condition(self, node=None): with table(node, table_name): try: with Given("I have a restrictive row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists("1.0")) def if_not_exists(self, node=None): - """Check that a row policy created using IF NOT EXISTS does not replace a row policy with the same name. - """ + """Check that a row policy created using IF NOT EXISTS does not replace a row policy with the same name.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -308,33 +352,37 @@ def if_not_exists(self, node=None): with table(node, table_name): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1 TO default" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with Then("I select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() - with When("I create another row policy with the same name using IF NOT EXISTS"): - node.query(f"CREATE ROW POLICY IF NOT EXISTS {pol_name} ON {table_name}") + with When( + "I create another row policy with the same name using IF NOT EXISTS" + ): + node.query( + f"CREATE ROW POLICY IF NOT EXISTS {pol_name} ON {table_name}" + ) with Then("I select from the table again"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Create_Replace("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Create_Replace("1.0")) def or_replace(self, node=None): - """Check that a row policy created using OR REPLACE does replace the row policy with the same name. 
- """ + """Check that a row policy created using OR REPLACE does replace the row policy with the same name.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -345,33 +393,37 @@ def or_replace(self, node=None): with table(node, table_name): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1 TO default" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with Then("I select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() - with When("I create another row policy with the same name using OR REPLACE"): - node.query(f"CREATE ROW POLICY OR REPLACE {pol_name} ON {table_name} AS RESTRICTIVE FOR SELECT USING 1 TO default") + with When( + "I create another row policy with the same name using OR REPLACE" + ): + node.query( + f"CREATE ROW POLICY OR REPLACE {pol_name} ON {table_name} AS RESTRICTIVE FOR SELECT USING 1 TO default" + ) with Then("I can no longer select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert output == '', error() + assert output == "", error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0")) def on_cluster(self, node=None): - """Check that a row policy created using ON CLUSTER applies to the nodes of the cluster correctly. - """ + """Check that a row policy created using ON CLUSTER applies to the nodes of the cluster correctly.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -382,10 +434,14 @@ def on_cluster(self, node=None): try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory" + ) with And("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name} FOR SELECT USING 1") + node.query( + f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name} FOR SELECT USING 1" + ) with When("I insert some values into the table on the first node"): node.query(f"INSERT INTO {table_name} (x) VALUES (1)") @@ -395,23 +451,25 @@ def on_cluster(self, node=None): with Then("I select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() with And("I select from another node on the cluster"): output = node2.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() finally: with Finally("I drop the row policy", flags=TE): - node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I drop the table", flags=TE): node.query(f"DROP TABLE {table_name} ON CLUSTER sharded_cluster") + @TestScenario def diff_policies_on_diff_nodes(self, node=None): - """Check that a row policy created on a node, does not effect a different node. 
- """ + """Check that a row policy created on a node, does not effect a different node.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -422,10 +480,14 @@ def diff_policies_on_diff_nodes(self, node=None): try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory" + ) with And("I have a row policy on one node"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1" + ) with When("I insert some values into the table on the first node"): node.query(f"INSERT INTO {table_name} (x) VALUES (1)") @@ -435,11 +497,11 @@ def diff_policies_on_diff_nodes(self, node=None): with Then("I select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() with And("I select from another node on the cluster"): output = node2.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() finally: with Finally("I drop the row policy", flags=TE): @@ -448,13 +510,13 @@ def diff_policies_on_diff_nodes(self, node=None): with And("I drop the table", flags=TE): node.query(f"DROP TABLE {table_name} ON CLUSTER sharded_cluster") + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0"), ) def assignment(self, node=None): - """Check that user is able to see rows from a table when they have PERMISSIVE policy assigned to them. - """ + """Check that user is able to see rows from a table when they have PERMISSIVE policy assigned to them.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -465,26 +527,28 @@ def assignment(self, node=None): with table(node, table_name): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_None("1.0"), ) def assignment_none(self, node=None): - """Check that no one is affected when a row policy is assigned to NONE. 
- """ + """Check that no one is affected when a row policy is assigned to NONE.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -495,26 +559,28 @@ def assignment_none(self, node=None): with table(node, table_name): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO NONE") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO NONE" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_All("1.0"), ) def assignment_all(self, node=None): - """Check that everyone is effected with a row policy is assigned to ALL. - """ + """Check that everyone is effected with a row policy is assigned to ALL.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -525,26 +591,28 @@ def assignment_all(self, node=None): with table(node, table_name): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO ALL") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO ALL" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept("1.0"), ) def assignment_all_except(self, node=None): - """Check that everyone is except the specified user is effect by a row policy assigned to ALL EXCEPT. 
- """ + """Check that everyone is except the specified user is effect by a row policy assigned to ALL EXCEPT.""" table_name = f"table_{getuid()}" pol_name = f"pol_{getuid()}" @@ -555,19 +623,22 @@ def assignment_all_except(self, node=None): with table(node, table_name): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO ALL EXCEPT default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO ALL EXCEPT default" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1)") with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0"), @@ -587,7 +658,9 @@ def nested_view(self, node=None): with table(node, table_name): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") @@ -597,7 +670,7 @@ def nested_view(self, node=None): with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the row policy", flags=TE): @@ -606,6 +679,7 @@ def nested_view(self, node=None): with And("I drop the view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0"), @@ -624,22 +698,30 @@ def nested_live_view_after_policy(self, node=None): with table(node, table_name): try: - with Given("I add allow_experimental_live_view to the default query settings"): - default_query_settings = getsattr(current().context, "default_query_settings", []) + with Given( + "I add allow_experimental_live_view to the default query settings" + ): + default_query_settings = getsattr( + current().context, "default_query_settings", [] + ) default_query_settings.append(("allow_experimental_live_view", 1)) with And("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with And("I create a live view on the table"): - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}") + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}" + ) with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the row policy", flags=TE): @@ -648,13 +730,21 @@ def nested_live_view_after_policy(self, node=None): with And("I drop the live view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") - with And("I remove allow_experimental_live_view from the 
default query settings", flags=TE): + with And( + "I remove allow_experimental_live_view from the default query settings", + flags=TE, + ): if default_query_settings: try: - default_query_settings.pop(default_query_settings.index(("allow_experimental_live_view", 1))) + default_query_settings.pop( + default_query_settings.index( + ("allow_experimental_live_view", 1) + ) + ) except ValueError: pass + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0"), @@ -673,22 +763,30 @@ def nested_live_view_before_policy(self, node=None): with table(node, table_name): try: - with Given("I add allow_experimental_live_view to the default query settings"): - default_query_settings = getsattr(current().context, "default_query_settings", []) + with Given( + "I add allow_experimental_live_view to the default query settings" + ): + default_query_settings = getsattr( + current().context, "default_query_settings", [] + ) default_query_settings.append(("allow_experimental_live_view", 1)) with And("There is a live view on the table"): - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}") + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}" + ) with And("There is a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default" + ) with When("I insert values into the table"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the row policy", flags=TE): @@ -697,13 +795,21 @@ def nested_live_view_before_policy(self, node=None): with And("I drop the live view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") - with And("I remove allow_experimental_live_view from the default query settings", flags=TE): + with And( + "I remove allow_experimental_live_view from the default query settings", + flags=TE, + ): if default_query_settings: try: - default_query_settings.pop(default_query_settings.index(("allow_experimental_live_view", 1))) + default_query_settings.pop( + default_query_settings.index( + ("allow_experimental_live_view", 1) + ) + ) except ValueError: pass + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0"), @@ -723,17 +829,21 @@ def nested_mat_view_after_policy(self, node=None): with table(node, table_name): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default" + ) with When("I create a view on the table"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}" + ) with And("I insert some values on the table"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the row policy", flags=TE): @@ -742,6 +852,7 @@ def 
nested_mat_view_after_policy(self, node=None): with And("I drop the materialized view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0"), @@ -761,17 +872,21 @@ def nested_mat_view_before_policy(self, node=None): with table(node, table_name): try: with Given("I have a view on the table"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}" + ) with And("I have some values on the table"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with When("I create a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default" + ) with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the row policy", flags=TE): @@ -780,6 +895,7 @@ def nested_mat_view_before_policy(self, node=None): with And("I drop the materialized view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def populate_mat_view(self, node=None): """Check that if a user has a row policy on a table and a materialized view is created using POPULATE from that table, @@ -796,17 +912,21 @@ def populate_mat_view(self, node=None): with table(node, table_name): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with When("I create a mat view with POPULATE from the table"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT * FROM {table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT * FROM {table_name}" + ) with Then("I try to select from the view"): output = node.query(f"SELECT * FROM {view_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the row policy", flags=TE): @@ -815,10 +935,9 @@ def populate_mat_view(self, node=None): with And("I drop the materialized view", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def dist_table(self, node=None): """Check that if a user has a row policy on a table and a distributed table is created on that table, the user is only able to select rows specified by the assigned policies from the distributed table. 
@@ -834,24 +953,32 @@ def dist_table(self, node=None): try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory" + ) with And("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name} FOR SELECT USING 1") + node.query( + f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name} FOR SELECT USING 1" + ) with And("I have a distributed table"): - node.query(f"CREATE TABLE {dist_table_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())") + node.query( + f"CREATE TABLE {dist_table_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())" + ) with When("I insert some values into the table on the first node"): node.query(f"INSERT INTO {table_name} (x) VALUES (1)") with Then("I select from the table"): output = node.query(f"SELECT * FROM {dist_table_name}").output - assert '' == output, error() + assert "" == output, error() finally: with Finally("I drop the row policy", flags=TE): - node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER sharded_cluster") @@ -859,10 +986,9 @@ def dist_table(self, node=None): with And("I drop the distributed table", flags=TE): node.query(f"DROP TABLE IF EXISTS {dist_table_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def dist_table_diff_policies_on_diff_nodes(self, node=None): """Check that the user can only access the rows of the distributed table that are allowed by row policies on the the source tables. The row policies are different on different nodes. 
@@ -878,13 +1004,19 @@ def dist_table_diff_policies_on_diff_nodes(self, node=None): try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory" + ) with And("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1" + ) with And("I have a distributed table"): - node.query(f"CREATE TABLE {dist_table_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())") + node.query( + f"CREATE TABLE {dist_table_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())" + ) with When("I insert some values into the table on the first node"): node.query(f"INSERT INTO {table_name} (x) VALUES (1)") @@ -894,11 +1026,13 @@ def dist_table_diff_policies_on_diff_nodes(self, node=None): with Then("I select from the table"): output = node.query(f"SELECT * FROM {dist_table_name}").output - assert '2' in output and '1' not in output, error() + assert "2" in output and "1" not in output, error() finally: with Finally("I drop the row policy", flags=TE): - node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER sharded_cluster") @@ -906,10 +1040,9 @@ def dist_table_diff_policies_on_diff_nodes(self, node=None): with And("I drop the distributed table", flags=TE): node.query(f"DROP TABLE IF EXISTS {dist_table_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0")) def dist_table_on_dist_table(self, node=None): """Check that if a user has a row policy on a table and a distributed table is created on that table, and another distributed table is created on top of that, @@ -926,53 +1059,72 @@ def dist_table_on_dist_table(self, node=None): try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory" + ) with And("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name} FOR SELECT USING 1") + node.query( + f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name} FOR SELECT USING 1" + ) with And("I have a distributed table on a cluster"): - node.query(f"CREATE TABLE {dist_table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())") + node.query( + f"CREATE TABLE {dist_table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Distributed(sharded_cluster, default, {table_name}, rand())" + ) with And("I have a distributed table on the other distributed table"): - node.query(f"CREATE TABLE {dist_table_2_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {dist_table_name}, rand())") + node.query( + f"CREATE TABLE {dist_table_2_name} (x UInt64) ENGINE = Distributed(sharded_cluster, default, {dist_table_name}, rand())" + ) with When("I insert some values into the table on the first node"): node.query(f"INSERT INTO 
{dist_table_2_name} (x) VALUES (1)") with Then("I select from the table"): output = node.query(f"SELECT * FROM {dist_table_2_name}").output - assert '' == output, error() + assert "" == output, error() finally: with Finally("I drop the row policy", flags=TE): - node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER sharded_cluster") with And("I drop the distributed table", flags=TE): - node.query(f"DROP TABLE IF EXISTS {dist_table_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP TABLE IF EXISTS {dist_table_name} ON CLUSTER sharded_cluster" + ) with And("I drop the outer distributed table", flags=TE): node.query(f"DROP TABLE IF EXISTS {dist_table_2_name}") + @TestScenario def no_table(self, node=None): - """Check that row policy is not created when the table is not specified. - """ + """Check that row policy is not created when the table is not specified.""" pol_name = f"pol_{getuid()}" if node is None: node = self.context.node with When("I try to create a row policy without a table"): - node.query(f"CREATE ROW POLICY {pol_name}", - exitcode=62, message='Exception: Syntax error') + node.query( + f"CREATE ROW POLICY {pol_name}", + exitcode=62, + message="Exception: Syntax error", + ) with And("I try to create a row policy on a database"): - node.query(f"CREATE ROW POLICY {pol_name} ON default.*", - exitcode=62, message='Exception: Syntax error') + node.query( + f"CREATE ROW POLICY {pol_name} ON default.*", + exitcode=62, + message="Exception: Syntax error", + ) + @TestScenario def policy_before_table(self, node=None): @@ -987,7 +1139,9 @@ def policy_before_table(self, node=None): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING y=1 TO default" + ) with table(node, table_name): with When("The table has some values"): @@ -995,12 +1149,13 @@ def policy_before_table(self, node=None): with Then("I try to select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON {table_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Nesting("1.0"), @@ -1019,20 +1174,26 @@ def dict(self, node=None): try: with Given("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING key=1 TO default") + node.query( + f"CREATE ROW POLICY {pol_name} ON {table_name} AS PERMISSIVE FOR SELECT USING key=1 TO default" + ) with And("I have a table"): - node.query(f"CREATE TABLE {table_name} (key UInt64, val UInt64 DEFAULT 5) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} (key UInt64, val UInt64 DEFAULT 5) ENGINE = Memory" + ) with When("The table has some values"): node.query(f"INSERT INTO {table_name} (key) VALUES (1),(2)") with And("I create a dict on the table"): - node.query(f"CREATE DICTIONARY {dict_name} (key UInt64 DEFAULT 0, val UInt64 DEFAULT 5) PRIMARY KEY key SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE {table_name} PASSWORD '' DB 'default')) 
LIFETIME(MIN 0 MAX 0) LAYOUT(FLAT())") + node.query( + f"CREATE DICTIONARY {dict_name} (key UInt64 DEFAULT 0, val UInt64 DEFAULT 5) PRIMARY KEY key SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE {table_name} PASSWORD '' DB 'default')) LIFETIME(MIN 0 MAX 0) LAYOUT(FLAT())" + ) with Then("I try to select from the dict"): output = node.query(f"SELECT * FROM {dict_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() finally: with Finally("I drop the row policy", flags=TE): @@ -1044,16 +1205,16 @@ def dict(self, node=None): with And("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestFeature @Name("create row policy") @Requirements( RQ_SRS_006_RBAC_Privileges_CreateRowPolicy("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of CREATE ROW POLICY. - """ + """Check the RBAC functionality of CREATE ROW POLICY.""" self.context.node = self.context.cluster.node(node) self.context.node2 = self.context.cluster.node("clickhouse2") @@ -1080,7 +1241,10 @@ def feature(self, node="clickhouse1"): Scenario(run=populate_mat_view, setup=instrument_clickhouse_server_log) Scenario(run=dist_table, setup=instrument_clickhouse_server_log) Scenario(run=dist_table_on_dist_table, setup=instrument_clickhouse_server_log) - Scenario(run=dist_table_diff_policies_on_diff_nodes, setup=instrument_clickhouse_server_log) + Scenario( + run=dist_table_diff_policies_on_diff_nodes, + setup=instrument_clickhouse_server_log, + ) Scenario(run=diff_policies_on_diff_nodes, setup=instrument_clickhouse_server_log) Scenario(run=no_table, setup=instrument_clickhouse_server_log) Scenario(run=policy_before_table, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/create/create_settings_profile.py b/tests/testflows/rbac/tests/privileges/create/create_settings_profile.py index 938de560391..95511afa260 100644 --- a/tests/testflows/rbac/tests/privileges/create/create_settings_profile.py +++ b/tests/testflows/rbac/tests/privileges/create/create_settings_profile.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `CREATE SETTINGS PROFILE` with privileges are granted directly. - """ + """Check that a user is able to execute `CREATE SETTINGS PROFILE` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=create_settings_profile, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in create_settings_profile.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=create_settings_profile, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in create_settings_profile.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `CREATE SETTINGS PROFILE` with privileges are granted through a role. 
- """ + """Check that a user is able to execute `CREATE SETTINGS PROFILE` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,21 +45,31 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=create_settings_profile, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in create_settings_profile.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=create_settings_profile, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in create_settings_profile.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("CREATE SETTINGS PROFILE",), - ("CREATE PROFILE",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("CREATE SETTINGS PROFILE",), + ("CREATE PROFILE",), + ], +) def create_settings_profile(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `CREATE SETTINGS PROFILE` when they have the necessary privilege. - """ + """Check that user is only able to execute `CREATE SETTINGS PROFILE` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -69,12 +86,18 @@ def create_settings_profile(self, privilege, grant_target_name, user_name, node= node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't create a settings_profile"): - node.query(f"CREATE SETTINGS PROFILE {create_settings_profile_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE SETTINGS PROFILE {create_settings_profile_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the settings_profile"): - node.query(f"DROP SETTINGS PROFILE IF EXISTS {create_settings_profile_name}") + node.query( + f"DROP SETTINGS PROFILE IF EXISTS {create_settings_profile_name}" + ) with Scenario("CREATE SETTINGS PROFILE with privilege"): create_settings_profile_name = f"create_settings_profile_{getuid()}" @@ -84,11 +107,16 @@ def create_settings_profile(self, privilege, grant_target_name, user_name, node= node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can create a settings_profile"): - node.query(f"CREATE SETTINGS PROFILE {create_settings_profile_name}", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE SETTINGS PROFILE {create_settings_profile_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the settings_profile"): - node.query(f"DROP SETTINGS PROFILE IF EXISTS {create_settings_profile_name}") + node.query( + f"DROP SETTINGS PROFILE IF EXISTS {create_settings_profile_name}" + ) with Scenario("CREATE SETTINGS PROFILE on cluster"): create_settings_profile_name = f"create_settings_profile_{getuid()}" @@ -98,11 +126,16 @@ def create_settings_profile(self, privilege, grant_target_name, user_name, node= node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can create a settings_profile"): - node.query(f"CREATE SETTINGS PROFILE {create_settings_profile_name} ON CLUSTER sharded_cluster", settings = [("user", 
f"{user_name}")]) + node.query( + f"CREATE SETTINGS PROFILE {create_settings_profile_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the settings_profile"): - node.query(f"DROP SETTINGS PROFILE IF EXISTS {create_settings_profile_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP SETTINGS PROFILE IF EXISTS {create_settings_profile_name} ON CLUSTER sharded_cluster" + ) with Scenario("CREATE SETTINGS PROFILE with revoked privilege"): create_settings_profile_name = f"create_settings_profile_{getuid()}" @@ -115,23 +148,29 @@ def create_settings_profile(self, privilege, grant_target_name, user_name, node= node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot create a settings_profile"): - node.query(f"CREATE SETTINGS PROFILE {create_settings_profile_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE SETTINGS PROFILE {create_settings_profile_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the settings_profile"): - node.query(f"DROP SETTINGS PROFILE IF EXISTS {create_settings_profile_name}") + node.query( + f"DROP SETTINGS PROFILE IF EXISTS {create_settings_profile_name}" + ) + @TestFeature @Name("create settings profile") @Requirements( RQ_SRS_006_RBAC_Privileges_CreateSettingsProfile("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of CREATE SETTINGS PROFILE. - """ + """Check the RBAC functionality of CREATE SETTINGS PROFILE.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/create/create_table.py b/tests/testflows/rbac/tests/privileges/create/create_table.py index 8f0a9f43771..9d10d4fc9f0 100644 --- a/tests/testflows/rbac/tests/privileges/create/create_table.py +++ b/tests/testflows/rbac/tests/privileges/create/create_table.py @@ -5,13 +5,11 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_Privileges_None("1.0") -) +@Requirements(RQ_SRS_006_RBAC_Privileges_None("1.0")) def create_without_create_table_privilege(self, node=None): - """Check that user is unable to create a table without CREATE TABLE privilege. 
- """ + """Check that user is unable to create a table without CREATE TABLE privilege.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -30,18 +28,24 @@ def create_without_create_table_privilege(self, node=None): with And("I grant the user USAGE privilege"): node.query(f"GRANT USAGE ON *.* TO {user_name}") - with Then("I try to create a table without CREATE TABLE privilege as the user"): - node.query(f"CREATE TABLE {table_name} (x Int8) ENGINE = Memory", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + with Then( + "I try to create a table without CREATE TABLE privilege as the user" + ): + node.query( + f"CREATE TABLE {table_name} (x Int8) ENGINE = Memory", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario def create_with_create_table_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to create a table with CREATE TABLE privilege, either granted directly or through a role. - """ + """Check that user is able to create a table with CREATE TABLE privilege, either granted directly or through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -50,21 +54,25 @@ def create_with_create_table_privilege_granted_directly_or_via_role(self, node=N with user(node, f"{user_name}"): - Scenario(test=create_with_create_table_privilege, - name="create with create table privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_create_table_privilege, + name="create with create table privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_create_table_privilege, - name="create with create table privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_create_table_privilege, + name="create with create table privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_create_table_privilege(self, grant_target_name, user_name, node=None): - """Check that user is able to create a table with the granted privileges. - """ + """Check that user is able to create a table with the granted privileges.""" table_name = f"table_{getuid()}" if node is None: @@ -77,19 +85,20 @@ def create_with_create_table_privilege(self, grant_target_name, user_name, node= node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}") with Then("I try to create a table without privilege as the user"): - node.query(f"CREATE TABLE {table_name} (x Int8) ENGINE = Memory", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE TABLE {table_name} (x Int8) ENGINE = Memory", + settings=[("user", f"{user_name}")], + ) finally: with Then("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_Privileges_All("1.0") -) +@Requirements(RQ_SRS_006_RBAC_Privileges_All("1.0")) def create_with_all_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to create a table with ALL privilege, either granted directly or through a role. 
- """ + """Check that user is able to create a table with ALL privilege, either granted directly or through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -98,21 +107,25 @@ def create_with_all_privilege_granted_directly_or_via_role(self, node=None): with user(node, f"{user_name}"): - Scenario(test=create_with_all_privilege, - name="create with ALL privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_all_privilege, + name="create with ALL privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_all_privilege, - name="create with ALL privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_all_privilege, + name="create with ALL privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_all_privilege(self, grant_target_name, user_name, node=None): - """Check that user is able to create a table with the granted privileges. - """ + """Check that user is able to create a table with the granted privileges.""" table_name = f"table_{getuid()}" if node is None: @@ -125,16 +138,21 @@ def create_with_all_privilege(self, grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I try to create a table without privilege as the user"): - node.query(f"CREATE TABLE {table_name} (x Int8) ENGINE = Memory", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE TABLE {table_name} (x Int8) ENGINE = Memory", + settings=[("user", f"{user_name}")], + ) finally: with Then("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario -def create_with_revoked_create_table_privilege_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to create table after the CREATE TABLE privilege is revoked, either directly or from a role. 
- """ +def create_with_revoked_create_table_privilege_revoked_directly_or_from_role( + self, node=None +): + """Check that user is unable to create table after the CREATE TABLE privilege is revoked, either directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -143,21 +161,27 @@ def create_with_revoked_create_table_privilege_revoked_directly_or_from_role(sel with user(node, f"{user_name}"): - Scenario(test=create_with_revoked_create_table_privilege, - name="create with create table privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_revoked_create_table_privilege, + name="create with create table privilege revoked directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_revoked_create_table_privilege, - name="create with create table privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_revoked_create_table_privilege, + name="create with create table privilege revoked from a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def create_with_revoked_create_table_privilege(self, grant_target_name, user_name, node=None): - """Revoke CREATE TABLE privilege and check the user is unable to create a table. - """ +def create_with_revoked_create_table_privilege( + self, grant_target_name, user_name, node=None +): + """Revoke CREATE TABLE privilege and check the user is unable to create a table.""" table_name = f"table_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -175,17 +199,21 @@ def create_with_revoked_create_table_privilege(self, grant_target_name, user_nam node.query(f"REVOKE CREATE TABLE ON {table_name} FROM {grant_target_name}") with Then("I try to create a table on the table as the user"): - node.query(f"CREATE TABLE {table_name} (x Int8) ENGINE = Memory", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x Int8) ENGINE = Memory", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario def create_with_all_privileges_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to create table after ALL privileges are revoked, either directly or from a role. 
- """ + """Check that user is unable to create table after ALL privileges are revoked, either directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -194,21 +222,25 @@ def create_with_all_privileges_revoked_directly_or_from_role(self, node=None): with user(node, f"{user_name}"): - Scenario(test=create_with_revoked_all_privilege, - name="create with all privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_revoked_all_privilege, + name="create with all privilege revoked directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_revoked_all_privilege, - name="create with all privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_revoked_all_privilege, + name="create with all privilege revoked from a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_revoked_all_privilege(self, grant_target_name, user_name, node=None): - """Revoke ALL privilege and check the user is unable to create a table. - """ + """Revoke ALL privilege and check the user is unable to create a table.""" table_name = f"table_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -226,13 +258,18 @@ def create_with_revoked_all_privilege(self, grant_target_name, user_name, node=N node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I try to create a table on the table as the user"): - node.query(f"CREATE TABLE {table_name} (x Int8) ENGINE = Memory", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x Int8) ENGINE = Memory", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario def create_without_source_table_privilege(self, node=None): """Check that user is unable to create a table without select @@ -256,14 +293,21 @@ def create_without_source_table_privilege(self, node=None): with And("I grant INSERT privilege"): node.query(f"GRANT INSERT ON {table_name} TO {user_name}") - with Then("I try to create a table without select privilege on the table"): - node.query(f"CREATE TABLE {table_name} ENGINE = Memory AS SELECT * FROM {source_table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + with Then( + "I try to create a table without select privilege on the table" + ): + node.query( + f"CREATE TABLE {table_name} ENGINE = Memory AS SELECT * FROM {source_table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario def create_without_insert_privilege(self, node=None): """Check that user is unable to create a table without insert @@ -287,13 +331,20 @@ def create_without_insert_privilege(self, node=None): with And("I grant SELECT privilege"): node.query(f"GRANT SELECT ON {source_table_name} TO {user_name}") - with Then("I try to create a table without select privilege on the table"): - node.query(f"CREATE TABLE {table_name} ENGINE = Memory AS SELECT * FROM {source_table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, 
message=message) + with Then( + "I try to create a table without select privilege on the table" + ): + node.query( + f"CREATE TABLE {table_name} ENGINE = Memory AS SELECT * FROM {source_table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario def create_with_source_table_privilege_granted_directly_or_via_role(self, node=None): """Check that a user is able to create a table if and only if the user has create table privilege and @@ -307,21 +358,25 @@ def create_with_source_table_privilege_granted_directly_or_via_role(self, node=N with user(node, f"{user_name}"): - Scenario(test=create_with_source_table_privilege, - name="create with create table and select privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_source_table_privilege, + name="create with create table and select privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_source_table_privilege, - name="create with create table and select privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_source_table_privilege, + name="create with create table and select privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_source_table_privilege(self, user_name, grant_target_name, node=None): - """Check that user is unable to create a table without SELECT privilege on the source table. 
- """ + """Check that user is unable to create a table without SELECT privilege on the source table.""" table_name = f"table_{getuid()}" source_table_name = f"source_table_{getuid()}" @@ -338,16 +393,22 @@ def create_with_source_table_privilege(self, user_name, grant_target_name, node= node.query(f"GRANT INSERT ON {table_name} TO {grant_target_name}") with And("I grant SELECT privilege"): - node.query(f"GRANT SELECT ON {source_table_name} TO {grant_target_name}") + node.query( + f"GRANT SELECT ON {source_table_name} TO {grant_target_name}" + ) with And("I try to create a table on the table as the user"): node.query(f"DROP TABLE IF EXISTS {table_name}") - node.query(f"CREATE TABLE {table_name} ENGINE = Memory AS SELECT * FROM {source_table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE TABLE {table_name} ENGINE = Memory AS SELECT * FROM {source_table_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario def create_with_subquery_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a table where the stored query has two subqueries @@ -362,16 +423,21 @@ def create_with_subquery_privilege_granted_directly_or_via_role(self, node=None) with user(node, f"{user_name}"): - Scenario(test=create_with_subquery, - name="create with subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_subquery, + name="create with subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_subquery, - name="create with subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_subquery, + name="create with subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_subquery(self, user_name, grant_target_name, node=None): @@ -399,28 +465,71 @@ def create_with_subquery(self, user_name, grant_target_name, node=None): node.query(f"GRANT INSERT ON {table_name} TO {grant_target_name}") with Then("I attempt to CREATE TABLE as the user with create privilege"): - node.query(create_table_query.format(table_name=table_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=3): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name) as tables_granted: + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + ) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with When("I attempt to create a table as the user"): - node.query(create_table_query.format(table_name=table_name, table0_name=table0_name, table1_name=table1_name, 
table2_name=table2_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=3))+1, grant_target_name, table0_name, table1_name, table2_name): + with grant_select_on_table( + node, + max(permutations(table_count=3)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + ): with When("I attempt to create a table as the user"): - node.query(create_table_query.format(table_name=table_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")]) + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario def create_with_join_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a table where the stored query includes a `JOIN` statement @@ -435,16 +544,21 @@ def create_with_join_query_privilege_granted_directly_or_via_role(self, node=Non with user(node, f"{user_name}"): - Scenario(test=create_with_join_query, - name="create with join query, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_join_query, + name="create with join query, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_join_query, - name="create with join query, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_join_query, + name="create with join query, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_join_query(self, grant_target_name, user_name, node=None): @@ -470,28 +584,62 @@ def create_with_join_query(self, grant_target_name, user_name, node=None): node.query(f"GRANT INSERT ON {table_name} TO {grant_target_name}") with Then("I attempt to create table as the user"): - node.query(create_table_query.format(table_name=table_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with When("I attempt to create a table as the user"): - 
node.query(create_table_query.format(table_name=table_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with When("I attempt to create a table as the user"): - node.query(create_table_query.format(table_name=table_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")]) + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Then("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario def create_with_union_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a table where the stored query includes a `UNION ALL` statement @@ -506,16 +654,21 @@ def create_with_union_query_privilege_granted_directly_or_via_role(self, node=No with user(node, f"{user_name}"): - Scenario(test=create_with_union_query, - name="create with union query, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_union_query, + name="create with union query, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_union_query, - name="create with union query, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_union_query, + name="create with union query, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_union_query(self, grant_target_name, user_name, node=None): @@ -541,30 +694,66 @@ def create_with_union_query(self, grant_target_name, user_name, node=None): node.query(f"GRANT INSERT ON {table_name} TO {grant_target_name}") with Then("I attempt to create table as the user"): - node.query(create_table_query.format(table_name=table_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with When("I attempt to create a table as the user"): - node.query(create_table_query.format(table_name=table_name, 
table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with When("I attempt to create a table as the user"): - node.query(create_table_query.format(table_name=table_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")]) + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario -def create_with_join_union_subquery_privilege_granted_directly_or_via_role(self, node=None): +def create_with_join_union_subquery_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to create a table with a stored query that includes `UNION ALL`, `JOIN` and two subqueries if and only if the user has SELECT privilege on all of the tables, either granted directly or through a role. """ @@ -576,16 +765,21 @@ def create_with_join_union_subquery_privilege_granted_directly_or_via_role(self, with user(node, f"{user_name}"): - Scenario(test=create_with_join_union_subquery, - name="create with join union subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_join_union_subquery, + name="create with join union subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_join_union_subquery, - name="create with join union subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_join_union_subquery, + name="create with join union subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_join_union_subquery(self, grant_target_name, user_name, node=None): @@ -604,46 +798,103 @@ def create_with_join_union_subquery(self, grant_target_name, user_name, node=Non if node is None: node = self.context.node - with table(node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}"): + with table( + node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}" + ): with user(node, f"{user_name}"): try: with When("I grant CREATE TABLE privilege"): - node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}") + node.query( + f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}" + ) with And("I grant INSERT privilege"): node.query(f"GRANT INSERT ON {table_name} TO {grant_target_name}") - with Then("I attempt to create table as the user with CREATE TABLE privilege"): - node.query(create_table_query.format(table_name=table_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, 
table3_name=table3_name, table4_name=table4_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + with Then( + "I attempt to create table as the user with CREATE TABLE privilege" + ): + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=5): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table3_name, table4_name) as tables_granted: + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table3_name, + table4_name, + ) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a table"): node.query(f"DROP TABLE IF EXISTS {table_name}") with Then("I attempt to create a table as the user"): - node.query(create_table_query.format(table_name=table_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name, table4_name=table4_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=5))+1, grant_target_name, table0_name, table1_name, table2_name, table3_name, table4_name): + with grant_select_on_table( + node, + max(permutations(table_count=5)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + table4_name, + ): with Given("I don't have a table"): node.query(f"DROP TABLE IF EXISTS {table_name}") with Then("I attempt to create a table as the user"): - node.query(create_table_query.format(table_name=table_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name, table4_name=table4_name), - settings = [("user", f"{user_name}")]) + node.query( + create_table_query.format( + table_name=table_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario def create_with_nested_tables_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a table with a stored query that includes other tables if and only if @@ -657,16 +908,21 @@ def create_with_nested_tables_privilege_granted_directly_or_via_role(self, node= with user(node, f"{user_name}"): - Scenario(test=create_with_nested_tables, - name="create with nested tables, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_nested_tables, + name="create with nested tables, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to 
the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_nested_tables, - name="create with nested tables, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_nested_tables, + name="create with nested tables, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_nested_tables(self, grant_target_name, user_name, node=None): @@ -692,9 +948,15 @@ def create_with_nested_tables(self, grant_target_name, user_name, node=None): try: with Given("I have some tables"): - node.query(f"CREATE TABLE {table1_name} ENGINE = Memory AS SELECT y FROM {table0_name}") - node.query(f"CREATE TABLE {table3_name} ENGINE = Memory AS SELECT y FROM {table2_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y<2)") - node.query(f"CREATE TABLE {table5_name} ENGINE = Memory AS SELECT y FROM {table4_name} JOIN {table3_name} USING y") + node.query( + f"CREATE TABLE {table1_name} ENGINE = Memory AS SELECT y FROM {table0_name}" + ) + node.query( + f"CREATE TABLE {table3_name} ENGINE = Memory AS SELECT y FROM {table2_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y<2)" + ) + node.query( + f"CREATE TABLE {table5_name} ENGINE = Memory AS SELECT y FROM {table4_name} JOIN {table3_name} USING y" + ) with When("I grant CREATE TABLE privilege"): node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}") @@ -702,32 +964,82 @@ def create_with_nested_tables(self, grant_target_name, user_name, node=None): with And("I grant INSERT privilege"): node.query(f"GRANT INSERT ON {table_name} TO {grant_target_name}") - with Then("I attempt to create table as the user with CREATE TABLE privilege"): - node.query(create_table_query.format(table_name=table_name, table5_name=table5_name, table6_name=table6_name), - settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + with Then( + "I attempt to create table as the user with CREATE TABLE privilege" + ): + node.query( + create_table_query.format( + table_name=table_name, + table5_name=table5_name, + table6_name=table6_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) - for permutation in ([0,1,2,3,7,11,15,31,39,79,95],permutations(table_count=7))[self.context.stress]: - with grant_select_on_table(node, permutation, grant_target_name, table5_name, table6_name, table3_name, table4_name, table1_name, table2_name, table0_name) as tables_granted: + for permutation in ( + [0, 1, 2, 3, 7, 11, 15, 31, 39, 79, 95], + permutations(table_count=7), + )[self.context.stress]: + with grant_select_on_table( + node, + permutation, + grant_target_name, + table5_name, + table6_name, + table3_name, + table4_name, + table1_name, + table2_name, + table0_name, + ) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a table"): node.query(f"DROP TABLE IF EXISTS {table3_name}") with Then("I attempt to create a table as the user"): - node.query(create_table_query.format(table_name=table_name, table5_name=table5_name, table6_name=table6_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + create_table_query.format( + table_name=table_name, + table5_name=table5_name, + table6_name=table6_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with 
When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=7))+1, - grant_target_name, table0_name, table1_name, table2_name, table3_name, table4_name, table5_name, table6_name): + with grant_select_on_table( + node, + max(permutations(table_count=7)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + table4_name, + table5_name, + table6_name, + ): with Given("I don't have a table"): node.query(f"DROP TABLE IF EXISTS {table_name}") with Then("I attempt to create a table as the user"): - node.query(create_table_query.format(table_name=table_name, table5_name=table5_name, table6_name=table6_name), - settings = [("user", f"{user_name}")]) + node.query( + create_table_query.format( + table_name=table_name, + table5_name=table5_name, + table6_name=table6_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally(f"I drop {table_name}"): @@ -742,10 +1054,10 @@ def create_with_nested_tables(self, grant_target_name, user_name, node=None): with And(f"I drop {table5_name}"): node.query(f"DROP TABLE IF EXISTS {table5_name}") + @TestScenario def create_as_another_table(self, node=None): - """Check that user is able to create a table as another table with only CREATE TABLE privilege. - """ + """Check that user is able to create a table as another table with only CREATE TABLE privilege.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" source_table_name = f"source_table_{getuid()}" @@ -762,16 +1074,19 @@ def create_as_another_table(self, node=None): node.query(f"GRANT CREATE TABLE ON {table_name} TO {user_name}") with Then("I try to create a table as another table"): - node.query(f"CREATE TABLE {table_name} AS {source_table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE TABLE {table_name} AS {source_table_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the tables"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario def create_as_numbers(self, node=None): - """Check that user is able to create a table as numbers table function. - """ + """Check that user is able to create a table as numbers table function.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -789,16 +1104,19 @@ def create_as_numbers(self, node=None): node.query(f"GRANT INSERT ON {table_name} TO {user_name}") with Then("I try to create a table without select privilege on the table"): - node.query(f"CREATE TABLE {table_name} AS numbers(5)", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE TABLE {table_name} AS numbers(5)", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the tables"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestScenario def create_as_merge(self, node=None): - """Check that user is able to create a table as merge table function. 
- """ + """Check that user is able to create a table as merge table function.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" source_table_name = f"source_table_{getuid()}" @@ -818,20 +1136,23 @@ def create_as_merge(self, node=None): node.query(f"GRANT SELECT ON {source_table_name} TO {user_name}") with Then("I try to create a table as another table"): - node.query(f"CREATE TABLE {table_name} AS merge(default,'{source_table_name}')", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE TABLE {table_name} AS merge(default,'{source_table_name}')", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the tables"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_CreateTable("1.0"), ) @Name("create table") def feature(self, stress=None, node="clickhouse1"): - """Check the RBAC functionality of CREATE TABLE. - """ + """Check the RBAC functionality of CREATE TABLE.""" self.context.node = self.context.cluster.node(node) if stress is not None: diff --git a/tests/testflows/rbac/tests/privileges/create/create_temp_table.py b/tests/testflows/rbac/tests/privileges/create/create_temp_table.py index ac38e0269cf..0cc3211bddf 100644 --- a/tests/testflows/rbac/tests/privileges/create/create_temp_table.py +++ b/tests/testflows/rbac/tests/privileges/create/create_temp_table.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute CREATE TEMPORARY TABLE when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute CREATE TEMPORARY TABLE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute CREATE TEMPORARY TABLE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute CREATE TEMPORARY TABLE with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute CREATE TEMPORARY TABLE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute CREATE TEMPORARY TABLE with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -43,8 +51,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to create a temporary table without privilege"): - node.query(f"CREATE TEMPORARY TABLE {temp_table_name} (x Int8)", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TEMPORARY TABLE {temp_table_name} (x Int8)", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the temporary table"): @@ -55,10 +67,15 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant create temporary table privilege"): - node.query(f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}") + node.query( + f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}" + ) with Then("I attempt to create aa temporary table"): - node.query(f"CREATE TEMPORARY TABLE {temp_table_name} (x Int8)", settings = [("user", user_name)]) + node.query( + f"CREATE TEMPORARY TABLE {temp_table_name} (x Int8)", + settings=[("user", user_name)], + ) finally: with Finally("I drop the temporary table"): @@ -69,14 +86,22 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant the create temporary table privilege"): - node.query(f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}") + node.query( + f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}" + ) with And("I revoke the create temporary table privilege"): - node.query(f"REVOKE CREATE TEMPORARY TABLE ON *.* FROM {grant_target_name}") + node.query( + f"REVOKE CREATE TEMPORARY TABLE ON *.* FROM {grant_target_name}" + ) with Then("I attempt to create a temporary table"): - node.query(f"CREATE TEMPORARY TABLE {temp_table_name} (x Int8)", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TEMPORARY TABLE {temp_table_name} (x Int8)", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the temporary table"): @@ -87,14 +112,20 @@ def privilege_check(grant_target_name, user_name, node=None): try: with When("I grant the create temporary table privilege"): - node.query(f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}") + node.query( + f"GRANT CREATE TEMPORARY TABLE ON *.* TO {grant_target_name}" + ) with And("I revoke ALL privilege"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to create a temporary table"): - node.query(f"CREATE TEMPORARY TABLE {temp_table_name} (x Int8)", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TEMPORARY TABLE {temp_table_name} (x Int8)", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the temporary table"): @@ -108,22 +139,25 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to create aa temporary table"): - node.query(f"CREATE TEMPORARY TABLE {temp_table_name} (x Int8)", settings = [("user", user_name)]) + node.query( + f"CREATE TEMPORARY TABLE {temp_table_name} (x Int8)", + settings=[("user", user_name)], + ) finally: with Finally("I drop the temporary table"): node.query(f"DROP TEMPORARY TABLE 
IF EXISTS {temp_table_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_CreateTemporaryTable("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("create temporary table") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of CREATE TEMPORARY TABLE. - """ + """Check the RBAC functionality of CREATE TEMPORARY TABLE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -131,5 +165,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/create/create_user.py b/tests/testflows/rbac/tests/privileges/create/create_user.py index b055deecea2..c2722b5b9b0 100644 --- a/tests/testflows/rbac/tests/privileges/create/create_user.py +++ b/tests/testflows/rbac/tests/privileges/create/create_user.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def create_user_granted_directly(self, node=None): - """Check that a user is able to execute `CREATE USER` with privileges are granted directly. - """ + """Check that a user is able to execute `CREATE USER` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def create_user_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=create_user, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in create_user.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=create_user, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in create_user.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def create_user_granted_via_role(self, node=None): - """Check that a user is able to execute `CREATE USER` with privileges are granted through a role. 
- """ + """Check that a user is able to execute `CREATE USER` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,20 +45,30 @@ def create_user_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=create_user, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in create_user.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=create_user, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in create_user.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("CREATE USER",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("CREATE USER",), + ], +) def create_user(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `CREATE USER` when they have the necessary privilege. - """ + """Check that user is only able to execute `CREATE USER` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -68,8 +85,12 @@ def create_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't create a user"): - node.query(f"CREATE USER {create_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE USER {create_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the user"): @@ -83,7 +104,10 @@ def create_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can create a user"): - node.query(f"CREATE USER {create_user_name}", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE USER {create_user_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): @@ -97,12 +121,16 @@ def create_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can create a user"): - node.query(f"CREATE USER {create_user_name} ON CLUSTER sharded_cluster", - settings = [("user", f"{user_name}")]) + node.query( + f"CREATE USER {create_user_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP USER IF EXISTS {create_user_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP USER IF EXISTS {create_user_name} ON CLUSTER sharded_cluster" + ) with Scenario("CREATE USER with revoked privilege"): create_user_name = f"create_user_{getuid()}" @@ -115,17 +143,21 @@ def create_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user can't create a user"): - node.query(f"CREATE USER {create_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE USER {create_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I 
drop the user"): node.query(f"DROP USER IF EXISTS {create_user_name}") + @TestSuite def default_role_granted_directly(self, node=None): - """Check that a user is able to execute `CREATE USER` with `DEFAULT ROLE` with privileges are granted directly. - """ + """Check that a user is able to execute `CREATE USER` with `DEFAULT ROLE` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -136,10 +168,10 @@ def default_role_granted_directly(self, node=None): Suite(test=default_role)(grant_target_name=user_name, user_name=user_name) + @TestSuite def default_role_granted_via_role(self, node=None): - """Check that a user is able to execute `CREATE USER` with `DEFAULT ROLE` with privileges are granted through a role. - """ + """Check that a user is able to execute `CREATE USER` with `DEFAULT ROLE` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -154,6 +186,7 @@ def default_role_granted_via_role(self, node=None): Suite(test=default_role)(grant_target_name=role_name, user_name=user_name) + @TestSuite @Requirements( RQ_SRS_006_RBAC_Privileges_CreateUser_DefaultRole("1.0"), @@ -177,8 +210,12 @@ def default_role(self, grant_target_name, user_name, node=None): node.query(f"GRANT CREATE USER ON *.* TO {grant_target_name}") with Then("I check the user can't create a user"): - node.query(f"CREATE USER {create_user_name} DEFAULT ROLE {default_role_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE USER {create_user_name} DEFAULT ROLE {default_role_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the user"): @@ -194,10 +231,15 @@ def default_role(self, grant_target_name, user_name, node=None): node.query(f"GRANT CREATE USER ON *.* TO {grant_target_name}") with And(f"I grant the role with ADMIN OPTION"): - node.query(f"GRANT {default_role_name} TO {grant_target_name} WITH ADMIN OPTION") + node.query( + f"GRANT {default_role_name} TO {grant_target_name} WITH ADMIN OPTION" + ) with Then("I check the user can create a user"): - node.query(f"CREATE USER {create_user_name} DEFAULT ROLE {default_role_name}", settings=[("user",user_name)]) + node.query( + f"CREATE USER {create_user_name} DEFAULT ROLE {default_role_name}", + settings=[("user", user_name)], + ) finally: with Finally("I drop the user"): @@ -209,21 +251,29 @@ def default_role(self, grant_target_name, user_name, node=None): try: with Given("I have role on a cluster"): - node.query(f"CREATE ROLE {default_role_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE ROLE {default_role_name} ON CLUSTER sharded_cluster" + ) with When(f"I grant CREATE USER"): node.query(f"GRANT CREATE USER ON *.* TO {grant_target_name}") with And(f"I grant the role with ADMIN OPTION"): - node.query(f"GRANT {default_role_name} TO {grant_target_name} WITH ADMIN OPTION") + node.query( + f"GRANT {default_role_name} TO {grant_target_name} WITH ADMIN OPTION" + ) with Then("I check the user can create a user"): - node.query(f"CREATE USER {create_user_name} ON CLUSTER sharded_cluster DEFAULT ROLE {default_role_name}", - settings = [("user", f"{user_name}")]) + node.query( + f"CREATE USER {create_user_name} ON CLUSTER sharded_cluster DEFAULT ROLE {default_role_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP USER IF EXISTS {create_user_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP USER IF EXISTS 
{create_user_name} ON CLUSTER sharded_cluster" + ) with And("I drop the role from the cluster"): node.query(f"DROP ROLE {default_role_name} ON CLUSTER sharded_cluster") @@ -238,14 +288,20 @@ def default_role(self, grant_target_name, user_name, node=None): node.query(f"GRANT CREATE USER ON *.* TO {grant_target_name}") with And(f"I grant the role with ADMIN OPTION"): - node.query(f"GRANT {default_role_name} TO {grant_target_name} WITH ADMIN OPTION") + node.query( + f"GRANT {default_role_name} TO {grant_target_name} WITH ADMIN OPTION" + ) with And(f"I revoke the role"): node.query(f"REVOKE {default_role_name} FROM {grant_target_name}") with Then("I check the user can't create a user"): - node.query(f"CREATE USER {create_user_name} DEFAULT ROLE {default_role_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE USER {create_user_name} DEFAULT ROLE {default_role_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the user"): @@ -258,25 +314,30 @@ def default_role(self, grant_target_name, user_name, node=None): with role(node, default_role_name): try: with When(f"I grant ACCESS MANAGEMENT "): - node.query(f"GRANT ACCESS MANAGEMENT ON *.* TO {grant_target_name}") + node.query( + f"GRANT ACCESS MANAGEMENT ON *.* TO {grant_target_name}" + ) with Then("I check the user can create a user"): - node.query(f"CREATE USER {create_user_name} DEFAULT ROLE {default_role_name}", settings=[("user",user_name)]) + node.query( + f"CREATE USER {create_user_name} DEFAULT ROLE {default_role_name}", + settings=[("user", user_name)], + ) finally: with Finally("I drop the user"): node.query(f"DROP USER IF EXISTS {create_user_name}") + @TestFeature @Name("create user") @Requirements( RQ_SRS_006_RBAC_Privileges_CreateUser("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of CREATE USER. - """ + """Check the RBAC functionality of CREATE USER.""" self.context.node = self.context.cluster.node(node) Suite(run=create_user_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/detach/detach_database.py b/tests/testflows/rbac/tests/privileges/detach/detach_database.py index 12eeb39aa1b..848de0bb682 100644 --- a/tests/testflows/rbac/tests/privileges/detach/detach_database.py +++ b/tests/testflows/rbac/tests/privileges/detach/detach_database.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute DETACH DATABASE when they have required privilege, either directly or via role. 
- """ + """Check that user is only able to execute DETACH DATABASE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute DETACH DATABASE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute DETACH DATABASE with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute DETACH DATABASE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute DETACH DATABASE with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. - """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege", setup=instrument_clickhouse_server_log): @@ -46,8 +54,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to detach the database"): - node.query(f"DETACH DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DETACH DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the database", flags=TE): node.query(f"ATTACH DATABASE IF NOT EXISTS {db_name}") @@ -65,7 +77,7 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT DROP DATABASE ON {db_name}.* TO {grant_target_name}") with Then("I attempt to detach a database"): - node.query(f"DETACH DATABASE {db_name}", settings = [("user", user_name)]) + node.query(f"DETACH DATABASE {db_name}", settings=[("user", user_name)]) finally: with Finally("I reattach the database", flags=TE): @@ -84,11 +96,17 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT DROP DATABASE ON {db_name}.* TO {grant_target_name}") with And("I revoke the drop database privilege"): - node.query(f"REVOKE DROP DATABASE ON {db_name}.* FROM {grant_target_name}") + node.query( + f"REVOKE DROP DATABASE ON {db_name}.* FROM {grant_target_name}" + ) with Then("I attempt to detach a database"): - node.query(f"DETACH DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DETACH DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the database", flags=TE): @@ -110,8 +128,12 @@ def privilege_check(grant_target_name, user_name, node=None): 
node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to detach a database"): - node.query(f"DETACH DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DETACH DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the database", flags=TE): @@ -130,7 +152,7 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to detach a database"): - node.query(f"DETACH DATABASE {db_name}", settings = [("user", user_name)]) + node.query(f"DETACH DATABASE {db_name}", settings=[("user", user_name)]) finally: with Finally("I reattach the database", flags=TE): @@ -138,16 +160,16 @@ def privilege_check(grant_target_name, user_name, node=None): with And("I drop the database", flags=TE): node.query(f"DROP DATABASE IF EXISTS {db_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_DetachDatabase("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("detach database") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of DETACH DATABASE. - """ + """Check the RBAC functionality of DETACH DATABASE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -155,5 +177,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/detach/detach_dictionary.py b/tests/testflows/rbac/tests/privileges/detach/detach_dictionary.py index 17b37ce6dc0..bd24d0a400a 100644 --- a/tests/testflows/rbac/tests/privileges/detach/detach_dictionary.py +++ b/tests/testflows/rbac/tests/privileges/detach/detach_dictionary.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute DETACH DICTIONARY when they have required privilege, either directly or via role. 
- """ + """Check that user is only able to execute DETACH DICTIONARY when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute DETACH DICTIONARY with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute DETACH DICTIONARY with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute DETACH DICTIONARY with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute DETACH DICTIONARY with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. - """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -37,7 +45,9 @@ def privilege_check(grant_target_name, user_name, node=None): try: with Given("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with When("I grant the user NONE privilege"): node.query(f"GRANT NONE TO {grant_target_name}") @@ -46,7 +56,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to detach a dictionary without privilege"): - node.query(f"DETACH DICTIONARY {dict_name}", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"DETACH DICTIONARY {dict_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the dictionary", flags=TE): @@ -59,13 +74,19 @@ def privilege_check(grant_target_name, user_name, node=None): try: with Given("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with When("I grant drop dictionary privilege"): - node.query(f"GRANT DROP DICTIONARY ON {dict_name} TO {grant_target_name}") + node.query( + f"GRANT DROP DICTIONARY ON {dict_name} TO {grant_target_name}" + ) with Then("I attempt to detach a dictionary"): - node.query(f"DETACH DICTIONARY {dict_name}", settings = [("user", user_name)]) + node.query( + f"DETACH DICTIONARY {dict_name}", settings=[("user", 
user_name)] + ) finally: with Finally("I reattach the dictionary", flags=TE): @@ -78,16 +99,27 @@ def privilege_check(grant_target_name, user_name, node=None): try: with Given("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with When("I grant the drop dictionary privilege"): - node.query(f"GRANT DROP DICTIONARY ON {dict_name} TO {grant_target_name}") + node.query( + f"GRANT DROP DICTIONARY ON {dict_name} TO {grant_target_name}" + ) with And("I revoke the drop dictionary privilege"): - node.query(f"REVOKE DROP DICTIONARY ON {dict_name} FROM {grant_target_name}") + node.query( + f"REVOKE DROP DICTIONARY ON {dict_name} FROM {grant_target_name}" + ) with Then("I attempt to detach a dictionary"): - node.query(f"DETACH DICTIONARY {dict_name}", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"DETACH DICTIONARY {dict_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the dictionary", flags=TE): @@ -100,16 +132,25 @@ def privilege_check(grant_target_name, user_name, node=None): try: with Given("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with When("I grant the drop dictionary privilege"): - node.query(f"GRANT DROP DICTIONARY ON {dict_name} TO {grant_target_name}") + node.query( + f"GRANT DROP DICTIONARY ON {dict_name} TO {grant_target_name}" + ) with And("I revoke ALL privilege"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to detach a dictionary"): - node.query(f"DETACH DICTIONARY {dict_name}", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"DETACH DICTIONARY {dict_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the dictionary", flags=TE): @@ -122,13 +163,17 @@ def privilege_check(grant_target_name, user_name, node=None): try: with Given("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with When("I grant ALL privilege"): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to detach a dictionary"): - node.query(f"DETACH DICTIONARY {dict_name}", settings = [("user", user_name)]) + node.query( + f"DETACH DICTIONARY {dict_name}", settings=[("user", user_name)] + ) finally: with Finally("I reattach the dictionary", flags=TE): @@ -136,16 +181,16 @@ def privilege_check(grant_target_name, user_name, node=None): with And("I drop the dictionary", flags=TE): node.query(f"DROP DICTIONARY IF EXISTS {dict_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_DetachDictionary("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("detach dictionary") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality 
of DETACH DICTIONARY. - """ + """Check the RBAC functionality of DETACH DICTIONARY.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -153,5 +198,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/detach/detach_table.py b/tests/testflows/rbac/tests/privileges/detach/detach_table.py index b5a01b361fc..421464ac501 100644 --- a/tests/testflows/rbac/tests/privileges/detach/detach_table.py +++ b/tests/testflows/rbac/tests/privileges/detach/detach_table.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute DETACH TABLE when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute DETACH TABLE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute DETACH TABLE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute DETACH TABLE with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute DETACH TABLE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute DETACH TABLE with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -46,8 +54,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with When("I attempt to detach a table without privilege"): - node.query(f"DETACH TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DETACH TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the table", flags=TE): @@ -66,7 +78,7 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT DROP TABLE ON *.* TO {grant_target_name}") with Then("I attempt to detach a table"): - node.query(f"DETACH TABLE {table_name}", settings = [("user", user_name)]) + node.query(f"DETACH TABLE {table_name}", settings=[("user", user_name)]) finally: with Finally("I reattach the table", flags=TE): @@ -88,8 +100,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"REVOKE DROP TABLE ON *.* FROM {grant_target_name}") with Then("I attempt to detach a table"): - node.query(f"DETACH TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DETACH TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the table", flags=TE): @@ -111,8 +127,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to detach a table"): - node.query(f"DETACH TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DETACH TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the table", flags=TE): @@ -131,7 +151,7 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to detach a table"): - node.query(f"DETACH TABLE {table_name}", settings = [("user", user_name)]) + node.query(f"DETACH TABLE {table_name}", settings=[("user", user_name)]) finally: with Finally("I reattach the table", flags=TE): @@ -139,16 +159,16 @@ def privilege_check(grant_target_name, user_name, node=None): with And("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_DetachTable("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("detach table") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of DETACH TABLE. 
- """ + """Check the RBAC functionality of DETACH TABLE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -156,5 +176,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/detach/detach_view.py b/tests/testflows/rbac/tests/privileges/detach/detach_view.py index c3c9f70a35a..03dd6247ccb 100644 --- a/tests/testflows/rbac/tests/privileges/detach/detach_view.py +++ b/tests/testflows/rbac/tests/privileges/detach/detach_view.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute DETACH VIEW when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute DETACH VIEW when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute DETACH VIEW with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute DETACH VIEW with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute DETACH VIEW with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute DETACH VIEW with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -46,8 +54,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with When("I attempt to drop a view without privilege"): - node.query(f"DETACH VIEW {view_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DETACH VIEW {view_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the view as a table", flags=TE): @@ -66,7 +78,7 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT DROP VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to drop a view"): - node.query(f"DETACH VIEW {view_name}", settings = [("user", user_name)]) + node.query(f"DETACH VIEW {view_name}", settings=[("user", user_name)]) finally: with Finally("I reattach the view as a table", flags=TE): @@ -88,8 +100,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"REVOKE DROP VIEW ON {view_name} FROM {grant_target_name}") with Then("I attempt to drop a view"): - node.query(f"DETACH VIEW {view_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DETACH VIEW {view_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the view as a table", flags=TE): @@ -111,8 +127,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to drop a view"): - node.query(f"DETACH VIEW {view_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DETACH VIEW {view_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I reattach the view as a table", flags=TE): @@ -131,7 +151,7 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to drop a view"): - node.query(f"DETACH VIEW {view_name}", settings = [("user", user_name)]) + node.query(f"DETACH VIEW {view_name}", settings=[("user", user_name)]) finally: with Finally("I reattach the view as a table", flags=TE): @@ -139,16 +159,16 @@ def privilege_check(grant_target_name, user_name, node=None): with And("I drop the table", flags=TE): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_DetachView("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("detach view") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of DETACH VIEW. 
- """ + """Check the RBAC functionality of DETACH VIEW.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -156,5 +176,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/dictGet.py b/tests/testflows/rbac/tests/privileges/dictGet.py index 4bee598bb9b..14a38f78f8d 100644 --- a/tests/testflows/rbac/tests/privileges/dictGet.py +++ b/tests/testflows/rbac/tests/privileges/dictGet.py @@ -6,17 +6,21 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @contextmanager def dict_setup(node, table_name, dict_name, type="UInt64"): - """Setup and teardown of table and dictionary needed for the tests. - """ + """Setup and teardown of table and dictionary needed for the tests.""" try: with Given("I have a table"): - node.query(f"CREATE TABLE {table_name} (x UInt64, y UInt64, z {type}) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} (x UInt64, y UInt64, z {type}) ENGINE = Memory" + ) with And("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name} (x UInt64 HIERARCHICAL IS_OBJECT_ID, y UInt64 HIERARCHICAL, z {type}) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE(host 'localhost' port 9000 user 'default' password '' db 'default' table '{table_name}')) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name} (x UInt64 HIERARCHICAL IS_OBJECT_ID, y UInt64 HIERARCHICAL, z {type}) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE(host 'localhost' port 9000 user 'default' password '' db 'default' table '{table_name}')) LIFETIME(0)" + ) yield @@ -27,10 +31,10 @@ def dict_setup(node, table_name, dict_name, type="UInt64"): with And("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestSuite def dictGet_granted_directly(self, node=None): - """Run dictGet checks with privileges granted directly. - """ + """Run dictGet checks with privileges granted directly.""" user_name = f"user_{getuid()}" @@ -39,15 +43,22 @@ def dictGet_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=dictGet_check, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in dictGet_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictGet_check, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in dictGet_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def dictGet_granted_via_role(self, node=None): - """Run dictGet checks with privileges granted through a role. 
- """ + """Run dictGet checks with privileges granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -60,25 +71,33 @@ def dictGet_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=dictGet_check, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in dictGet_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictGet_check, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in dictGet_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("dictGet", "dict"), - ("dictHas", "dict"), - ("dictGetHierarchy", "dict"), - ("dictIsIn", "dict"), -]) -@Requirements( - RQ_SRS_006_RBAC_dictGet_RequiredPrivilege("1.0") +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("dictGet", "dict"), + ("dictHas", "dict"), + ("dictGetHierarchy", "dict"), + ("dictIsIn", "dict"), + ], ) +@Requirements(RQ_SRS_006_RBAC_dictGet_RequiredPrivilege("1.0")) def dictGet_check(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is able to execute `dictGet` if and only if they have the necessary privileges. - """ + """Check that user is able to execute `dictGet` if and only if they have the necessary privileges.""" if node is None: node = self.context.node @@ -100,7 +119,12 @@ def dictGet_check(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to dictGet without privilege"): - node.query(f"SELECT dictGet ({dict_name},'y',toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictGet ({dict_name},'y',toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege"): @@ -110,7 +134,10 @@ def dictGet_check(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I attempt to dictGet with privilege"): - node.query(f"SELECT dictGet ({dict_name},'y',toUInt64(1))", settings = [("user", user_name)]) + node.query( + f"SELECT dictGet ({dict_name},'y',toUInt64(1))", + settings=[("user", user_name)], + ) with Scenario("user with revoked privilege"): @@ -123,12 +150,17 @@ def dictGet_check(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with When("I attempt to dictGet without privilege"): - node.query(f"SELECT dictGet ({dict_name},'y',toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictGet ({dict_name},'y',toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def dictGetOrDefault_granted_directly(self, node=None): - """Run dictGetOrDefault checks with privileges granted directly. 
- """ + """Run dictGetOrDefault checks with privileges granted directly.""" user_name = f"user_{getuid()}" @@ -137,15 +169,22 @@ def dictGetOrDefault_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=dictGetOrDefault_check, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in dictGetOrDefault_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictGetOrDefault_check, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in dictGetOrDefault_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def dictGetOrDefault_granted_via_role(self, node=None): - """Run dictGetOrDefault checks with privileges granted through a role. - """ + """Run dictGetOrDefault checks with privileges granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -158,25 +197,35 @@ def dictGetOrDefault_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=dictGetOrDefault_check, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in dictGetOrDefault_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictGetOrDefault_check, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in dictGetOrDefault_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("dictGet", "dict"), - ("dictHas", "dict"), - ("dictGetHierarchy", "dict"), - ("dictIsIn", "dict"), -]) -@Requirements( - RQ_SRS_006_RBAC_dictGet_OrDefault_RequiredPrivilege("1.0") +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("dictGet", "dict"), + ("dictHas", "dict"), + ("dictGetHierarchy", "dict"), + ("dictIsIn", "dict"), + ], ) -def dictGetOrDefault_check(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is able to execute `dictGetOrDefault` if and only if they have the necessary privileges. 
- """ +@Requirements(RQ_SRS_006_RBAC_dictGet_OrDefault_RequiredPrivilege("1.0")) +def dictGetOrDefault_check( + self, privilege, on, grant_target_name, user_name, node=None +): + """Check that user is able to execute `dictGetOrDefault` if and only if they have the necessary privileges.""" if node is None: node = self.context.node @@ -198,7 +247,12 @@ def dictGetOrDefault_check(self, privilege, on, grant_target_name, user_name, no node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to dictGetOrDefault without privilege"): - node.query(f"SELECT dictGetOrDefault ({dict_name},'y',toUInt64(1),toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictGetOrDefault ({dict_name},'y',toUInt64(1),toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege"): @@ -208,7 +262,10 @@ def dictGetOrDefault_check(self, privilege, on, grant_target_name, user_name, no node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I attempt to dictGetOrDefault with privilege"): - node.query(f"SELECT dictGetOrDefault ({dict_name},'y',toUInt64(1),toUInt64(1))", settings = [("user", user_name)]) + node.query( + f"SELECT dictGetOrDefault ({dict_name},'y',toUInt64(1),toUInt64(1))", + settings=[("user", user_name)], + ) with Scenario("user with revoked privilege"): @@ -221,12 +278,17 @@ def dictGetOrDefault_check(self, privilege, on, grant_target_name, user_name, no node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with When("I attempt to dictGetOrDefault without privilege"): - node.query(f"SELECT dictGetOrDefault ({dict_name},'y',toUInt64(1),toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictGetOrDefault ({dict_name},'y',toUInt64(1),toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def dictHas_granted_directly(self, node=None): - """Run dictHas checks with privileges granted directly. - """ + """Run dictHas checks with privileges granted directly.""" user_name = f"user_{getuid()}" @@ -235,15 +297,22 @@ def dictHas_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=dictHas_check, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in dictHas_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictHas_check, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in dictHas_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def dictHas_granted_via_role(self, node=None): - """Run checks with privileges granted through a role. 
- """ + """Run checks with privileges granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -256,25 +325,33 @@ def dictHas_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=dictHas_check, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in dictHas_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictHas_check, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in dictHas_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("dictGet", "dict"), - ("dictHas", "dict"), - ("dictGetHierarchy", "dict"), - ("dictIsIn", "dict"), -]) -@Requirements( - RQ_SRS_006_RBAC_dictHas_RequiredPrivilege("1.0") +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("dictGet", "dict"), + ("dictHas", "dict"), + ("dictGetHierarchy", "dict"), + ("dictIsIn", "dict"), + ], ) +@Requirements(RQ_SRS_006_RBAC_dictHas_RequiredPrivilege("1.0")) def dictHas_check(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is able to execute `dictHas` if and only if they have the necessary privileges. - """ + """Check that user is able to execute `dictHas` if and only if they have the necessary privileges.""" if node is None: node = self.context.node @@ -296,7 +373,12 @@ def dictHas_check(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to dictHas without privilege"): - node.query(f"SELECT dictHas({dict_name},toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictHas({dict_name},toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege"): @@ -306,7 +388,10 @@ def dictHas_check(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I attempt to dictHas with privilege"): - node.query(f"SELECT dictHas({dict_name},toUInt64(1))", settings = [("user", user_name)]) + node.query( + f"SELECT dictHas({dict_name},toUInt64(1))", + settings=[("user", user_name)], + ) with Scenario("user with revoked privilege"): @@ -319,12 +404,17 @@ def dictHas_check(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with When("I attempt to dictHas without privilege"): - node.query(f"SELECT dictHas({dict_name},toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictHas({dict_name},toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def dictGetHierarchy_granted_directly(self, node=None): - """Run dictGetHierarchy checks with privileges granted directly. 
- """ + """Run dictGetHierarchy checks with privileges granted directly.""" user_name = f"user_{getuid()}" @@ -332,15 +422,22 @@ def dictGetHierarchy_granted_directly(self, node=None): node = self.context.node with user(node, f"{user_name}"): - Suite(run=dictGetHierarchy_check, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in dictGetHierarchy_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictGetHierarchy_check, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in dictGetHierarchy_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def dictGetHierarchy_granted_via_role(self, node=None): - """Run checks with privileges granted through a role. - """ + """Run checks with privileges granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -353,25 +450,35 @@ def dictGetHierarchy_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=dictGetHierarchy_check, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in dictGetHierarchy_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictGetHierarchy_check, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in dictGetHierarchy_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("dictGet", "dict"), - ("dictHas", "dict"), - ("dictGetHierarchy", "dict"), - ("dictIsIn", "dict"), -]) -@Requirements( - RQ_SRS_006_RBAC_dictGetHierarchy_RequiredPrivilege("1.0") +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("dictGet", "dict"), + ("dictHas", "dict"), + ("dictGetHierarchy", "dict"), + ("dictIsIn", "dict"), + ], ) -def dictGetHierarchy_check(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is able to execute `dictGetHierarchy` if and only if they have the necessary privileges. 
- """ +@Requirements(RQ_SRS_006_RBAC_dictGetHierarchy_RequiredPrivilege("1.0")) +def dictGetHierarchy_check( + self, privilege, on, grant_target_name, user_name, node=None +): + """Check that user is able to execute `dictGetHierarchy` if and only if they have the necessary privileges.""" if node is None: node = self.context.node @@ -393,7 +500,12 @@ def dictGetHierarchy_check(self, privilege, on, grant_target_name, user_name, no node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to dictGetHierarchy without privilege"): - node.query(f"SELECT dictGetHierarchy({dict_name},toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictGetHierarchy({dict_name},toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege"): @@ -403,7 +515,10 @@ def dictGetHierarchy_check(self, privilege, on, grant_target_name, user_name, no node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I attempt to dictGetHierarchy with privilege"): - node.query(f"SELECT dictGetHierarchy({dict_name},toUInt64(1))", settings = [("user", user_name)]) + node.query( + f"SELECT dictGetHierarchy({dict_name},toUInt64(1))", + settings=[("user", user_name)], + ) with Scenario("user with revoked privilege"): @@ -416,12 +531,17 @@ def dictGetHierarchy_check(self, privilege, on, grant_target_name, user_name, no node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with When("I attempt to dictGetHierarchy without privilege"): - node.query(f"SELECT dictGetHierarchy({dict_name},toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictGetHierarchy({dict_name},toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def dictIsIn_granted_directly(self, node=None): - """Run dictIsIn checks with privileges granted directly. - """ + """Run dictIsIn checks with privileges granted directly.""" user_name = f"user_{getuid()}" @@ -429,15 +549,22 @@ def dictIsIn_granted_directly(self, node=None): node = self.context.node with user(node, f"{user_name}"): - Suite(run=dictIsIn_check, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in dictIsIn_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictIsIn_check, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in dictIsIn_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def dictIsIn_granted_via_role(self, node=None): - """Run checks with privileges granted through a role. 
- """ + """Run checks with privileges granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -450,25 +577,33 @@ def dictIsIn_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=dictIsIn_check, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in dictIsIn_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictIsIn_check, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in dictIsIn_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("dictGet", "dict"), - ("dictHas", "dict"), - ("dictGetHierarchy", "dict"), - ("dictIsIn", "dict"), -]) -@Requirements( - RQ_SRS_006_RBAC_dictIsIn_RequiredPrivilege("1.0") +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("dictGet", "dict"), + ("dictHas", "dict"), + ("dictGetHierarchy", "dict"), + ("dictIsIn", "dict"), + ], ) +@Requirements(RQ_SRS_006_RBAC_dictIsIn_RequiredPrivilege("1.0")) def dictIsIn_check(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is able to execute `dictIsIn` if and only if they have the necessary privileges. - """ + """Check that user is able to execute `dictIsIn` if and only if they have the necessary privileges.""" if node is None: node = self.context.node @@ -490,7 +625,12 @@ def dictIsIn_check(self, privilege, on, grant_target_name, user_name, node=None) node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to dictIsIn without privilege"): - node.query(f"SELECT dictIsIn({dict_name},toUInt64(1),toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictIsIn({dict_name},toUInt64(1),toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege"): @@ -500,7 +640,10 @@ def dictIsIn_check(self, privilege, on, grant_target_name, user_name, node=None) node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I attempt to dictIsIn with privilege"): - node.query(f"SELECT dictIsIn({dict_name},toUInt64(1),toUInt64(1))", settings = [("user", user_name)]) + node.query( + f"SELECT dictIsIn({dict_name},toUInt64(1),toUInt64(1))", + settings=[("user", user_name)], + ) with Scenario("user with revoked privilege"): @@ -513,28 +656,36 @@ def dictIsIn_check(self, privilege, on, grant_target_name, user_name, node=None) node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with When("I attempt to dictIsIn without privilege"): - node.query(f"SELECT dictIsIn({dict_name},toUInt64(1),toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictIsIn({dict_name},toUInt64(1),toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite -@Examples("type",[ - ("Int8",), - ("Int16",), - ("Int32",), - ("Int64",), - ("UInt8",), - ("UInt16",), - ("UInt32",), - ("UInt64",), - ("Float32",), - ("Float64",), - ("Date",), - ("DateTime",), - ("UUID",), - ("String",), -]) +@Examples( + "type", + [ + ("Int8",), + ("Int16",), + ("Int32",), + ("Int64",), + ("UInt8",), + ("UInt16",), + ("UInt32",), + ("UInt64",), + ("Float32",), + ("Float64",), + 
("Date",), + ("DateTime",), + ("UUID",), + ("String",), + ], +) def dictGetType_granted_directly(self, type, node=None): - """Run checks on dictGet with a type specified with privileges granted directly. - """ + """Run checks on dictGet with a type specified with privileges granted directly.""" user_name = f"user_{getuid()}" @@ -542,31 +693,41 @@ def dictGetType_granted_directly(self, type, node=None): node = self.context.node with user(node, f"{user_name}"): - Suite(run=dictGetType_check, - examples=Examples("privilege on grant_target_name user_name type", [ - tuple(list(row)+[user_name,user_name,type]) for row in dictGetType_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictGetType_check, + examples=Examples( + "privilege on grant_target_name user_name type", + [ + tuple(list(row) + [user_name, user_name, type]) + for row in dictGetType_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite -@Examples("type",[ - ("Int8",), - ("Int16",), - ("Int32",), - ("Int64",), - ("UInt8",), - ("UInt16",), - ("UInt32",), - ("UInt64",), - ("Float32",), - ("Float64",), - ("Date",), - ("DateTime",), - ("UUID",), - ("String",), -]) +@Examples( + "type", + [ + ("Int8",), + ("Int16",), + ("Int32",), + ("Int64",), + ("UInt8",), + ("UInt16",), + ("UInt32",), + ("UInt64",), + ("Float32",), + ("Float64",), + ("Date",), + ("DateTime",), + ("UUID",), + ("String",), + ], +) def dictGetType_granted_via_role(self, type, node=None): - """Run checks on dictGet with a type specified with privileges granted through a role. - """ + """Run checks on dictGet with a type specified with privileges granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -579,25 +740,35 @@ def dictGetType_granted_via_role(self, type, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=dictGetType_check, - examples=Examples("privilege on grant_target_name user_name type", [ - tuple(list(row)+[role_name,user_name,type]) for row in dictGetType_check.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictGetType_check, + examples=Examples( + "privilege on grant_target_name user_name type", + [ + tuple(list(row) + [role_name, user_name, type]) + for row in dictGetType_check.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("dictGet", "dict"), - ("dictHas", "dict"), - ("dictGetHierarchy", "dict"), - ("dictIsIn", "dict"), -]) -@Requirements( - RQ_SRS_006_RBAC_dictGet_Type_RequiredPrivilege("1.0") +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("dictGet", "dict"), + ("dictHas", "dict"), + ("dictGetHierarchy", "dict"), + ("dictIsIn", "dict"), + ], ) -def dictGetType_check(self, privilege, on, grant_target_name, user_name, type, node=None): - """Check that user is able to execute `dictGet` if and only if they have the necessary privileges. 
- """ +@Requirements(RQ_SRS_006_RBAC_dictGet_Type_RequiredPrivilege("1.0")) +def dictGetType_check( + self, privilege, on, grant_target_name, user_name, type, node=None +): + """Check that user is able to execute `dictGet` if and only if they have the necessary privileges.""" if node is None: node = self.context.node @@ -619,7 +790,12 @@ def dictGetType_check(self, privilege, on, grant_target_name, user_name, type, n node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to dictGet without privilege"): - node.query(f"SELECT dictGet{type}({dict_name},'z',toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictGet{type}({dict_name},'z',toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege"): @@ -629,7 +805,10 @@ def dictGetType_check(self, privilege, on, grant_target_name, user_name, type, n node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I attempt to dictGet with privilege"): - node.query(f"SELECT dictGet{type}({dict_name},'z',toUInt64(1))", settings = [("user", user_name)]) + node.query( + f"SELECT dictGet{type}({dict_name},'z',toUInt64(1))", + settings=[("user", user_name)], + ) with Scenario("user with revoked privilege"): @@ -642,18 +821,23 @@ def dictGetType_check(self, privilege, on, grant_target_name, user_name, type, n node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with When("I attempt to dictGet without privilege"): - node.query(f"SELECT dictGet{type}({dict_name},'z',toUInt64(1))", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT dictGet{type}({dict_name},'z',toUInt64(1))", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Requirements( RQ_SRS_006_RBAC_dictGet_Privilege("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("dictGet") def feature(self, stress=None, node="clickhouse1"): - """Check the RBAC functionality of dictGet. 
- """ + """Check the RBAC functionality of dictGet.""" self.context.node = self.context.cluster.node(node) if stress is not None: @@ -661,24 +845,84 @@ def feature(self, stress=None, node="clickhouse1"): with Pool(20) as pool: try: - Suite(run=dictGet_granted_directly, setup=instrument_clickhouse_server_log, parallel=True, executor=pool) - Suite(run=dictGet_granted_via_role, setup=instrument_clickhouse_server_log, parallel=True, executor=pool) - Suite(run=dictGetOrDefault_granted_directly, setup=instrument_clickhouse_server_log, parallel=True, executor=pool) - Suite(run=dictGetOrDefault_granted_via_role, setup=instrument_clickhouse_server_log, parallel=True, executor=pool) - Suite(run=dictHas_granted_directly, setup=instrument_clickhouse_server_log, parallel=True, executor=pool) - Suite(run=dictHas_granted_via_role, setup=instrument_clickhouse_server_log, parallel=True, executor=pool) - Suite(run=dictGetHierarchy_granted_directly, setup=instrument_clickhouse_server_log, parallel=True, executor=pool) - Suite(run=dictGetHierarchy_granted_via_role, setup=instrument_clickhouse_server_log, parallel=True, executor=pool) - Suite(run=dictIsIn_granted_directly, setup=instrument_clickhouse_server_log, parallel=True, executor=pool) - Suite(run=dictIsIn_granted_via_role, setup=instrument_clickhouse_server_log, parallel=True, executor=pool) + Suite( + run=dictGet_granted_directly, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + ) + Suite( + run=dictGet_granted_via_role, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + ) + Suite( + run=dictGetOrDefault_granted_directly, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + ) + Suite( + run=dictGetOrDefault_granted_via_role, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + ) + Suite( + run=dictHas_granted_directly, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + ) + Suite( + run=dictHas_granted_via_role, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + ) + Suite( + run=dictGetHierarchy_granted_directly, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + ) + Suite( + run=dictGetHierarchy_granted_via_role, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + ) + Suite( + run=dictIsIn_granted_directly, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + ) + Suite( + run=dictIsIn_granted_via_role, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + ) for example in dictGetType_granted_directly.examples: - type, = example - args = {"type" : type} + (type,) = example + args = {"type": type} with Example(example): - Suite(test=dictGetType_granted_directly, setup=instrument_clickhouse_server_log, parallel=True, executor=pool)(**args) - Suite(test=dictGetType_granted_via_role, setup=instrument_clickhouse_server_log, parallel=True, executor=pool)(**args) + Suite( + test=dictGetType_granted_directly, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + )(**args) + Suite( + test=dictGetType_granted_via_role, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + )(**args) finally: join() diff --git a/tests/testflows/rbac/tests/privileges/distributed_table.py b/tests/testflows/rbac/tests/privileges/distributed_table.py index c99e6363b4d..5291a5609db 100755 --- a/tests/testflows/rbac/tests/privileges/distributed_table.py +++ 
b/tests/testflows/rbac/tests/privileges/distributed_table.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestStep(Given) def user(self, name, node=None): - """Create a user with a given name. - """ + """Create a user with a given name.""" if node is None: node = self.context.node @@ -20,10 +20,10 @@ def user(self, name, node=None): with Finally(f"I delete user {name}"): node.query(f"DROP USER IF EXISTS {name} ON CLUSTER one_shard_cluster") + @TestStep(Given) def role(self, name, node=None): - """Create a role with a given name. - """ + """Create a role with a given name.""" if node is None: node = self.context.node @@ -35,17 +35,19 @@ def role(self, name, node=None): with Finally(f"I delete role {name}"): node.query(f"DROP ROLE IF EXISTS {name} ON CLUSTER one_shard_cluster") + @TestStep(Given) def table(self, name, cluster=None, node=None): - """Create a table with given name and on specified cluster, if specified. - """ + """Create a table with given name and on specified cluster, if specified.""" if node is None: node = self.context.node try: if cluster: with Given(f"I create table {name}"): node.query(f"DROP TABLE IF EXISTS {name}") - node.query(f"CREATE TABLE {name} ON CLUSTER {cluster} (a UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {name} ON CLUSTER {cluster} (a UInt64) ENGINE = Memory" + ) else: with Given(f"I create table {name}"): node.query(f"DROP TABLE IF EXISTS {name}") @@ -59,26 +61,26 @@ def table(self, name, cluster=None, node=None): with Finally(f"I delete role {name}"): node.query(f"DROP ROLE IF EXISTS {name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_DistributedTable_Create("1.0"), ) def create(self): - """Check the RBAC functionality of distributed table with CREATE. - """ - create_scenarios=[ - create_without_privilege, - create_with_privilege_granted_directly_or_via_role, - create_with_all_privilege_granted_directly_or_via_role, + """Check the RBAC functionality of distributed table with CREATE.""" + create_scenarios = [ + create_without_privilege, + create_with_privilege_granted_directly_or_via_role, + create_with_all_privilege_granted_directly_or_via_role, ] for scenario in create_scenarios: Scenario(run=scenario, setup=instrument_clickhouse_server_log) + @TestScenario def create_without_privilege(self, node=None): - """Check that user is unable to create a distributed table without privileges. 
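# --- Illustrative sketch, not part of the diff: the create-in-Given / drop-in-Finally shape of
# --- the user/role/table steps above, written as a plain context manager. execute() is a
# --- hypothetical stand-in for node.query(), and the cleanup statement is the natural
# --- DROP TABLE chosen for this sketch.
from contextlib import contextmanager

@contextmanager
def temporary_table(execute, name, cluster=None):
    if cluster:
        ddl = f"CREATE TABLE {name} ON CLUSTER {cluster} (a UInt64) ENGINE = Memory"
    else:
        ddl = f"CREATE TABLE {name} (a UInt64) ENGINE = Memory"
    try:
        execute(f"DROP TABLE IF EXISTS {name}")
        execute(ddl)
        yield name
    finally:
        execute(f"DROP TABLE IF EXISTS {name}")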
- """ + """Check that user is unable to create a distributed table without privileges.""" user_name = f"user_{getuid()}" table0_name = f"table0_{getuid()}" @@ -104,8 +106,13 @@ def create_without_privilege(self, node=None): node.query(f"GRANT USAGE ON *.* TO {user_name}") with Then("I attempt to create the distributed table without privilege"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed(sharded_cluster, default, {table0_name}, rand())", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed(sharded_cluster, default, {table0_name}, rand())", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestScenario def create_with_privilege_granted_directly_or_via_role(self, node=None): @@ -121,8 +128,9 @@ def create_with_privilege_granted_directly_or_via_role(self, node=None): with Given("I have a user"): user(name=user_name) - Scenario(test=create_with_privilege, - name="create with privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario(test=create_with_privilege, name="create with privilege granted directly")( + grant_target_name=user_name, user_name=user_name + ) with Given("I have a user"): user(name=user_name) @@ -133,8 +141,10 @@ def create_with_privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name} ON CLUSTER one_shard_cluster") - Scenario(test=create_with_privilege, - name="create with privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_privilege, name="create with privilege granted through a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_privilege(self, user_name, grant_target_name, node=None): @@ -158,8 +168,12 @@ def create_with_privilege(self, user_name, grant_target_name, node=None): node.query(f"GRANT CREATE ON {table1_name} TO {grant_target_name}") with Then("I attempt to create the distributed table as the user"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I revoke the create table privilege"): node.query(f"REVOKE CREATE TABLE ON {table1_name} FROM {grant_target_name}") @@ -168,19 +182,27 @@ def create_with_privilege(self, user_name, grant_target_name, node=None): node.query(f"GRANT REMOTE ON *.* to {grant_target_name}") with Then("I attempt to create the distributed table as the user"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant create table privilege"): node.query(f"GRANT CREATE ON {table1_name} TO {grant_target_name}") with Then("I attempt to create the distributed table as the user"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = 
Distributed({cluster}, default, {table0_name}, rand())", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the distributed table"): node.query(f"DROP TABLE IF EXISTS {table1_name}") + @TestScenario def create_with_all_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a distributed table if and only if @@ -195,8 +217,9 @@ def create_with_all_privilege_granted_directly_or_via_role(self, node=None): with Given("I have a user"): user(name=user_name) - Scenario(test=create_with_privilege, - name="create with privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario(test=create_with_privilege, name="create with privilege granted directly")( + grant_target_name=user_name, user_name=user_name + ) with Given("I have a user"): user(name=user_name) @@ -207,13 +230,14 @@ def create_with_all_privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name} ON CLUSTER one_shard_cluster") - Scenario(test=create_with_privilege, - name="create with privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_privilege, name="create with privilege granted through a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_privilege(self, user_name, grant_target_name, node=None): - """Grant ALL privilege and check the user is able is create the table. - """ + """Grant ALL privilege and check the user is able is create the table.""" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -231,32 +255,35 @@ def create_with_privilege(self, user_name, grant_target_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I create the distributed table as the user"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the distributed table"): node.query(f"DROP TABLE IF EXISTS {table1_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_DistributedTable_Select("1.0"), ) def select(self): - """Check the RBAC functionality of distributed table with SELECT. - """ + """Check the RBAC functionality of distributed table with SELECT.""" select_scenarios = [ select_without_privilege, select_with_privilege_granted_directly_or_via_role, - select_with_all_privilege_granted_directly_or_via_role + select_with_all_privilege_granted_directly_or_via_role, ] for scenario in select_scenarios: Scenario(run=scenario, setup=instrument_clickhouse_server_log) + @TestScenario def select_without_privilege(self, node=None): - """Check that user is unable to select from a distributed table without privileges. 
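# --- Illustrative sketch, not part of the diff: the privilege combinations the CREATE scenarios
# --- above exercise, and whether creating the Distributed table is expected to be allowed.
CREATE_DISTRIBUTED_CASES = [
    ({"CREATE TABLE"}, False),           # CREATE on the new table alone: rejected
    ({"REMOTE"}, False),                 # REMOTE ON *.* alone: rejected
    ({"CREATE TABLE", "REMOTE"}, True),  # both together: allowed
    ({"ALL"}, True),                     # ALL ON *.*: allowed
]

if __name__ == "__main__":
    for grants, allowed in CREATE_DISTRIBUTED_CASES:
        print(sorted(grants), "->", "allowed" if allowed else "rejected")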
- """ + """Check that user is unable to select from a distributed table without privileges.""" user_name = f"user_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -275,7 +302,9 @@ def select_without_privilege(self, node=None): table(name=table0_name, cluster=cluster) with And("I have a distributed table"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())" + ) with When("I grant the user NONE privilege"): node.query(f"GRANT NONE TO {user_name}") @@ -284,12 +313,17 @@ def select_without_privilege(self, node=None): node.query(f"GRANT USAGE ON *.* TO {user_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table1_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the distributed table"): node.query(f"DROP TABLE IF EXISTS {table1_name}") + @TestScenario def select_with_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a distributed table if and only if @@ -304,8 +338,9 @@ def select_with_privilege_granted_directly_or_via_role(self, node=None): with Given("I have a user"): user(name=user_name) - Scenario(test=select_with_privilege, - name="select with privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario(test=select_with_privilege, name="select with privilege granted directly")( + grant_target_name=user_name, user_name=user_name + ) with Given("I have a user"): user(name=user_name) @@ -316,8 +351,10 @@ def select_with_privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name} ON CLUSTER one_shard_cluster") - Scenario(test=select_with_privilege, - name="select with privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_privilege, name="select with privilege granted through a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_privilege(self, user_name, grant_target_name, node=None): @@ -338,14 +375,20 @@ def select_with_privilege(self, user_name, grant_target_name, node=None): table(name=table0_name, cluster=cluster) with And("I have a distributed table"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())" + ) with When("I grant select privilege on the distributed table"): node.query(f"GRANT SELECT ON {table1_name} TO {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table1_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I revoke select privilege on the distributed table"): node.query(f"REVOKE SELECT ON {table1_name} FROM {grant_target_name}") @@ -354,19 +397,26 @@ def select_with_privilege(self, user_name, 
grant_target_name, node=None): node.query(f"GRANT SELECT ON {table0_name} to {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table1_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant the user select privilege on the distributed table"): node.query(f"GRANT SELECT ON {table1_name} TO {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table1_name}", settings=[("user", f"{user_name}")] + ) finally: with Finally("I drop the distributed table"): node.query(f"DROP TABLE IF EXISTS {table1_name}") + @TestScenario def select_with_all_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a distributed table if and only if @@ -381,8 +431,9 @@ def select_with_all_privilege_granted_directly_or_via_role(self, node=None): with Given("I have a user"): user(name=user_name) - Scenario(test=select_with_privilege, - name="select with privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario(test=select_with_privilege, name="select with privilege granted directly")( + grant_target_name=user_name, user_name=user_name + ) with Given("I have a user"): user(name=user_name) @@ -393,13 +444,14 @@ def select_with_all_privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name} ON CLUSTER one_shard_cluster") - Scenario(test=select_with_privilege, - name="select with privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_privilege, name="select with privilege granted through a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_privilege(self, user_name, grant_target_name, node=None): - """Grant ALL and check the user is able to select from the distributed table. - """ + """Grant ALL and check the user is able to select from the distributed table.""" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -414,25 +466,29 @@ def select_with_privilege(self, user_name, grant_target_name, node=None): table(name=table0_name, cluster=cluster) with And("I have a distributed table"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())" + ) with When("I grant ALL privilege"): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table1_name}", settings=[("user", f"{user_name}")] + ) finally: with Finally("I drop the distributed table"): node.query(f"DROP TABLE IF EXISTS {table1_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_DistributedTable_Insert("1.0"), ) def insert(self): - """Check the RBAC functionality of distributed table with INSERT. 
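# --- Illustrative sketch, not part of the diff: what the SELECT scenarios above establish.
# --- Reading through a Distributed table needs SELECT on the Distributed table itself and on
# --- the table it points at (or ALL ON *.*). The names in the demo are placeholders.
def render_select_grants(user, distributed_table, underlying_table):
    return [
        f"GRANT SELECT ON {distributed_table} TO {user}",
        f"GRANT SELECT ON {underlying_table} TO {user}",
    ]

if __name__ == "__main__":
    for statement in render_select_grants("user_0", "table1_0", "table0_0"):
        print(statement)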
- """ + """Check the RBAC functionality of distributed table with INSERT.""" insert_scenarios = [ insert_without_privilege, insert_with_privilege_granted_directly_or_via_role, @@ -441,10 +497,10 @@ def insert(self): for scenario in insert_scenarios: Scenario(run=scenario, setup=instrument_clickhouse_server_log) + @TestScenario def insert_without_privilege(self, node=None): - """Check that user is unable to insert into a distributed table without privileges. - """ + """Check that user is unable to insert into a distributed table without privileges.""" user_name = f"user_{getuid()}" table0_name = f"table0_{getuid()}" @@ -465,7 +521,9 @@ def insert_without_privilege(self, node=None): table(name=table0_name, cluster=cluster) with And("I have a distributed table"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())" + ) with When("I grant the user NONE privilege"): node.query(f"GRANT NONE TO {user_name}") @@ -474,12 +532,17 @@ def insert_without_privilege(self, node=None): node.query(f"GRANT USAGE ON *.* TO {user_name}") with Then("I attempt to insert into the distributed table as the user"): - node.query(f"INSERT INTO {table1_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table1_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the distributed table"): node.query(f"DROP TABLE IF EXISTS {table1_name}") + @TestScenario def insert_with_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a distributed table if and only if @@ -494,8 +557,9 @@ def insert_with_privilege_granted_directly_or_via_role(self, node=None): with Given("I have a user"): user(name=user_name) - Scenario(test=insert_with_privilege, - name="insert with privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario(test=insert_with_privilege, name="insert with privilege granted directly")( + grant_target_name=user_name, user_name=user_name + ) with Given("I have a user"): user(name=user_name) @@ -506,8 +570,10 @@ def insert_with_privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name} ON CLUSTER one_shard_cluster") - Scenario(test=insert_with_privilege, - name="insert with privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=insert_with_privilege, name="insert with privilege granted through a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def insert_with_privilege(self, user_name, grant_target_name, node=None): @@ -528,14 +594,20 @@ def insert_with_privilege(self, user_name, grant_target_name, node=None): table(name=table0_name, cluster=cluster) with And("I have a distributed table"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())" + ) with When("I grant insert privilege on the distributed table"): node.query(f"GRANT INSERT ON {table1_name} TO {grant_target_name}") with Then("I attempt to insert into the distributed table as the user"): - 
node.query(f"INSERT INTO {table1_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table1_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I revoke the insert privilege on the distributed table"): node.query(f"REVOKE INSERT ON {table1_name} FROM {grant_target_name}") @@ -544,32 +616,47 @@ def insert_with_privilege(self, user_name, grant_target_name, node=None): node.query(f"GRANT INSERT ON {table0_name} to {grant_target_name}") with Then("I attempt to insert into the distributed table as the user"): - node.query(f"INSERT INTO {table1_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table1_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant insert privilege on the distributed table"): node.query(f"GRANT INSERT ON {table1_name} TO {grant_target_name}") with Then("I attempt to insert into the distributed table as the user"): - node.query(f"INSERT INTO {table1_name} VALUES (8888)", settings = [("user", f"{user_name}")]) + node.query( + f"INSERT INTO {table1_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + ) with When("I revoke ALL privileges"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to insert into the distributed table as the user"): - node.query(f"INSERT INTO {table1_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table1_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant ALL privilege"): node.query(f"GRANT ALL ON *.* To {grant_target_name}") with Then("I attempt to insert into the distributed table as the user"): - node.query(f"INSERT INTO {table1_name} VALUES (8888)", settings = [("user", f"{user_name}")]) + node.query( + f"INSERT INTO {table1_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the distributed table"): node.query(f"DROP TABLE IF EXISTS {table1_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_DistributedTable_SpecialTables("1.0"), @@ -590,8 +677,11 @@ def special_cases(self): for scenario in special_case_scenarios: Scenario(run=scenario, setup=instrument_clickhouse_server_log) + @TestScenario -def select_with_table_on_materialized_view_privilege_granted_directly_or_via_role(self, node=None): +def select_with_table_on_materialized_view_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to SELECT from a distributed table that uses a materialized view if and only if they have SELECT privilege on the distributed table and the materialized view it is built on. 
""" @@ -604,8 +694,10 @@ def select_with_table_on_materialized_view_privilege_granted_directly_or_via_rol with Given("I have a user"): user(name=user_name) - Scenario(test=select_with_table_on_source_table_of_materialized_view, - name="select with table on source table of materialized view, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_table_on_source_table_of_materialized_view, + name="select with table on source table of materialized view, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with Given("I have a user"): user(name=user_name) @@ -616,11 +708,16 @@ def select_with_table_on_materialized_view_privilege_granted_directly_or_via_rol with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name} ON CLUSTER one_shard_cluster") - Scenario(test=select_with_table_on_source_table_of_materialized_view, - name="select with table on source table of materialized view, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_table_on_source_table_of_materialized_view, + name="select with table on source table of materialized view, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def select_with_table_on_materialized_view(self, user_name, grant_target_name, node=None): +def select_with_table_on_materialized_view( + self, user_name, grant_target_name, node=None +): """Grant SELECT on the distributed table and the materialized view seperately, check that the user is unable to select from the distributed table, grant privilege on both and check the user is able to select. """ @@ -639,10 +736,14 @@ def select_with_table_on_materialized_view(self, user_name, grant_target_name, n table(name=table0_name, cluster=cluster) with And("I have a materialized view on a cluster"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ON CLUSTER {cluster} ENGINE = Memory() AS SELECT * FROM {table0_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ON CLUSTER {cluster} ENGINE = Memory() AS SELECT * FROM {table0_name}" + ) with And("I have a distributed table on the materialized view"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {view_name}, rand())") + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {view_name}, rand())" + ) with When("I grant the user NONE privilege"): node.query(f"GRANT NONE TO {grant_target_name}") @@ -651,15 +752,23 @@ def select_with_table_on_materialized_view(self, user_name, grant_target_name, n node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table1_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select privilege on the distributed table"): node.query(f"GRANT SELECT ON {table1_name} TO {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table1_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I revoke the 
select privilege on the distributed table"): node.query(f"REVOKE SELECT ON {table1_name} FROM {grant_target_name}") @@ -668,27 +777,39 @@ def select_with_table_on_materialized_view(self, user_name, grant_target_name, n node.query(f"GRANT SELECT ON {view_name} to {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table1_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select privilege on the distributed table"): node.query(f"GRANT SELECT ON {table1_name} TO {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table1_name}", settings=[("user", f"{user_name}")] + ) with When("I revoke ALL privileges"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table1_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant ALL privilege"): node.query(f"GRANT ALL ON *.* To {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table1_name}", settings=[("user", f"{user_name}")] + ) finally: with Finally("I drop the distributed table"): @@ -697,8 +818,11 @@ def select_with_table_on_materialized_view(self, user_name, grant_target_name, n with And("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def select_with_table_on_source_table_of_materialized_view_privilege_granted_directly_or_via_role(self, node=None): +def select_with_table_on_source_table_of_materialized_view_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to SELECT from a distributed table that uses the source table of a materialized view if and only if they have SELECT privilege on the distributed table and the table it is using. 
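# --- Illustrative sketch, not part of the diff: the object chain built in the materialized-view
# --- special case above (local table, then a materialized view over it, then a Distributed
# --- table over the view); SELECT is then required on both the Distributed table and the view.
# --- The names in the demo are placeholders.
def materialized_view_chain_ddl(cluster, database, source_table, view, distributed_table):
    return [
        f"CREATE MATERIALIZED VIEW {view} ON CLUSTER {cluster} "
        f"ENGINE = Memory() AS SELECT * FROM {source_table}",
        f"CREATE TABLE {distributed_table} (a UInt64) "
        f"ENGINE = Distributed({cluster}, {database}, {view}, rand())",
    ]

if __name__ == "__main__":
    for statement in materialized_view_chain_ddl(
        "sharded_cluster", "default", "table0_0", "view_0", "table1_0"
    ):
        print(statement)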
""" @@ -711,8 +835,10 @@ def select_with_table_on_source_table_of_materialized_view_privilege_granted_dir with Given("I have a user"): user(name=user_name) - Scenario(test=select_with_table_on_source_table_of_materialized_view, - name="select with table on source table of materialized view, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_table_on_source_table_of_materialized_view, + name="select with table on source table of materialized view, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with Given("I have a user"): user(name=user_name) @@ -723,11 +849,16 @@ def select_with_table_on_source_table_of_materialized_view_privilege_granted_dir with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name} ON CLUSTER one_shard_cluster") - Scenario(test=select_with_table_on_source_table_of_materialized_view, - name="select with table on source table of materialized view, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_table_on_source_table_of_materialized_view, + name="select with table on source table of materialized view, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def select_with_table_on_source_table_of_materialized_view(self, user_name, grant_target_name, node=None): +def select_with_table_on_source_table_of_materialized_view( + self, user_name, grant_target_name, node=None +): """Grant SELECT on the distributed table and the source table seperately, check that the user is unable to select from the distributed table, grant privilege on both and check the user is able to select. """ @@ -746,17 +877,27 @@ def select_with_table_on_source_table_of_materialized_view(self, user_name, gran table(name=table0_name, cluster=cluster) with And("I have a materialized view on a cluster"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ON CLUSTER {cluster} ENGINE = Memory() AS SELECT * FROM {table0_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ON CLUSTER {cluster} ENGINE = Memory() AS SELECT * FROM {table0_name}" + ) - with And("I have a distributed table using the source table of the materialized view"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())") + with And( + "I have a distributed table using the source table of the materialized view" + ): + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())" + ) with When("I grant select privilege on the distributed table"): node.query(f"GRANT SELECT ON {table1_name} TO {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table1_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I revoke select privilege on the distributed table"): node.query(f"REVOKE SELECT ON {table1_name} FROM {grant_target_name}") @@ -765,27 +906,39 @@ def select_with_table_on_source_table_of_materialized_view(self, user_name, gran node.query(f"GRANT SELECT ON {table0_name} to {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", 
f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table1_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select privilege on the distributed table"): node.query(f"GRANT SELECT ON {table1_name} TO {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table1_name}", settings=[("user", f"{user_name}")] + ) with When("I revoke ALL privileges"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table1_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant ALL privilege"): node.query(f"GRANT ALL ON *.* To {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table1_name}", settings=[("user", f"{user_name}")] + ) finally: with Finally("I drop the distributed table"): @@ -794,8 +947,11 @@ def select_with_table_on_source_table_of_materialized_view(self, user_name, gran with And("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def select_with_table_on_distributed_table_privilege_granted_directly_or_via_role(self, node=None): +def select_with_table_on_distributed_table_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to SELECT from a distributed table that uses another distributed table if and only if they have SELECT privilege on the distributed table, the distributed table it is using and the table that the second distributed table is using. 
""" @@ -808,8 +964,10 @@ def select_with_table_on_distributed_table_privilege_granted_directly_or_via_rol with Given("I have a user"): user(name=user_name) - Scenario(test=select_with_table_on_distributed_table, - name="select with table on distributed table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_table_on_distributed_table, + name="select with table on distributed table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with Given("I have a user"): user(name=user_name) @@ -820,11 +978,16 @@ def select_with_table_on_distributed_table_privilege_granted_directly_or_via_rol with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name} ON CLUSTER one_shard_cluster") - Scenario(test=select_with_table_on_distributed_table, - name="select with table on distributed table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_table_on_distributed_table, + name="select with table on distributed table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestScenario -def select_with_table_on_distributed_table(self, user_name, grant_target_name, node=None): +def select_with_table_on_distributed_table( + self, user_name, grant_target_name, node=None +): """Grant SELECT privilege seperately on the distributed table, the distributed table it is using and the table that the second distributed table is using, check that user is unable to select from the distributed table, grant privilege on all three and check the user is able to select. """ @@ -843,40 +1006,75 @@ def select_with_table_on_distributed_table(self, user_name, grant_target_name, n table(name=table0_name, cluster=cluster) with And("I have a distributed table on a cluster"): - node.query(f"CREATE TABLE {table1_name} ON CLUSTER {cluster} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table1_name} ON CLUSTER {cluster} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())" + ) with And("I have a distributed table on that distributed table"): - node.query(f"CREATE TABLE {table2_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table1_name}, rand())") + node.query( + f"CREATE TABLE {table2_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table1_name}, rand())" + ) for permutation in permutations(table_count=3): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name) as tables_granted: + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + ) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): - with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table2_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + with Then( + "I attempt to select from the distributed table as the user" + ): + node.query( + f"SELECT * FROM {table2_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=3))+1, grant_target_name, table0_name, table1_name, table2_name): + with 
grant_select_on_table( + node, + max(permutations(table_count=3)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + ): with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table2_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table2_name}", + settings=[("user", f"{user_name}")], + ) with When("I revoke ALL privileges"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table2_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table2_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant ALL privilege"): node.query(f"GRANT ALL ON *.* To {grant_target_name}") with Then("I attempt to select from the distributed table as the user"): - node.query(f"SELECT * FROM {table2_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table2_name}", settings=[("user", f"{user_name}")] + ) finally: with Finally("I drop the first distributed table"): @@ -885,8 +1083,11 @@ def select_with_table_on_distributed_table(self, user_name, grant_target_name, n with And("I drop the other distributed table"): node.query(f"DROP TABLE IF EXISTS {table2_name}") + @TestScenario -def insert_with_table_on_materialized_view_privilege_granted_directly_or_via_role(self, node=None): +def insert_with_table_on_materialized_view_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to INSERT into a distributed table that uses a materialized view if and only if they have INSERT privilege on the distributed table and the materialized view it is built on. """ @@ -899,8 +1100,10 @@ def insert_with_table_on_materialized_view_privilege_granted_directly_or_via_rol with Given("I have a user"): user(name=user_name) - Scenario(test=insert_with_table_on_materialized_view, - name="insert with table on materialized view, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=insert_with_table_on_materialized_view, + name="insert with table on materialized view, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with Given("I have a user"): user(name=user_name) @@ -911,11 +1114,16 @@ def insert_with_table_on_materialized_view_privilege_granted_directly_or_via_rol with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name} ON CLUSTER one_shard_cluster") - Scenario(test=insert_with_table_on_materialized_view, - name="insert with table on materialized view, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=insert_with_table_on_materialized_view, + name="insert with table on materialized view, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def insert_with_table_on_materialized_view(self, user_name, grant_target_name, node=None): +def insert_with_table_on_materialized_view( + self, user_name, grant_target_name, node=None +): """Grant INSERT on the distributed table and the materialized view seperately, check that the user is unable to insert into the distributed table, grant privilege on both and check the user is able to insert. 
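# --- Illustrative sketch, not part of the diff, under a stated assumption: the helper
# --- permutations(table_count=n) is read here as yielding the integers 0 .. 2**n - 2, treated
# --- as bitmasks of which of the n tables receive SELECT, so that max(permutations(...)) + 1
# --- is the all-tables mask used for "grant select on all tables". The helper itself is not
# --- shown in this diff, so this is only a plausible reading.
def granted_subset(mask, tables):
    return [table for i, table in enumerate(tables) if mask & (1 << i)]

if __name__ == "__main__":
    tables = ["table0", "table1", "table2"]
    for mask in range(2 ** len(tables) - 1):  # partial grants: SELECT is expected to be rejected
        print(mask, granted_subset(mask, tables))
    print("all:", granted_subset(2 ** len(tables) - 1, tables))  # full grant: expected to pass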
""" @@ -938,17 +1146,25 @@ def insert_with_table_on_materialized_view(self, user_name, grant_target_name, n table(name=table1_name, cluster=cluster) with And("I have a materialized view on a cluster"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ON CLUSTER {cluster} TO {table0_name} AS SELECT * FROM {table1_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ON CLUSTER {cluster} TO {table0_name} AS SELECT * FROM {table1_name}" + ) with And("I have a distributed table on the materialized view"): - node.query(f"CREATE TABLE {table2_name} (a UInt64) ENGINE = Distributed({cluster}, default, {view_name}, rand())") + node.query( + f"CREATE TABLE {table2_name} (a UInt64) ENGINE = Distributed({cluster}, default, {view_name}, rand())" + ) with When("I grant insert privilege on the distributed table"): node.query(f"GRANT INSERT ON {table2_name} TO {grant_target_name}") with Then("I attempt to insert into the distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I revoke the insert privilege on the distributed table"): node.query(f"REVOKE INSERT ON {table2_name} FROM {grant_target_name}") @@ -957,27 +1173,41 @@ def insert_with_table_on_materialized_view(self, user_name, grant_target_name, n node.query(f"GRANT INSERT ON {view_name} to {grant_target_name}") with Then("I attempt insert into the distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant insert privilege on the distributed table"): node.query(f"GRANT INSERT ON {table2_name} TO {grant_target_name}") with Then("I attempt to insert into the distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")]) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + ) with When("I revoke ALL privileges"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt insert into the distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant ALL privilege"): node.query(f"GRANT ALL ON *.* To {grant_target_name}") with Then("I attempt to insert into the distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")]) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the distributed table"): @@ -986,8 +1216,11 @@ def insert_with_table_on_materialized_view(self, user_name, grant_target_name, n with And("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def insert_with_table_on_source_table_of_materialized_view_privilege_granted_directly_or_via_role(self, node=None): +def 
insert_with_table_on_source_table_of_materialized_view_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to INSERT into a distributed table that uses the source table of a materialized view if and only if they have INSERT privilege on the distributed table and the table it is using. """ @@ -1000,8 +1233,10 @@ def insert_with_table_on_source_table_of_materialized_view_privilege_granted_dir with Given("I have a user"): user(name=user_name) - Scenario(test=insert_with_table_on_source_table_of_materialized_view, - name="insert with table on source table of materialized view, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=insert_with_table_on_source_table_of_materialized_view, + name="insert with table on source table of materialized view, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with Given("I have a user"): user(name=user_name) @@ -1012,11 +1247,16 @@ def insert_with_table_on_source_table_of_materialized_view_privilege_granted_dir with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name} ON CLUSTER one_shard_cluster") - Scenario(test=insert_with_table_on_source_table_of_materialized_view, - name="insert with table on source table of materialized view, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=insert_with_table_on_source_table_of_materialized_view, + name="insert with table on source table of materialized view, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def insert_with_table_on_source_table_of_materialized_view(self, user_name, grant_target_name, node=None): +def insert_with_table_on_source_table_of_materialized_view( + self, user_name, grant_target_name, node=None +): """Grant INSERT on the distributed table and the source table seperately, check that the user is unable to insert into the distributed table, grant privilege on both and check the user is able to insert. 
""" @@ -1035,17 +1275,25 @@ def insert_with_table_on_source_table_of_materialized_view(self, user_name, gran table(name=table0_name, cluster=cluster) with And("I have a materialized view on a cluster"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ON CLUSTER {cluster} ENGINE = Memory() AS SELECT * FROM {table0_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ON CLUSTER {cluster} ENGINE = Memory() AS SELECT * FROM {table0_name}" + ) with And("I have a distributed table on the materialized view"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())" + ) with When("I grant insert privilege on the distributed table"): node.query(f"GRANT INSERT ON {table1_name} TO {grant_target_name}") with Then("I attempt to insert into the distributed table as the user"): - node.query(f"INSERT INTO {table1_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table1_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I revoke insert privilege on the distributed table"): node.query(f"REVOKE INSERT ON {table1_name} FROM {grant_target_name}") @@ -1054,27 +1302,41 @@ def insert_with_table_on_source_table_of_materialized_view(self, user_name, gran node.query(f"GRANT INSERT ON {table0_name} to {grant_target_name}") with Then("I attempt insert into the distributed table as the user"): - node.query(f"INSERT INTO {table1_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table1_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant insert privilege on the distributed table"): node.query(f"GRANT INSERT ON {table1_name} TO {grant_target_name}") with Then("I attempt to insert into the distributed table as the user"): - node.query(f"INSERT INTO {table1_name} VALUES (8888)", settings = [("user", f"{user_name}")]) + node.query( + f"INSERT INTO {table1_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + ) with When("I revoke ALL privileges"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt insert into the distributed table as the user"): - node.query(f"INSERT INTO {table1_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table1_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant ALL privilege"): node.query(f"GRANT ALL ON *.* To {grant_target_name}") with Then("I attempt to insert into the distributed table as the user"): - node.query(f"INSERT INTO {table1_name} VALUES (8888)", settings = [("user", f"{user_name}")]) + node.query( + f"INSERT INTO {table1_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the distributed table"): @@ -1083,8 +1345,11 @@ def insert_with_table_on_source_table_of_materialized_view(self, user_name, gran with And("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def insert_with_table_on_distributed_table_privilege_granted_directly_or_via_role(self, node=None): +def insert_with_table_on_distributed_table_privilege_granted_directly_or_via_role( + 
self, node=None +): """Check that user is able to INSERT into a distributed table that uses another distributed table if and only if they have INSERT privilege on the distributed table, the distributed table it is using and the table that the second distributed table is using. """ @@ -1097,8 +1362,10 @@ def insert_with_table_on_distributed_table_privilege_granted_directly_or_via_rol with Given("I have a user"): user(name=user_name) - Scenario(test=insert_with_table_on_distributed_table, - name="insert with table on distributed table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=insert_with_table_on_distributed_table, + name="insert with table on distributed table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with Given("I have a user"): user(name=user_name) @@ -1109,11 +1376,16 @@ def insert_with_table_on_distributed_table_privilege_granted_directly_or_via_rol with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name} ON CLUSTER one_shard_cluster") - Scenario(test=insert_with_table_on_distributed_table, - name="insert with table on distributed table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=insert_with_table_on_distributed_table, + name="insert with table on distributed table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def insert_with_table_on_distributed_table(self, user_name, grant_target_name, node=None): +def insert_with_table_on_distributed_table( + self, user_name, grant_target_name, node=None +): """Grant INSERT privilege separately on the distributed table, the distributed table it is using and the table that the second distributed table is using, check that user is unable to insert into the distributed table, grant privilege on all three and check the user is able to insert.
""" @@ -1132,17 +1404,25 @@ def insert_with_table_on_distributed_table(self, user_name, grant_target_name, n table(name=table0_name, cluster=cluster) with And("I have a distributed table on a cluster"): - node.query(f"CREATE TABLE {table1_name} ON CLUSTER {cluster} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table1_name} ON CLUSTER {cluster} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())" + ) with And("I have a distributed table on that distributed table"): - node.query(f"CREATE TABLE {table2_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table1_name}, rand())") + node.query( + f"CREATE TABLE {table2_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table1_name}, rand())" + ) with When("I grant insert privilege on the outer distributed table"): node.query(f"GRANT INSERT ON {table2_name} TO {grant_target_name}") with Then("I attempt to insert into the outer distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I revoke the insert privilege on the outer distributed table"): node.query(f"REVOKE INSERT ON {table2_name} FROM {grant_target_name}") @@ -1151,8 +1431,12 @@ def insert_with_table_on_distributed_table(self, user_name, grant_target_name, n node.query(f"GRANT INSERT ON {table1_name} to {grant_target_name}") with Then("I attempt insert into the outer distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I revoke the insert privilege on the inner distributed table"): node.query(f"REVOKE INSERT ON {table1_name} FROM {grant_target_name}") @@ -1161,34 +1445,52 @@ def insert_with_table_on_distributed_table(self, user_name, grant_target_name, n node.query(f"GRANT INSERT ON {table0_name} to {grant_target_name}") with Then("I attempt insert into the outer distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant insert privilege on the inner distributed table"): node.query(f"GRANT INSERT ON {table1_name} to {grant_target_name}") with Then("I attempt insert into the outer distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant insert privilege on the outer distributed table"): node.query(f"GRANT INSERT ON {table2_name} to {grant_target_name}") with Then("I attempt insert into the outer distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")]) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + ) with When("I 
revoke ALL privileges"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt insert into the outer distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant ALL privilege"): node.query(f"GRANT ALL ON *.* To {grant_target_name}") with Then("I attempt insert into the outer distributed table as the user"): - node.query(f"INSERT INTO {table2_name} VALUES (8888)", settings = [("user", f"{user_name}")]) + node.query( + f"INSERT INTO {table2_name} VALUES (8888)", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the outer distributed table"): @@ -1197,16 +1499,28 @@ def insert_with_table_on_distributed_table(self, user_name, grant_target_name, n with And("I drop the inner distributed table"): node.query(f"DROP TABLE IF EXISTS {table2_name}") + @TestOutline(Scenario) -@Examples("cluster", [ - ("sharded_cluster12", Description("two node cluster with two shards where one shard is" - " on clickhouse1 and another on clickhouse2 accessed from clickhouse1")), - ("one_shard_cluster12", Description("two node cluster with only one shard and two replicas" - " where one replica is on clickhouse1 and another on clickhouse2 accessed from clickhouse1")), -]) -@Requirements( - RQ_SRS_006_RBAC_DistributedTable_LocalUser("1.0") +@Examples( + "cluster", + [ + ( + "sharded_cluster12", + Description( + "two node cluster with two shards where one shard is" + " on clickhouse1 and another on clickhouse2 accessed from clickhouse1" + ), + ), + ( + "one_shard_cluster12", + Description( + "two node cluster with only one shard and two replicas" + " where one replica is on clickhouse1 and another on clickhouse2 accessed from clickhouse1" + ), + ), + ], ) +@Requirements(RQ_SRS_006_RBAC_DistributedTable_LocalUser("1.0")) def local_user(self, cluster, node=None): """Check that a user that exists locally and not present on the remote nodes is able to execute queries they have privileges to. 
@@ -1227,7 +1541,9 @@ def local_user(self, cluster, node=None): table(name=table0_name, cluster=cluster) with And("I have a distributed table"): - node.query(f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table1_name} (a UInt64) ENGINE = Distributed({cluster}, default, {table0_name}, rand())" + ) with When("I grant select privilege on the distributed table"): node.query(f"GRANT SELECT ON {table1_name} TO {user_name}") @@ -1236,7 +1552,9 @@ def local_user(self, cluster, node=None): node.query(f"GRANT SELECT ON {table0_name} TO {user_name}") with Then("I select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table1_name}", settings=[("user", f"{user_name}")] + ) with When("I revoke ALL privileges"): node.query(f"REVOKE ALL ON *.* FROM {user_name}") @@ -1245,7 +1563,9 @@ def local_user(self, cluster, node=None): node.query(f"GRANT ALL ON *.* To {user_name}") with Then("I select from the distributed table as the user"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table1_name}", settings=[("user", f"{user_name}")] + ) finally: with Finally("I drop the user"): @@ -1254,6 +1574,7 @@ def local_user(self, cluster, node=None): with And("I drop the distributed table"): node.query(f"DROP TABLE IF EXISTS {table1_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_DistributedTable_SameUserDifferentNodesDifferentPrivileges("1.0") @@ -1282,7 +1603,9 @@ def multiple_node_user(self, node=None): table(name=table0_name, cluster="sharded_cluster12") with And("I have a distributed table"): - node.query(f"CREATE TABLE {table1_name} ON CLUSTER sharded_cluster12 (a UInt64) ENGINE = Distributed(sharded_cluster12, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table1_name} ON CLUSTER sharded_cluster12 (a UInt64) ENGINE = Distributed(sharded_cluster12, default, {table0_name}, rand())" + ) with When("I grant select privilege on the distributed table on one node"): node.query(f"GRANT SELECT ON {table1_name} TO {user_name}") @@ -1290,12 +1613,22 @@ def multiple_node_user(self, node=None): with And("I grant select privilege on the other table on one node"): node.query(f"GRANT SELECT ON {table0_name} TO {user_name}") - with Then("I select from the distributed table on the node where the user has privileges"): - node.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")]) + with Then( + "I select from the distributed table on the node where the user has privileges" + ): + node.query( + f"SELECT * FROM {table1_name}", settings=[("user", f"{user_name}")] + ) - with And("I select from the distributed table on the node the user doesn't have privileges"): - node2.query(f"SELECT * FROM {table1_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + with And( + "I select from the distributed table on the node the user doesn't have privileges" + ): + node2.query( + f"SELECT * FROM {table1_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the user"): @@ -1304,19 +1637,40 @@ def multiple_node_user(self, node=None): with And("I drop the distributed table"): node.query(f"DROP TABLE IF EXISTS {table1_name}") + @TestOutline(Feature) -@Examples("cluster", [ - ("cluster1", Description("one node cluster with clickhouse1 
accessed from clickhouse1")), - ("sharded_cluster23", Description("two node cluster with two shards where one shard is" - " on clickhouse2 and another on clickhouse3 accessed from clickhouse1")), - ("sharded_cluster12", Description("two node cluster with two shards where one shard is" - " on clickhouse1 and another on clickhouse2 accessed from clickhouse1")), - ("one_shard_cluster12", Description("two node cluster with only one shard and two replicas" - " where one replica is on clickhouse1 and another on clickhouse2 accessed from clickhouse1")), -]) +@Examples( + "cluster", + [ + ( + "cluster1", + Description("one node cluster with clickhouse1 accessed from clickhouse1"), + ), + ( + "sharded_cluster23", + Description( + "two node cluster with two shards where one shard is" + " on clickhouse2 and another on clickhouse3 accessed from clickhouse1" + ), + ), + ( + "sharded_cluster12", + Description( + "two node cluster with two shards where one shard is" + " on clickhouse1 and another on clickhouse2 accessed from clickhouse1" + ), + ), + ( + "one_shard_cluster12", + Description( + "two node cluster with only one shard and two replicas" + " where one replica is on clickhouse1 and another on clickhouse2 accessed from clickhouse1" + ), + ), + ], +) def cluster_tests(self, cluster, node=None): - """Scenarios to be run on different cluster configurations. - """ + """Scenarios to be run on different cluster configurations.""" self.context.cluster_name = cluster with Pool(3) as pool: @@ -1326,15 +1680,14 @@ def cluster_tests(self, cluster, node=None): finally: join() + @TestFeature @Requirements( - RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_All("1.0"), RQ_SRS_006_RBAC_Privileges_None("1.0") ) @Name("distributed table") def feature(self, node="clickhouse1"): - """Check the RBAC functionality of queries executed using distributed tables. - """ + """Check the RBAC functionality of queries executed using distributed tables.""" self.context.node = self.context.cluster.node(node) self.context.node2 = self.context.cluster.node("clickhouse2") self.context.node3 = self.context.cluster.node("clickhouse3") diff --git a/tests/testflows/rbac/tests/privileges/drop/drop_database.py b/tests/testflows/rbac/tests/privileges/drop/drop_database.py index 274003e763f..3001285ef37 100644 --- a/tests/testflows/rbac/tests/privileges/drop/drop_database.py +++ b/tests/testflows/rbac/tests/privileges/drop/drop_database.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute DROP DATABASE when they have required privilege, either directly or via role. 
- """ + """Check that user is only able to execute DROP DATABASE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute DROP DATABASE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute DROP DATABASE with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute DROP DATABASE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute DROP DATABASE with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. - """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -46,8 +54,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to drop the database"): - node.query(f"DROP DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): node.query(f"DROP DATABASE IF EXISTS {db_name}") @@ -63,7 +75,7 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT DROP DATABASE ON {db_name}.* TO {grant_target_name}") with Then("I attempt to drop a database"): - node.query(f"DROP DATABASE {db_name}", settings = [("user", user_name)]) + node.query(f"DROP DATABASE {db_name}", settings=[("user", user_name)]) finally: with Finally("I drop the database"): @@ -80,11 +92,17 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT DROP DATABASE ON {db_name}.* TO {grant_target_name}") with And("I revoke the drop database privilege"): - node.query(f"REVOKE DROP DATABASE ON {db_name}.* FROM {grant_target_name}") + node.query( + f"REVOKE DROP DATABASE ON {db_name}.* FROM {grant_target_name}" + ) with Then("I attempt to drop a database"): - node.query(f"DROP DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): @@ -104,8 +122,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to drop a database"): - node.query(f"DROP 
DATABASE {db_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): @@ -122,22 +144,22 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to drop a database"): - node.query(f"DROP DATABASE {db_name}", settings = [("user", user_name)]) + node.query(f"DROP DATABASE {db_name}", settings=[("user", user_name)]) finally: with Finally("I drop the database"): node.query(f"DROP DATABASE IF EXISTS {db_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_DropDatabase("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("drop database") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of DROP DATABASE. - """ + """Check the RBAC functionality of DROP DATABASE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -145,5 +167,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/drop/drop_dictionary.py b/tests/testflows/rbac/tests/privileges/drop/drop_dictionary.py index c3f07885bd5..7d5958945b1 100644 --- a/tests/testflows/rbac/tests/privileges/drop/drop_dictionary.py +++ b/tests/testflows/rbac/tests/privileges/drop/drop_dictionary.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute DROP DICTIONARY when they have required privilege, either directly or via role. 
- """ + """Check that user is only able to execute DROP DICTIONARY when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -14,19 +14,27 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute DROP DICTIONARY with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute DROP DICTIONARY with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute DROP DICTIONARY with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute DROP DICTIONARY with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. - """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -34,7 +42,9 @@ def privilege_check(grant_target_name, user_name, node=None): try: with Given("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with When("I grant the user NONE privilege"): node.query(f"GRANT NONE TO {grant_target_name}") @@ -43,7 +53,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to drop a dictionary without privilege"): - node.query(f"DROP DICTIONARY {dict_name}", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"DROP DICTIONARY {dict_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the dictionary"): @@ -54,13 +69,19 @@ def privilege_check(grant_target_name, user_name, node=None): try: with Given("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with When("I grant drop dictionary privilege"): - node.query(f"GRANT DROP DICTIONARY ON {dict_name} TO {grant_target_name}") + node.query( + f"GRANT DROP DICTIONARY ON {dict_name} TO {grant_target_name}" + ) with Then("I attempt to drop aa dictionary"): - node.query(f"DROP DICTIONARY {dict_name}", settings = [("user", user_name)]) + node.query( + f"DROP DICTIONARY {dict_name}", settings=[("user", user_name)] + ) finally: with Finally("I drop the dictionary"): @@ -71,16 +92,27 @@ def privilege_check(grant_target_name, 
user_name, node=None): try: with Given("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with When("I grant the drop dictionary privilege"): - node.query(f"GRANT DROP DICTIONARY ON {dict_name} TO {grant_target_name}") + node.query( + f"GRANT DROP DICTIONARY ON {dict_name} TO {grant_target_name}" + ) with And("I revoke the drop dictionary privilege"): - node.query(f"REVOKE DROP DICTIONARY ON {dict_name} FROM {grant_target_name}") + node.query( + f"REVOKE DROP DICTIONARY ON {dict_name} FROM {grant_target_name}" + ) with Then("I attempt to drop a dictionary"): - node.query(f"DROP DICTIONARY {dict_name}", settings = [("user", user_name)], exitcode=exitcode, message=message) + node.query( + f"DROP DICTIONARY {dict_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the dictionary"): @@ -91,27 +123,32 @@ def privilege_check(grant_target_name, user_name, node=None): try: with Given("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with When("I grant ALL privilege"): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I drop the dictionary"): - node.query(f"DROP DICTIONARY {dict_name}", settings = [("user", user_name)]) + node.query( + f"DROP DICTIONARY {dict_name}", settings=[("user", user_name)] + ) finally: with Finally("I drop the dictionary"): node.query(f"DROP DICTIONARY IF EXISTS {dict_name}") + + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_DropDictionary("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("drop dictionary") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of DROP DICTIONARY. - """ + """Check the RBAC functionality of DROP DICTIONARY.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -119,5 +156,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/drop/drop_quota.py b/tests/testflows/rbac/tests/privileges/drop/drop_quota.py index b8727556a26..f2202d3fb67 100644 --- a/tests/testflows/rbac/tests/privileges/drop/drop_quota.py +++ b/tests/testflows/rbac/tests/privileges/drop/drop_quota.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `DROP QUOTA` with privileges are granted directly. 
- """ + """Check that a user is able to execute `DROP QUOTA` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=drop_quota, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in drop_quota.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=drop_quota, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in drop_quota.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `DROP QUOTA` with privileges are granted through a role. - """ + """Check that a user is able to execute `DROP QUOTA` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,20 +45,30 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=drop_quota, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in drop_quota.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=drop_quota, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in drop_quota.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("DROP QUOTA",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("DROP QUOTA",), + ], +) def drop_quota(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `DROP QUOTA` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `DROP QUOTA` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -71,8 +88,12 @@ def drop_quota(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't drop a quota"): - node.query(f"DROP QUOTA {drop_row_policy_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP QUOTA {drop_row_policy_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the quota"): @@ -89,7 +110,10 @@ def drop_quota(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can drop a quota"): - node.query(f"DROP QUOTA {drop_row_policy_name}", settings = [("user", f"{user_name}")]) + node.query( + f"DROP QUOTA {drop_row_policy_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the quota"): @@ -100,17 +124,24 @@ def drop_quota(self, privilege, grant_target_name, user_name, node=None): try: with Given("I have a quota on a cluster"): - node.query(f"CREATE QUOTA {drop_row_policy_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE QUOTA {drop_row_policy_name} ON CLUSTER sharded_cluster" + ) with When(f"I grant {privilege}"): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can drop a quota"): - node.query(f"DROP QUOTA {drop_row_policy_name} ON CLUSTER sharded_cluster", settings = [("user", f"{user_name}")]) + node.query( + f"DROP QUOTA {drop_row_policy_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP QUOTA IF EXISTS {drop_row_policy_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP QUOTA IF EXISTS {drop_row_policy_name} ON CLUSTER sharded_cluster" + ) with Scenario("DROP QUOTA with revoked privilege"): drop_row_policy_name = f"drop_row_policy_{getuid()}" @@ -126,22 +157,26 @@ def drop_quota(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot drop quota"): - node.query(f"DROP QUOTA {drop_row_policy_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP QUOTA {drop_row_policy_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: - with Finally("I drop the quota"): + with Finally("I drop the quota"): node.query(f"DROP QUOTA IF EXISTS {drop_row_policy_name}") + @TestFeature @Name("drop quota") @Requirements( RQ_SRS_006_RBAC_Privileges_DropQuota("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of DROP QUOTA. 
- """ + """Check the RBAC functionality of DROP QUOTA.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/drop/drop_role.py b/tests/testflows/rbac/tests/privileges/drop/drop_role.py index ca9eb1b0947..df63911620d 100644 --- a/tests/testflows/rbac/tests/privileges/drop/drop_role.py +++ b/tests/testflows/rbac/tests/privileges/drop/drop_role.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `DROP ROLE` with privileges are granted directly. - """ + """Check that a user is able to execute `DROP ROLE` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=drop_role, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in drop_role.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=drop_role, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in drop_role.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `DROP ROLE` with privileges are granted through a role. - """ + """Check that a user is able to execute `DROP ROLE` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,20 +45,30 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=drop_role, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in drop_role.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=drop_role, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in drop_role.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("DROP ROLE",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("DROP ROLE",), + ], +) def drop_role(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `DROP ROLE` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `DROP ROLE` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -69,8 +86,12 @@ def drop_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't drop a role"): - node.query(f"DROP ROLE {drop_role_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP ROLE {drop_role_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("DROP ROLE with privilege"): drop_role_name = f"drop_role_{getuid()}" @@ -81,7 +102,9 @@ def drop_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can drop a role"): - node.query(f"DROP ROLE {drop_role_name}", settings = [("user", f"{user_name}")]) + node.query( + f"DROP ROLE {drop_role_name}", settings=[("user", f"{user_name}")] + ) with Scenario("DROP ROLE on cluster"): drop_role_name = f"drop_role_{getuid()}" @@ -94,11 +117,16 @@ def drop_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can drop a role"): - node.query(f"DROP ROLE {drop_role_name} ON CLUSTER sharded_cluster", settings = [("user", f"{user_name}")]) + node.query( + f"DROP ROLE {drop_role_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP ROLE IF EXISTS {drop_role_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP ROLE IF EXISTS {drop_role_name} ON CLUSTER sharded_cluster" + ) with Scenario("DROP ROLE with revoked privilege"): drop_role_name = f"drop_role_{getuid()}" @@ -111,19 +139,23 @@ def drop_role(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user can't drop a role"): - node.query(f"DROP ROLE {drop_role_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP ROLE {drop_role_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("drop role") @Requirements( RQ_SRS_006_RBAC_Privileges_DropRole("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of DROP ROLE. - """ + """Check the RBAC functionality of DROP ROLE.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/drop/drop_row_policy.py b/tests/testflows/rbac/tests/privileges/drop/drop_row_policy.py index ad7fed94df0..59c65990aea 100644 --- a/tests/testflows/rbac/tests/privileges/drop/drop_row_policy.py +++ b/tests/testflows/rbac/tests/privileges/drop/drop_row_policy.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `DROP ROW POLICY` with privileges are granted directly. 
- """ + """Check that a user is able to execute `DROP ROW POLICY` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=drop_row_policy, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in drop_row_policy.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=drop_row_policy, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in drop_row_policy.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `DROP ROW POLICY` with privileges are granted through a role. - """ + """Check that a user is able to execute `DROP ROW POLICY` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,21 +45,31 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=drop_row_policy, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in drop_row_policy.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=drop_row_policy, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in drop_row_policy.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("DROP ROW POLICY",), - ("DROP POLICY",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("DROP ROW POLICY",), + ("DROP POLICY",), + ], +) def drop_row_policy(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `DROP ROW POLICY` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `DROP ROW POLICY` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -73,12 +90,18 @@ def drop_row_policy(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't drop a row policy"): - node.query(f"DROP ROW POLICY {drop_row_policy_name} ON {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP ROW POLICY {drop_row_policy_name} ON {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the row policy"): - node.query(f"DROP ROW POLICY IF EXISTS {drop_row_policy_name} ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {drop_row_policy_name} ON {table_name}" + ) with Scenario("DROP ROW POLICY with privilege"): drop_row_policy_name = f"drop_row_policy_{getuid()}" @@ -92,11 +115,16 @@ def drop_row_policy(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can drop a row policy"): - node.query(f"DROP ROW POLICY {drop_row_policy_name} ON {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"DROP ROW POLICY {drop_row_policy_name} ON {table_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the row policy"): - node.query(f"DROP ROW POLICY IF EXISTS {drop_row_policy_name} ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {drop_row_policy_name} ON {table_name}" + ) with Scenario("DROP ROW POLICY on cluster"): drop_row_policy_name = f"drop_row_policy_{getuid()}" @@ -104,17 +132,24 @@ def drop_row_policy(self, privilege, grant_target_name, user_name, node=None): try: with Given("I have a row policy on a cluster"): - node.query(f"CREATE ROW POLICY {drop_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"CREATE ROW POLICY {drop_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with When(f"I grant {privilege}"): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can drop a row policy"): - node.query(f"DROP ROW POLICY {drop_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"DROP ROW POLICY {drop_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP ROW POLICY IF EXISTS {drop_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {drop_row_policy_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with Scenario("DROP ROW POLICY with revoked privilege"): drop_row_policy_name = f"drop_row_policy_{getuid()}" @@ -131,19 +166,23 @@ def drop_row_policy(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot drop row policy"): - node.query(f"DROP ROW POLICY {drop_row_policy_name} ON {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP ROW POLICY {drop_row_policy_name} ON {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: - with Finally("I drop the row policy"): - node.query(f"DROP ROW POLICY IF 
EXISTS {drop_row_policy_name} ON {table_name}") + with Finally("I drop the row policy"): + node.query( + f"DROP ROW POLICY IF EXISTS {drop_row_policy_name} ON {table_name}" + ) + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_RowPolicy_Restriction("1.0") -) +@Requirements(RQ_SRS_006_RBAC_RowPolicy_Restriction("1.0")) def drop_all_pol_with_conditions(self, node=None): - """Check that when all policies with conditions are dropped, the table becomes unrestricted. - """ + """Check that when all policies with conditions are dropped, the table becomes unrestricted.""" if node is None: node = self.context.node @@ -157,29 +196,31 @@ def drop_all_pol_with_conditions(self, node=None): row_policy(name=pol_name, table=table_name) with And("The row policy has a condition"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING 1" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with And("I can't see any of the rows on the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '' == output, error() + assert "" == output, error() with When("I drop the row policy"): node.query(f"DROP ROW POLICY {pol_name} ON {table_name}") with Then("I select all the rows from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output and '2' in output, error() + assert "1" in output and "2" in output, error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), ) def drop_on(self, node=None): - """Check that when a row policy is dropped, users are able to access rows restricted by that policy. - """ + """Check that when a row policy is dropped, users are able to access rows restricted by that policy.""" if node is None: node = self.context.node @@ -193,29 +234,31 @@ def drop_on(self, node=None): row_policy(name=pol_name, table=table_name) with And("The row policy has a condition"): - node.query(f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default") + node.query( + f"ALTER ROW POLICY {pol_name} ON {table_name} FOR SELECT USING y=1 TO default" + ) with And("The table has some values"): node.query(f"INSERT INTO {table_name} (y) VALUES (1),(2)") with And("I can't see one of the rows on the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output and '2' not in output, error() + assert "1" in output and "2" not in output, error() with When("I drop the row policy"): node.query(f"DROP ROW POLICY {pol_name} ON {table_name}") with Then("I select all the rows from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output and '2' in output, error() + assert "1" in output and "2" in output, error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_RowPolicy_Drop_OnCluster("1.0"), ) def drop_on_cluster(self, node=None): - """Check that when a row policy is dropped on a cluster, it works on all nodes. 
- """ + """Check that when a row policy is dropped on a cluster, it works on all nodes.""" if node is None: node = self.context.node @@ -226,10 +269,14 @@ def drop_on_cluster(self, node=None): try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (x UInt64) ENGINE = Memory" + ) with And("I have a row policy"): - node.query(f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name} FOR SELECT USING 1") + node.query( + f"CREATE ROW POLICY {pol_name} ON CLUSTER sharded_cluster ON {table_name} FOR SELECT USING 1" + ) with And("There are some values on the table on the first node"): node.query(f"INSERT INTO {table_name} (x) VALUES (1)") @@ -238,33 +285,37 @@ def drop_on_cluster(self, node=None): node2.query(f"INSERT INTO {table_name} (x) VALUES (1)") with When("I drop the row policy on cluster"): - node.query(f"DROP ROW POLICY {pol_name} ON {table_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP ROW POLICY {pol_name} ON {table_name} ON CLUSTER sharded_cluster" + ) with Then("I select from the table"): output = node.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() with And("I select from another node on the cluster"): output = node2.query(f"SELECT * FROM {table_name}").output - assert '1' in output, error() + assert "1" in output, error() finally: with Finally("I drop the row policy", flags=TE): - node.query(f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}") + node.query( + f"DROP ROW POLICY IF EXISTS {pol_name} ON CLUSTER sharded_cluster ON {table_name}" + ) with And("I drop the table", flags=TE): node.query(f"DROP TABLE {table_name} ON CLUSTER sharded_cluster") + @TestFeature @Name("drop row policy") @Requirements( RQ_SRS_006_RBAC_Privileges_DropRowPolicy("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of DROP ROW POLICY. - """ + """Check the RBAC functionality of DROP ROW POLICY.""" self.context.node = self.context.cluster.node(node) self.context.node2 = self.context.cluster.node("clickhouse2") diff --git a/tests/testflows/rbac/tests/privileges/drop/drop_settings_profile.py b/tests/testflows/rbac/tests/privileges/drop/drop_settings_profile.py index 3aa9ef2c369..e3876984801 100644 --- a/tests/testflows/rbac/tests/privileges/drop/drop_settings_profile.py +++ b/tests/testflows/rbac/tests/privileges/drop/drop_settings_profile.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `DROP SETTINGS PROFILE` with privileges are granted directly. 
- """ + """Check that a user is able to execute `DROP SETTINGS PROFILE` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=drop_settings_profile, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in drop_settings_profile.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=drop_settings_profile, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in drop_settings_profile.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `DROP SETTINGS PROFILE` with privileges are granted through a role. - """ + """Check that a user is able to execute `DROP SETTINGS PROFILE` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,21 +45,31 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=drop_settings_profile, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in drop_settings_profile.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=drop_settings_profile, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in drop_settings_profile.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("DROP SETTINGS PROFILE",), - ("DROP PROFILE",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("DROP SETTINGS PROFILE",), + ("DROP PROFILE",), + ], +) def drop_settings_profile(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `DROP SETTINGS PROFILE` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `DROP SETTINGS PROFILE` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -72,8 +89,12 @@ def drop_settings_profile(self, privilege, grant_target_name, user_name, node=No node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't drop a settings_profile"): - node.query(f"DROP SETTINGS PROFILE {drop_row_policy_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP SETTINGS PROFILE {drop_row_policy_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the settings_profile"): @@ -90,7 +111,10 @@ def drop_settings_profile(self, privilege, grant_target_name, user_name, node=No node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can drop a settings_profile"): - node.query(f"DROP SETTINGS PROFILE {drop_row_policy_name}", settings = [("user", f"{user_name}")]) + node.query( + f"DROP SETTINGS PROFILE {drop_row_policy_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the settings_profile"): @@ -101,17 +125,24 @@ def drop_settings_profile(self, privilege, grant_target_name, user_name, node=No try: with Given("I have a settings_profile on a cluster"): - node.query(f"CREATE SETTINGS PROFILE {drop_row_policy_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE SETTINGS PROFILE {drop_row_policy_name} ON CLUSTER sharded_cluster" + ) with When(f"I grant {privilege}"): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can drop a settings_profile"): - node.query(f"DROP SETTINGS PROFILE {drop_row_policy_name} ON CLUSTER sharded_cluster", settings = [("user", f"{user_name}")]) + node.query( + f"DROP SETTINGS PROFILE {drop_row_policy_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP SETTINGS PROFILE IF EXISTS {drop_row_policy_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP SETTINGS PROFILE IF EXISTS {drop_row_policy_name} ON CLUSTER sharded_cluster" + ) with Scenario("DROP SETTINGS PROFILE with revoked privilege"): drop_row_policy_name = f"drop_row_policy_{getuid()}" @@ -127,22 +158,26 @@ def drop_settings_profile(self, privilege, grant_target_name, user_name, node=No node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot drop settings_profile"): - node.query(f"DROP SETTINGS PROFILE {drop_row_policy_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP SETTINGS PROFILE {drop_row_policy_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: - with Finally("I drop the settings_profile"): + with Finally("I drop the settings_profile"): node.query(f"DROP SETTINGS PROFILE IF EXISTS {drop_row_policy_name}") + @TestFeature @Name("drop settings profile") @Requirements( RQ_SRS_006_RBAC_Privileges_DropSettingsProfile("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of DROP SETTINGS PROFILE. 
- """ + """Check the RBAC functionality of DROP SETTINGS PROFILE.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/drop/drop_table.py b/tests/testflows/rbac/tests/privileges/drop/drop_table.py index 1fd394daf96..daafa250462 100644 --- a/tests/testflows/rbac/tests/privileges/drop/drop_table.py +++ b/tests/testflows/rbac/tests/privileges/drop/drop_table.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute DROP TABLE when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute DROP TABLE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute DROP TABLE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute DROP TABLE with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute DROP TABLE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute DROP TABLE with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -46,8 +54,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to drop a table without privilege"): - node.query(f"DROP TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the table"): @@ -64,7 +76,7 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT DROP TABLE ON *.* TO {grant_target_name}") with Then("I attempt to drop a table"): - node.query(f"DROP TABLE {table_name}", settings = [("user", user_name)]) + node.query(f"DROP TABLE {table_name}", settings=[("user", user_name)]) finally: with Finally("I drop the table"): @@ -83,8 +95,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"REVOKE DROP TABLE ON *.* FROM {grant_target_name}") with Then("I attempt to drop a table"): - node.query(f"DROP TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the table"): @@ -103,8 +119,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to drop a table"): - node.query(f"DROP TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the table"): @@ -121,22 +141,22 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I drop the table"): - node.query(f"DROP TABLE {table_name}", settings = [("user", user_name)]) + node.query(f"DROP TABLE {table_name}", settings=[("user", user_name)]) finally: with Finally("I drop the table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_DropTable("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("drop table") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of DROP TABLE. 
- """ + """Check the RBAC functionality of DROP TABLE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -144,5 +164,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/drop/drop_user.py b/tests/testflows/rbac/tests/privileges/drop/drop_user.py index c3f1df8ae15..8c8d77d46f0 100644 --- a/tests/testflows/rbac/tests/privileges/drop/drop_user.py +++ b/tests/testflows/rbac/tests/privileges/drop/drop_user.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def drop_user_granted_directly(self, node=None): - """Check that a user is able to execute `DROP USER` with privileges are granted directly. - """ + """Check that a user is able to execute `DROP USER` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def drop_user_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=drop_user, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in drop_user.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=drop_user, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in drop_user.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def drop_user_granted_via_role(self, node=None): - """Check that a user is able to execute `DROP USER` with privileges are granted through a role. - """ + """Check that a user is able to execute `DROP USER` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,20 +45,30 @@ def drop_user_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=drop_user, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in drop_user.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=drop_user, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in drop_user.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("DROP USER",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("DROP USER",), + ], +) def drop_user(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `DROP USER` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `DROP USER` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -70,8 +87,12 @@ def drop_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with When("I check the user can't drop a user"): - node.query(f"DROP USER {drop_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP USER {drop_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("DROP USER with privilege"): drop_user_name = f"drop_user_{getuid()}" @@ -81,7 +102,9 @@ def drop_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can drop a user"): - node.query(f"DROP USER {drop_user_name}", settings = [("user", f"{user_name}")]) + node.query( + f"DROP USER {drop_user_name}", settings=[("user", f"{user_name}")] + ) with Scenario("DROP USER on cluster"): drop_user_name = f"drop_user_{getuid()}" @@ -94,12 +117,16 @@ def drop_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can drop a user"): - node.query(f"DROP USER {drop_user_name} ON CLUSTER sharded_cluster", - settings = [("user", f"{user_name}")]) + node.query( + f"DROP USER {drop_user_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP USER IF EXISTS {drop_user_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP USER IF EXISTS {drop_user_name} ON CLUSTER sharded_cluster" + ) with Scenario("DROP USER with revoked privilege"): drop_user_name = f"drop_user_{getuid()}" @@ -112,19 +139,23 @@ def drop_user(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user can't drop a user"): - node.query(f"DROP USER {drop_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DROP USER {drop_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("drop user") @Requirements( RQ_SRS_006_RBAC_Privileges_DropUser("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of DROP USER. - """ + """Check the RBAC functionality of DROP USER.""" self.context.node = self.context.cluster.node(node) Suite(run=drop_user_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/feature.py b/tests/testflows/rbac/tests/privileges/feature.py index e68d71675ab..58d24d1f1f7 100755 --- a/tests/testflows/rbac/tests/privileges/feature.py +++ b/tests/testflows/rbac/tests/privileges/feature.py @@ -2,95 +2,401 @@ from testflows.core import * from rbac.helper.common import * + @TestFeature @Name("privileges") def feature(self): - """Check RBAC privileges. 
- """ + """Check RBAC privileges.""" with Pool(10) as pool: try: - Feature(run=load("rbac.tests.privileges.insert", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.select", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.public_tables", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.distributed_table", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.grant_option", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.truncate", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.optimize", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.kill_query", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.kill_mutation", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.role_admin", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.dictGet", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.introspection", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.sources", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.admin_option", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.all_role", "feature"), parallel=True, executor=pool) + Feature( + run=load("rbac.tests.privileges.insert", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.select", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.public_tables", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.distributed_table", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.grant_option", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.truncate", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.optimize", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.kill_query", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.kill_mutation", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.role_admin", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.dictGet", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.introspection", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.sources", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.admin_option", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.all_role", "feature"), + parallel=True, + executor=pool, + ) - Feature(run=load("rbac.tests.privileges.show.show_tables", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.show.show_dictionaries", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.show.show_databases", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.show.show_columns", "feature"), parallel=True, 
executor=pool) - Feature(run=load("rbac.tests.privileges.show.show_users", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.show.show_roles", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.show.show_quotas", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.show.show_settings_profiles", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.show.show_row_policies", "feature"), parallel=True, executor=pool) + Feature( + run=load("rbac.tests.privileges.show.show_tables", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.show.show_dictionaries", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.show.show_databases", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.show.show_columns", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.show.show_users", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.show.show_roles", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.show.show_quotas", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load( + "rbac.tests.privileges.show.show_settings_profiles", "feature" + ), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.show.show_row_policies", "feature"), + parallel=True, + executor=pool, + ) - Feature(run=load("rbac.tests.privileges.alter.alter_column", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_index", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_constraint", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_ttl", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_settings", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_update", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_delete", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_freeze", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_fetch", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_move", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_user", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_role", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_row_policy", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_quota", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.alter.alter_settings_profile", "feature"), parallel=True, executor=pool) + Feature( + run=load("rbac.tests.privileges.alter.alter_column", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_index", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_constraint", "feature"), + parallel=True, + executor=pool, + ) + Feature( + 
run=load("rbac.tests.privileges.alter.alter_ttl", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_settings", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_update", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_delete", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_freeze", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_fetch", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_move", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_user", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_role", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_row_policy", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.alter.alter_quota", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load( + "rbac.tests.privileges.alter.alter_settings_profile", "feature" + ), + parallel=True, + executor=pool, + ) - Feature(run=load("rbac.tests.privileges.create.create_database", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.create.create_dictionary", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.create.create_temp_table", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.create.create_table", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.create.create_user", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.create.create_role", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.create.create_row_policy", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.create.create_quota", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.create.create_settings_profile", "feature"), parallel=True, executor=pool) + Feature( + run=load("rbac.tests.privileges.create.create_database", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.create.create_dictionary", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.create.create_temp_table", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.create.create_table", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.create.create_user", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.create.create_role", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.create.create_row_policy", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.create.create_quota", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load( + "rbac.tests.privileges.create.create_settings_profile", "feature" + ), + parallel=True, + executor=pool, + ) - Feature(run=load("rbac.tests.privileges.attach.attach_database", "feature"), parallel=True, 
executor=pool) - Feature(run=load("rbac.tests.privileges.attach.attach_dictionary", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.attach.attach_temp_table", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.attach.attach_table", "feature"), parallel=True, executor=pool) + Feature( + run=load("rbac.tests.privileges.attach.attach_database", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.attach.attach_dictionary", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.attach.attach_temp_table", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.attach.attach_table", "feature"), + parallel=True, + executor=pool, + ) - Feature(run=load("rbac.tests.privileges.drop.drop_database", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.drop.drop_dictionary", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.drop.drop_table", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.drop.drop_user", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.drop.drop_role", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.drop.drop_row_policy", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.drop.drop_quota", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.drop.drop_settings_profile", "feature"), parallel=True, executor=pool) + Feature( + run=load("rbac.tests.privileges.drop.drop_database", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.drop.drop_dictionary", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.drop.drop_table", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.drop.drop_user", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.drop.drop_role", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.drop.drop_row_policy", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.drop.drop_quota", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.drop.drop_settings_profile", "feature"), + parallel=True, + executor=pool, + ) - Feature(run=load("rbac.tests.privileges.detach.detach_database", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.detach.detach_dictionary", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.detach.detach_table", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.detach.detach_view", "feature"), parallel=True, executor=pool) + Feature( + run=load("rbac.tests.privileges.detach.detach_database", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.detach.detach_dictionary", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.detach.detach_table", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.detach.detach_view", "feature"), + parallel=True, + executor=pool, + ) - 
Feature(run=load("rbac.tests.privileges.system.drop_cache", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.system.reload", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.system.flush", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.system.merges", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.system.moves", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.system.replication_queues", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.system.ttl_merges", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.system.restart_replica", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.system.sends", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.system.sync_replica", "feature"), parallel=True, executor=pool) - Feature(run=load("rbac.tests.privileges.system.fetches", "feature"), parallel=True, executor=pool) + Feature( + run=load("rbac.tests.privileges.system.drop_cache", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.system.reload", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.system.flush", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.system.merges", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.system.moves", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.system.replication_queues", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.system.ttl_merges", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.system.restart_replica", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.system.sends", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.system.sync_replica", "feature"), + parallel=True, + executor=pool, + ) + Feature( + run=load("rbac.tests.privileges.system.fetches", "feature"), + parallel=True, + executor=pool, + ) finally: join() diff --git a/tests/testflows/rbac/tests/privileges/grant_option.py b/tests/testflows/rbac/tests/privileges/grant_option.py index ea5ff0ba66a..795b336969a 100644 --- a/tests/testflows/rbac/tests/privileges/grant_option.py +++ b/tests/testflows/rbac/tests/privileges/grant_option.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def grant_option(self, table_type, privilege, node=None): """Check that user is able to execute GRANT and REVOKE privilege statements if and only if they have the privilege WITH GRANT OPTION, @@ -19,104 +20,213 @@ def grant_option(self, table_type, privilege, node=None): with Suite("user with direct privilege granting to user"): with user(node, f"{user0_name},{user1_name}"): - with When(f"I run checks that grant and revoke privilege from {user0_name} to {user1_name}"): - grant_option_check(grant_option_target=user0_name, grant_target=user1_name, user_name=user0_name, table_type=table_type, privilege=privilege, node=node) + with When( + f"I run checks that grant and revoke privilege from {user0_name} to {user1_name}" + ): + 
grant_option_check( + grant_option_target=user0_name, + grant_target=user1_name, + user_name=user0_name, + table_type=table_type, + privilege=privilege, + node=node, + ) with Suite("user with direct privilege granting to role"): with user(node, user0_name), role(node, role1_name): - with When(f"I run checks that grant and revoke privilege from {user0_name} to {role1_name}"): - grant_option_check(grant_option_target=user0_name, grant_target=role1_name, user_name=user0_name, table_type=table_type, privilege=privilege, node=node) + with When( + f"I run checks that grant and revoke privilege from {user0_name} to {role1_name}" + ): + grant_option_check( + grant_option_target=user0_name, + grant_target=role1_name, + user_name=user0_name, + table_type=table_type, + privilege=privilege, + node=node, + ) with Suite("user with privilege via role granting to user"): with user(node, f"{user0_name},{user1_name}"), role(node, role0_name): with When("I grant the role to the user"): node.query(f"GRANT {role0_name} TO {user0_name}") - with When(f"I run checks that grant and revoke privilege from {user0_name} with {role0_name} to {user1_name}"): - grant_option_check(grant_option_target=role0_name, grant_target=user1_name, user_name=user0_name, table_type=table_type, privilege=privilege, node=node) + with When( + f"I run checks that grant and revoke privilege from {user0_name} with {role0_name} to {user1_name}" + ): + grant_option_check( + grant_option_target=role0_name, + grant_target=user1_name, + user_name=user0_name, + table_type=table_type, + privilege=privilege, + node=node, + ) with Suite("user with privilege via role granting to role"): with user(node, user0_name), role(node, f"{role0_name},{role1_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role0_name} TO {user0_name}") - with When(f"I run checks that grant and revoke privilege from {user0_name} with {role0_name} to {role1_name}"): - grant_option_check(grant_option_target=role0_name, grant_target=role1_name, user_name=user0_name, table_type=table_type, privilege=privilege, node=node) + with When( + f"I run checks that grant and revoke privilege from {user0_name} with {role0_name} to {role1_name}" + ): + grant_option_check( + grant_option_target=role0_name, + grant_target=role1_name, + user_name=user0_name, + table_type=table_type, + privilege=privilege, + node=node, + ) -def grant_option_check(grant_option_target, grant_target, user_name, table_type, privilege, node=None): - """Run different scenarios to check the user's access with different privileges. 
- """ + +def grant_option_check( + grant_option_target, grant_target, user_name, table_type, privilege, node=None +): + """Run different scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") - with Scenario("grant by user without privilege", setup=instrument_clickhouse_server_log): + with Scenario( + "grant by user without privilege", setup=instrument_clickhouse_server_log + ): table_name = f"merge_tree_{getuid()}" with table(node, name=table_name, table_type_name=table_type): with Then("I attempt to grant delete privilege without privilege"): - node.query(f"GRANT {privilege} ON {table_name} TO {grant_target}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"GRANT {privilege} ON {table_name} TO {grant_target}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) - with Scenario("grant by user with grant option privilege", setup=instrument_clickhouse_server_log): + with Scenario( + "grant by user with grant option privilege", + setup=instrument_clickhouse_server_log, + ): table_name = f"merge_tree_{getuid()}" with table(node, name=table_name, table_type_name=table_type): with When("I grant delete privilege"): - node.query(f"GRANT {privilege} ON {table_name} TO {grant_option_target} WITH GRANT OPTION") + node.query( + f"GRANT {privilege} ON {table_name} TO {grant_option_target} WITH GRANT OPTION" + ) with Then("I attempt to grant delete privilege"): - node.query(f"GRANT {privilege} ON {table_name} TO {grant_target}", settings = [("user", user_name)]) + node.query( + f"GRANT {privilege} ON {table_name} TO {grant_target}", + settings=[("user", user_name)], + ) - with Scenario("revoke by user with grant option privilege", setup=instrument_clickhouse_server_log): + with Scenario( + "revoke by user with grant option privilege", + setup=instrument_clickhouse_server_log, + ): table_name = f"merge_tree_{getuid()}" with table(node, name=table_name, table_type_name=table_type): with When("I grant delete privilege"): - node.query(f"GRANT {privilege} ON {table_name} TO {grant_option_target} WITH GRANT OPTION") + node.query( + f"GRANT {privilege} ON {table_name} TO {grant_option_target} WITH GRANT OPTION" + ) with Then("I attempt to revoke delete privilege"): - node.query(f"REVOKE {privilege} ON {table_name} FROM {grant_target}", settings = [("user", user_name)]) + node.query( + f"REVOKE {privilege} ON {table_name} FROM {grant_target}", + settings=[("user", user_name)], + ) - with Scenario("grant by user with revoked grant option privilege", setup=instrument_clickhouse_server_log): + with Scenario( + "grant by user with revoked grant option privilege", + setup=instrument_clickhouse_server_log, + ): table_name = f"merge_tree_{getuid()}" with table(node, name=table_name, table_type_name=table_type): - with When(f"I grant delete privilege with grant option to {grant_option_target}"): - node.query(f"GRANT {privilege} ON {table_name} TO {grant_option_target} WITH GRANT OPTION") - with And(f"I revoke delete privilege with grant option from {grant_option_target}"): - node.query(f"REVOKE {privilege} ON {table_name} FROM {grant_option_target}") + with When( + f"I grant delete privilege with grant option to {grant_option_target}" + ): + node.query( + f"GRANT {privilege} ON {table_name} TO {grant_option_target} WITH GRANT OPTION" + ) + with And( + f"I revoke delete privilege with grant option from {grant_option_target}" + ): + node.query( + f"REVOKE {privilege} ON 
{table_name} FROM {grant_option_target}" + ) with Then("I attempt to grant delete privilege"): - node.query(f"GRANT {privilege} ON {table_name} TO {grant_target}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"GRANT {privilege} ON {table_name} TO {grant_target}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_GrantOption("1.0"), ) -@Examples("privilege", [ - ("ALTER MOVE PARTITION",), ("ALTER MOVE PART",), ("MOVE PARTITION",), ("MOVE PART",), - ("ALTER DELETE",), ("DELETE",), - ("ALTER FETCH PARTITION",), ("FETCH PARTITION",), - ("ALTER FREEZE PARTITION",), ("FREEZE PARTITION",), - ("ALTER UPDATE",), ("UPDATE",), - ("ALTER ADD COLUMN",), ("ADD COLUMN",), - ("ALTER CLEAR COLUMN",), ("CLEAR COLUMN",), - ("ALTER MODIFY COLUMN",), ("MODIFY COLUMN",), - ("ALTER RENAME COLUMN",), ("RENAME COLUMN",), - ("ALTER COMMENT COLUMN",), ("COMMENT COLUMN",), - ("ALTER DROP COLUMN",), ("DROP COLUMN",), - ("ALTER COLUMN",), - ("ALTER SETTINGS",), ("ALTER SETTING",), ("ALTER MODIFY SETTING",), ("MODIFY SETTING",), - ("ALTER ORDER BY",), ("ALTER MODIFY ORDER BY",), ("MODIFY ORDER BY",), - ("ALTER SAMPLE BY",), ("ALTER MODIFY SAMPLE BY",), ("MODIFY SAMPLE BY",), - ("ALTER ADD INDEX",), ("ADD INDEX",), - ("ALTER MATERIALIZE INDEX",), ("MATERIALIZE INDEX",), - ("ALTER CLEAR INDEX",), ("CLEAR INDEX",), - ("ALTER DROP INDEX",), ("DROP INDEX",), - ("ALTER INDEX",), ("INDEX",), - ("ALTER TTL",), ("ALTER MODIFY TTL",), ("MODIFY TTL",), - ("ALTER MATERIALIZE TTL",), ("MATERIALIZE TTL",), - ("ALTER ADD CONSTRAINT",), ("ADD CONSTRAINT",), - ("ALTER DROP CONSTRAINT",), ("DROP CONSTRAINT",), - ("ALTER CONSTRAINT",), ("CONSTRAINT",), - ("INSERT",), - ("SELECT",), -]) +@Examples( + "privilege", + [ + ("ALTER MOVE PARTITION",), + ("ALTER MOVE PART",), + ("MOVE PARTITION",), + ("MOVE PART",), + ("ALTER DELETE",), + ("DELETE",), + ("ALTER FETCH PARTITION",), + ("FETCH PARTITION",), + ("ALTER FREEZE PARTITION",), + ("FREEZE PARTITION",), + ("ALTER UPDATE",), + ("UPDATE",), + ("ALTER ADD COLUMN",), + ("ADD COLUMN",), + ("ALTER CLEAR COLUMN",), + ("CLEAR COLUMN",), + ("ALTER MODIFY COLUMN",), + ("MODIFY COLUMN",), + ("ALTER RENAME COLUMN",), + ("RENAME COLUMN",), + ("ALTER COMMENT COLUMN",), + ("COMMENT COLUMN",), + ("ALTER DROP COLUMN",), + ("DROP COLUMN",), + ("ALTER COLUMN",), + ("ALTER SETTINGS",), + ("ALTER SETTING",), + ("ALTER MODIFY SETTING",), + ("MODIFY SETTING",), + ("ALTER ORDER BY",), + ("ALTER MODIFY ORDER BY",), + ("MODIFY ORDER BY",), + ("ALTER SAMPLE BY",), + ("ALTER MODIFY SAMPLE BY",), + ("MODIFY SAMPLE BY",), + ("ALTER ADD INDEX",), + ("ADD INDEX",), + ("ALTER MATERIALIZE INDEX",), + ("MATERIALIZE INDEX",), + ("ALTER CLEAR INDEX",), + ("CLEAR INDEX",), + ("ALTER DROP INDEX",), + ("DROP INDEX",), + ("ALTER INDEX",), + ("INDEX",), + ("ALTER TTL",), + ("ALTER MODIFY TTL",), + ("MODIFY TTL",), + ("ALTER MATERIALIZE TTL",), + ("MATERIALIZE TTL",), + ("ALTER ADD CONSTRAINT",), + ("ADD CONSTRAINT",), + ("ALTER DROP CONSTRAINT",), + ("DROP CONSTRAINT",), + ("ALTER CONSTRAINT",), + ("CONSTRAINT",), + ("INSERT",), + ("SELECT",), + ], +) @Name("grant option") def feature(self, stress=None, node="clickhouse1"): - """Check the RBAC functionality of privileges with GRANT OPTION. 
- """ + """Check the RBAC functionality of privileges with GRANT OPTION.""" self.context.node = self.context.cluster.node(node) if stress is not None: @@ -125,8 +235,14 @@ def feature(self, stress=None, node="clickhouse1"): with Pool(12) as pool: try: for example in self.examples: - privilege, = example + (privilege,) = example args = {"table_type": "MergeTree", "privilege": privilege} - Suite(test=grant_option, name=privilege, setup=instrument_clickhouse_server_log, parallel=True, executor=pool)(**args) + Suite( + test=grant_option, + name=privilege, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + )(**args) finally: join() diff --git a/tests/testflows/rbac/tests/privileges/insert.py b/tests/testflows/rbac/tests/privileges/insert.py index 650e65b2fb0..4130249c742 100755 --- a/tests/testflows/rbac/tests/privileges/insert.py +++ b/tests/testflows/rbac/tests/privileges/insert.py @@ -8,20 +8,23 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + def input_output_equality_check(node, input_columns, input_data, table_name): data_list = [x.strip("'") for x in input_data.split(",")] input_dict = dict(zip(input_columns.split(","), data_list)) - output_dict = json.loads(node.query(f"select {input_columns} from {table_name} format JSONEachRow").output) - output_dict = {k:str(v) for (k,v) in output_dict.items()} + output_dict = json.loads( + node.query( + f"select {input_columns} from {table_name} format JSONEachRow" + ).output + ) + output_dict = {k: str(v) for (k, v) in output_dict.items()} return input_dict == output_dict + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_Privileges_None("1.0") -) +@Requirements(RQ_SRS_006_RBAC_Privileges_None("1.0")) def without_privilege(self, table_type, node=None): - """Check that user without insert privilege on a table is not able to insert on that table. - """ + """Check that user without insert privilege on a table is not able to insert on that table.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" @@ -40,16 +43,20 @@ def without_privilege(self, table_type, node=None): with Then("I run INSERT without privilege"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( RQ_SRS_006_RBAC_Grant_Privilege_Insert("1.0"), ) def user_with_privilege(self, table_type, node=None): - """Check that user can insert into a table on which they have insert privilege. 
- """ + """Check that user can insert into a table on which they have insert privilege.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" @@ -63,20 +70,25 @@ def user_with_privilege(self, table_type, node=None): node.query(f"GRANT INSERT ON {table_name} TO {user_name}") with And("I use INSERT"): - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", settings=[("user",user_name)]) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + ) with Then("I check the insert functioned"): - output = node.query(f"SELECT d FROM {table_name} FORMAT JSONEachRow").output + output = node.query( + f"SELECT d FROM {table_name} FORMAT JSONEachRow" + ).output assert output == '{"d":"2020-01-01"}', error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_Privileges_All("1.0"), RQ_SRS_006_RBAC_Grant_Privilege_Insert("1.0"), ) def all_privilege(self, table_type, node=None): - """Check that user can insert into a table on which they have insert privilege. - """ + """Check that user can insert into a table on which they have insert privilege.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" @@ -91,19 +103,24 @@ def all_privilege(self, table_type, node=None): node.query(f"GRANT ALL ON *.* TO {user_name}") with And("I use INSERT"): - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", settings=[("user",user_name)]) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + ) with Then("I check the insert functioned"): - output = node.query(f"SELECT d FROM {table_name} FORMAT JSONEachRow").output + output = node.query( + f"SELECT d FROM {table_name} FORMAT JSONEachRow" + ).output assert output == '{"d":"2020-01-01"}', error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_Revoke_Privilege_Insert("1.0"), ) def user_with_revoked_privilege(self, table_type, node=None): - """Check that user is unable to insert into a table after insert privilege on that table has been revoked from user. - """ + """Check that user is unable to insert into a table after insert privilege on that table has been revoked from user.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" @@ -121,13 +138,17 @@ def user_with_revoked_privilege(self, table_type, node=None): with Then("I use INSERT"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", - settings=[("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario def user_with_all_revoked_privilege(self, table_type, node=None): - """Check that user is unable to insert into a table after ALL privilege has been revoked from user. 
- """ + """Check that user is unable to insert into a table after ALL privilege has been revoked from user.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" @@ -145,28 +166,52 @@ def user_with_all_revoked_privilege(self, table_type, node=None): with Then("I use INSERT"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", - settings=[("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario def user_with_privilege_on_columns(self, table_type): - Scenario(run=user_column_privileges, - examples=Examples("grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass table_type", - [tuple(list(row)+[table_type]) for row in user_column_privileges.examples])) + Scenario( + run=user_column_privileges, + examples=Examples( + "grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass table_type", + [ + tuple(list(row) + [table_type]) + for row in user_column_privileges.examples + ], + ), + ) @TestOutline @Requirements( RQ_SRS_006_RBAC_Insert_Column("1.0"), ) -@Examples("grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass", [ - ("d", "d", "x", "d", '\'woo\'', '\'2020-01-01\''), - ("d,a", "d", "x", "d", '\'woo\'', '\'2020-01-01\''), - ("d,a,b", "d,a,b", "x", "d,b", '\'woo\'', '\'2020-01-01\',9'), - ("d,a,b", "b", "y", "d,a,b", '9', '\'2020-01-01\',\'woo\',9') -]) -def user_column_privileges(self, grant_columns, insert_columns_pass, data_fail, data_pass, table_type, - revoke_columns=None, insert_columns_fail=None, node=None): +@Examples( + "grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass", + [ + ("d", "d", "x", "d", "'woo'", "'2020-01-01'"), + ("d,a", "d", "x", "d", "'woo'", "'2020-01-01'"), + ("d,a,b", "d,a,b", "x", "d,b", "'woo'", "'2020-01-01',9"), + ("d,a,b", "b", "y", "d,a,b", "9", "'2020-01-01','woo',9"), + ], +) +def user_column_privileges( + self, + grant_columns, + insert_columns_pass, + data_fail, + data_pass, + table_type, + revoke_columns=None, + insert_columns_fail=None, + node=None, +): """Check that user is able to insert on columns where insert privilege is granted and unable to insert on columns where insert privilege has not been granted or has been revoked. 
""" @@ -181,31 +226,48 @@ def user_column_privileges(self, grant_columns, insert_columns_pass, data_fail, with user(node, user_name): with When("I grant insert privilege"): - node.query(f"GRANT INSERT({grant_columns}) ON {table_name} TO {user_name}") + node.query( + f"GRANT INSERT({grant_columns}) ON {table_name} TO {user_name}" + ) if insert_columns_fail is not None: with And("I insert into a column without insert privilege"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} ({insert_columns_fail}) VALUES ({data_fail})", - settings=[("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} ({insert_columns_fail}) VALUES ({data_fail})", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with And("I insert into granted column"): - node.query(f"INSERT INTO {table_name} ({insert_columns_pass}) VALUES ({data_pass})", - settings=[("user",user_name)]) + node.query( + f"INSERT INTO {table_name} ({insert_columns_pass}) VALUES ({data_pass})", + settings=[("user", user_name)], + ) with Then("I check the insert functioned"): - input_equals_output = input_output_equality_check(node, insert_columns_pass, data_pass, table_name) + input_equals_output = input_output_equality_check( + node, insert_columns_pass, data_pass, table_name + ) assert input_equals_output, error() if revoke_columns is not None: with When("I revoke insert privilege from columns"): - node.query(f"REVOKE INSERT({revoke_columns}) ON {table_name} FROM {user_name}") + node.query( + f"REVOKE INSERT({revoke_columns}) ON {table_name} FROM {user_name}" + ) with And("I insert into revoked columns"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} ({insert_columns_pass}) VALUES ({data_pass})", - settings=[("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} ({insert_columns_pass}) VALUES ({data_pass})", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( @@ -233,12 +295,18 @@ def role_with_privilege(self, table_type, node=None): node.query(f"GRANT {role_name} TO {user_name}") with And("I insert into the table"): - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", settings=[("user",user_name)]) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + ) with Then("I check the data matches the input"): - output = node.query(f"SELECT d FROM {table_name} FORMAT JSONEachRow").output + output = node.query( + f"SELECT d FROM {table_name} FORMAT JSONEachRow" + ).output assert output == '{"d":"2020-01-01"}', error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_Revoke_Privilege_Insert("1.0"), @@ -270,8 +338,13 @@ def role_with_revoked_privilege(self, table_type, node=None): with And("I insert into the table"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", - settings=[("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario def user_with_revoked_role(self, table_type, node=None): @@ -300,27 +373,52 @@ def user_with_revoked_role(self, table_type, node=None): with And("I insert into the table"): exitcode, message = errors.not_enough_privileges(name=user_name) - 
node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", - settings=[("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario def role_with_privilege_on_columns(self, table_type): - Scenario(run=role_column_privileges, - examples=Examples("grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass table_type", - [tuple(list(row)+[table_type]) for row in role_column_privileges.examples])) + Scenario( + run=role_column_privileges, + examples=Examples( + "grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass table_type", + [ + tuple(list(row) + [table_type]) + for row in role_column_privileges.examples + ], + ), + ) + @TestOutline @Requirements( RQ_SRS_006_RBAC_Insert_Column("1.0"), ) -@Examples("grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass", [ - ("d", "d", "x", "d", '\'woo\'', '\'2020-01-01\''), - ("d,a", "d", "x", "d", '\'woo\'', '\'2020-01-01\''), - ("d,a,b", "d,a,b", "x", "d,b", '\'woo\'', '\'2020-01-01\',9'), - ("d,a,b", "b", "y", "d,a,b", '9', '\'2020-01-01\',\'woo\',9') -]) -def role_column_privileges(self, grant_columns, insert_columns_pass, data_fail, data_pass, - table_type, revoke_columns=None, insert_columns_fail=None, node=None): +@Examples( + "grant_columns revoke_columns insert_columns_fail insert_columns_pass data_fail data_pass", + [ + ("d", "d", "x", "d", "'woo'", "'2020-01-01'"), + ("d,a", "d", "x", "d", "'woo'", "'2020-01-01'"), + ("d,a,b", "d,a,b", "x", "d,b", "'woo'", "'2020-01-01',9"), + ("d,a,b", "b", "y", "d,a,b", "9", "'2020-01-01','woo',9"), + ], +) +def role_column_privileges( + self, + grant_columns, + insert_columns_pass, + data_fail, + data_pass, + table_type, + revoke_columns=None, + insert_columns_fail=None, + node=None, +): """Check that user with a role is able to insert on columns where insert privilege is granted to the role and unable to insert on columns where insert privilege has not been granted or has been revoked from the role. 
""" @@ -335,7 +433,9 @@ def role_column_privileges(self, grant_columns, insert_columns_pass, data_fail, with user(node, user_name), role(node, role_name): with When("I grant insert privilege"): - node.query(f"GRANT INSERT({grant_columns}) ON {table_name} TO {role_name}") + node.query( + f"GRANT INSERT({grant_columns}) ON {table_name} TO {role_name}" + ) with And("I grant the role to a user"): node.query(f"GRANT {role_name} TO {user_name}") @@ -344,26 +444,41 @@ def role_column_privileges(self, grant_columns, insert_columns_pass, data_fail, with And("I insert into columns without insert privilege"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} ({insert_columns_fail}) VALUES ({data_fail})", - settings=[("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} ({insert_columns_fail}) VALUES ({data_fail})", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with And("I insert into granted column"): - node.query(f"INSERT INTO {table_name} ({insert_columns_pass}) VALUES ({data_pass})", - settings=[("user",user_name)]) + node.query( + f"INSERT INTO {table_name} ({insert_columns_pass}) VALUES ({data_pass})", + settings=[("user", user_name)], + ) with Then("I check the insert functioned"): - input_equals_output = input_output_equality_check(node, insert_columns_pass, data_pass, table_name) + input_equals_output = input_output_equality_check( + node, insert_columns_pass, data_pass, table_name + ) assert input_equals_output, error() if revoke_columns is not None: with When("I revoke insert privilege from columns"): - node.query(f"REVOKE INSERT({revoke_columns}) ON {table_name} FROM {role_name}") + node.query( + f"REVOKE INSERT({revoke_columns}) ON {table_name} FROM {role_name}" + ) with And("I insert into revoked columns"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} ({insert_columns_pass}) VALUES ({data_pass})", - settings=[("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} ({insert_columns_pass}) VALUES ({data_pass})", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( @@ -383,40 +498,68 @@ def user_with_privilege_on_cluster(self, table_type, node=None): try: with Given("I have a user on a cluster"): - node.query(f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster" + ) - with When("I grant insert privilege on a cluster without the node with the table"): - node.query(f"GRANT ON CLUSTER sharded_cluster23 INSERT ON {table_name} TO {user_name}") + with When( + "I grant insert privilege on a cluster without the node with the table" + ): + node.query( + f"GRANT ON CLUSTER sharded_cluster23 INSERT ON {table_name} TO {user_name}" + ) with And("I insert into the table expecting a fail"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with And("I grant insert privilege on cluster including all nodes"): - node.query(f"GRANT ON CLUSTER sharded_cluster INSERT ON {table_name} TO {user_name}") + node.query( + f"GRANT ON CLUSTER 
sharded_cluster INSERT ON {table_name} TO {user_name}" + ) - with And("I revoke insert privilege on cluster without the node with the table"): - node.query(f"REVOKE ON CLUSTER sharded_cluster23 INSERT ON {table_name} FROM {user_name}") + with And( + "I revoke insert privilege on cluster without the node with the table" + ): + node.query( + f"REVOKE ON CLUSTER sharded_cluster23 INSERT ON {table_name} FROM {user_name}" + ) with And("I insert into the table"): - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", settings=[("user",user_name)]) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + ) with And("I check that I can read inserted data"): - output = node.query(f"SELECT d FROM {table_name} FORMAT JSONEachRow").output + output = node.query( + f"SELECT d FROM {table_name} FORMAT JSONEachRow" + ).output assert output == '{"d":"2020-01-01"}', error() with And("I revoke insert privilege on cluster with all nodes"): - node.query(f"REVOKE ON CLUSTER sharded_cluster INSERT ON {table_name} FROM {user_name}") + node.query( + f"REVOKE ON CLUSTER sharded_cluster INSERT ON {table_name} FROM {user_name}" + ) with Then("I insert into table expecting fail"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the user"): node.query(f"DROP USER {user_name} ON CLUSTER sharded_cluster") + @TestScenario @Requirements( RQ_SRS_006_RBAC_Insert_Cluster("1.0"), @@ -436,59 +579,81 @@ def role_with_privilege_on_cluster(self, table_type, node=None): try: with Given("I have a user on a cluster"): - node.query(f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster" + ) with And("I have a role on a cluster"): - node.query(f"CREATE ROLE OR REPLACE {role_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE ROLE OR REPLACE {role_name} ON CLUSTER sharded_cluster" + ) with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And("I grant insert privilege on a cluster without the node with the table"): - node.query(f"GRANT ON CLUSTER sharded_cluster23 INSERT ON {table_name} TO {role_name}") + with And( + "I grant insert privilege on a cluster without the node with the table" + ): + node.query( + f"GRANT ON CLUSTER sharded_cluster23 INSERT ON {table_name} TO {role_name}" + ) with And("I insert into the table expecting a fail"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with And("I grant insert privilege on cluster including all nodes"): - node.query(f"GRANT ON CLUSTER sharded_cluster INSERT ON {table_name} TO {role_name}") + node.query( + f"GRANT ON CLUSTER sharded_cluster INSERT ON {table_name} TO {role_name}" + ) with And("I revoke insert privilege on cluster without the table node"): - node.query(f"REVOKE ON CLUSTER sharded_cluster23 INSERT ON {table_name} FROM {role_name}") + node.query( + 
f"REVOKE ON CLUSTER sharded_cluster23 INSERT ON {table_name} FROM {role_name}" + ) with And("I insert into the table"): - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", settings=[("user",user_name)]) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + ) with And("I check that I can read inserted data"): - output = node.query(f"SELECT d FROM {table_name} FORMAT JSONEachRow").output + output = node.query( + f"SELECT d FROM {table_name} FORMAT JSONEachRow" + ).output assert output == '{"d":"2020-01-01"}', error() with And("I revoke insert privilege on cluster with all nodes"): - node.query(f"REVOKE ON CLUSTER sharded_cluster INSERT ON {table_name} FROM {role_name}") + node.query( + f"REVOKE ON CLUSTER sharded_cluster INSERT ON {table_name} FROM {role_name}" + ) with Then("I insert into table expecting fail"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the user"): node.query(f"DROP USER {user_name} ON CLUSTER sharded_cluster") + @TestOutline(Feature) -@Requirements( - RQ_SRS_006_RBAC_Insert("1.0"), - RQ_SRS_006_RBAC_Insert_TableEngines("1.0") -) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Requirements(RQ_SRS_006_RBAC_Insert("1.0"), RQ_SRS_006_RBAC_Insert_TableEngines("1.0")) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("insert") def feature(self, table_type, stress=None, node="clickhouse1"): - """Check the RBAC functionality of INSERT. 
- """ - args = {"table_type" : table_type} + """Check the RBAC functionality of INSERT.""" + args = {"table_type": table_type} self.context.node = self.context.cluster.node(node) @@ -502,6 +667,11 @@ def feature(self, table_type, stress=None, node="clickhouse1"): with Pool(10) as pool: try: for scenario in loads(current_module(), Scenario): - Scenario(test=scenario, setup=instrument_clickhouse_server_log, parallel=True, executor=pool)(**args) + Scenario( + test=scenario, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + )(**args) finally: join() diff --git a/tests/testflows/rbac/tests/privileges/introspection.py b/tests/testflows/rbac/tests/privileges/introspection.py index a8d62cf8618..b36085ced96 100644 --- a/tests/testflows/rbac/tests/privileges/introspection.py +++ b/tests/testflows/rbac/tests/privileges/introspection.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @contextmanager def allow_introspection_functions(node): setting = ("allow_introspection_functions", 1) @@ -12,21 +13,25 @@ def allow_introspection_functions(node): try: with Given("I add allow_introspection_functions to the default query settings"): - default_query_settings = getsattr(current().context, "default_query_settings", []) + default_query_settings = getsattr( + current().context, "default_query_settings", [] + ) default_query_settings.append(setting) yield finally: - with Finally("I remove allow_introspection_functions from the default query settings"): + with Finally( + "I remove allow_introspection_functions from the default query settings" + ): if default_query_settings: try: default_query_settings.pop(default_query_settings.index(setting)) except ValueError: pass + @TestSuite def addressToLine_privileges_granted_directly(self, node=None): - """Check that a user is able to execute `addressToLine` with privileges are granted directly. - """ + """Check that a user is able to execute `addressToLine` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -35,15 +40,22 @@ def addressToLine_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=addressToLine, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in addressToLine.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=addressToLine, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in addressToLine.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def addressToLine_privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `addressToLine` with privileges are granted through a role. 
- """ + """Check that a user is able to execute `addressToLine` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -56,24 +68,34 @@ def addressToLine_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=addressToLine, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in addressToLine.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=addressToLine, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in addressToLine.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("INTROSPECTION",), - ("INTROSPECTION FUNCTIONS",), - ("addressToLine",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("INTROSPECTION",), + ("INTROSPECTION FUNCTIONS",), + ("addressToLine",), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_Introspection_addressToLine("1.0"), ) def addressToLine(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `addressToLine` when they have the necessary privilege. - """ + """Check that user is only able to execute `addressToLine` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -88,8 +110,12 @@ def addressToLine(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use addressToLine"): - node.query(f"WITH addressToLine(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"WITH addressToLine(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("addressToLine with privilege"): @@ -97,7 +123,10 @@ def addressToLine(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use addressToLine"): - node.query(f"WITH addressToLine(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", settings = [("user", f"{user_name}")]) + node.query( + f"WITH addressToLine(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", + settings=[("user", f"{user_name}")], + ) with Scenario("addressToLine with revoked privilege"): @@ -108,13 +137,17 @@ def addressToLine(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use addressToLine"): - node.query(f"WITH addressToLine(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"WITH addressToLine(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def addressToSymbol_privileges_granted_directly(self, node=None): - """Check that a user is able to execute `addressToSymbol` with privileges are granted directly. 
- """ + """Check that a user is able to execute `addressToSymbol` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -123,15 +156,22 @@ def addressToSymbol_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=addressToSymbol, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in addressToSymbol.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=addressToSymbol, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in addressToSymbol.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def addressToSymbol_privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `addressToSymbol` with privileges are granted through a role. - """ + """Check that a user is able to execute `addressToSymbol` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -144,24 +184,34 @@ def addressToSymbol_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=addressToSymbol, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in addressToSymbol.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=addressToSymbol, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in addressToSymbol.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("INTROSPECTION",), - ("INTROSPECTION FUNCTIONS",), - ("addressToSymbol",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("INTROSPECTION",), + ("INTROSPECTION FUNCTIONS",), + ("addressToSymbol",), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_Introspection_addressToSymbol("1.0"), ) def addressToSymbol(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `addressToSymbol` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `addressToSymbol` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -176,8 +226,12 @@ def addressToSymbol(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use addressToSymbol"): - node.query(f"WITH addressToSymbol(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"WITH addressToSymbol(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("addressToSymbol with privilege"): @@ -185,7 +239,10 @@ def addressToSymbol(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use addressToSymbol"): - node.query(f"WITH addressToSymbol(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", settings = [("user", f"{user_name}")]) + node.query( + f"WITH addressToSymbol(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", + settings=[("user", f"{user_name}")], + ) with Scenario("addressToSymbol with revoked privilege"): @@ -196,13 +253,17 @@ def addressToSymbol(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use addressToSymbol"): - node.query(f"WITH addressToSymbol(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"WITH addressToSymbol(toUInt64(dummy)) AS addr SELECT 1 WHERE addr = ''", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def demangle_privileges_granted_directly(self, node=None): - """Check that a user is able to execute `demangle` with privileges are granted directly. - """ + """Check that a user is able to execute `demangle` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -211,15 +272,22 @@ def demangle_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=demangle, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in demangle.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=demangle, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in demangle.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def demangle_privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `demangle` with privileges are granted through a role. 
- """ + """Check that a user is able to execute `demangle` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -232,24 +300,34 @@ def demangle_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=demangle, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in demangle.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=demangle, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in demangle.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("INTROSPECTION",), - ("INTROSPECTION FUNCTIONS",), - ("demangle",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("INTROSPECTION",), + ("INTROSPECTION FUNCTIONS",), + ("demangle",), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_Introspection_demangle("1.0"), ) def demangle(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `demangle` when they have the necessary privilege. - """ + """Check that user is only able to execute `demangle` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -264,8 +342,12 @@ def demangle(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use demangle"): - node.query(f"WITH demangle(toString(dummy)) AS addr SELECT 1 WHERE addr = ''", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"WITH demangle(toString(dummy)) AS addr SELECT 1 WHERE addr = ''", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("demangle with privilege"): @@ -273,7 +355,10 @@ def demangle(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use demangle"): - node.query(f"WITH demangle(toString(dummy)) AS addr SELECT 1 WHERE addr = ''", settings = [("user", f"{user_name}")]) + node.query( + f"WITH demangle(toString(dummy)) AS addr SELECT 1 WHERE addr = ''", + settings=[("user", f"{user_name}")], + ) with Scenario("demangle with revoked privilege"): @@ -284,25 +369,47 @@ def demangle(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use demangle"): - node.query(f"WITH demangle(toString(dummy)) AS addr SELECT 1 WHERE addr = ''", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"WITH demangle(toString(dummy)) AS addr SELECT 1 WHERE addr = ''", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("introspection") @Requirements( RQ_SRS_006_RBAC_Privileges_Introspection("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of INTROSPECTION. 
- """ + """Check the RBAC functionality of INTROSPECTION.""" self.context.node = self.context.cluster.node(node) with allow_introspection_functions(self.context.node): - Suite(run=addressToLine_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=addressToLine_privileges_granted_via_role, setup=instrument_clickhouse_server_log) - Suite(run=addressToSymbol_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=addressToSymbol_privileges_granted_via_role, setup=instrument_clickhouse_server_log) - Suite(run=demangle_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=demangle_privileges_granted_via_role, setup=instrument_clickhouse_server_log) + Suite( + run=addressToLine_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=addressToLine_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=addressToSymbol_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=addressToSymbol_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=demangle_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=demangle_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) diff --git a/tests/testflows/rbac/tests/privileges/kill_mutation.py b/tests/testflows/rbac/tests/privileges/kill_mutation.py index 9a27836cad4..ce810ff8625 100644 --- a/tests/testflows/rbac/tests/privileges/kill_mutation.py +++ b/tests/testflows/rbac/tests/privileges/kill_mutation.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def no_privilege(self, node=None): - """Check that user doesn't need privileges to execute `KILL MUTATION` with no mutations. 
- """ + """Check that user doesn't need privileges to execute `KILL MUTATION` with no mutations.""" if node is None: node = self.context.node @@ -24,7 +24,10 @@ def no_privilege(self, node=None): node.query(f"GRANT USAGE ON *.* TO {user_name}") with Then("I attempt to kill mutation on table"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)]) + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + ) with Scenario("kill mutation on cluster"): user_name = f"user_{getuid()}" @@ -41,7 +44,11 @@ def no_privilege(self, node=None): node.query(f"GRANT USAGE ON *.* TO {user_name}") with Then("I attempt to kill mutation on cluster"): - node.query(f"KILL MUTATION ON CLUSTER sharded_cluster WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)]) + node.query( + f"KILL MUTATION ON CLUSTER sharded_cluster WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + ) + @TestSuite def privileges_granted_directly(self, node=None): @@ -60,6 +67,7 @@ def privileges_granted_directly(self, node=None): Suite(test=delete)(user_name=user_name, grant_target_name=user_name) Suite(test=drop_column)(user_name=user_name, grant_target_name=user_name) + @TestSuite def privileges_granted_via_role(self, node=None): """Check that a user is able to execute `KILL MUTATION` on a table with a mutation @@ -81,10 +89,9 @@ def privileges_granted_via_role(self, node=None): Suite(test=delete)(user_name=user_name, grant_target_name=role_name) Suite(test=drop_column)(user_name=user_name, grant_target_name=role_name) + @TestSuite -@Requirements( - RQ_SRS_006_RBAC_Privileges_KillMutation_AlterUpdate("1.0") -) +@Requirements(RQ_SRS_006_RBAC_Privileges_KillMutation_AlterUpdate("1.0")) def update(self, user_name, grant_target_name, node=None): """Check that the user is able to execute `KILL MUTATION` after `ALTER UPDATE` if and only if the user has `ALTER UPDATE` privilege. 
@@ -112,8 +119,12 @@ def update(self, user_name, grant_target_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)], - exitcode=exitcode, message="Exception: Not allowed to kill mutation.") + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + exitcode=exitcode, + message="Exception: Not allowed to kill mutation.", + ) with Scenario("KILL ALTER UPDATE with privilege"): table_name = f"merge_tree_{getuid()}" @@ -127,7 +138,10 @@ def update(self, user_name, grant_target_name, node=None): node.query(f"GRANT ALTER UPDATE ON {table_name} TO {grant_target_name}") with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)]) + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + ) with Scenario("KILL ALTER UPDATE with revoked privilege"): table_name = f"merge_tree_{getuid()}" @@ -141,11 +155,17 @@ def update(self, user_name, grant_target_name, node=None): node.query(f"GRANT ALTER UPDATE ON {table_name} TO {grant_target_name}") with And("I revoke the ALTER UPDATE privilege"): - node.query(f"REVOKE ALTER UPDATE ON {table_name} FROM {grant_target_name}") + node.query( + f"REVOKE ALTER UPDATE ON {table_name} FROM {grant_target_name}" + ) with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)], - exitcode=exitcode, message="Exception: Not allowed to kill mutation.") + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + exitcode=exitcode, + message="Exception: Not allowed to kill mutation.", + ) with Scenario("KILL ALTER UPDATE with revoked ALL privilege"): table_name = f"merge_tree_{getuid()}" @@ -162,8 +182,12 @@ def update(self, user_name, grant_target_name, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)], - exitcode=exitcode, message="Exception: Not allowed to kill mutation.") + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + exitcode=exitcode, + message="Exception: Not allowed to kill mutation.", + ) with Scenario("KILL ALTER UPDATE with ALL privilege"): table_name = f"merge_tree_{getuid()}" @@ -177,12 +201,14 @@ def update(self, user_name, grant_target_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)]) + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + ) + @TestSuite -@Requirements( - RQ_SRS_006_RBAC_Privileges_KillMutation_AlterDelete("1.0") -) +@Requirements(RQ_SRS_006_RBAC_Privileges_KillMutation_AlterDelete("1.0")) def delete(self, user_name, grant_target_name, node=None): """Check that the user is able to execute `KILL MUTATION` after `ALTER DELETE` if and only if the user has `ALTER DELETE` privilege. 
@@ -210,8 +236,12 @@ def delete(self, user_name, grant_target_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)], - exitcode=exitcode, message="Exception: Not allowed to kill mutation.") + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + exitcode=exitcode, + message="Exception: Not allowed to kill mutation.", + ) with Scenario("KILL ALTER DELETE with privilege"): table_name = f"merge_tree_{getuid()}" @@ -225,7 +255,10 @@ def delete(self, user_name, grant_target_name, node=None): node.query(f"GRANT ALTER DELETE ON {table_name} TO {grant_target_name}") with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)]) + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + ) with Scenario("KILL ALTER DELETE with revoked privilege"): table_name = f"merge_tree_{getuid()}" @@ -239,11 +272,17 @@ def delete(self, user_name, grant_target_name, node=None): node.query(f"GRANT ALTER DELETE ON {table_name} TO {grant_target_name}") with And("I revoke the ALTER DELETE privilege"): - node.query(f"REVOKE ALTER DELETE ON {table_name} FROM {grant_target_name}") + node.query( + f"REVOKE ALTER DELETE ON {table_name} FROM {grant_target_name}" + ) with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)], - exitcode=exitcode, message="Exception: Not allowed to kill mutation.") + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + exitcode=exitcode, + message="Exception: Not allowed to kill mutation.", + ) with Scenario("KILL ALTER DELETE with revoked ALL privilege"): table_name = f"merge_tree_{getuid()}" @@ -260,8 +299,12 @@ def delete(self, user_name, grant_target_name, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)], - exitcode=exitcode, message="Exception: Not allowed to kill mutation.") + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + exitcode=exitcode, + message="Exception: Not allowed to kill mutation.", + ) with Scenario("KILL ALTER DELETE with ALL privilege"): table_name = f"merge_tree_{getuid()}" @@ -275,12 +318,14 @@ def delete(self, user_name, grant_target_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)]) + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + ) + @TestSuite -@Requirements( - RQ_SRS_006_RBAC_Privileges_KillMutation_AlterDropColumn("1.0") -) +@Requirements(RQ_SRS_006_RBAC_Privileges_KillMutation_AlterDropColumn("1.0")) def drop_column(self, user_name, grant_target_name, node=None): """Check that the user is able to execute `KILL MUTATION` after `ALTER DROP COLUMN` if and only if the user has `ALTER DROP COLUMN` privilege. 
@@ -308,8 +353,12 @@ def drop_column(self, user_name, grant_target_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)], - exitcode=exitcode, message="Exception: Not allowed to kill mutation.") + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + exitcode=exitcode, + message="Exception: Not allowed to kill mutation.", + ) with Scenario("KILL ALTER DROP COLUMN with privilege"): table_name = f"merge_tree_{getuid()}" @@ -320,10 +369,15 @@ def drop_column(self, user_name, grant_target_name, node=None): node.query(f"ALTER TABLE {table_name} DROP COLUMN x") with When("I grant the ALTER DROP COLUMN privilege"): - node.query(f"GRANT ALTER DROP COLUMN ON {table_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER DROP COLUMN ON {table_name} TO {grant_target_name}" + ) with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)]) + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + ) with Scenario("KILL ALTER DROP COLUMN with revoked privilege"): table_name = f"merge_tree_{getuid()}" @@ -334,14 +388,22 @@ def drop_column(self, user_name, grant_target_name, node=None): node.query(f"ALTER TABLE {table_name} DROP COLUMN x") with When("I grant the ALTER DROP COLUMN privilege"): - node.query(f"GRANT ALTER DROP COLUMN ON {table_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER DROP COLUMN ON {table_name} TO {grant_target_name}" + ) with And("I revoke the ALTER DROP COLUMN privilege"): - node.query(f"REVOKE ALTER DROP COLUMN ON {table_name} FROM {grant_target_name}") + node.query( + f"REVOKE ALTER DROP COLUMN ON {table_name} FROM {grant_target_name}" + ) with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)], - exitcode=exitcode, message="Exception: Not allowed to kill mutation.") + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + exitcode=exitcode, + message="Exception: Not allowed to kill mutation.", + ) with Scenario("KILL ALTER DROP COLUMN with revoked privilege"): table_name = f"merge_tree_{getuid()}" @@ -352,14 +414,20 @@ def drop_column(self, user_name, grant_target_name, node=None): node.query(f"ALTER TABLE {table_name} DROP COLUMN x") with When("I grant the ALTER DROP COLUMN privilege"): - node.query(f"GRANT ALTER DROP COLUMN ON {table_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER DROP COLUMN ON {table_name} TO {grant_target_name}" + ) with And("I revoke ALL privilege"): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)], - exitcode=exitcode, message="Exception: Not allowed to kill mutation.") + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + exitcode=exitcode, + message="Exception: Not allowed to kill mutation.", + ) with Scenario("KILL ALTER DROP COLUMN with ALL privilege"): table_name = f"merge_tree_{getuid()}" @@ -373,18 +441,21 @@ def drop_column(self, 
user_name, grant_target_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I try to KILL MUTATION"): - node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)]) + node.query( + f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", + settings=[("user", user_name)], + ) + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_KillMutation("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("kill mutation") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of KILL MUTATION. - """ + """Check the RBAC functionality of KILL MUTATION.""" self.context.node = self.context.cluster.node(node) if parallel is not None: diff --git a/tests/testflows/rbac/tests/privileges/kill_query.py b/tests/testflows/rbac/tests/privileges/kill_query.py index d1f96e23fd8..397d783a097 100644 --- a/tests/testflows/rbac/tests/privileges/kill_query.py +++ b/tests/testflows/rbac/tests/privileges/kill_query.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, node=None): - """Check that user is only able to execute KILL QUERY when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute KILL QUERY when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,12 @@ def privilege_granted_directly_or_via_role(self, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute KILL QUERY with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, node=node) + with When( + f"I run checks that {user_name} is only able to execute KILL QUERY with required privileges" + ): + privilege_check( + grant_target_name=user_name, user_name=user_name, node=node + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +28,16 @@ def privilege_granted_directly_or_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute KILL QUERY with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute KILL QUERY with required privileges" + ): + privilege_check( + grant_target_name=role_name, user_name=user_name, node=node + ) + def privilege_check(grant_target_name, user_name, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -41,8 +49,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to kill a query without privilege"): - node.query(f"KILL QUERY WHERE user ='default'", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"KILL QUERY WHERE user ='default'", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege"): @@ -50,7 +62,7 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT KILL QUERY TO {grant_target_name}") with Then("I attempt to kill a query"): - node.query(f"KILL QUERY WHERE 1", settings = [("user", user_name)]) + node.query(f"KILL QUERY WHERE 1", settings=[("user", user_name)]) with Scenario("user with revoked privilege"): @@ -61,8 +73,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"REVOKE KILL QUERY TO {grant_target_name}") with Then("I attempt to kill a query"): - node.query(f"KILL QUERY WHERE 1", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"KILL QUERY WHERE 1", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with revoked ALL privilege"): @@ -73,8 +89,12 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to kill a query"): - node.query(f"KILL QUERY WHERE 1", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"KILL QUERY WHERE 1", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("execute on cluster"): @@ -82,7 +102,9 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT KILL QUERY TO {grant_target_name}") with Then("I attempt to kill a query"): - node.query(f"KILL QUERY ON CLUSTER WHERE 1'", settings = [("user", user_name)]) + node.query( + f"KILL QUERY ON CLUSTER WHERE 1'", settings=[("user", user_name)] + ) with Scenario("user with ALL privilege"): @@ -93,18 +115,18 @@ def privilege_check(grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* ON {grant_target_name}") with Then("I attempt to kill a query"): - node.query(f"KILL QUERY WHERE 1", settings = [("user", user_name)]) + node.query(f"KILL QUERY WHERE 1", settings=[("user", user_name)]) + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_KillQuery("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) @Name("kill query") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of KILL QUERY. 
- """ + """Check the RBAC functionality of KILL QUERY.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -112,5 +134,8 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): if stress is not None: self.context.stress = stress - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role() diff --git a/tests/testflows/rbac/tests/privileges/optimize.py b/tests/testflows/rbac/tests/privileges/optimize.py index 7d3f41a43b4..0e984a17a9b 100644 --- a/tests/testflows/rbac/tests/privileges/optimize.py +++ b/tests/testflows/rbac/tests/privileges/optimize.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, table_type, node=None): - """Check that user is only able to execute OPTIMIZE when they have required privilege, either directly or via role. - """ + """Check that user is only able to execute OPTIMIZE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,15 @@ def privilege_granted_directly_or_via_role(self, table_type, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute OPTIMIZE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, table_type=table_type, node=node) + with When( + f"I run checks that {user_name} is only able to execute OPTIMIZE with required privileges" + ): + privilege_check( + grant_target_name=user_name, + user_name=user_name, + table_type=table_type, + node=node, + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +31,19 @@ def privilege_granted_directly_or_via_role(self, table_type, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute OPTIMIZE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, table_type=table_type, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute OPTIMIZE with required privileges" + ): + privilege_check( + grant_target_name=role_name, + user_name=user_name, + table_type=table_type, + node=node, + ) + def privilege_check(grant_target_name, user_name, table_type, node=None): - """Run scenarios to check the user's access with different privileges. 
- """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -44,8 +58,12 @@ def privilege_check(grant_target_name, user_name, table_type, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to optimize a table without privilege"): - node.query(f"OPTIMIZE TABLE {table_name} FINAL", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"OPTIMIZE TABLE {table_name} FINAL", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege"): table_name = f"merge_tree_{getuid()}" @@ -56,7 +74,9 @@ def privilege_check(grant_target_name, user_name, table_type, node=None): node.query(f"GRANT OPTIMIZE ON {table_name} TO {grant_target_name}") with Then("I attempt to optimize a table"): - node.query(f"OPTIMIZE TABLE {table_name}", settings = [("user", user_name)]) + node.query( + f"OPTIMIZE TABLE {table_name}", settings=[("user", user_name)] + ) with Scenario("user with revoked privilege"): table_name = f"merge_tree_{getuid()}" @@ -70,8 +90,12 @@ def privilege_check(grant_target_name, user_name, table_type, node=None): node.query(f"REVOKE OPTIMIZE ON {table_name} FROM {grant_target_name}") with Then("I attempt to optimize a table"): - node.query(f"OPTIMIZE TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"OPTIMIZE TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with revoked ALL privilege"): table_name = f"merge_tree_{getuid()}" @@ -85,25 +109,36 @@ def privilege_check(grant_target_name, user_name, table_type, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I attempt to optimize a table"): - node.query(f"OPTIMIZE TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"OPTIMIZE TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("execute on cluster"): table_name = f"merge_tree_{getuid()}" try: with Given("I have a table on a cluster"): - node.query(f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8, x String, y Int8) ENGINE = MergeTree() PARTITION BY y ORDER BY d") + node.query( + f"CREATE TABLE {table_name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8, x String, y Int8) ENGINE = MergeTree() PARTITION BY y ORDER BY d" + ) with When("I grant the optimize privilege"): node.query(f"GRANT OPTIMIZE ON {table_name} TO {grant_target_name}") with Then("I attempt to optimize a table"): - node.query(f"OPTIMIZE TABLE {table_name} ON CLUSTER sharded_cluster", settings = [("user", user_name)]) + node.query( + f"OPTIMIZE TABLE {table_name} ON CLUSTER sharded_cluster", + settings=[("user", user_name)], + ) finally: with Finally("I drop the table from the cluster"): - node.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP TABLE IF EXISTS {table_name} ON CLUSTER sharded_cluster" + ) with Scenario("user with ALL privilege"): table_name = f"merge_tree_{getuid()}" @@ -117,20 +152,21 @@ def privilege_check(grant_target_name, user_name, table_type, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to optimize a table"): - node.query(f"OPTIMIZE TABLE {table_name}", settings = 
[("user", user_name)]) + node.query( + f"OPTIMIZE TABLE {table_name}", settings=[("user", user_name)] + ) + + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_Optimize("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("optimize") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of OPTIMIZE. - """ + """Check the RBAC functionality of OPTIMIZE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -139,11 +175,14 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example if table_type != "MergeTree" and not self.context.stress: continue with Example(str(example)): - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role(table_type=table_type) diff --git a/tests/testflows/rbac/tests/privileges/public_tables.py b/tests/testflows/rbac/tests/privileges/public_tables.py index 52232022ed6..7ce67d7f0b0 100755 --- a/tests/testflows/rbac/tests/privileges/public_tables.py +++ b/tests/testflows/rbac/tests/privileges/public_tables.py @@ -6,13 +6,13 @@ from testflows.asserts import error from rbac.requirements import * from rbac.helper.common import * + @TestScenario @Requirements( RQ_SRS_006_RBAC_Table_PublicTables("1.0"), ) def public_tables(self, node=None): - """Check that a user with no privilege is able to select from public tables. - """ + """Check that a user with no privilege is able to select from public tables.""" user_name = f"user_{getuid()}" if node is None: node = self.context.node @@ -20,24 +20,33 @@ def public_tables(self, node=None): with user(node, f"{user_name}"): with When("I check the user is able to select on system.one"): - node.query("SELECT count(*) FROM system.one", settings = [("user",user_name)]) + node.query( + "SELECT count(*) FROM system.one", settings=[("user", user_name)] + ) with And("I check the user is able to select on system.numbers"): - node.query("SELECT * FROM system.numbers LIMIT 1", settings = [("user",user_name)]) + node.query( + "SELECT * FROM system.numbers LIMIT 1", settings=[("user", user_name)] + ) with And("I check the user is able to select on system.contributors"): - node.query("SELECT count(*) FROM system.contributors", settings = [("user",user_name)]) + node.query( + "SELECT count(*) FROM system.contributors", + settings=[("user", user_name)], + ) with And("I check the user is able to select on system.functions"): - node.query("SELECT count(*) FROM system.functions", settings = [("user",user_name)]) + node.query( + "SELECT count(*) FROM system.functions", settings=[("user", user_name)] + ) + @TestScenario @Requirements( RQ_SRS_006_RBAC_Table_SensitiveTables("1.0"), ) def sensitive_tables(self, node=None): - """Check that a user with no privilege is not able to see from these tables. 
- """ + """Check that a user with no privilege is not able to see from these tables.""" user_name = f"user_{getuid()}" if node is None: node = self.context.node @@ -47,53 +56,84 @@ def sensitive_tables(self, node=None): node.query("SELECT 1") with When("I select from processes"): - output = node.query("SELECT count(*) FROM system.processes", settings = [("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.processes", settings=[("user", user_name)] + ).output assert output == 0, error() with And("I select from query_log"): - output = node.query("SELECT count(*) FROM system.query_log", settings = [("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.query_log", settings=[("user", user_name)] + ).output assert output == 0, error() with And("I select from query_thread_log"): - output = node.query("SELECT count(*) FROM system.query_thread_log", settings = [("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.query_thread_log", + settings=[("user", user_name)], + ).output assert output == 0, error() with And("I select from query_views_log"): - output = node.query("SELECT count(*) FROM system.query_views_log", settings = [("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.query_views_log", + settings=[("user", user_name)], + ).output assert output == 0, error() with And("I select from clusters"): - output = node.query("SELECT count(*) FROM system.clusters", settings = [("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.clusters", settings=[("user", user_name)] + ).output assert output == 0, error() with And("I select from events"): - output = node.query("SELECT count(*) FROM system.events", settings = [("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.events", settings=[("user", user_name)] + ).output assert output == 0, error() with And("I select from graphite_retentions"): - output = node.query("SELECT count(*) FROM system.graphite_retentions", settings = [("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.graphite_retentions", + settings=[("user", user_name)], + ).output assert output == 0, error() with And("I select from stack_trace"): - output = node.query("SELECT count(*) FROM system.stack_trace", settings = [("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.stack_trace", + settings=[("user", user_name)], + ).output assert output == 0, error() with And("I select from trace_log"): - output = node.query("SELECT count(*) FROM system.trace_log", settings = [("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.trace_log", settings=[("user", user_name)] + ).output assert output == 0, error() with And("I select from user_directories"): - output = node.query("SELECT count(*) FROM system.user_directories", settings = [("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.user_directories", + settings=[("user", user_name)], + ).output assert output == 0, error() with And("I select from zookeeper"): - output = node.query("SELECT count(*) FROM system.zookeeper WHERE path = '/clickhouse' ", settings = [("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.zookeeper WHERE path = '/clickhouse' ", + settings=[("user", user_name)], + ).output assert output == 0, error() with And("I select from macros"): - output = node.query("SELECT count(*) FROM system.macros", settings = 
[("user",user_name)]).output + output = node.query( + "SELECT count(*) FROM system.macros", settings=[("user", user_name)] + ).output assert output == 0, error() + @TestFeature @Name("public tables") def feature(self, node="clickhouse1"): diff --git a/tests/testflows/rbac/tests/privileges/role_admin.py b/tests/testflows/rbac/tests/privileges/role_admin.py index 8deea7874cd..191d4cb13c5 100644 --- a/tests/testflows/rbac/tests/privileges/role_admin.py +++ b/tests/testflows/rbac/tests/privileges/role_admin.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to grant role with `ROLE ADMIN` privilege granted directly. - """ + """Check that a user is able to grant role with `ROLE ADMIN` privilege granted directly.""" user_name = f"user_{getuid()}" @@ -19,10 +19,10 @@ def privileges_granted_directly(self, node=None): Suite(test=role_admin)(grant_target_name=user_name, user_name=user_name) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to grant role with `ROLE ADMIN` privilege granted through a role. - """ + """Check that a user is able to grant role with `ROLE ADMIN` privilege granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -37,10 +37,10 @@ def privileges_granted_via_role(self, node=None): Suite(test=role_admin)(grant_target_name=role_name, user_name=user_name) + @TestSuite def role_admin(self, grant_target_name, user_name, node=None): - """Check that user is able to execute to grant roles if and only if they have `ROLE ADMIN`. - """ + """Check that user is able to execute to grant roles if and only if they have `ROLE ADMIN`.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -59,8 +59,12 @@ def role_admin(self, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't grant a role"): - node.query(f"GRANT {role_admin_name} TO {target_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"GRANT {role_admin_name} TO {target_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("Grant role with privilege"): role_admin_name = f"role_admin_{getuid()}" @@ -72,7 +76,10 @@ def role_admin(self, grant_target_name, user_name, node=None): node.query(f"GRANT ROLE ADMIN ON *.* TO {grant_target_name}") with Then("I check the user can grant a role"): - node.query(f"GRANT {role_admin_name} TO {target_user_name}", settings = [("user", f"{user_name}")]) + node.query( + f"GRANT {role_admin_name} TO {target_user_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("Grant role on cluster"): role_admin_name = f"role_admin_{getuid()}" @@ -89,11 +96,16 @@ def role_admin(self, grant_target_name, user_name, node=None): node.query(f"GRANT ROLE ADMIN ON *.* TO {grant_target_name}") with Then("I check the user can grant a role"): - node.query(f"GRANT {role_admin_name} TO {target_user_name} ON CLUSTER sharded_cluster", settings = [("user", f"{user_name}")]) + node.query( + f"GRANT {role_admin_name} TO {target_user_name} ON CLUSTER sharded_cluster", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the user"): - node.query(f"DROP ROLE IF EXISTS {role_admin_name} ON CLUSTER sharded_cluster") + node.query( + f"DROP ROLE IF EXISTS 
{role_admin_name} ON CLUSTER sharded_cluster" + ) with Scenario("Grant role with revoked privilege"): role_admin_name = f"role_admin_{getuid()}" @@ -108,8 +120,12 @@ def role_admin(self, grant_target_name, user_name, node=None): node.query(f"REVOKE ROLE ADMIN ON *.* FROM {grant_target_name}") with Then("I check the user cannot grant a role"): - node.query(f"GRANT {role_admin_name} TO {target_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"GRANT {role_admin_name} TO {target_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("Grant role with revoked ALL privilege"): role_admin_name = f"role_admin_{getuid()}" @@ -124,8 +140,12 @@ def role_admin(self, grant_target_name, user_name, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then("I check the user cannot grant a role"): - node.query(f"GRANT {role_admin_name} TO {target_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"GRANT {role_admin_name} TO {target_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("Grant role with ALL privilege"): role_admin_name = f"role_admin_{getuid()}" @@ -137,18 +157,21 @@ def role_admin(self, grant_target_name, user_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I check the user can grant a role"): - node.query(f"GRANT {role_admin_name} TO {target_user_name}", settings = [("user", f"{user_name}")]) + node.query( + f"GRANT {role_admin_name} TO {target_user_name}", + settings=[("user", f"{user_name}")], + ) + @TestFeature @Name("role admin") @Requirements( RQ_SRS_006_RBAC_Privileges_RoleAdmin("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of ROLE ADMIN. - """ + """Check the RBAC functionality of ROLE ADMIN.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/select.py b/tests/testflows/rbac/tests/privileges/select.py index b1a95b4be0b..add90c7789d 100755 --- a/tests/testflows/rbac/tests/privileges/select.py +++ b/tests/testflows/rbac/tests/privileges/select.py @@ -8,13 +8,11 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_Privileges_None("1.0") -) +@Requirements(RQ_SRS_006_RBAC_Privileges_None("1.0")) def without_privilege(self, table_type, node=None): - """Check that user without select privilege on a table is not able to select on that table. 
- """ + """Check that user without select privilege on a table is not able to select on that table.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" @@ -34,16 +32,20 @@ def without_privilege(self, table_type, node=None): with Then("I run SELECT without privilege"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"SELECT * FROM {table_name}", settings = [("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( RQ_SRS_006_RBAC_Grant_Privilege_Select("1.0"), ) def user_with_privilege(self, table_type, node=None): - """Check that user can select from a table on which they have select privilege. - """ + """Check that user can select from a table on which they have select privilege.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" @@ -61,18 +63,18 @@ def user_with_privilege(self, table_type, node=None): node.query(f"GRANT SELECT ON {table_name} TO {user_name}") with Then("I verify SELECT command"): - user_select = node.query(f"SELECT d FROM {table_name}", settings = [("user",user_name)]) + user_select = node.query( + f"SELECT d FROM {table_name}", settings=[("user", user_name)] + ) default = node.query(f"SELECT d FROM {table_name}") assert user_select.output == default.output, error() + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_Privileges_All("1.0") -) +@Requirements(RQ_SRS_006_RBAC_Privileges_All("1.0")) def user_with_all_privilege(self, table_type, node=None): - """Check that user can select from a table if have ALL privilege. - """ + """Check that user can select from a table if have ALL privilege.""" user_name = f"user_{getuid()}" table_name = f"table_{getuid()}" @@ -90,11 +92,14 @@ def user_with_all_privilege(self, table_type, node=None): node.query(f"GRANT ALL ON *.* TO {user_name}") with Then("I verify SELECT command"): - user_select = node.query(f"SELECT d FROM {table_name}", settings = [("user",user_name)]) + user_select = node.query( + f"SELECT d FROM {table_name}", settings=[("user", user_name)] + ) default = node.query(f"SELECT d FROM {table_name}") assert user_select.output == default.output, error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_Revoke_Privilege_Select("1.0"), @@ -121,8 +126,13 @@ def user_with_revoked_privilege(self, table_type, node=None): with Then("I use SELECT, throws exception"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"SELECT * FROM {table_name}", settings = [("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario def user_with_revoked_all_privilege(self, table_type, node=None): @@ -147,26 +157,51 @@ def user_with_revoked_all_privilege(self, table_type, node=None): with Then("I use SELECT, throws exception"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"SELECT * FROM {table_name}", settings = [("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario def user_with_privilege_on_columns(self, table_type): - Scenario(run=user_column_privileges, - examples=Examples("grant_columns revoke_columns select_columns_fail select_columns_pass data_pass table_type", - 
[tuple(list(row)+[table_type]) for row in user_column_privileges.examples])) + Scenario( + run=user_column_privileges, + examples=Examples( + "grant_columns revoke_columns select_columns_fail select_columns_pass data_pass table_type", + [ + tuple(list(row) + [table_type]) + for row in user_column_privileges.examples + ], + ), + ) + @TestOutline @Requirements( RQ_SRS_006_RBAC_Select_Column("1.0"), ) -@Examples("grant_columns revoke_columns select_columns_fail select_columns_pass data_pass", [ - ("d", "d", "x", "d", '\'2020-01-01\''), - ("d,a", "d", "x", "d", '\'2020-01-01\''), - ("d,a,b", "d,a,b", "x", "d,b", '\'2020-01-01\',9'), - ("d,a,b", "b", "y", "d,a,b", '\'2020-01-01\',\'woo\',9') -]) -def user_column_privileges(self, grant_columns, select_columns_pass, data_pass, table_type, revoke_columns=None, select_columns_fail=None, node=None): +@Examples( + "grant_columns revoke_columns select_columns_fail select_columns_pass data_pass", + [ + ("d", "d", "x", "d", "'2020-01-01'"), + ("d,a", "d", "x", "d", "'2020-01-01'"), + ("d,a,b", "d,a,b", "x", "d,b", "'2020-01-01',9"), + ("d,a,b", "b", "y", "d,a,b", "'2020-01-01','woo',9"), + ], +) +def user_column_privileges( + self, + grant_columns, + select_columns_pass, + data_pass, + table_type, + revoke_columns=None, + select_columns_fail=None, + node=None, +): """Check that user is able to select on granted columns and unable to select on not granted or revoked columns. """ @@ -179,7 +214,9 @@ def user_column_privileges(self, grant_columns, select_columns_pass, data_pass, with table(node, table_name, table_type), user(node, user_name): with Given("The table has some data on some columns"): - node.query(f"INSERT INTO {table_name} ({select_columns_pass}) VALUES ({data_pass})") + node.query( + f"INSERT INTO {table_name} ({select_columns_pass}) VALUES ({data_pass})" + ) with When("I grant select privilege"): node.query(f"GRANT SELECT({grant_columns}) ON {table_name} TO {user_name}") @@ -188,22 +225,37 @@ def user_column_privileges(self, grant_columns, select_columns_pass, data_pass, with And("I select from not granted column"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"SELECT ({select_columns_fail}) FROM {table_name}", - settings = [("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT ({select_columns_fail}) FROM {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Then("I select from granted column, verify correct result"): - user_select = node.query(f"SELECT ({select_columns_pass}) FROM {table_name}", settings = [("user",user_name)]) + user_select = node.query( + f"SELECT ({select_columns_pass}) FROM {table_name}", + settings=[("user", user_name)], + ) default = node.query(f"SELECT ({select_columns_pass}) FROM {table_name}") assert user_select.output == default.output if revoke_columns is not None: with When("I revoke select privilege for columns from user"): - node.query(f"REVOKE SELECT({revoke_columns}) ON {table_name} FROM {user_name}") + node.query( + f"REVOKE SELECT({revoke_columns}) ON {table_name} FROM {user_name}" + ) with And("I select from revoked columns"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"SELECT ({select_columns_pass}) FROM {table_name}", settings = [("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT ({select_columns_pass}) FROM {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( @@ 
-236,10 +288,13 @@ def role_with_privilege(self, table_type, node=None): node.query(f"GRANT {role_name} TO {user_name}") with Then("I verify SELECT command"): - user_select = node.query(f"SELECT d FROM {table_name}", settings = [("user",user_name)]) + user_select = node.query( + f"SELECT d FROM {table_name}", settings=[("user", user_name)] + ) default = node.query(f"SELECT d FROM {table_name}") assert user_select.output == default.output, error() + @TestScenario @Requirements( RQ_SRS_006_RBAC_Revoke_Privilege_Select("1.0"), @@ -270,8 +325,13 @@ def role_with_revoked_privilege(self, table_type, node=None): with And("I select from the table"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"SELECT * FROM {table_name}", settings = [("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario def user_with_revoked_role(self, table_type, node=None): @@ -300,26 +360,51 @@ def user_with_revoked_role(self, table_type, node=None): with And("I select from the table"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"SELECT * FROM {table_name}", settings = [("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario def role_with_privilege_on_columns(self, table_type): - Scenario(run=role_column_privileges, - examples=Examples("grant_columns revoke_columns select_columns_fail select_columns_pass data_pass table_type", - [tuple(list(row)+[table_type]) for row in role_column_privileges.examples])) + Scenario( + run=role_column_privileges, + examples=Examples( + "grant_columns revoke_columns select_columns_fail select_columns_pass data_pass table_type", + [ + tuple(list(row) + [table_type]) + for row in role_column_privileges.examples + ], + ), + ) + @TestOutline @Requirements( RQ_SRS_006_RBAC_Select_Column("1.0"), ) -@Examples("grant_columns revoke_columns select_columns_fail select_columns_pass data_pass", [ - ("d", "d", "x", "d", '\'2020-01-01\''), - ("d,a", "d", "x", "d", '\'2020-01-01\''), - ("d,a,b", "d,a,b", "x", "d,b", '\'2020-01-01\',9'), - ("d,a,b", "b", "y", "d,a,b", '\'2020-01-01\',\'woo\',9') -]) -def role_column_privileges(self, grant_columns, select_columns_pass, data_pass, table_type, revoke_columns=None, select_columns_fail=None, node=None): +@Examples( + "grant_columns revoke_columns select_columns_fail select_columns_pass data_pass", + [ + ("d", "d", "x", "d", "'2020-01-01'"), + ("d,a", "d", "x", "d", "'2020-01-01'"), + ("d,a,b", "d,a,b", "x", "d,b", "'2020-01-01',9"), + ("d,a,b", "b", "y", "d,a,b", "'2020-01-01','woo',9"), + ], +) +def role_column_privileges( + self, + grant_columns, + select_columns_pass, + data_pass, + table_type, + revoke_columns=None, + select_columns_fail=None, + node=None, +): """Check that user is able to select from granted columns and unable to select from not granted or revoked columns. 
""" @@ -333,12 +418,16 @@ def role_column_privileges(self, grant_columns, select_columns_pass, data_pass, with table(node, table_name, table_type): with Given("The table has some data on some columns"): - node.query(f"INSERT INTO {table_name} ({select_columns_pass}) VALUES ({data_pass})") + node.query( + f"INSERT INTO {table_name} ({select_columns_pass}) VALUES ({data_pass})" + ) with user(node, user_name), role(node, role_name): with When("I grant select privilege"): - node.query(f"GRANT SELECT({grant_columns}) ON {table_name} TO {role_name}") + node.query( + f"GRANT SELECT({grant_columns}) ON {table_name} TO {role_name}" + ) with And("I grant the role to a user"): node.query(f"GRANT {role_name} TO {user_name}") @@ -346,23 +435,36 @@ def role_column_privileges(self, grant_columns, select_columns_pass, data_pass, if select_columns_fail is not None: with And("I select from not granted column"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"SELECT ({select_columns_fail}) FROM {table_name}", - settings = [("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT ({select_columns_fail}) FROM {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Then("I verify SELECT command"): - user_select = node.query(f"SELECT d FROM {table_name}", settings = [("user",user_name)]) + user_select = node.query( + f"SELECT d FROM {table_name}", settings=[("user", user_name)] + ) default = node.query(f"SELECT d FROM {table_name}") assert user_select.output == default.output, error() if revoke_columns is not None: with When("I revoke select privilege for columns from role"): - node.query(f"REVOKE SELECT({revoke_columns}) ON {table_name} FROM {role_name}") + node.query( + f"REVOKE SELECT({revoke_columns}) ON {table_name} FROM {role_name}" + ) with And("I select from revoked columns"): exitcode, message = errors.not_enough_privileges(name=user_name) - node.query(f"SELECT ({select_columns_pass}) FROM {table_name}", - settings = [("user",user_name)], exitcode=exitcode, message=message) + node.query( + f"SELECT ({select_columns_pass}) FROM {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( @@ -385,13 +487,19 @@ def user_with_privilege_on_cluster(self, table_type, node=None): node.query(f"INSERT INTO {table_name} (d) VALUES ('2020-01-01')") with Given("I have a user on a cluster"): - node.query(f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster") + node.query( + f"CREATE USER OR REPLACE {user_name} ON CLUSTER sharded_cluster" + ) with When("I grant select privilege on a cluster"): - node.query(f"GRANT ON CLUSTER sharded_cluster SELECT ON {table_name} TO {user_name}") + node.query( + f"GRANT ON CLUSTER sharded_cluster SELECT ON {table_name} TO {user_name}" + ) with Then("I verify SELECT command"): - user_select = node.query(f"SELECT d FROM {table_name}", settings = [("user",user_name)]) + user_select = node.query( + f"SELECT d FROM {table_name}", settings=[("user", user_name)] + ) default = node.query(f"SELECT d FROM {table_name}") assert user_select.output == default.output, error() @@ -399,28 +507,28 @@ def user_with_privilege_on_cluster(self, table_type, node=None): with Finally("I drop the user"): node.query(f"DROP USER {user_name} ON CLUSTER sharded_cluster") + @TestOutline(Feature) -@Requirements( - RQ_SRS_006_RBAC_Select("1.0"), - RQ_SRS_006_RBAC_Select_TableEngines("1.0") -) -@Examples("table_type", [ - (key,) for key in 
table_types.keys() -]) +@Requirements(RQ_SRS_006_RBAC_Select("1.0"), RQ_SRS_006_RBAC_Select_TableEngines("1.0")) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("select") def feature(self, table_type, stress=None, node="clickhouse1"): - """Check the RBAC functionality of SELECT. - """ + """Check the RBAC functionality of SELECT.""" self.context.node = self.context.cluster.node(node) if stress is not None: self.context.stress = stress - args = {"table_type" : table_type} + args = {"table_type": table_type} with Pool(10) as pool: try: for scenario in loads(current_module(), Scenario): - Scenario(test=scenario, setup=instrument_clickhouse_server_log, parallel=True, executor=pool)(**args) + Scenario( + test=scenario, + setup=instrument_clickhouse_server_log, + parallel=True, + executor=pool, + )(**args) finally: join() diff --git a/tests/testflows/rbac/tests/privileges/show/show_columns.py b/tests/testflows/rbac/tests/privileges/show/show_columns.py index 108200e7a57..25bafe46a4e 100644 --- a/tests/testflows/rbac/tests/privileges/show/show_columns.py +++ b/tests/testflows/rbac/tests/privileges/show/show_columns.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def describe_with_privilege_granted_directly(self, node=None): """Check that user is able to execute DESCRIBE on a table if and only if @@ -18,7 +19,10 @@ def describe_with_privilege_granted_directly(self, node=None): with user(node, f"{user_name}"): table_name = f"table_name_{getuid()}" - Suite(test=describe)(grant_target_name=user_name, user_name=user_name, table_name=table_name) + Suite(test=describe)( + grant_target_name=user_name, user_name=user_name, table_name=table_name + ) + @TestSuite def describe_with_privilege_granted_via_role(self, node=None): @@ -37,15 +41,17 @@ def describe_with_privilege_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(test=describe)(grant_target_name=role_name, user_name=user_name, table_name=table_name) + Suite(test=describe)( + grant_target_name=role_name, user_name=user_name, table_name=table_name + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_DescribeTable_RequiredPrivilege("1.0"), ) def describe(self, grant_target_name, user_name, table_name, node=None): - """Check that user is able to execute DESCRIBE only when they have SHOW COLUMNS privilege. 
- """ + """Check that user is able to execute DESCRIBE only when they have SHOW COLUMNS privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -62,8 +68,12 @@ def describe(self, grant_target_name, user_name, table_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then(f"I attempt to DESCRIBE {table_name}"): - node.query(f"DESCRIBE {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DESCRIBE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("DESCRIBE with privilege"): @@ -71,7 +81,9 @@ def describe(self, grant_target_name, user_name, table_name, node=None): node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}") with Then(f"I attempt to DESCRIBE {table_name}"): - node.query(f"DESCRIBE TABLE {table_name}", settings=[("user",user_name)]) + node.query( + f"DESCRIBE TABLE {table_name}", settings=[("user", user_name)] + ) with Scenario("DESCRIBE with revoked privilege"): @@ -79,11 +91,17 @@ def describe(self, grant_target_name, user_name, table_name, node=None): node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}") with And(f"I revoke SHOW COLUMNS on the table"): - node.query(f"REVOKE SHOW COLUMNS ON {table_name} FROM {grant_target_name}") + node.query( + f"REVOKE SHOW COLUMNS ON {table_name} FROM {grant_target_name}" + ) with Then(f"I attempt to DESCRIBE {table_name}"): - node.query(f"DESCRIBE {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DESCRIBE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("DESCRIBE with revoked ALL privilege"): @@ -94,8 +112,12 @@ def describe(self, grant_target_name, user_name, table_name, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with Then(f"I attempt to DESCRIBE {table_name}"): - node.query(f"DESCRIBE {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"DESCRIBE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("DESCRIBE with ALL privilege"): @@ -103,7 +125,10 @@ def describe(self, grant_target_name, user_name, table_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then(f"I attempt to DESCRIBE {table_name}"): - node.query(f"DESCRIBE TABLE {table_name}", settings=[("user",user_name)]) + node.query( + f"DESCRIBE TABLE {table_name}", settings=[("user", user_name)] + ) + @TestSuite def show_create_with_privilege_granted_directly(self, node=None): @@ -118,7 +143,10 @@ def show_create_with_privilege_granted_directly(self, node=None): with user(node, f"{user_name}"): table_name = f"table_name_{getuid()}" - Suite(test=show_create)(grant_target_name=user_name, user_name=user_name, table_name=table_name) + Suite(test=show_create)( + grant_target_name=user_name, user_name=user_name, table_name=table_name + ) + @TestSuite def show_create_with_privilege_granted_via_role(self, node=None): @@ -137,15 +165,17 @@ def show_create_with_privilege_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(test=show_create)(grant_target_name=role_name, user_name=user_name, table_name=table_name) + Suite(test=show_create)( + grant_target_name=role_name, user_name=user_name, table_name=table_name + ) + @TestSuite @Requirements( 
RQ_SRS_006_RBAC_ShowCreateTable_RequiredPrivilege("1.0"), ) def show_create(self, grant_target_name, user_name, table_name, node=None): - """Check that user is able to execute SHOW CREATE on a table only when they have SHOW COLUMNS privilege. - """ + """Check that user is able to execute SHOW CREATE on a table only when they have SHOW COLUMNS privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -162,8 +192,12 @@ def show_create(self, grant_target_name, user_name, table_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then(f"I attempt to SHOW CREATE {table_name}"): - node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW CREATE with privilege"): @@ -171,7 +205,9 @@ def show_create(self, grant_target_name, user_name, table_name, node=None): node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}") with Then(f"I attempt to SHOW CREATE {table_name}"): - node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)]) + node.query( + f"SHOW CREATE TABLE {table_name}", settings=[("user", user_name)] + ) with Scenario("SHOW CREATE with revoked privilege"): @@ -179,11 +215,17 @@ def show_create(self, grant_target_name, user_name, table_name, node=None): node.query(f"GRANT SHOW COLUMNS ON {table_name} TO {grant_target_name}") with And(f"I revoke SHOW COLUMNS on the table"): - node.query(f"REVOKE SHOW COLUMNS ON {table_name} FROM {grant_target_name}") + node.query( + f"REVOKE SHOW COLUMNS ON {table_name} FROM {grant_target_name}" + ) with Then(f"I attempt to SHOW CREATE {table_name}"): - node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW CREATE with ALL privilege"): @@ -191,21 +233,35 @@ def show_create(self, grant_target_name, user_name, table_name, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then(f"I attempt to SHOW CREATE {table_name}"): - node.query(f"SHOW CREATE TABLE {table_name}", settings=[("user",user_name)]) + node.query( + f"SHOW CREATE TABLE {table_name}", settings=[("user", user_name)] + ) + @TestFeature @Name("show columns") @Requirements( RQ_SRS_006_RBAC_ShowColumns_Privilege("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SHOW COLUMNS. 
- """ + """Check the RBAC functionality of SHOW COLUMNS.""" self.context.node = self.context.cluster.node(node) - Suite(run=describe_with_privilege_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=describe_with_privilege_granted_via_role, setup=instrument_clickhouse_server_log) - Suite(run=show_create_with_privilege_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=show_create_with_privilege_granted_via_role, setup=instrument_clickhouse_server_log) + Suite( + run=describe_with_privilege_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=describe_with_privilege_granted_via_role, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=show_create_with_privilege_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=show_create_with_privilege_granted_via_role, + setup=instrument_clickhouse_server_log, + ) diff --git a/tests/testflows/rbac/tests/privileges/show/show_databases.py b/tests/testflows/rbac/tests/privileges/show/show_databases.py index 39a46947afe..b9e0dfc75d7 100644 --- a/tests/testflows/rbac/tests/privileges/show/show_databases.py +++ b/tests/testflows/rbac/tests/privileges/show/show_databases.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def dict_privileges_granted_directly(self, node=None): """Check that a user is able to execute `USE` and `SHOW CREATE` @@ -20,10 +21,18 @@ def dict_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): db_name = f"db_name_{getuid()}" - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name db_name", [ - tuple(list(row)+[user_name,user_name,db_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name db_name", + [ + tuple(list(row) + [user_name, user_name, db_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def dict_privileges_granted_via_role(self, node=None): @@ -44,39 +53,69 @@ def dict_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name db_name", [ - tuple(list(row)+[role_name,user_name,db_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name db_name", + [ + tuple(list(row) + [role_name, user_name, db_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SHOW","*.*"), - ("SHOW DATABASES","db"), - ("CREATE DATABASE","db"), - ("DROP DATABASE","db"), -]) -def check_privilege(self, privilege, on, grant_target_name, user_name, db_name, node=None): - """Run checks for commands that require SHOW DATABASE privilege. 
- """ +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SHOW", "*.*"), + ("SHOW DATABASES", "db"), + ("CREATE DATABASE", "db"), + ("DROP DATABASE", "db"), + ], +) +def check_privilege( + self, privilege, on, grant_target_name, user_name, db_name, node=None +): + """Run checks for commands that require SHOW DATABASE privilege.""" if node is None: node = self.context.node on = on.replace("db", f"{db_name}") - Suite(test=show_db)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, db_name=db_name) - Suite(test=use)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, db_name=db_name) - Suite(test=show_create)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, db_name=db_name) + Suite(test=show_db)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + db_name=db_name, + ) + Suite(test=use)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + db_name=db_name, + ) + Suite(test=show_create)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + db_name=db_name, + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowDatabases_RequiredPrivilege("1.0"), ) def show_db(self, privilege, on, grant_target_name, user_name, db_name, node=None): - """Check that user is only able to see a database in SHOW DATABASES when they have a privilege on that database. - """ + """Check that user is only able to see a database in SHOW DATABASES when they have a privilege on that database.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -96,8 +135,10 @@ def show_db(self, privilege, on, grant_target_name, user_name, db_name, node=Non node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user doesn't see the database"): - output = node.query("SHOW DATABASES", settings = [("user", f"{user_name}")]).output - assert output == '', error() + output = node.query( + "SHOW DATABASES", settings=[("user", f"{user_name}")] + ).output + assert output == "", error() with Scenario("SHOW DATABASES with privilege"): @@ -105,7 +146,11 @@ def show_db(self, privilege, on, grant_target_name, user_name, db_name, node=Non node.query(f"GRANT {privilege} ON {db_name}.* TO {grant_target_name}") with Then("I check the user does see a database"): - output = node.query("SHOW DATABASES", settings = [("user", f"{user_name}")], message = f'{db_name}') + output = node.query( + "SHOW DATABASES", + settings=[("user", f"{user_name}")], + message=f"{db_name}", + ) with Scenario("SHOW DATABASES with revoked privilege"): @@ -113,16 +158,21 @@ def show_db(self, privilege, on, grant_target_name, user_name, db_name, node=Non node.query(f"GRANT {privilege} ON {db_name}.* TO {grant_target_name}") with And(f"I revoke {privilege} on the database"): - node.query(f"REVOKE {privilege} ON {db_name}.* FROM {grant_target_name}") + node.query( + f"REVOKE {privilege} ON {db_name}.* FROM {grant_target_name}" + ) with Then("I check the user does not see a database"): - output = node.query("SHOW DATABASES", settings = [("user", f"{user_name}")]).output - assert output == f'', error() + output = node.query( + "SHOW DATABASES", settings=[("user", f"{user_name}")] + ).output + assert output == f"", error() finally: with Finally("I drop the database"): node.query(f"DROP DATABASE IF EXISTS {db_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_UseDatabase_RequiredPrivilege("1.0"), @@ 
-149,8 +199,12 @@ def use(self, privilege, on, grant_target_name, user_name, db_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then(f"I attempt to USE {db_name}"): - node.query(f"USE {db_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"USE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("USE with privilege"): @@ -158,7 +212,7 @@ def use(self, privilege, on, grant_target_name, user_name, db_name, node=None): node.query(f"GRANT {privilege} ON {db_name}.* TO {grant_target_name}") with Then(f"I attempt to USE {db_name}"): - node.query(f"USE {db_name}", settings=[("user",user_name)]) + node.query(f"USE {db_name}", settings=[("user", user_name)]) with Scenario("USE with revoked privilege"): @@ -166,16 +220,23 @@ def use(self, privilege, on, grant_target_name, user_name, db_name, node=None): node.query(f"GRANT {privilege} ON {db_name}.* TO {grant_target_name}") with And(f"I revoke {privilege} on the database"): - node.query(f"REVOKE {privilege} ON {db_name}.* FROM {grant_target_name}") + node.query( + f"REVOKE {privilege} ON {db_name}.* FROM {grant_target_name}" + ) with Then(f"I attempt to USE {db_name}"): - node.query(f"USE {db_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"USE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): node.query(f"DROP DATABASE IF EXISTS {db_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowCreateDatabase_RequiredPrivilege("1.0"), @@ -202,8 +263,12 @@ def show_create(self, privilege, on, grant_target_name, user_name, db_name, node node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then(f"I attempt to SHOW CREATE {db_name}"): - node.query(f"SHOW CREATE DATABASE {db_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW CREATE with privilege"): @@ -211,7 +276,9 @@ def show_create(self, privilege, on, grant_target_name, user_name, db_name, node node.query(f"GRANT {privilege} ON {db_name}.* TO {grant_target_name}") with Then(f"I attempt to SHOW CREATE {db_name}"): - node.query(f"SHOW CREATE DATABASE {db_name}", settings=[("user",user_name)]) + node.query( + f"SHOW CREATE DATABASE {db_name}", settings=[("user", user_name)] + ) with Scenario("SHOW CREATE with revoked privilege"): @@ -219,26 +286,32 @@ def show_create(self, privilege, on, grant_target_name, user_name, db_name, node node.query(f"GRANT {privilege} ON {db_name}.* TO {grant_target_name}") with And(f"I revoke {privilege} on the database"): - node.query(f"REVOKE {privilege} ON {db_name}.* FROM {grant_target_name}") + node.query( + f"REVOKE {privilege} ON {db_name}.* FROM {grant_target_name}" + ) with Then(f"I attempt to SHOW CREATE {db_name}"): - node.query(f"SHOW CREATE DATABASE {db_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE DATABASE {db_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the database"): node.query(f"DROP DATABASE IF EXISTS {db_name}") + @TestFeature @Name("show databases") @Requirements( RQ_SRS_006_RBAC_ShowDatabases_Privilege("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + 
RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SHOW DATABASES. - """ + """Check the RBAC functionality of SHOW DATABASES.""" self.context.node = self.context.cluster.node(node) Suite(run=dict_privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/show/show_dictionaries.py b/tests/testflows/rbac/tests/privileges/show/show_dictionaries.py index 5b717b5f47c..9c571b8836e 100644 --- a/tests/testflows/rbac/tests/privileges/show/show_dictionaries.py +++ b/tests/testflows/rbac/tests/privileges/show/show_dictionaries.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def dict_privileges_granted_directly(self, node=None): """Check that a user is able to execute `SHOW CREATE` and `EXISTS` @@ -20,10 +21,18 @@ def dict_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): dict_name = f"dict_name_{getuid()}" - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name dict_name", [ - tuple(list(row)+[user_name,user_name,dict_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name dict_name", + [ + tuple(list(row) + [user_name, user_name, dict_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def dict_privileges_granted_via_role(self, node=None): @@ -44,31 +53,62 @@ def dict_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name dict_name", [ - tuple(list(row)+[role_name,user_name,dict_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name dict_name", + [ + tuple(list(row) + [role_name, user_name, dict_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SHOW","*.*"), - ("SHOW DICTIONARIES","dict"), - ("CREATE DICTIONARY","dict"), - ("DROP DICTIONARY","dict"), -]) -def check_privilege(self, privilege, on, grant_target_name, user_name, dict_name, node=None): - """Run checks for commands that require SHOW DICTIONARY privilege. 
- """ +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SHOW", "*.*"), + ("SHOW DICTIONARIES", "dict"), + ("CREATE DICTIONARY", "dict"), + ("DROP DICTIONARY", "dict"), + ], +) +def check_privilege( + self, privilege, on, grant_target_name, user_name, dict_name, node=None +): + """Run checks for commands that require SHOW DICTIONARY privilege.""" if node is None: node = self.context.node on = on.replace("dict", f"{dict_name}") - Suite(test=show_dict)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, dict_name=dict_name) - Suite(test=exists)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, dict_name=dict_name) - Suite(test=show_create)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, dict_name=dict_name) + Suite(test=show_dict)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + dict_name=dict_name, + ) + Suite(test=exists)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + dict_name=dict_name, + ) + Suite(test=show_create)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + dict_name=dict_name, + ) + @TestSuite @Requirements( @@ -85,7 +125,9 @@ def show_dict(self, privilege, on, grant_target_name, user_name, dict_name, node try: with Given("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with Scenario("SHOW DICTIONARIES without privilege"): @@ -96,15 +138,21 @@ def show_dict(self, privilege, on, grant_target_name, user_name, dict_name, node node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user doesn't see the dictionary"): - output = node.query("SHOW DICTIONARIES", settings = [("user", f"{user_name}")]).output - assert output == '', error() + output = node.query( + "SHOW DICTIONARIES", settings=[("user", f"{user_name}")] + ).output + assert output == "", error() with Scenario("SHOW DICTIONARIES with privilege"): with When(f"I grant {privilege} on the dictionary"): node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user does see a dictionary"): - node.query("SHOW DICTIONARIES", settings = [("user", f"{user_name}")], message=f"{dict_name}") + node.query( + "SHOW DICTIONARIES", + settings=[("user", f"{user_name}")], + message=f"{dict_name}", + ) with Scenario("SHOW DICTIONARIES with revoked privilege"): with When(f"I grant {privilege} on the dictionary"): @@ -114,13 +162,16 @@ def show_dict(self, privilege, on, grant_target_name, user_name, dict_name, node node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user does not see a dictionary"): - output = node.query("SHOW DICTIONARIES", settings = [("user", f"{user_name}")]).output - assert output == f'', error() + output = node.query( + "SHOW DICTIONARIES", settings=[("user", f"{user_name}")] + ).output + assert output == f"", error() finally: with Finally("I drop the dictionary"): node.query(f"DROP DICTIONARY IF EXISTS {dict_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_ExistsDictionary_RequiredPrivilege("1.0"), @@ -136,7 +187,9 @@ def exists(self, privilege, on, grant_target_name, user_name, dict_name, node=No try: with Given("I have a dictionary"): - 
node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with Scenario("EXISTS without privilege"): @@ -147,15 +200,19 @@ def exists(self, privilege, on, grant_target_name, user_name, dict_name, node=No node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then(f"I check if {dict_name} EXISTS"): - node.query(f"EXISTS {dict_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"EXISTS {dict_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("EXISTS with privilege"): with When(f"I grant {privilege} on the dictionary"): node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then(f"I check if {dict_name} EXISTS"): - node.query(f"EXISTS {dict_name}", settings=[("user",user_name)]) + node.query(f"EXISTS {dict_name}", settings=[("user", user_name)]) with Scenario("EXISTS with revoked privilege"): with When(f"I grant {privilege} on the dictionary"): @@ -165,18 +222,25 @@ def exists(self, privilege, on, grant_target_name, user_name, dict_name, node=No node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then(f"I check if {dict_name} EXISTS"): - node.query(f"EXISTS {dict_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"EXISTS {dict_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the dictionary"): node.query(f"DROP DICTIONARY IF EXISTS {dict_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowCreateDictionary_RequiredPrivilege("1.0"), ) -def show_create(self, privilege, on, grant_target_name, user_name, dict_name, node=None): +def show_create( + self, privilege, on, grant_target_name, user_name, dict_name, node=None +): """Check that user is able to execute SHOW CREATE on a dictionary if and only if the user has SHOW DICTIONARY privilege on that dictionary. 
""" @@ -187,7 +251,9 @@ def show_create(self, privilege, on, grant_target_name, user_name, dict_name, no try: with Given("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)") + node.query( + f"CREATE DICTIONARY {dict_name}(x Int32, y Int32) PRIMARY KEY x LAYOUT(FLAT()) SOURCE(CLICKHOUSE()) LIFETIME(0)" + ) with Scenario("SHOW CREATE without privilege"): @@ -198,15 +264,22 @@ def show_create(self, privilege, on, grant_target_name, user_name, dict_name, no node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then(f"I attempt to SHOW CREATE {dict_name}"): - node.query(f"SHOW CREATE DICTIONARY {dict_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE DICTIONARY {dict_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW CREATE with privilege"): with When(f"I grant {privilege} on the dictionary"): node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then(f"I attempt to SHOW CREATE {dict_name}"): - node.query(f"SHOW CREATE DICTIONARY {dict_name}", settings=[("user",user_name)]) + node.query( + f"SHOW CREATE DICTIONARY {dict_name}", + settings=[("user", user_name)], + ) with Scenario("SHOW CREATE with revoked privilege"): with When(f"I grant {privilege} on the dictionary"): @@ -216,23 +289,27 @@ def show_create(self, privilege, on, grant_target_name, user_name, dict_name, no node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then(f"I attempt to SHOW CREATE {dict_name}"): - node.query(f"SHOW CREATE DICTIONARY {dict_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE DICTIONARY {dict_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the dictionary"): node.query(f"DROP DICTIONARY IF EXISTS {dict_name}") + @TestFeature @Name("show dictionaries") @Requirements( RQ_SRS_006_RBAC_ShowDictionaries_Privilege("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SHOW DICTIONARIES. - """ + """Check the RBAC functionality of SHOW DICTIONARIES.""" self.context.node = self.context.cluster.node(node) Suite(run=dict_privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/show/show_quotas.py b/tests/testflows/rbac/tests/privileges/show/show_quotas.py index 20476ae759b..d7556db6d07 100644 --- a/tests/testflows/rbac/tests/privileges/show/show_quotas.py +++ b/tests/testflows/rbac/tests/privileges/show/show_quotas.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @contextmanager def quota(node, name): try: @@ -17,10 +18,10 @@ def quota(node, name): with Finally("I drop the quota"): node.query(f"DROP QUOTA IF EXISTS {name}") + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `SHOW QUOTAS` with privileges are granted directly. 
- """ + """Check that a user is able to execute `SHOW QUOTAS` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -29,15 +30,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `SHOW QUOTAS` with privileges are granted through a role. - """ + """Check that a user is able to execute `SHOW QUOTAS` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -50,36 +58,50 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("SHOW ACCESS",), - ("SHOW QUOTAS",), - ("SHOW CREATE QUOTA",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("SHOW ACCESS",), + ("SHOW QUOTAS",), + ("SHOW CREATE QUOTA",), + ], +) def check_privilege(self, privilege, grant_target_name, user_name, node=None): - """Run checks for commands that require SHOW QUOTAS privilege. - """ + """Run checks for commands that require SHOW QUOTAS privilege.""" if node is None: node = self.context.node - Suite(test=show_quotas)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) - Suite(test=show_create)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) + Suite(test=show_quotas)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + Suite(test=show_create)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowQuotas_RequiredPrivilege("1.0"), ) def show_quotas(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SHOW QUOTAS` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `SHOW QUOTAS` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -94,8 +116,12 @@ def show_quotas(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SHOW QUOTAS"): - node.query(f"SHOW QUOTAS", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW QUOTAS", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW QUOTAS with privilege"): @@ -103,7 +129,7 @@ def show_quotas(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SHOW QUOTAS"): - node.query(f"SHOW QUOTAS", settings = [("user", f"{user_name}")]) + node.query(f"SHOW QUOTAS", settings=[("user", f"{user_name}")]) with Scenario("SHOW QUOTAS with revoked privilege"): @@ -114,16 +140,20 @@ def show_quotas(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SHOW QUOTAS"): - node.query(f"SHOW QUOTAS", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW QUOTAS", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowCreateQuota_RequiredPrivilege("1.0"), ) def show_create(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SHOW CREATE QUOTA` when they have the necessary privilege. - """ + """Check that user is only able to execute `SHOW CREATE QUOTA` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -141,8 +171,12 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SHOW CREATE QUOTA"): - node.query(f"SHOW CREATE QUOTA {target_quota_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE QUOTA {target_quota_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW CREATE QUOTA with privilege"): target_quota_name = f"target_quota_{getuid()}" @@ -153,7 +187,10 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SHOW CREATE QUOTA"): - node.query(f"SHOW CREATE QUOTA {target_quota_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SHOW CREATE QUOTA {target_quota_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SHOW CREATE QUOTA with revoked privilege"): target_quota_name = f"target_quota_{getuid()}" @@ -167,19 +204,23 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SHOW CREATE QUOTA"): - node.query(f"SHOW CREATE QUOTA {target_quota_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE QUOTA {target_quota_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("show quotas") @Requirements( 
RQ_SRS_006_RBAC_ShowQuotas_Privilege("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SHOW QUOTAS. - """ + """Check the RBAC functionality of SHOW QUOTAS.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/show/show_roles.py b/tests/testflows/rbac/tests/privileges/show/show_roles.py index 14d038102dd..6fe3906eeb6 100644 --- a/tests/testflows/rbac/tests/privileges/show/show_roles.py +++ b/tests/testflows/rbac/tests/privileges/show/show_roles.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `SHOW ROLES` with privileges are granted directly. - """ + """Check that a user is able to execute `SHOW ROLES` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `SHOW ROLES` with privileges are granted through a role. - """ + """Check that a user is able to execute `SHOW ROLES` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,36 +45,50 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("SHOW ACCESS",), - ("SHOW ROLES",), - ("SHOW CREATE ROLE",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("SHOW ACCESS",), + ("SHOW ROLES",), + ("SHOW CREATE ROLE",), + ], +) def check_privilege(self, privilege, grant_target_name, user_name, node=None): - """Run checks for commands that require SHOW ROLES privilege. 
- """ + """Run checks for commands that require SHOW ROLES privilege.""" if node is None: node = self.context.node - Suite(test=show_roles)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) - Suite(test=show_create)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) + Suite(test=show_roles)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + Suite(test=show_create)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowRoles_RequiredPrivilege("1.0"), ) def show_roles(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SHOW ROLES` when they have the necessary privilege. - """ + """Check that user is only able to execute `SHOW ROLES` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -82,8 +103,12 @@ def show_roles(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SHOW ROLES"): - node.query(f"SHOW ROLES", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW ROLES", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW ROLES with privilege"): @@ -91,7 +116,7 @@ def show_roles(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SHOW ROLES"): - node.query(f"SHOW ROLES", settings = [("user", f"{user_name}")]) + node.query(f"SHOW ROLES", settings=[("user", f"{user_name}")]) with Scenario("SHOW ROLES with revoked privilege"): @@ -102,16 +127,20 @@ def show_roles(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SHOW ROLES"): - node.query(f"SHOW ROLES", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW ROLES", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowCreateRole_RequiredPrivilege("1.0"), ) def show_create(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SHOW CREATE ROLE` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `SHOW CREATE ROLE` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -129,8 +158,12 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SHOW CREATE ROLE"): - node.query(f"SHOW CREATE ROLE {target_role_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE ROLE {target_role_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW CREATE ROLE with privilege"): target_role_name = f"target_role_{getuid()}" @@ -141,7 +174,10 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SHOW CREATE ROLE"): - node.query(f"SHOW CREATE ROLE {target_role_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SHOW CREATE ROLE {target_role_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SHOW CREATE ROLE with revoked privilege"): target_role_name = f"target_role_{getuid()}" @@ -155,19 +191,23 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SHOW CREATE ROLE"): - node.query(f"SHOW CREATE ROLE {target_role_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE ROLE {target_role_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("show roles") @Requirements( RQ_SRS_006_RBAC_ShowRoles_Privilege("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SHOW ROLES. - """ + """Check the RBAC functionality of SHOW ROLES.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/show/show_row_policies.py b/tests/testflows/rbac/tests/privileges/show/show_row_policies.py index 789c4c95223..5591c5bd7a1 100644 --- a/tests/testflows/rbac/tests/privileges/show/show_row_policies.py +++ b/tests/testflows/rbac/tests/privileges/show/show_row_policies.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @contextmanager def row_policy(node, name, table): try: @@ -17,10 +18,10 @@ def row_policy(node, name, table): with Finally("I drop the row policy"): node.query(f"DROP ROW POLICY IF EXISTS {name} ON {table}") + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `SHOW ROW POLICIES` with privileges are granted directly. 
- """ + """Check that a user is able to execute `SHOW ROW POLICIES` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -29,15 +30,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `SHOW ROW POLICIES` with privileges are granted through a role. - """ + """Check that a user is able to execute `SHOW ROW POLICIES` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -50,38 +58,52 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("SHOW ACCESS",), - ("SHOW ROW POLICIES",), - ("SHOW POLICIES",), - ("SHOW CREATE ROW POLICY",), - ("SHOW CREATE POLICY",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("SHOW ACCESS",), + ("SHOW ROW POLICIES",), + ("SHOW POLICIES",), + ("SHOW CREATE ROW POLICY",), + ("SHOW CREATE POLICY",), + ], +) def check_privilege(self, privilege, grant_target_name, user_name, node=None): - """Run checks for commands that require SHOW ROW POLICIES privilege. - """ + """Run checks for commands that require SHOW ROW POLICIES privilege.""" if node is None: node = self.context.node - Suite(test=show_row_policies)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) - Suite(test=show_create)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) + Suite(test=show_row_policies)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + Suite(test=show_create)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowRowPolicies_RequiredPrivilege("1.0"), ) def show_row_policies(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SHOW ROW POLICIES` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `SHOW ROW POLICIES` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -96,8 +118,12 @@ def show_row_policies(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SHOW ROW POLICIES"): - node.query(f"SHOW ROW POLICIES", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW ROW POLICIES", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW ROW POLICIES with privilege"): @@ -105,7 +131,7 @@ def show_row_policies(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SHOW ROW POLICIES"): - node.query(f"SHOW ROW POLICIES", settings = [("user", f"{user_name}")]) + node.query(f"SHOW ROW POLICIES", settings=[("user", f"{user_name}")]) with Scenario("SHOW ROW POLICIES with revoked privilege"): @@ -116,16 +142,20 @@ def show_row_policies(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SHOW ROW POLICIES"): - node.query(f"SHOW ROW POLICIES", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW ROW POLICIES", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowCreateRowPolicy_RequiredPrivilege("1.0"), ) def show_create(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SHOW CREATE ROW POLICY` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `SHOW CREATE ROW POLICY` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -144,8 +174,12 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SHOW CREATE ROW POLICY"): - node.query(f"SHOW CREATE ROW POLICY {target_row_policy_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE ROW POLICY {target_row_policy_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW CREATE ROW POLICY with privilege"): target_row_policy_name = f"target_row_policy_{getuid()}" @@ -157,7 +191,10 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SHOW CREATE ROW POLICY"): - node.query(f"SHOW CREATE ROW POLICY {target_row_policy_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SHOW CREATE ROW POLICY {target_row_policy_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SHOW CREATE ROW POLICY with revoked privilege"): target_row_policy_name = f"target_row_policy_{getuid()}" @@ -172,19 +209,23 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SHOW CREATE ROW POLICY"): - node.query(f"SHOW CREATE ROW POLICY {target_row_policy_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE ROW POLICY {target_row_policy_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("show row policies") @Requirements( RQ_SRS_006_RBAC_ShowRowPolicies_Privilege("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SHOW ROW POLICYS. - """ + """Check the RBAC functionality of SHOW ROW POLICYS.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/show/show_settings_profiles.py b/tests/testflows/rbac/tests/privileges/show/show_settings_profiles.py index 18ca0ee7f6e..1342b420afe 100644 --- a/tests/testflows/rbac/tests/privileges/show/show_settings_profiles.py +++ b/tests/testflows/rbac/tests/privileges/show/show_settings_profiles.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @contextmanager def settings_profile(node, name): try: @@ -17,10 +18,10 @@ def settings_profile(node, name): with Finally("I drop the settings_profile"): node.query(f"DROP SETTINGS PROFILE IF EXISTS {name}") + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `SHOW SETTINGS PROFILES` with privileges are granted directly. 
- """ + """Check that a user is able to execute `SHOW SETTINGS PROFILES` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -29,15 +30,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `SHOW SETTINGS PROFILES` with privileges are granted through a role. - """ + """Check that a user is able to execute `SHOW SETTINGS PROFILES` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -50,38 +58,52 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("SHOW ACCESS",), - ("SHOW SETTINGS PROFILES",), - ("SHOW PROFILES",), - ("SHOW CREATE SETTINGS PROFILE",), - ("SHOW CREATE PROFILE",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("SHOW ACCESS",), + ("SHOW SETTINGS PROFILES",), + ("SHOW PROFILES",), + ("SHOW CREATE SETTINGS PROFILE",), + ("SHOW CREATE PROFILE",), + ], +) def check_privilege(self, privilege, grant_target_name, user_name, node=None): - """Run checks for commands that require SHOW SETTINGS PROFILES privilege. - """ + """Run checks for commands that require SHOW SETTINGS PROFILES privilege.""" if node is None: node = self.context.node - Suite(test=show_settings_profiles)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) - Suite(test=show_create)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) + Suite(test=show_settings_profiles)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + Suite(test=show_create)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowSettingsProfiles_RequiredPrivilege("1.0"), ) def show_settings_profiles(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SHOW SETTINGS PROFILES` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `SHOW SETTINGS PROFILES` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -96,8 +118,12 @@ def show_settings_profiles(self, privilege, grant_target_name, user_name, node=N node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SHOW SETTINGS PROFILES"): - node.query(f"SHOW SETTINGS PROFILES", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW SETTINGS PROFILES", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW SETTINGS PROFILES with privilege"): @@ -105,7 +131,7 @@ def show_settings_profiles(self, privilege, grant_target_name, user_name, node=N node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SHOW SETTINGS PROFILES"): - node.query(f"SHOW SETTINGS PROFILES", settings = [("user", f"{user_name}")]) + node.query(f"SHOW SETTINGS PROFILES", settings=[("user", f"{user_name}")]) with Scenario("SHOW SETTINGS PROFILES with revoked privilege"): @@ -116,16 +142,20 @@ def show_settings_profiles(self, privilege, grant_target_name, user_name, node=N node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SHOW SETTINGS PROFILES"): - node.query(f"SHOW SETTINGS PROFILES", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW SETTINGS PROFILES", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowCreateSettingsProfile_RequiredPrivilege("1.0"), ) def show_create(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SHOW CREATE SETTINGS PROFILE` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `SHOW CREATE SETTINGS PROFILE` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -143,8 +173,12 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SHOW CREATE SETTINGS PROFILE"): - node.query(f"SHOW CREATE SETTINGS PROFILE {target_settings_profile_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE SETTINGS PROFILE {target_settings_profile_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW CREATE SETTINGS PROFILE with privilege"): target_settings_profile_name = f"target_settings_profile_{getuid()}" @@ -155,7 +189,10 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SHOW CREATE SETTINGS PROFILE"): - node.query(f"SHOW CREATE SETTINGS PROFILE {target_settings_profile_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SHOW CREATE SETTINGS PROFILE {target_settings_profile_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SHOW CREATE SETTINGS PROFILE with revoked privilege"): target_settings_profile_name = f"target_settings_profile_{getuid()}" @@ -169,19 +206,23 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SHOW CREATE SETTINGS PROFILE"): - node.query(f"SHOW CREATE SETTINGS PROFILE {target_settings_profile_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE SETTINGS PROFILE {target_settings_profile_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("show settings profiles") @Requirements( RQ_SRS_006_RBAC_ShowSettingsProfiles_Privilege("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SHOW SETTINGS PROFILES. 
- """ + """Check the RBAC functionality of SHOW SETTINGS PROFILES.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/show/show_tables.py b/tests/testflows/rbac/tests/privileges/show/show_tables.py index d445550c032..f6eacef4164 100755 --- a/tests/testflows/rbac/tests/privileges/show/show_tables.py +++ b/tests/testflows/rbac/tests/privileges/show/show_tables.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def table_privileges_granted_directly(self, node=None): """Check that a user is able to execute `CHECK` and `EXISTS` @@ -20,10 +21,18 @@ def table_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): table_name = f"table_name_{getuid()}" - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[user_name,user_name,table_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [user_name, user_name, table_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def table_privileges_granted_via_role(self, node=None): @@ -44,41 +53,73 @@ def table_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[role_name,user_name,table_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [role_name, user_name, table_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SHOW", "*.*"), - ("SHOW TABLES", "table"), - ("SELECT", "table"), - ("INSERT", "table"), - ("ALTER", "table"), - ("SELECT(a)", "table"), - ("INSERT(a)", "table"), - ("ALTER(a)", "table"), -]) -def check_privilege(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Run checks for commands that require SHOW TABLE privilege. 
- """ +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SHOW", "*.*"), + ("SHOW TABLES", "table"), + ("SELECT", "table"), + ("INSERT", "table"), + ("ALTER", "table"), + ("SELECT(a)", "table"), + ("INSERT(a)", "table"), + ("ALTER(a)", "table"), + ], +) +def check_privilege( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Run checks for commands that require SHOW TABLE privilege.""" if node is None: node = self.context.node - Suite(test=show_tables)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, table_name=table_name) - Suite(test=exists)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, table_name=table_name) - Suite(test=check)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, table_name=table_name) + Suite(test=show_tables)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + table_name=table_name, + ) + Suite(test=exists)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + table_name=table_name, + ) + Suite(test=check)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + table_name=table_name, + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowTables_RequiredPrivilege("1.0"), ) -def show_tables(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Check that user is only able to see a table in SHOW TABLES when they have a privilege on that table. - """ +def show_tables( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Check that user is only able to see a table in SHOW TABLES when they have a privilege on that table.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -97,8 +138,10 @@ def show_tables(self, privilege, on, grant_target_name, user_name, table_name, n node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user doesn't see the table"): - output = node.query("SHOW TABLES", settings = [("user", f"{user_name}")]).output - assert output == '', error() + output = node.query( + "SHOW TABLES", settings=[("user", f"{user_name}")] + ).output + assert output == "", error() with Scenario("SHOW TABLES with privilege"): @@ -106,7 +149,11 @@ def show_tables(self, privilege, on, grant_target_name, user_name, table_name, n node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user does see a table"): - node.query("SHOW TABLES", settings = [("user", f"{user_name}")], message=f"{table_name}") + node.query( + "SHOW TABLES", + settings=[("user", f"{user_name}")], + message=f"{table_name}", + ) with Scenario("SHOW TABLES with revoked privilege"): @@ -117,8 +164,11 @@ def show_tables(self, privilege, on, grant_target_name, user_name, table_name, n node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user does not see a table"): - output = node.query("SHOW TABLES", settings = [("user", f"{user_name}")]).output - assert output == '', error() + output = node.query( + "SHOW TABLES", settings=[("user", f"{user_name}")] + ).output + assert output == "", error() + @TestSuite @Requirements( @@ -147,8 +197,12 @@ def exists(self, privilege, on, grant_target_name, user_name, table_name, node=N node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then(f"I check if {table_name} EXISTS"): - node.query(f"EXISTS {table_name}", 
settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"EXISTS {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("EXISTS with privilege"): @@ -156,7 +210,7 @@ def exists(self, privilege, on, grant_target_name, user_name, table_name, node=N node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then(f"I check if {table_name} EXISTS"): - node.query(f"EXISTS {table_name}", settings=[("user",user_name)]) + node.query(f"EXISTS {table_name}", settings=[("user", user_name)]) with Scenario("EXISTS with revoked privilege"): @@ -167,8 +221,13 @@ def exists(self, privilege, on, grant_target_name, user_name, table_name, node=N node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then(f"I check if {table_name} EXISTS"): - node.query(f"EXISTS {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"EXISTS {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite @Requirements( @@ -197,8 +256,12 @@ def check(self, privilege, on, grant_target_name, user_name, table_name, node=No node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then(f"I CHECK {table_name}"): - node.query(f"CHECK TABLE {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CHECK TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("CHECK with privilege"): @@ -206,7 +269,7 @@ def check(self, privilege, on, grant_target_name, user_name, table_name, node=No node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then(f"I CHECK {table_name}"): - node.query(f"CHECK TABLE {table_name}", settings=[("user",user_name)]) + node.query(f"CHECK TABLE {table_name}", settings=[("user", user_name)]) with Scenario("CHECK with revoked privilege"): @@ -217,19 +280,23 @@ def check(self, privilege, on, grant_target_name, user_name, table_name, node=No node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then(f"I CHECK {table_name}"): - node.query(f"CHECK TABLE {table_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CHECK TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("show tables") @Requirements( RQ_SRS_006_RBAC_ShowTables_Privilege("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SHOW TABLES. - """ + """Check the RBAC functionality of SHOW TABLES.""" self.context.node = self.context.cluster.node(node) Suite(run=table_privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/show/show_users.py b/tests/testflows/rbac/tests/privileges/show/show_users.py index aa5c97297b5..f3406c4134b 100644 --- a/tests/testflows/rbac/tests/privileges/show/show_users.py +++ b/tests/testflows/rbac/tests/privileges/show/show_users.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Check that a user is able to execute `SHOW USERS` with privileges are granted directly. 
- """ + """Check that a user is able to execute `SHOW USERS` with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Check that a user is able to execute `SHOW USERS` with privileges are granted through a role. - """ + """Check that a user is able to execute `SHOW USERS` with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,36 +45,50 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("ACCESS MANAGEMENT",), - ("SHOW ACCESS",), - ("SHOW USERS",), - ("SHOW CREATE USER",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("ACCESS MANAGEMENT",), + ("SHOW ACCESS",), + ("SHOW USERS",), + ("SHOW CREATE USER",), + ], +) def check_privilege(self, privilege, grant_target_name, user_name, node=None): - """Run checks for commands that require SHOW USERS privilege. - """ + """Run checks for commands that require SHOW USERS privilege.""" if node is None: node = self.context.node - Suite(test=show_users)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) - Suite(test=show_create)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) + Suite(test=show_users)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + Suite(test=show_create)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowUsers_RequiredPrivilege("1.0"), ) def show_users(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SHOW USERS` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `SHOW USERS` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -82,8 +103,12 @@ def show_users(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SHOW USERS"): - node.query(f"SHOW USERS", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW USERS", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW USERS with privilege"): @@ -91,7 +116,7 @@ def show_users(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SHOW USERS"): - node.query(f"SHOW USERS", settings = [("user", f"{user_name}")]) + node.query(f"SHOW USERS", settings=[("user", f"{user_name}")]) with Scenario("SHOW USERS with revoked privilege"): @@ -102,16 +127,20 @@ def show_users(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SHOW USERS"): - node.query(f"SHOW USERS", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW USERS", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite @Requirements( RQ_SRS_006_RBAC_ShowCreateUser_RequiredPrivilege("1.0"), ) def show_create(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SHOW CREATE USER` when they have the necessary privilege. - """ + """Check that user is only able to execute `SHOW CREATE USER` when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -129,8 +158,12 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SHOW CREATE USER"): - node.query(f"SHOW CREATE USER {target_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE USER {target_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SHOW CREATE USER with privilege"): target_user_name = f"target_user_{getuid()}" @@ -141,7 +174,10 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SHOW CREATE USER"): - node.query(f"SHOW CREATE USER {target_user_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SHOW CREATE USER {target_user_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SHOW CREATE USER with revoked privilege"): target_user_name = f"target_user_{getuid()}" @@ -155,19 +191,23 @@ def show_create(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SHOW CREATE USER"): - node.query(f"SHOW CREATE USER {target_user_name}", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SHOW CREATE USER {target_user_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("show users") @Requirements( 
RQ_SRS_006_RBAC_ShowUsers_Privilege("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SHOW USERS. - """ + """Check the RBAC functionality of SHOW USERS.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/sources.py b/tests/testflows/rbac/tests/privileges/sources.py index 5342764dfc1..96e02198845 100644 --- a/tests/testflows/rbac/tests/privileges/sources.py +++ b/tests/testflows/rbac/tests/privileges/sources.py @@ -5,10 +5,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def file_privileges_granted_directly(self, node=None): - """Check that a user is able to create a table from a `File` source with privileges are granted directly. - """ + """Check that a user is able to create a table from a `File` source with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -17,15 +17,19 @@ def file_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=file, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in file.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=file, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [user_name, user_name]) for row in file.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def file_privileges_granted_via_role(self, node=None): - """Check that a user is able to create a table from a `File` source with privileges are granted through a role. - """ + """Check that a user is able to create a table from a `File` source with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -38,30 +42,37 @@ def file_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=file, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in file.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=file, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [role_name, user_name]) for row in file.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("SOURCES",), - ("FILE",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SOURCES",), + ("FILE",), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_Sources_File("1.0"), ) def file(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to to create a table from a `File` source when they have the necessary privilege. 
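sources.py applies the same structure to every external table engine: a directly-granted suite, a role-granted suite, and an outline parametrized over three privilege spellings, `ALL`, `SOURCES`, and the engine-specific one (`FILE` here, with `URL`, `REMOTE`, `MYSQL`, `ODBC`, `JDBC`, `HDFS` and `S3` following the same shape). A small sketch of those privilege rows:

```python
# Table-engine sources covered by sources.py; each gets its own outline.
sources = ["FILE", "URL", "REMOTE", "MYSQL", "ODBC", "JDBC", "HDFS", "S3"]

def privilege_examples(source: str):
    """Rows for the @Examples("privilege", ...) decorator of one source outline."""
    return [("ALL",), ("SOURCES",), (source,)]

for source in sources:
    print(source, privilege_examples(source))
```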
- """ + """Check that user is only able to to create a table from a `File` source when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: node = self.context.node with Scenario("File source without privilege"): - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given("The user has table privilege"): node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}") @@ -73,8 +84,12 @@ def file(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use the File source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=File('')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=File('')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("File source with privilege"): @@ -82,8 +97,12 @@ def file(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use the File source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=File('')", settings = [("user", f"{user_name}")], - exitcode=0, message=None) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=File('')", + settings=[("user", f"{user_name}")], + exitcode=0, + message=None, + ) with Scenario("File source with revoked privilege"): @@ -94,13 +113,17 @@ def file(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use the File source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=File('')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=File('')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def url_privileges_granted_directly(self, node=None): - """Check that a user is able to create a table from a `URL` source with privileges are granted directly. - """ + """Check that a user is able to create a table from a `URL` source with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -109,15 +132,19 @@ def url_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=url, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in url.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=url, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [user_name, user_name]) for row in url.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def url_privileges_granted_via_role(self, node=None): - """Check that a user is able to create a table from a `URL` source with privileges are granted through a role. 
- """ + """Check that a user is able to create a table from a `URL` source with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -130,30 +157,37 @@ def url_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=url, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in url.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=url, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [role_name, user_name]) for row in url.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("SOURCES",), - ("URL",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SOURCES",), + ("URL",), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_Sources_URL("1.0"), ) def url(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to to create a table from a `URL` source when they have the necessary privilege. - """ + """Check that user is only able to to create a table from a `URL` source when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: node = self.context.node - table_name = f'table_{getuid()}' - + table_name = f"table_{getuid()}" + with Scenario("URL source without privilege"): with Given("The user has table privilege"): node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}") @@ -165,15 +199,22 @@ def url(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use the URL source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1', 'TSV')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1', 'TSV')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("URL source with privilege"): with When(f"I grant {privilege}"): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use the URL source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1', 'TSV')", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1', 'TSV')", + settings=[("user", f"{user_name}")], + ) with Scenario("URL source with revoked privilege"): with When(f"I grant {privilege}"): @@ -183,13 +224,17 @@ def url(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use the URL source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1', 'TSV')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1', 'TSV')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def remote_privileges_granted_directly(self, node=None): - """Check that a user is able to create a table from a Remote source with privileges are granted directly. 
- """ + """Check that a user is able to create a table from a Remote source with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -198,15 +243,19 @@ def remote_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=remote, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in remote.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=remote, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [user_name, user_name]) for row in remote.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def remote_privileges_granted_via_role(self, node=None): - """Check that a user is able to create a table from a Remote source with privileges are granted through a role. - """ + """Check that a user is able to create a table from a Remote source with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -219,30 +268,37 @@ def remote_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=remote, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in remote.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=remote, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [role_name, user_name]) for row in remote.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("SOURCES",), - ("REMOTE",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SOURCES",), + ("REMOTE",), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_Sources_Remote("1.0"), ) def remote(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to to create a table from a remote source when they have the necessary privilege. 
- """ + """Check that user is only able to to create a table from a remote source when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: node = self.context.node with Scenario("Remote source without privilege"): - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given("The user has table privilege"): node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}") @@ -254,8 +310,12 @@ def remote(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use the Remote source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE = Distributed('127.0.0.1')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE = Distributed('127.0.0.1')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("Remote source with privilege"): @@ -263,8 +323,12 @@ def remote(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use the Remote source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE = Distributed('127.0.0.1')", settings = [("user", f"{user_name}")], - exitcode=42, message='Exception: Storage') + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE = Distributed('127.0.0.1')", + settings=[("user", f"{user_name}")], + exitcode=42, + message="Exception: Storage", + ) with Scenario("Remote source with revoked privilege"): @@ -275,13 +339,17 @@ def remote(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use the Remote source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE = Distributed('127.0.0.1')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE = Distributed('127.0.0.1')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def MySQL_privileges_granted_directly(self, node=None): - """Check that a user is able to create a table from a `MySQL` source with privileges are granted directly. - """ + """Check that a user is able to create a table from a `MySQL` source with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -290,15 +358,19 @@ def MySQL_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=MySQL, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in MySQL.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=MySQL, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [user_name, user_name]) for row in MySQL.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def MySQL_privileges_granted_via_role(self, node=None): - """Check that a user is able to create a table from a `MySQL` source with privileges are granted through a role. 
- """ + """Check that a user is able to create a table from a `MySQL` source with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -311,30 +383,37 @@ def MySQL_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=MySQL, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in MySQL.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=MySQL, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [role_name, user_name]) for row in MySQL.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("SOURCES",), - ("MYSQL",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SOURCES",), + ("MYSQL",), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_Sources_MySQL("1.0"), ) def MySQL(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to to create a table from a `MySQL` source when they have the necessary privilege. - """ + """Check that user is only able to to create a table from a `MySQL` source when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: node = self.context.node with Scenario("MySQL source without privilege"): - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given("The user has table privilege"): node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}") @@ -346,8 +425,12 @@ def MySQL(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use the MySQL source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=MySQL('127.0.0.1')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=MySQL('127.0.0.1')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("MySQL source with privilege"): @@ -355,8 +438,12 @@ def MySQL(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use the MySQL source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=MySQL('127.0.0.1')", settings = [("user", f"{user_name}")], - exitcode=42, message='Exception: Storage') + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=MySQL('127.0.0.1')", + settings=[("user", f"{user_name}")], + exitcode=42, + message="Exception: Storage", + ) with Scenario("MySQL source with revoked privilege"): @@ -367,13 +454,17 @@ def MySQL(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use the MySQL source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=MySQL('127.0.0.1')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=MySQL('127.0.0.1')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def ODBC_privileges_granted_directly(self, node=None): - """Check that a user is able to create a table from a `ODBC` source with privileges are granted 
directly. - """ + """Check that a user is able to create a table from a `ODBC` source with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -382,15 +473,19 @@ def ODBC_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=ODBC, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in ODBC.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=ODBC, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [user_name, user_name]) for row in ODBC.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def ODBC_privileges_granted_via_role(self, node=None): - """Check that a user is able to create a table from a `ODBC` source with privileges are granted through a role. - """ + """Check that a user is able to create a table from a `ODBC` source with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -403,30 +498,37 @@ def ODBC_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=ODBC, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in ODBC.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=ODBC, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [role_name, user_name]) for row in ODBC.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("SOURCES",), - ("ODBC",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SOURCES",), + ("ODBC",), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_Sources_ODBC("1.0"), ) def ODBC(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to to create a table from a `ODBC` source when they have the necessary privilege. 
- """ + """Check that user is only able to to create a table from a `ODBC` source when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: node = self.context.node with Scenario("ODBC source without privilege"): - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given("The user has table privilege"): node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}") @@ -438,8 +540,12 @@ def ODBC(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use the ODBC source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=ODBC('127.0.0.1')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=ODBC('127.0.0.1')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("ODBC source with privilege"): @@ -447,8 +553,12 @@ def ODBC(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use the ODBC source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=ODBC('127.0.0.1')", settings = [("user", f"{user_name}")], - exitcode=42, message='Exception: Storage') + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=ODBC('127.0.0.1')", + settings=[("user", f"{user_name}")], + exitcode=42, + message="Exception: Storage", + ) with Scenario("ODBC source with revoked privilege"): @@ -459,13 +569,17 @@ def ODBC(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use the ODBC source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=ODBC('127.0.0.1')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=ODBC('127.0.0.1')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def JDBC_privileges_granted_directly(self, node=None): - """Check that a user is able to create a table from a `JDBC` source with privileges are granted directly. - """ + """Check that a user is able to create a table from a `JDBC` source with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -474,15 +588,19 @@ def JDBC_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=JDBC, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in JDBC.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=JDBC, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [user_name, user_name]) for row in JDBC.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def JDBC_privileges_granted_via_role(self, node=None): - """Check that a user is able to create a table from a `JDBC` source with privileges are granted through a role. 
- """ + """Check that a user is able to create a table from a `JDBC` source with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -495,30 +613,37 @@ def JDBC_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=JDBC, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in JDBC.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=JDBC, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [role_name, user_name]) for row in JDBC.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("SOURCES",), - ("JDBC",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SOURCES",), + ("JDBC",), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_Sources_JDBC("1.0"), ) def JDBC(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to to create a table from a `JDBC` source when they have the necessary privilege. - """ + """Check that user is only able to to create a table from a `JDBC` source when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: node = self.context.node with Scenario("JDBC source without privilege"): - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Given("The user has table privilege"): node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}") @@ -530,8 +655,12 @@ def JDBC(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use the JDBC source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=JDBC('127.0.0.1')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=JDBC('127.0.0.1')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("JDBC source with privilege"): @@ -539,8 +668,12 @@ def JDBC(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use the JDBC source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=JDBC('127.0.0.1')", settings = [("user", f"{user_name}")], - exitcode=42, message='Exception: Storage') + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=JDBC('127.0.0.1')", + settings=[("user", f"{user_name}")], + exitcode=42, + message="Exception: Storage", + ) with Scenario("JDBC source with revoked privilege"): @@ -551,13 +684,17 @@ def JDBC(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use the JDBC source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=JDBC('127.0.0.1')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=JDBC('127.0.0.1')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def HDFS_privileges_granted_directly(self, node=None): - """Check that a user is able to create a table from a `HDFS` source with privileges are granted directly. 
- """ + """Check that a user is able to create a table from a `HDFS` source with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -566,15 +703,19 @@ def HDFS_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=HDFS, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in HDFS.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=HDFS, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [user_name, user_name]) for row in HDFS.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def HDFS_privileges_granted_via_role(self, node=None): - """Check that a user is able to create a table from a `HDFS` source with privileges are granted through a role. - """ + """Check that a user is able to create a table from a `HDFS` source with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -587,29 +728,36 @@ def HDFS_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=HDFS, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in HDFS.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=HDFS, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [role_name, user_name]) for row in HDFS.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("SOURCES",), - ("HDFS",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SOURCES",), + ("HDFS",), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_Sources_HDFS("1.0"), ) def HDFS(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to to create a table from a `HDFS` source when they have the necessary privilege. 
- """ + """Check that user is only able to to create a table from a `HDFS` source when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: node = self.context.node - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Scenario("HDFS source without privilege"): @@ -623,8 +771,12 @@ def HDFS(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use the HDFS source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('hdfs://127.0.0.1:8020/path', 'TSV')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('hdfs://127.0.0.1:8020/path', 'TSV')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("HDFS source with privilege"): @@ -632,7 +784,10 @@ def HDFS(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use the HDFS source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('hdfs://127.0.0.1:8020/path', 'TSV')", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('hdfs://127.0.0.1:8020/path', 'TSV')", + settings=[("user", f"{user_name}")], + ) with Scenario("HDFS source with revoked privilege"): @@ -643,13 +798,17 @@ def HDFS(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use the HDFS source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('hdfs://127.0.0.1:8020/path', 'TSV')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('hdfs://127.0.0.1:8020/path', 'TSV')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def S3_privileges_granted_directly(self, node=None): - """Check that a user is able to create a table from a `S3` source with privileges are granted directly. - """ + """Check that a user is able to create a table from a `S3` source with privileges are granted directly.""" user_name = f"user_{getuid()}" @@ -658,15 +817,19 @@ def S3_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=S3, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in S3.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=S3, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [user_name, user_name]) for row in S3.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def S3_privileges_granted_via_role(self, node=None): - """Check that a user is able to create a table from a `S3` source with privileges are granted through a role. 
- """ + """Check that a user is able to create a table from a `S3` source with privileges are granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -679,29 +842,36 @@ def S3_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=S3, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in S3.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=S3, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [role_name, user_name]) for row in S3.examples], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("SOURCES",), - ("S3",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SOURCES",), + ("S3",), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_Sources_S3("1.0"), ) def S3(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to to create a table from a `S3` source when they have the necessary privilege. - """ + """Check that user is only able to to create a table from a `S3` source when they have the necessary privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: node = self.context.node - table_name = f'table_{getuid()}' + table_name = f"table_{getuid()}" with Scenario("S3 source without privilege"): @@ -715,8 +885,12 @@ def S3(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use the S3 source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=S3('https://my.amazonaws.com/mybucket/mydata', 'TSV')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=S3('https://my.amazonaws.com/mybucket/mydata', 'TSV')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("S3 source with privilege"): @@ -724,7 +898,10 @@ def S3(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use the S3 source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=S3('https://my.amazonaws.com/mybucket/mydata', 'TSV')", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=S3('https://my.amazonaws.com/mybucket/mydata', 'TSV')", + settings=[("user", f"{user_name}")], + ) with Scenario("S3 source with revoked privilege"): @@ -735,27 +912,35 @@ def S3(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use the S3 source"): - node.query(f"CREATE TABLE {table_name} (x String) ENGINE=S3('https://my.amazonaws.com/mybucket/mydata', 'TSV')", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"CREATE TABLE {table_name} (x String) ENGINE=S3('https://my.amazonaws.com/mybucket/mydata', 'TSV')", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("sources") @Requirements( RQ_SRS_006_RBAC_Privileges_Sources("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, 
node="clickhouse1"): - """Check the RBAC functionality of SOURCES. - """ + """Check the RBAC functionality of SOURCES.""" self.context.node = self.context.cluster.node(node) Suite(run=file_privileges_granted_directly, setup=instrument_clickhouse_server_log) Suite(run=file_privileges_granted_via_role, setup=instrument_clickhouse_server_log) Suite(run=url_privileges_granted_directly, setup=instrument_clickhouse_server_log) Suite(run=url_privileges_granted_via_role, setup=instrument_clickhouse_server_log) - Suite(run=remote_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=remote_privileges_granted_via_role, setup=instrument_clickhouse_server_log) + Suite( + run=remote_privileges_granted_directly, setup=instrument_clickhouse_server_log + ) + Suite( + run=remote_privileges_granted_via_role, setup=instrument_clickhouse_server_log + ) Suite(run=MySQL_privileges_granted_directly, setup=instrument_clickhouse_server_log) Suite(run=MySQL_privileges_granted_via_role, setup=instrument_clickhouse_server_log) Suite(run=ODBC_privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/system/drop_cache.py b/tests/testflows/rbac/tests/privileges/system/drop_cache.py index 8f1a6caeaac..cda6838b974 100644 --- a/tests/testflows/rbac/tests/privileges/system/drop_cache.py +++ b/tests/testflows/rbac/tests/privileges/system/drop_cache.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def dns_cache_privileges_granted_directly(self, node=None): """Check that a user is able to execute `SYSTEM DROP DNS CACHE` if and only if @@ -17,10 +18,18 @@ def dns_cache_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=dns_cache, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in dns_cache.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dns_cache, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in dns_cache.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def dns_cache_privileges_granted_via_role(self, node=None): @@ -38,28 +47,38 @@ def dns_cache_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=dns_cache, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in dns_cache.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dns_cache, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in dns_cache.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) @Requirements( RQ_SRS_006_RBAC_Privileges_System_DropCache_DNS("1.0"), ) -@Examples("privilege",[ - ("ALL",), - ("SYSTEM",), - ("SYSTEM DROP CACHE",), - ("SYSTEM DROP DNS CACHE",), - ("DROP CACHE",), - ("DROP DNS CACHE",), - ("SYSTEM DROP DNS",), - ("DROP DNS",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SYSTEM",), + ("SYSTEM DROP CACHE",), + ("SYSTEM DROP DNS CACHE",), + ("DROP CACHE",), + ("DROP DNS CACHE",), + ("SYSTEM DROP DNS",), + ("DROP DNS",), + ], +) def dns_cache(self, privilege, 
grant_target_name, user_name, node=None): - """Run checks for `SYSTEM DROP DNS CACHE` privilege. - """ + """Run checks for `SYSTEM DROP DNS CACHE` privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -74,8 +93,12 @@ def dns_cache(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user is unable to execute SYSTEM DROP DNS CACHE"): - node.query("SYSTEM DROP DNS CACHE", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + "SYSTEM DROP DNS CACHE", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM DROP DNS CACHE with privilege"): @@ -83,7 +106,7 @@ def dns_cache(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user is bale to execute SYSTEM DROP DNS CACHE"): - node.query("SYSTEM DROP DNS CACHE", settings = [("user", f"{user_name}")]) + node.query("SYSTEM DROP DNS CACHE", settings=[("user", f"{user_name}")]) with Scenario("SYSTEM DROP DNS CACHE with revoked privilege"): @@ -94,8 +117,13 @@ def dns_cache(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user is unable to execute SYSTEM DROP DNS CACHE"): - node.query("SYSTEM DROP DNS CACHE", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + "SYSTEM DROP DNS CACHE", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite def mark_cache_privileges_granted_directly(self, node=None): @@ -109,10 +137,18 @@ def mark_cache_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=mark_cache, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in mark_cache.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=mark_cache, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in mark_cache.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def mark_cache_privileges_granted_via_role(self, node=None): @@ -130,28 +166,38 @@ def mark_cache_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=mark_cache, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in mark_cache.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=mark_cache, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in mark_cache.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) @Requirements( RQ_SRS_006_RBAC_Privileges_System_DropCache_Mark("1.0"), ) -@Examples("privilege",[ - ("ALL",), - ("SYSTEM",), - ("SYSTEM DROP CACHE",), - ("SYSTEM DROP MARK CACHE",), - ("DROP CACHE",), - ("DROP MARK CACHE",), - ("SYSTEM DROP MARK",), - ("DROP MARKS",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SYSTEM",), + ("SYSTEM DROP CACHE",), + ("SYSTEM DROP MARK CACHE",), + ("DROP CACHE",), + ("DROP MARK CACHE",), + ("SYSTEM DROP 
MARK",), + ("DROP MARKS",), + ], +) def mark_cache(self, privilege, grant_target_name, user_name, node=None): - """Run checks for `SYSTEM DROP MARK CACHE` privilege. - """ + """Run checks for `SYSTEM DROP MARK CACHE` privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -166,8 +212,12 @@ def mark_cache(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user is unable to execute SYSTEM DROP MARK CACHE"): - node.query("SYSTEM DROP MARK CACHE", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + "SYSTEM DROP MARK CACHE", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM DROP MARK CACHE with privilege"): @@ -175,7 +225,7 @@ def mark_cache(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user is bale to execute SYSTEM DROP MARK CACHE"): - node.query("SYSTEM DROP MARK CACHE", settings = [("user", f"{user_name}")]) + node.query("SYSTEM DROP MARK CACHE", settings=[("user", f"{user_name}")]) with Scenario("SYSTEM DROP MARK CACHE with revoked privilege"): @@ -186,8 +236,13 @@ def mark_cache(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user is unable to execute SYSTEM DROP MARK CACHE"): - node.query("SYSTEM DROP MARK CACHE", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + "SYSTEM DROP MARK CACHE", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite def uncompressed_cache_privileges_granted_directly(self, node=None): @@ -201,10 +256,18 @@ def uncompressed_cache_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=uncompressed_cache, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in uncompressed_cache.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=uncompressed_cache, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in uncompressed_cache.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def uncompressed_cache_privileges_granted_via_role(self, node=None): @@ -222,28 +285,38 @@ def uncompressed_cache_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=uncompressed_cache, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in uncompressed_cache.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=uncompressed_cache, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in uncompressed_cache.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) @Requirements( RQ_SRS_006_RBAC_Privileges_System_DropCache_Uncompressed("1.0"), ) -@Examples("privilege",[ - ("ALL",), - ("SYSTEM",), - ("SYSTEM DROP CACHE",), - ("SYSTEM DROP UNCOMPRESSED CACHE",), - ("DROP CACHE",), - ("DROP UNCOMPRESSED CACHE",), - ("SYSTEM DROP 
UNCOMPRESSED",), - ("DROP UNCOMPRESSED",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SYSTEM",), + ("SYSTEM DROP CACHE",), + ("SYSTEM DROP UNCOMPRESSED CACHE",), + ("DROP CACHE",), + ("DROP UNCOMPRESSED CACHE",), + ("SYSTEM DROP UNCOMPRESSED",), + ("DROP UNCOMPRESSED",), + ], +) def uncompressed_cache(self, privilege, grant_target_name, user_name, node=None): - """Run checks for `SYSTEM DROP UNCOMPRESSED CACHE` privilege. - """ + """Run checks for `SYSTEM DROP UNCOMPRESSED CACHE` privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -257,9 +330,15 @@ def uncompressed_cache(self, privilege, grant_target_name, user_name, node=None) with And("I grant the user USAGE privilege"): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") - with Then("I check the user is unable to execute SYSTEM DROP UNCOMPRESSED CACHE"): - node.query("SYSTEM DROP UNCOMPRESSED CACHE", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + with Then( + "I check the user is unable to execute SYSTEM DROP UNCOMPRESSED CACHE" + ): + node.query( + "SYSTEM DROP UNCOMPRESSED CACHE", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM DROP UNCOMPRESSED CACHE with privilege"): @@ -267,7 +346,9 @@ def uncompressed_cache(self, privilege, grant_target_name, user_name, node=None) node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user is bale to execute SYSTEM DROP UNCOMPRESSED CACHE"): - node.query("SYSTEM DROP UNCOMPRESSED CACHE", settings = [("user", f"{user_name}")]) + node.query( + "SYSTEM DROP UNCOMPRESSED CACHE", settings=[("user", f"{user_name}")] + ) with Scenario("SYSTEM DROP UNCOMPRESSED CACHE with revoked privilege"): @@ -277,25 +358,49 @@ def uncompressed_cache(self, privilege, grant_target_name, user_name, node=None) with And(f"I revoke {privilege} on the table"): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") - with Then("I check the user is unable to execute SYSTEM DROP UNCOMPRESSED CACHE"): - node.query("SYSTEM DROP UNCOMPRESSED CACHE", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + with Then( + "I check the user is unable to execute SYSTEM DROP UNCOMPRESSED CACHE" + ): + node.query( + "SYSTEM DROP UNCOMPRESSED CACHE", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("system drop cache") @Requirements( RQ_SRS_006_RBAC_Privileges_System_DropCache("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM DROP CACHE. 
- """ + """Check the RBAC functionality of SYSTEM DROP CACHE.""" self.context.node = self.context.cluster.node(node) - Suite(run=dns_cache_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=dns_cache_privileges_granted_via_role, setup=instrument_clickhouse_server_log) - Suite(run=mark_cache_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=mark_cache_privileges_granted_via_role, setup=instrument_clickhouse_server_log) - Suite(run=uncompressed_cache_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=uncompressed_cache_privileges_granted_via_role, setup=instrument_clickhouse_server_log) + Suite( + run=dns_cache_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=dns_cache_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=mark_cache_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=mark_cache_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=uncompressed_cache_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=uncompressed_cache_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) diff --git a/tests/testflows/rbac/tests/privileges/system/fetches.py b/tests/testflows/rbac/tests/privileges/system/fetches.py index 3aba1b71566..28c0be6c8b5 100644 --- a/tests/testflows/rbac/tests/privileges/system/fetches.py +++ b/tests/testflows/rbac/tests/privileges/system/fetches.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def replicated_privileges_granted_directly(self, node=None): """Check that a user is able to execute `SYSTEM FETCHES` commands if and only if @@ -17,10 +18,18 @@ def replicated_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=check_replicated_privilege, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in check_replicated_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_replicated_privilege, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in check_replicated_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def replicated_privileges_granted_via_role(self, node=None): @@ -38,35 +47,59 @@ def replicated_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_replicated_privilege, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in check_replicated_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_replicated_privilege, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in check_replicated_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SYSTEM", "*.*"), - ("SYSTEM FETCHES", "table"), - ("SYSTEM STOP FETCHES", "table"), - ("SYSTEM START FETCHES", "table"), - ("START FETCHES", "table"), - 
("STOP FETCHES", "table"), -]) -def check_replicated_privilege(self, privilege, on, grant_target_name, user_name, node=None): - """Run checks for commands that require SYSTEM FETCHES privilege. - """ +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SYSTEM", "*.*"), + ("SYSTEM FETCHES", "table"), + ("SYSTEM STOP FETCHES", "table"), + ("SYSTEM START FETCHES", "table"), + ("START FETCHES", "table"), + ("STOP FETCHES", "table"), + ], +) +def check_replicated_privilege( + self, privilege, on, grant_target_name, user_name, node=None +): + """Run checks for commands that require SYSTEM FETCHES privilege.""" if node is None: node = self.context.node - Suite(test=start_replication_queues)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name) - Suite(test=stop_replication_queues)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name) + Suite(test=start_replication_queues)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + ) + Suite(test=stop_replication_queues)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + ) + @TestSuite -def start_replication_queues(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SYSTEM START FETCHES` when they have privilege. - """ +def start_replication_queues( + self, privilege, on, grant_target_name, user_name, node=None +): + """Check that user is only able to execute `SYSTEM START FETCHES` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) table_name = f"table_name_{getuid()}" @@ -86,8 +119,12 @@ def start_replication_queues(self, privilege, on, grant_target_name, user_name, node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't start fetches"): - node.query(f"SYSTEM START FETCHES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START FETCHES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM START FETCHES with privilege"): @@ -95,7 +132,10 @@ def start_replication_queues(self, privilege, on, grant_target_name, user_name, node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can start fetches"): - node.query(f"SYSTEM START FETCHES {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM START FETCHES {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM START FETCHES with revoked privilege"): @@ -106,13 +146,19 @@ def start_replication_queues(self, privilege, on, grant_target_name, user_name, node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't start fetches"): - node.query(f"SYSTEM START FETCHES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START FETCHES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite -def stop_replication_queues(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SYSTEM STOP FETCHES` when they have privilege. 
- """ +def stop_replication_queues( + self, privilege, on, grant_target_name, user_name, node=None +): + """Check that user is only able to execute `SYSTEM STOP FETCHES` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) table_name = f"table_name_{getuid()}" @@ -132,8 +178,12 @@ def stop_replication_queues(self, privilege, on, grant_target_name, user_name, n node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't stop fetches"): - node.query(f"SYSTEM STOP FETCHES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP FETCHES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM STOP FETCHES with privilege"): @@ -141,7 +191,10 @@ def stop_replication_queues(self, privilege, on, grant_target_name, user_name, n node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can start fetches"): - node.query(f"SYSTEM STOP FETCHES {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM STOP FETCHES {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM STOP FETCHES with revoked privilege"): @@ -152,20 +205,30 @@ def stop_replication_queues(self, privilege, on, grant_target_name, user_name, n node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't start fetches"): - node.query(f"SYSTEM STOP FETCHES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP FETCHES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("system fetches") @Requirements( RQ_SRS_006_RBAC_Privileges_System_Fetches("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM FETCHES. 
- """ + """Check the RBAC functionality of SYSTEM FETCHES.""" self.context.node = self.context.cluster.node(node) - Suite(run=replicated_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=replicated_privileges_granted_via_role, setup=instrument_clickhouse_server_log) + Suite( + run=replicated_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=replicated_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) diff --git a/tests/testflows/rbac/tests/privileges/system/flush.py b/tests/testflows/rbac/tests/privileges/system/flush.py index 8c540fa1286..f225639ee46 100644 --- a/tests/testflows/rbac/tests/privileges/system/flush.py +++ b/tests/testflows/rbac/tests/privileges/system/flush.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): """Check that a user is able to execute `SYSTEM FLUSH LOGS` commands if and only if @@ -17,10 +18,18 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=flush_logs, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in flush_logs.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=flush_logs, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in flush_logs.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): @@ -38,25 +47,35 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=flush_logs, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in flush_logs.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=flush_logs, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in flush_logs.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SYSTEM", "*.*"), - ("SYSTEM FLUSH", "*.*"), - ("SYSTEM FLUSH LOGS", "*.*"), - ("FLUSH LOGS", "*.*"), -]) +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SYSTEM", "*.*"), + ("SYSTEM FLUSH", "*.*"), + ("SYSTEM FLUSH LOGS", "*.*"), + ("FLUSH LOGS", "*.*"), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_System_Flush_Logs("1.0"), ) def flush_logs(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SYSTEM START REPLICATED FLUSH` when they have privilege. 
- """ + """Check that user is only able to execute `SYSTEM START REPLICATED FLUSH` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -71,8 +90,12 @@ def flush_logs(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't flush logs"): - node.query(f"SYSTEM FLUSH LOGS", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM FLUSH LOGS", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM FLUSH LOGS with privilege"): @@ -80,7 +103,7 @@ def flush_logs(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can flush logs"): - node.query(f"SYSTEM FLUSH LOGS", settings = [("user", f"{user_name}")]) + node.query(f"SYSTEM FLUSH LOGS", settings=[("user", f"{user_name}")]) with Scenario("SYSTEM FLUSH LOGS with revoked privilege"): @@ -91,8 +114,13 @@ def flush_logs(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't flush logs"): - node.query(f"SYSTEM FLUSH LOGS", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM FLUSH LOGS", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite def distributed_privileges_granted_directly(self, node=None): @@ -107,10 +135,18 @@ def distributed_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): table_name = f"table_name_{getuid()}" - Suite(run=flush_distributed, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[user_name,user_name,table_name]) for row in flush_distributed.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=flush_distributed, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [user_name, user_name, table_name]) + for row in flush_distributed.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def distributed_privileges_granted_via_role(self, node=None): @@ -129,25 +165,37 @@ def distributed_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=flush_distributed, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[role_name,user_name,table_name]) for row in flush_distributed.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=flush_distributed, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [role_name, user_name, table_name]) + for row in flush_distributed.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SYSTEM", "*.*"), - ("SYSTEM FLUSH", "*.*"), - ("SYSTEM FLUSH DISTRIBUTED", "table"), - ("FLUSH DISTRIBUTED", "table"), -]) +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SYSTEM", "*.*"), + ("SYSTEM FLUSH", "*.*"), + ("SYSTEM FLUSH DISTRIBUTED", "table"), + ("FLUSH DISTRIBUTED", "table"), + ], +) @Requirements( 
RQ_SRS_006_RBAC_Privileges_System_Flush_Distributed("1.0"), ) -def flush_distributed(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Check that user is only able to execute `SYSTEM FLUSH DISTRIBUTED` when they have privilege. - """ +def flush_distributed( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Check that user is only able to execute `SYSTEM FLUSH DISTRIBUTED` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) table0_name = f"table0_{getuid()}" @@ -159,7 +207,9 @@ def flush_distributed(self, privilege, on, grant_target_name, user_name, table_n with table(node, table0_name): try: with Given("I have a distributed table"): - node.query(f"CREATE TABLE {table_name} (a UInt64) ENGINE = Distributed(sharded_cluster, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table_name} (a UInt64) ENGINE = Distributed(sharded_cluster, default, {table0_name}, rand())" + ) with Scenario("SYSTEM FLUSH DISTRIBUTED without privilege"): @@ -170,8 +220,12 @@ def flush_distributed(self, privilege, on, grant_target_name, user_name, table_n node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't flush distributed"): - node.query(f"SYSTEM FLUSH DISTRIBUTED {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM FLUSH DISTRIBUTED {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM FLUSH DISTRIBUTED with privilege"): @@ -179,7 +233,10 @@ def flush_distributed(self, privilege, on, grant_target_name, user_name, table_n node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can flush distributed"): - node.query(f"SYSTEM FLUSH DISTRIBUTED {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM FLUSH DISTRIBUTED {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM FLUSH DISTRIBUTED with revoked privilege"): @@ -190,8 +247,12 @@ def flush_distributed(self, privilege, on, grant_target_name, user_name, table_n node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't flush distributed"): - node.query(f"SYSTEM FLUSH DISTRIBUTED {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM FLUSH DISTRIBUTED {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the distributed table"): @@ -203,14 +264,19 @@ def flush_distributed(self, privilege, on, grant_target_name, user_name, table_n @Requirements( RQ_SRS_006_RBAC_Privileges_System_Flush("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM FLUSH. 
- """ + """Check the RBAC functionality of SYSTEM FLUSH.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) Suite(run=privileges_granted_via_role, setup=instrument_clickhouse_server_log) - Suite(run=distributed_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=distributed_privileges_granted_via_role, setup=instrument_clickhouse_server_log) + Suite( + run=distributed_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=distributed_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) diff --git a/tests/testflows/rbac/tests/privileges/system/merges.py b/tests/testflows/rbac/tests/privileges/system/merges.py index 324b9c0b4ec..35d32220b4d 100644 --- a/tests/testflows/rbac/tests/privileges/system/merges.py +++ b/tests/testflows/rbac/tests/privileges/system/merges.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): """Check that a user is able to execute `SYSTEM MERGES` commands if and only if @@ -18,10 +19,18 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): table_name = f"table_name_{getuid()}" - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[user_name,user_name,table_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [user_name, user_name, table_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): @@ -40,35 +49,61 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[role_name,user_name,table_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [role_name, user_name, table_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SYSTEM", "*.*"), - ("SYSTEM MERGES", "table"), - ("SYSTEM STOP MERGES", "table"), - ("SYSTEM START MERGES", "table"), - ("START MERGES", "table"), - ("STOP MERGES", "table"), -]) -def check_privilege(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Run checks for commands that require SYSTEM MERGES privilege. 
- """ +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SYSTEM", "*.*"), + ("SYSTEM MERGES", "table"), + ("SYSTEM STOP MERGES", "table"), + ("SYSTEM START MERGES", "table"), + ("START MERGES", "table"), + ("STOP MERGES", "table"), + ], +) +def check_privilege( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Run checks for commands that require SYSTEM MERGES privilege.""" if node is None: node = self.context.node - Suite(test=start_merges)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, table_name=table_name) - Suite(test=stop_merges)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, table_name=table_name) + Suite(test=start_merges)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + table_name=table_name, + ) + Suite(test=stop_merges)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + table_name=table_name, + ) + @TestSuite -def start_merges(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Check that user is only able to execute `SYSTEM START MERGES` when they have privilege. - """ +def start_merges( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Check that user is only able to execute `SYSTEM START MERGES` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -87,8 +122,12 @@ def start_merges(self, privilege, on, grant_target_name, user_name, table_name, node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't start merges"): - node.query(f"SYSTEM START MERGES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START MERGES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM START MERGES with privilege"): @@ -96,7 +135,10 @@ def start_merges(self, privilege, on, grant_target_name, user_name, table_name, node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can start merges"): - node.query(f"SYSTEM START MERGES {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM START MERGES {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM START MERGES with revoked privilege"): @@ -107,13 +149,19 @@ def start_merges(self, privilege, on, grant_target_name, user_name, table_name, node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't start merges"): - node.query(f"SYSTEM START MERGES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START MERGES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite -def stop_merges(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Check that user is only able to execute `SYSTEM STOP MERGES` when they have privilege. 
- """ +def stop_merges( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Check that user is only able to execute `SYSTEM STOP MERGES` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -132,8 +180,12 @@ def stop_merges(self, privilege, on, grant_target_name, user_name, table_name, n node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't stop merges"): - node.query(f"SYSTEM STOP MERGES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP MERGES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM STOP MERGES with privilege"): @@ -141,7 +193,10 @@ def stop_merges(self, privilege, on, grant_target_name, user_name, table_name, n node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can stop merges"): - node.query(f"SYSTEM STOP MERGES {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM STOP MERGES {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM STOP MERGES with revoked privilege"): @@ -152,19 +207,23 @@ def stop_merges(self, privilege, on, grant_target_name, user_name, table_name, n node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't stop merges"): - node.query(f"SYSTEM STOP MERGES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP MERGES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("system merges") @Requirements( RQ_SRS_006_RBAC_Privileges_System_Merges("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM MERGES. 
- """ + """Check the RBAC functionality of SYSTEM MERGES.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/system/moves.py b/tests/testflows/rbac/tests/privileges/system/moves.py index 2a75ff39aaf..17ce6d931b3 100644 --- a/tests/testflows/rbac/tests/privileges/system/moves.py +++ b/tests/testflows/rbac/tests/privileges/system/moves.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): """Check that a user is able to execute `SYSTEM MOVES` commands if and only if @@ -18,10 +19,18 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): table_name = f"table_name_{getuid()}" - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[user_name,user_name,table_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [user_name, user_name, table_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): @@ -40,35 +49,61 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[role_name,user_name,table_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [role_name, user_name, table_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SYSTEM", "*.*"), - ("SYSTEM MOVES", "table"), - ("SYSTEM STOP MOVES", "table"), - ("SYSTEM START MOVES", "table"), - ("START MOVES", "table"), - ("STOP MOVES", "table"), -]) -def check_privilege(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Run checks for commands that require SYSTEM MOVES privilege. 
- """ +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SYSTEM", "*.*"), + ("SYSTEM MOVES", "table"), + ("SYSTEM STOP MOVES", "table"), + ("SYSTEM START MOVES", "table"), + ("START MOVES", "table"), + ("STOP MOVES", "table"), + ], +) +def check_privilege( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Run checks for commands that require SYSTEM MOVES privilege.""" if node is None: node = self.context.node - Suite(test=start_moves)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, table_name=table_name) - Suite(test=stop_moves)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, table_name=table_name) + Suite(test=start_moves)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + table_name=table_name, + ) + Suite(test=stop_moves)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + table_name=table_name, + ) + @TestSuite -def start_moves(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Check that user is only able to execute `SYSTEM START MOVES` when they have privilege. - """ +def start_moves( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Check that user is only able to execute `SYSTEM START MOVES` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -87,8 +122,12 @@ def start_moves(self, privilege, on, grant_target_name, user_name, table_name, n node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't start moves"): - node.query(f"SYSTEM START MOVES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START MOVES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM START MOVES with privilege"): @@ -96,7 +135,10 @@ def start_moves(self, privilege, on, grant_target_name, user_name, table_name, n node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can start moves"): - node.query(f"SYSTEM START MOVES {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM START MOVES {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM START MOVES with revoked privilege"): @@ -107,13 +149,19 @@ def start_moves(self, privilege, on, grant_target_name, user_name, table_name, n node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't start moves"): - node.query(f"SYSTEM START MOVES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START MOVES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite -def stop_moves(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Check that user is only able to execute `SYSTEM STOP MOVES` when they have privilege. 
- """ +def stop_moves( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Check that user is only able to execute `SYSTEM STOP MOVES` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -132,8 +180,12 @@ def stop_moves(self, privilege, on, grant_target_name, user_name, table_name, no node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't stop moves"): - node.query(f"SYSTEM STOP MOVES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP MOVES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM STOP MOVES with privilege"): @@ -141,7 +193,10 @@ def stop_moves(self, privilege, on, grant_target_name, user_name, table_name, no node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can stop moves"): - node.query(f"SYSTEM STOP MOVES {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM STOP MOVES {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM STOP MOVES with revoked privilege"): @@ -152,19 +207,23 @@ def stop_moves(self, privilege, on, grant_target_name, user_name, table_name, no node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't stop moves"): - node.query(f"SYSTEM STOP MOVES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP MOVES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("system moves") @Requirements( RQ_SRS_006_RBAC_Privileges_System_Moves("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM MOVES. - """ + """Check the RBAC functionality of SYSTEM MOVES.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/system/reload.py b/tests/testflows/rbac/tests/privileges/system/reload.py index 08df5803287..d0c7a2caea8 100644 --- a/tests/testflows/rbac/tests/privileges/system/reload.py +++ b/tests/testflows/rbac/tests/privileges/system/reload.py @@ -5,17 +5,21 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @contextmanager def dict_setup(node, table_name, dict_name): - """Setup and teardown of table and dictionary needed for the tests. 
- """ + """Setup and teardown of table and dictionary needed for the tests.""" try: with Given("I have a table"): - node.query(f"CREATE TABLE {table_name} (key UInt64, val UInt64) Engine=Memory()") + node.query( + f"CREATE TABLE {table_name} (key UInt64, val UInt64) Engine=Memory()" + ) with And("I have a dictionary"): - node.query(f"CREATE DICTIONARY {dict_name} (key UInt64 DEFAULT 0, val UInt64 DEFAULT 10) PRIMARY KEY key SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE '{table_name}' PASSWORD '' DB 'default')) LIFETIME(MIN 0 MAX 0) LAYOUT(FLAT())") + node.query( + f"CREATE DICTIONARY {dict_name} (key UInt64 DEFAULT 0, val UInt64 DEFAULT 10) PRIMARY KEY key SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE '{table_name}' PASSWORD '' DB 'default')) LIFETIME(MIN 0 MAX 0) LAYOUT(FLAT())" + ) yield @@ -26,6 +30,7 @@ def dict_setup(node, table_name, dict_name): with And("I drop the table", flags=TE): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestSuite def config_privileges_granted_directly(self, node=None): """Check that a user is able to execute `SYSTEM RELOAD CONFIG` if and only if @@ -38,10 +43,15 @@ def config_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=config, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in config.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=config, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [user_name, user_name]) for row in config.examples], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def config_privileges_granted_via_role(self, node=None): @@ -59,25 +69,32 @@ def config_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=config, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in config.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=config, + examples=Examples( + "privilege grant_target_name user_name", + [tuple(list(row) + [role_name, user_name]) for row in config.examples], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) @Requirements( RQ_SRS_006_RBAC_Privileges_System_Reload_Config("1.0"), ) -@Examples("privilege",[ - ("ALL",), - ("SYSTEM",), - ("SYSTEM RELOAD",), - ("SYSTEM RELOAD CONFIG",), - ("RELOAD CONFIG",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SYSTEM",), + ("SYSTEM RELOAD",), + ("SYSTEM RELOAD CONFIG",), + ("RELOAD CONFIG",), + ], +) def config(self, privilege, grant_target_name, user_name, node=None): - """Run checks for `SYSTEM RELOAD CONFIG` privilege. 
- """ + """Run checks for `SYSTEM RELOAD CONFIG` privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -92,8 +109,12 @@ def config(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user is unable to execute SYSTEM RELOAD CONFIG"): - node.query("SYSTEM RELOAD CONFIG", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + "SYSTEM RELOAD CONFIG", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM RELOAD CONFIG with privilege"): @@ -101,7 +122,7 @@ def config(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user is bale to execute SYSTEM RELOAD CONFIG"): - node.query("SYSTEM RELOAD CONFIG", settings = [("user", f"{user_name}")]) + node.query("SYSTEM RELOAD CONFIG", settings=[("user", f"{user_name}")]) with Scenario("SYSTEM RELOAD CONFIG with revoked privilege"): @@ -112,8 +133,13 @@ def config(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user is unable to execute SYSTEM RELOAD CONFIG"): - node.query("SYSTEM RELOAD CONFIG", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + "SYSTEM RELOAD CONFIG", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite def dictionary_privileges_granted_directly(self, node=None): @@ -127,10 +153,18 @@ def dictionary_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=dictionary, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in dictionary.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictionary, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in dictionary.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def dictionary_privileges_granted_via_role(self, node=None): @@ -148,26 +182,36 @@ def dictionary_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=dictionary, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in dictionary.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictionary, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in dictionary.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) @Requirements( RQ_SRS_006_RBAC_Privileges_System_Reload_Dictionary("1.0"), ) -@Examples("privilege",[ - ("ALL",), - ("SYSTEM",), - ("SYSTEM RELOAD",), - ("SYSTEM RELOAD DICTIONARIES",), - ("RELOAD DICTIONARIES",), - ("RELOAD DICTIONARY",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SYSTEM",), + ("SYSTEM RELOAD",), + ("SYSTEM RELOAD DICTIONARIES",), + ("RELOAD DICTIONARIES",), + ("RELOAD DICTIONARY",), + ], +) def dictionary(self, privilege, grant_target_name, user_name, node=None): - """Run checks for `SYSTEM RELOAD DICTIONARY` privilege. 
- """ + """Run checks for `SYSTEM RELOAD DICTIONARY` privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -187,8 +231,12 @@ def dictionary(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user is unable to execute SYSTEM RELOAD DICTIONARY"): - node.query(f"SYSTEM RELOAD DICTIONARY default.{dict_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM RELOAD DICTIONARY default.{dict_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM RELOAD DICTIONARY with privilege"): @@ -201,7 +249,10 @@ def dictionary(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user is bale to execute SYSTEM RELOAD DICTIONARY"): - node.query(f"SYSTEM RELOAD DICTIONARY default.{dict_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM RELOAD DICTIONARY default.{dict_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM RELOAD DICTIONARY with revoked privilege"): @@ -217,8 +268,13 @@ def dictionary(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user is unable to execute SYSTEM RELOAD DICTIONARY"): - node.query(f"SYSTEM RELOAD DICTIONARY default.{dict_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM RELOAD DICTIONARY default.{dict_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite def dictionaries_privileges_granted_directly(self, node=None): @@ -232,10 +288,18 @@ def dictionaries_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=dictionaries, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in dictionaries.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictionaries, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in dictionaries.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def dictionaries_privileges_granted_via_role(self, node=None): @@ -253,26 +317,36 @@ def dictionaries_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=dictionaries, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in dictionaries.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=dictionaries, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in dictionaries.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) @Requirements( RQ_SRS_006_RBAC_Privileges_System_Reload_Dictionaries("1.0"), ) -@Examples("privilege",[ - ("ALL",), - ("SYSTEM",), - ("SYSTEM RELOAD",), - ("SYSTEM RELOAD DICTIONARIES",), - ("RELOAD DICTIONARIES",), - ("RELOAD DICTIONARY",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SYSTEM",), + ("SYSTEM RELOAD",), + ("SYSTEM RELOAD 
DICTIONARIES",), + ("RELOAD DICTIONARIES",), + ("RELOAD DICTIONARY",), + ], +) def dictionaries(self, privilege, grant_target_name, user_name, node=None): - """Run checks for `SYSTEM RELOAD DICTIONARIES` privilege. - """ + """Run checks for `SYSTEM RELOAD DICTIONARIES` privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -287,8 +361,12 @@ def dictionaries(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user is unable to execute SYSTEM RELOAD DICTIONARIES"): - node.query("SYSTEM RELOAD DICTIONARIES", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + "SYSTEM RELOAD DICTIONARIES", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM RELOAD DICTIONARIES with privilege"): @@ -296,7 +374,9 @@ def dictionaries(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user is bale to execute SYSTEM RELOAD DICTIONARIES"): - node.query("SYSTEM RELOAD DICTIONARIES", settings = [("user", f"{user_name}")]) + node.query( + "SYSTEM RELOAD DICTIONARIES", settings=[("user", f"{user_name}")] + ) with Scenario("SYSTEM RELOAD DICTIONARIES with revoked privilege"): @@ -307,8 +387,13 @@ def dictionaries(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user is unable to execute SYSTEM RELOAD DICTIONARIES"): - node.query("SYSTEM RELOAD DICTIONARIES", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + "SYSTEM RELOAD DICTIONARIES", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite def embedded_dictionaries_privileges_granted_directly(self, node=None): @@ -322,10 +407,18 @@ def embedded_dictionaries_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=embedded_dictionaries, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in embedded_dictionaries.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=embedded_dictionaries, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in embedded_dictionaries.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def embedded_dictionaries_privileges_granted_via_role(self, node=None): @@ -343,25 +436,35 @@ def embedded_dictionaries_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=embedded_dictionaries, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in embedded_dictionaries.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=embedded_dictionaries, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in embedded_dictionaries.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) @Requirements( RQ_SRS_006_RBAC_Privileges_System_Reload_EmbeddedDictionaries("1.0"), ) -@Examples("privilege",[ - ("ALL",), - 
("SYSTEM",), - ("SYSTEM RELOAD",), - ("SYSTEM RELOAD EMBEDDED DICTIONARIES",), - ("SYSTEM RELOAD DICTIONARY",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SYSTEM",), + ("SYSTEM RELOAD",), + ("SYSTEM RELOAD EMBEDDED DICTIONARIES",), + ("SYSTEM RELOAD DICTIONARY",), + ], +) def embedded_dictionaries(self, privilege, grant_target_name, user_name, node=None): - """Run checks for `SYSTEM RELOAD EMBEDDED DICTIONARIES` privilege. - """ + """Run checks for `SYSTEM RELOAD EMBEDDED DICTIONARIES` privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -375,17 +478,28 @@ def embedded_dictionaries(self, privilege, grant_target_name, user_name, node=No with And("I grant the user USAGE privilege"): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") - with Then("I check the user is unable to execute SYSTEM RELOAD EMBEDDED DICTIONARIES"): - node.query("SYSTEM RELOAD EMBEDDED DICTIONARIES", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + with Then( + "I check the user is unable to execute SYSTEM RELOAD EMBEDDED DICTIONARIES" + ): + node.query( + "SYSTEM RELOAD EMBEDDED DICTIONARIES", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM RELOAD EMBEDDED DICTIONARIES with privilege"): with When(f"I grant {privilege} on the table"): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") - with Then("I check the user is bale to execute SYSTEM RELOAD EMBEDDED DICTIONARIES"): - node.query("SYSTEM RELOAD EMBEDDED DICTIONARIES", settings = [("user", f"{user_name}")]) + with Then( + "I check the user is bale to execute SYSTEM RELOAD EMBEDDED DICTIONARIES" + ): + node.query( + "SYSTEM RELOAD EMBEDDED DICTIONARIES", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM RELOAD EMBEDDED DICTIONARIES with revoked privilege"): @@ -395,27 +509,55 @@ def embedded_dictionaries(self, privilege, grant_target_name, user_name, node=No with And(f"I revoke {privilege} on the table"): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") - with Then("I check the user is unable to execute SYSTEM RELOAD EMBEDDED DICTIONARIES"): - node.query("SYSTEM RELOAD EMBEDDED DICTIONARIES", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + with Then( + "I check the user is unable to execute SYSTEM RELOAD EMBEDDED DICTIONARIES" + ): + node.query( + "SYSTEM RELOAD EMBEDDED DICTIONARIES", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("system reload") @Requirements( RQ_SRS_006_RBAC_Privileges_System_Reload("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM RELOAD. 
- """ + """Check the RBAC functionality of SYSTEM RELOAD.""" self.context.node = self.context.cluster.node(node) - Suite(run=config_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=config_privileges_granted_via_role, setup=instrument_clickhouse_server_log) - Suite(run=dictionary_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=dictionary_privileges_granted_via_role, setup=instrument_clickhouse_server_log) - Suite(run=dictionaries_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=dictionaries_privileges_granted_via_role, setup=instrument_clickhouse_server_log) - Suite(run=embedded_dictionaries_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=embedded_dictionaries_privileges_granted_via_role, setup=instrument_clickhouse_server_log) + Suite( + run=config_privileges_granted_directly, setup=instrument_clickhouse_server_log + ) + Suite( + run=config_privileges_granted_via_role, setup=instrument_clickhouse_server_log + ) + Suite( + run=dictionary_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=dictionary_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=dictionaries_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=dictionaries_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=embedded_dictionaries_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=embedded_dictionaries_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) diff --git a/tests/testflows/rbac/tests/privileges/system/replication_queues.py b/tests/testflows/rbac/tests/privileges/system/replication_queues.py index 47f12b7c866..7bf5f0d8ad5 100644 --- a/tests/testflows/rbac/tests/privileges/system/replication_queues.py +++ b/tests/testflows/rbac/tests/privileges/system/replication_queues.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def replicated_privileges_granted_directly(self, node=None): """Check that a user is able to execute `SYSTEM REPLICATION QUEUES` commands if and only if @@ -17,10 +18,18 @@ def replicated_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=check_replicated_privilege, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in check_replicated_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_replicated_privilege, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in check_replicated_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def replicated_privileges_granted_via_role(self, node=None): @@ -38,35 +47,59 @@ def replicated_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_replicated_privilege, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in check_replicated_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_replicated_privilege, + examples=Examples( + 
"privilege on grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in check_replicated_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SYSTEM", "*.*"), - ("SYSTEM REPLICATION QUEUES", "table"), - ("SYSTEM STOP REPLICATION QUEUES", "table"), - ("SYSTEM START REPLICATION QUEUES", "table"), - ("START REPLICATION QUEUES", "table"), - ("STOP REPLICATION QUEUES", "table"), -]) -def check_replicated_privilege(self, privilege, on, grant_target_name, user_name, node=None): - """Run checks for commands that require SYSTEM REPLICATION QUEUES privilege. - """ +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SYSTEM", "*.*"), + ("SYSTEM REPLICATION QUEUES", "table"), + ("SYSTEM STOP REPLICATION QUEUES", "table"), + ("SYSTEM START REPLICATION QUEUES", "table"), + ("START REPLICATION QUEUES", "table"), + ("STOP REPLICATION QUEUES", "table"), + ], +) +def check_replicated_privilege( + self, privilege, on, grant_target_name, user_name, node=None +): + """Run checks for commands that require SYSTEM REPLICATION QUEUES privilege.""" if node is None: node = self.context.node - Suite(test=start_replication_queues)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name) - Suite(test=stop_replication_queues)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name) + Suite(test=start_replication_queues)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + ) + Suite(test=stop_replication_queues)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + ) + @TestSuite -def start_replication_queues(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SYSTEM START REPLICATION QUEUES` when they have privilege. 
- """ +def start_replication_queues( + self, privilege, on, grant_target_name, user_name, node=None +): + """Check that user is only able to execute `SYSTEM START REPLICATION QUEUES` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) table_name = f"table_name_{getuid()}" @@ -86,8 +119,12 @@ def start_replication_queues(self, privilege, on, grant_target_name, user_name, node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't start sends"): - node.query(f"SYSTEM START REPLICATION QUEUES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START REPLICATION QUEUES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM START REPLICATION QUEUES with privilege"): @@ -95,7 +132,10 @@ def start_replication_queues(self, privilege, on, grant_target_name, user_name, node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can start sends"): - node.query(f"SYSTEM START REPLICATION QUEUES {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM START REPLICATION QUEUES {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM START REPLICATION QUEUES with revoked privilege"): @@ -106,13 +146,19 @@ def start_replication_queues(self, privilege, on, grant_target_name, user_name, node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't start sends"): - node.query(f"SYSTEM START REPLICATION QUEUES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START REPLICATION QUEUES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite -def stop_replication_queues(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SYSTEM STOP REPLICATION QUEUES` when they have privilege. 
- """ +def stop_replication_queues( + self, privilege, on, grant_target_name, user_name, node=None +): + """Check that user is only able to execute `SYSTEM STOP REPLICATION QUEUES` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) table_name = f"table_name_{getuid()}" @@ -132,8 +178,12 @@ def stop_replication_queues(self, privilege, on, grant_target_name, user_name, n node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't stop sends"): - node.query(f"SYSTEM STOP REPLICATION QUEUES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP REPLICATION QUEUES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM STOP REPLICATION QUEUES with privilege"): @@ -141,7 +191,10 @@ def stop_replication_queues(self, privilege, on, grant_target_name, user_name, n node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can start sends"): - node.query(f"SYSTEM STOP REPLICATION QUEUES {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM STOP REPLICATION QUEUES {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM STOP REPLICATION QUEUES with revoked privilege"): @@ -152,20 +205,30 @@ def stop_replication_queues(self, privilege, on, grant_target_name, user_name, n node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't start sends"): - node.query(f"SYSTEM STOP REPLICATION QUEUES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP REPLICATION QUEUES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("system replication queues") @Requirements( RQ_SRS_006_RBAC_Privileges_System_ReplicationQueues("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM REPLICATION QUEUES. 
- """ + """Check the RBAC functionality of SYSTEM REPLICATION QUEUES.""" self.context.node = self.context.cluster.node(node) - Suite(run=replicated_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=replicated_privileges_granted_via_role, setup=instrument_clickhouse_server_log) + Suite( + run=replicated_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=replicated_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) diff --git a/tests/testflows/rbac/tests/privileges/system/restart_replica.py b/tests/testflows/rbac/tests/privileges/system/restart_replica.py index 4e3d5f7b060..cf45a784b03 100644 --- a/tests/testflows/rbac/tests/privileges/system/restart_replica.py +++ b/tests/testflows/rbac/tests/privileges/system/restart_replica.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): """Check that a user is able to execute `SYSTEM RESTART REPLICA` commands if and only if @@ -17,10 +18,18 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=restart_replica, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in restart_replica.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=restart_replica, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in restart_replica.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): @@ -38,21 +47,31 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=restart_replica, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in restart_replica.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=restart_replica, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in restart_replica.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SYSTEM", "*.*"), - ("SYSTEM RESTART REPLICA", "table"), - ("RESTART REPLICA", "table"), -]) +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SYSTEM", "*.*"), + ("SYSTEM RESTART REPLICA", "table"), + ("RESTART REPLICA", "table"), + ], +) def restart_replica(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SYSTEM RESTARTE REPLICA` when they have privilege. 
- """ + """Check that user is only able to execute `SYSTEM RESTARTE REPLICA` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) table_name = f"table_name_{getuid()}" @@ -72,8 +91,12 @@ def restart_replica(self, privilege, on, grant_target_name, user_name, node=None node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't restart replica"): - node.query(f"SYSTEM RESTART REPLICA {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM RESTART REPLICA {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM RESTART REPLICA with privilege"): @@ -81,7 +104,10 @@ def restart_replica(self, privilege, on, grant_target_name, user_name, node=None node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can restart replica"): - node.query(f"SYSTEM RESTART REPLICA {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM RESTART REPLICA {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM RESTART REPLICA with revoked privilege"): @@ -92,19 +118,23 @@ def restart_replica(self, privilege, on, grant_target_name, user_name, node=None node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't restart replica"): - node.query(f"SYSTEM RESTART REPLICA {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM RESTART REPLICA {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("system restart replica") @Requirements( RQ_SRS_006_RBAC_Privileges_System_RestartReplica("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM RESTART REPLICA. 
- """ + """Check the RBAC functionality of SYSTEM RESTART REPLICA.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/system/sends.py b/tests/testflows/rbac/tests/privileges/system/sends.py index 4acd173d922..ee298c50cb1 100644 --- a/tests/testflows/rbac/tests/privileges/system/sends.py +++ b/tests/testflows/rbac/tests/privileges/system/sends.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def replicated_privileges_granted_directly(self, node=None): """Check that a user is able to execute `SYSTEM REPLICATED SENDS` commands if and only if @@ -17,10 +18,18 @@ def replicated_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=check_replicated_privilege, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in check_replicated_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_replicated_privilege, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in check_replicated_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def replicated_privileges_granted_via_role(self, node=None): @@ -38,43 +47,67 @@ def replicated_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_replicated_privilege, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in check_replicated_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_replicated_privilege, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in check_replicated_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SYSTEM", "*.*"), - ("SYSTEM SENDS", "*.*"), - ("SYSTEM START SENDS", "*.*"), - ("SYSTEM STOP SENDS", "*.*"), - ("START SENDS", "*.*"), - ("STOP SENDS", "*.*"), - ("SYSTEM REPLICATED SENDS", "table"), - ("SYSTEM STOP REPLICATED SENDS", "table"), - ("SYSTEM START REPLICATED SENDS", "table"), - ("START REPLICATED SENDS", "table"), - ("STOP REPLICATED SENDS", "table"), -]) +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SYSTEM", "*.*"), + ("SYSTEM SENDS", "*.*"), + ("SYSTEM START SENDS", "*.*"), + ("SYSTEM STOP SENDS", "*.*"), + ("START SENDS", "*.*"), + ("STOP SENDS", "*.*"), + ("SYSTEM REPLICATED SENDS", "table"), + ("SYSTEM STOP REPLICATED SENDS", "table"), + ("SYSTEM START REPLICATED SENDS", "table"), + ("START REPLICATED SENDS", "table"), + ("STOP REPLICATED SENDS", "table"), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_System_Sends_Replicated("1.0"), ) -def check_replicated_privilege(self, privilege, on, grant_target_name, user_name, node=None): - """Run checks for commands that require SYSTEM REPLICATED SENDS privilege. 
- """ +def check_replicated_privilege( + self, privilege, on, grant_target_name, user_name, node=None +): + """Run checks for commands that require SYSTEM REPLICATED SENDS privilege.""" if node is None: node = self.context.node - Suite(test=start_replicated_sends)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name) - Suite(test=stop_replicated_sends)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name) + Suite(test=start_replicated_sends)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + ) + Suite(test=stop_replicated_sends)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + ) + @TestSuite -def start_replicated_sends(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SYSTEM START REPLICATED SENDS` when they have privilege. - """ +def start_replicated_sends( + self, privilege, on, grant_target_name, user_name, node=None +): + """Check that user is only able to execute `SYSTEM START REPLICATED SENDS` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) table_name = f"table_name_{getuid()}" @@ -94,8 +127,12 @@ def start_replicated_sends(self, privilege, on, grant_target_name, user_name, no node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't start sends"): - node.query(f"SYSTEM START REPLICATED SENDS {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START REPLICATED SENDS {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM START REPLICATED SENDS with privilege"): @@ -103,7 +140,10 @@ def start_replicated_sends(self, privilege, on, grant_target_name, user_name, no node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can start sends"): - node.query(f"SYSTEM START REPLICATED SENDS {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM START REPLICATED SENDS {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM START REPLICATED SENDS with revoked privilege"): @@ -114,13 +154,17 @@ def start_replicated_sends(self, privilege, on, grant_target_name, user_name, no node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't start sends"): - node.query(f"SYSTEM START REPLICATED SENDS {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START REPLICATED SENDS {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite def stop_replicated_sends(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SYSTEM STOP REPLICATED SENDS` when they have privilege. 
- """ + """Check that user is only able to execute `SYSTEM STOP REPLICATED SENDS` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) table_name = f"table_name_{getuid()}" @@ -140,8 +184,12 @@ def stop_replicated_sends(self, privilege, on, grant_target_name, user_name, nod node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't stop sends"): - node.query(f"SYSTEM STOP REPLICATED SENDS {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP REPLICATED SENDS {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM STOP REPLICATED SENDS with privilege"): @@ -149,7 +197,10 @@ def stop_replicated_sends(self, privilege, on, grant_target_name, user_name, nod node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can stop sends"): - node.query(f"SYSTEM STOP REPLICATED SENDS {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM STOP REPLICATED SENDS {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM STOP REPLICATED SENDS with revoked privilege"): @@ -160,8 +211,13 @@ def stop_replicated_sends(self, privilege, on, grant_target_name, user_name, nod node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't stop sends"): - node.query(f"SYSTEM STOP REPLICATED SENDS {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP REPLICATED SENDS {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite def distributed_privileges_granted_directly(self, node=None): @@ -176,10 +232,18 @@ def distributed_privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): table_name = f"table_name_{getuid()}" - Suite(run=check_distributed_privilege, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[user_name,user_name,table_name]) for row in check_distributed_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_distributed_privilege, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [user_name, user_name, table_name]) + for row in check_distributed_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def distributed_privileges_granted_via_role(self, node=None): @@ -198,43 +262,69 @@ def distributed_privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_distributed_privilege, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[role_name,user_name,table_name]) for row in check_distributed_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_distributed_privilege, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [role_name, user_name, table_name]) + for row in check_distributed_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SYSTEM", "*.*"), - ("SYSTEM SENDS", "*.*"), - 
("SYSTEM START SENDS", "*.*"), - ("SYSTEM STOP SENDS", "*.*"), - ("START SENDS", "*.*"), - ("STOP SENDS", "*.*"), - ("SYSTEM DISTRIBUTED SENDS", "table"), - ("SYSTEM STOP DISTRIBUTED SENDS", "table"), - ("SYSTEM START DISTRIBUTED SENDS", "table"), - ("START DISTRIBUTED SENDS", "table"), - ("STOP DISTRIBUTED SENDS", "table"), -]) +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SYSTEM", "*.*"), + ("SYSTEM SENDS", "*.*"), + ("SYSTEM START SENDS", "*.*"), + ("SYSTEM STOP SENDS", "*.*"), + ("START SENDS", "*.*"), + ("STOP SENDS", "*.*"), + ("SYSTEM DISTRIBUTED SENDS", "table"), + ("SYSTEM STOP DISTRIBUTED SENDS", "table"), + ("SYSTEM START DISTRIBUTED SENDS", "table"), + ("START DISTRIBUTED SENDS", "table"), + ("STOP DISTRIBUTED SENDS", "table"), + ], +) @Requirements( RQ_SRS_006_RBAC_Privileges_System_Sends_Distributed("1.0"), ) -def check_distributed_privilege(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Run checks for commands that require SYSTEM DISTRIBUTED SENDS privilege. - """ +def check_distributed_privilege( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Run checks for commands that require SYSTEM DISTRIBUTED SENDS privilege.""" if node is None: node = self.context.node - Suite(test=start_distributed_moves)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, table_name=table_name) - Suite(test=stop_distributed_moves)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, table_name=table_name) + Suite(test=start_distributed_moves)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + table_name=table_name, + ) + Suite(test=stop_distributed_moves)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + table_name=table_name, + ) + @TestSuite -def start_distributed_moves(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Check that user is only able to execute `SYSTEM START DISTRIBUTED SENDS` when they have privilege. 
- """ +def start_distributed_moves( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Check that user is only able to execute `SYSTEM START DISTRIBUTED SENDS` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) table0_name = f"table0_{getuid()}" @@ -246,7 +336,9 @@ def start_distributed_moves(self, privilege, on, grant_target_name, user_name, t with table(node, table0_name): try: with Given("I have a distributed table"): - node.query(f"CREATE TABLE {table_name} (a UInt64) ENGINE = Distributed(sharded_cluster, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table_name} (a UInt64) ENGINE = Distributed(sharded_cluster, default, {table0_name}, rand())" + ) with Scenario("SYSTEM START DISTRIBUTED SENDS without privilege"): @@ -257,8 +349,12 @@ def start_distributed_moves(self, privilege, on, grant_target_name, user_name, t node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't start merges"): - node.query(f"SYSTEM START DISTRIBUTED SENDS {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START DISTRIBUTED SENDS {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM START DISTRIBUTED SENDS with privilege"): @@ -266,7 +362,10 @@ def start_distributed_moves(self, privilege, on, grant_target_name, user_name, t node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can start merges"): - node.query(f"SYSTEM START DISTRIBUTED SENDS {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM START DISTRIBUTED SENDS {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM START DISTRIBUTED SENDS with revoked privilege"): @@ -277,17 +376,23 @@ def start_distributed_moves(self, privilege, on, grant_target_name, user_name, t node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't start merges"): - node.query(f"SYSTEM START DISTRIBUTED SENDS {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START DISTRIBUTED SENDS {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the distributed table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestSuite -def stop_distributed_moves(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Check that user is only able to execute `SYSTEM STOP DISTRIBUTED SENDS` when they have privilege. 
- """ +def stop_distributed_moves( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Check that user is only able to execute `SYSTEM STOP DISTRIBUTED SENDS` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) table0_name = f"table0_{getuid()}" @@ -299,7 +404,9 @@ def stop_distributed_moves(self, privilege, on, grant_target_name, user_name, ta with table(node, table0_name): try: with Given("I have a distributed table"): - node.query(f"CREATE TABLE {table_name} (a UInt64) ENGINE = Distributed(sharded_cluster, default, {table0_name}, rand())") + node.query( + f"CREATE TABLE {table_name} (a UInt64) ENGINE = Distributed(sharded_cluster, default, {table0_name}, rand())" + ) with Scenario("SYSTEM STOP DISTRIBUTED SENDS without privilege"): @@ -310,8 +417,12 @@ def stop_distributed_moves(self, privilege, on, grant_target_name, user_name, ta node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't stop merges"): - node.query(f"SYSTEM STOP DISTRIBUTED SENDS {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP DISTRIBUTED SENDS {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM STOP DISTRIBUTED SENDS with privilege"): @@ -319,7 +430,10 @@ def stop_distributed_moves(self, privilege, on, grant_target_name, user_name, ta node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can stop merges"): - node.query(f"SYSTEM STOP DISTRIBUTED SENDS {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM STOP DISTRIBUTED SENDS {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM STOP DISTRIBUTED SENDS with revoked privilege"): @@ -330,25 +444,41 @@ def stop_distributed_moves(self, privilege, on, grant_target_name, user_name, ta node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't stop merges"): - node.query(f"SYSTEM STOP DISTRIBUTED SENDS {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP DISTRIBUTED SENDS {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the distributed table"): node.query(f"DROP TABLE IF EXISTS {table_name}") + @TestFeature @Name("system sends") @Requirements( RQ_SRS_006_RBAC_Privileges_System_Sends("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM SENDS. 
- """ + """Check the RBAC functionality of SYSTEM SENDS.""" self.context.node = self.context.cluster.node(node) - Suite(run=replicated_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=replicated_privileges_granted_via_role, setup=instrument_clickhouse_server_log) - Suite(run=distributed_privileges_granted_directly, setup=instrument_clickhouse_server_log) - Suite(run=distributed_privileges_granted_via_role, setup=instrument_clickhouse_server_log) + Suite( + run=replicated_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=replicated_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=distributed_privileges_granted_directly, + setup=instrument_clickhouse_server_log, + ) + Suite( + run=distributed_privileges_granted_via_role, + setup=instrument_clickhouse_server_log, + ) diff --git a/tests/testflows/rbac/tests/privileges/system/shutdown.py b/tests/testflows/rbac/tests/privileges/system/shutdown.py index 26752ef4d01..2b09b7d8585 100644 --- a/tests/testflows/rbac/tests/privileges/system/shutdown.py +++ b/tests/testflows/rbac/tests/privileges/system/shutdown.py @@ -7,10 +7,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): - """Run checks with privileges granted directly. - """ + """Run checks with privileges granted directly.""" user_name = f"user_{getuid()}" @@ -19,15 +19,22 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): - """Run checks with privileges granted through a role. - """ + """Run checks with privileges granted through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" @@ -40,33 +47,47 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in check_privilege.examples - ], args=Args(name="privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in check_privilege.examples + ], + args=Args(name="privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege",[ - ("ALL",), - ("SYSTEM",), - ("SYSTEM SHUTDOWN",), - ("SHUTDOWN",), - ("SYSTEM KILL",), -]) +@Examples( + "privilege", + [ + ("ALL",), + ("SYSTEM",), + ("SYSTEM SHUTDOWN",), + ("SHUTDOWN",), + ("SYSTEM KILL",), + ], +) def check_privilege(self, privilege, grant_target_name, user_name, node=None): - """Run checks for commands that require SYSTEM SHUTDOWN privilege. 
- """ + """Run checks for commands that require SYSTEM SHUTDOWN privilege.""" if node is None: node = self.context.node - Suite(test=shutdown)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) - Suite(test=kill)(privilege=privilege, grant_target_name=grant_target_name, user_name=user_name) + Suite(test=shutdown)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + Suite(test=kill)( + privilege=privilege, grant_target_name=grant_target_name, user_name=user_name + ) + @TestSuite def shutdown(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SYSTEM SHUTDOWN` when they have the necessary privilege. - """ + """Check that user is only able to execute `SYSTEM SHUTDOWN` when they have the necessary privilege.""" cluster = self.context.cluster exitcode, message = errors.not_enough_privileges(name=user_name) @@ -83,8 +104,12 @@ def shutdown(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SYSTEM SHUTDOWN"): - node.query(f"SYSTEM SHUTDOWN", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM SHUTDOWN", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM SHUTDOWN with privilege"): timeout = 60 @@ -94,13 +119,13 @@ def shutdown(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SYSTEM SHUTDOWN"): - node.query(f"SYSTEM SHUTDOWN", settings = [("user", f"{user_name}")]) + node.query(f"SYSTEM SHUTDOWN", settings=[("user", f"{user_name}")]) with And("I close all connections to the node"): node.close_bashes() with And("I check that system is down"): - command = f"echo -e \"SELECT 1\" | {cluster.docker_compose} exec -T {node.name} clickhouse client -n" + command = f'echo -e "SELECT 1" | {cluster.docker_compose} exec -T {node.name} clickhouse client -n' start_time = time.time() @@ -127,13 +152,17 @@ def shutdown(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SYSTEM SHUTDOWN"): - node.query(f"SYSTEM SHUTDOWN", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM SHUTDOWN", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestSuite def kill(self, privilege, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SYSTEM KILL` when they have the necessary privilege. 
- """ + """Check that user is only able to execute `SYSTEM KILL` when they have the necessary privilege.""" cluster = self.context.cluster exitcode, message = errors.not_enough_privileges(name=user_name) @@ -150,8 +179,12 @@ def kill(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't use SYSTEM KILL"): - node.query(f"SYSTEM KILL", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM KILL", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM KILL with privilege"): timeout = 60 @@ -161,7 +194,7 @@ def kill(self, privilege, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}") with Then("I check the user can use SYSTEM KILL"): - command = f"echo -e \"SYSTEM KILL\" | clickhouse client -n" + command = f'echo -e "SYSTEM KILL" | clickhouse client -n' with By("executing command", description=command): self.context.cluster.bash(node.name).send(command) @@ -169,7 +202,7 @@ def kill(self, privilege, grant_target_name, user_name, node=None): node.close_bashes() with And("I check that system is down"): - command = f"echo -e \"SELECT 1\" | {cluster.docker_compose} exec -T {node.name} clickhouse client -n" + command = f'echo -e "SELECT 1" | {cluster.docker_compose} exec -T {node.name} clickhouse client -n' start_time = time.time() @@ -196,19 +229,23 @@ def kill(self, privilege, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}") with Then("I check the user cannot use SYSTEM KILL"): - node.query(f"SYSTEM KILL", settings=[("user",user_name)], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM KILL", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("system shutdown") @Requirements( RQ_SRS_006_RBAC_Privileges_System_Shutdown("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM SHUTDOWN. 
- """ + """Check the RBAC functionality of SYSTEM SHUTDOWN.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/system/sync_replica.py b/tests/testflows/rbac/tests/privileges/system/sync_replica.py index 14681ad31ae..6bb7f9820a9 100644 --- a/tests/testflows/rbac/tests/privileges/system/sync_replica.py +++ b/tests/testflows/rbac/tests/privileges/system/sync_replica.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): """Check that a user is able to execute `SYSTEM SYNC REPLICA` commands if and only if @@ -17,10 +18,18 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): - Suite(run=sync_replica, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[user_name,user_name]) for row in sync_replica.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=sync_replica, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [user_name, user_name]) + for row in sync_replica.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): @@ -38,21 +47,31 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=sync_replica, - examples=Examples("privilege on grant_target_name user_name", [ - tuple(list(row)+[role_name,user_name]) for row in sync_replica.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=sync_replica, + examples=Examples( + "privilege on grant_target_name user_name", + [ + tuple(list(row) + [role_name, user_name]) + for row in sync_replica.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SYSTEM", "*.*"), - ("SYSTEM SYNC REPLICA", "table"), - ("SYNC REPLICA", "table"), -]) +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SYSTEM", "*.*"), + ("SYSTEM SYNC REPLICA", "table"), + ("SYNC REPLICA", "table"), + ], +) def sync_replica(self, privilege, on, grant_target_name, user_name, node=None): - """Check that user is only able to execute `SYSTEM SYNCE REPLICA` when they have privilege. 
- """ + """Check that user is only able to execute `SYSTEM SYNCE REPLICA` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) table_name = f"table_name_{getuid()}" @@ -72,8 +91,12 @@ def sync_replica(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't sync replica"): - node.query(f"SYSTEM SYNC REPLICA {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM SYNC REPLICA {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM SYNC REPLICA with privilege"): @@ -81,7 +104,10 @@ def sync_replica(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can sync replica"): - node.query(f"SYSTEM SYNC REPLICA {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM SYNC REPLICA {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM SYNC REPLICA with revoked privilege"): @@ -92,19 +118,23 @@ def sync_replica(self, privilege, on, grant_target_name, user_name, node=None): node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't sync replica"): - node.query(f"SYSTEM SYNC REPLICA {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM SYNC REPLICA {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("system sync replica") @Requirements( RQ_SRS_006_RBAC_Privileges_System_SyncReplica("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM SYNC REPLICA. 
- """ + """Check the RBAC functionality of SYSTEM SYNC REPLICA.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/system/ttl_merges.py b/tests/testflows/rbac/tests/privileges/system/ttl_merges.py index a59cc530a6d..5f6d1c9475f 100644 --- a/tests/testflows/rbac/tests/privileges/system/ttl_merges.py +++ b/tests/testflows/rbac/tests/privileges/system/ttl_merges.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privileges_granted_directly(self, node=None): """Check that a user is able to execute `SYSTEM TTL MERGES` commands if and only if @@ -18,10 +19,18 @@ def privileges_granted_directly(self, node=None): with user(node, f"{user_name}"): table_name = f"table_name_{getuid()}" - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[user_name,user_name,table_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [user_name, user_name, table_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestSuite def privileges_granted_via_role(self, node=None): @@ -40,35 +49,61 @@ def privileges_granted_via_role(self, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Suite(run=check_privilege, - examples=Examples("privilege on grant_target_name user_name table_name", [ - tuple(list(row)+[role_name,user_name,table_name]) for row in check_privilege.examples - ], args=Args(name="check privilege={privilege}", format_name=True))) + Suite( + run=check_privilege, + examples=Examples( + "privilege on grant_target_name user_name table_name", + [ + tuple(list(row) + [role_name, user_name, table_name]) + for row in check_privilege.examples + ], + args=Args(name="check privilege={privilege}", format_name=True), + ), + ) + @TestOutline(Suite) -@Examples("privilege on",[ - ("ALL", "*.*"), - ("SYSTEM", "*.*"), - ("SYSTEM TTL MERGES", "table"), - ("SYSTEM STOP TTL MERGES", "table"), - ("SYSTEM START TTL MERGES", "table"), - ("START TTL MERGES", "table"), - ("STOP TTL MERGES", "table"), -]) -def check_privilege(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Run checks for commands that require SYSTEM TTL MERGES privilege. 
- """ +@Examples( + "privilege on", + [ + ("ALL", "*.*"), + ("SYSTEM", "*.*"), + ("SYSTEM TTL MERGES", "table"), + ("SYSTEM STOP TTL MERGES", "table"), + ("SYSTEM START TTL MERGES", "table"), + ("START TTL MERGES", "table"), + ("STOP TTL MERGES", "table"), + ], +) +def check_privilege( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Run checks for commands that require SYSTEM TTL MERGES privilege.""" if node is None: node = self.context.node - Suite(test=start_ttl_merges)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, table_name=table_name) - Suite(test=stop_ttl_merges)(privilege=privilege, on=on, grant_target_name=grant_target_name, user_name=user_name, table_name=table_name) + Suite(test=start_ttl_merges)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + table_name=table_name, + ) + Suite(test=stop_ttl_merges)( + privilege=privilege, + on=on, + grant_target_name=grant_target_name, + user_name=user_name, + table_name=table_name, + ) + @TestSuite -def start_ttl_merges(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Check that user is only able to execute `SYSTEM START TTL MERGES` when they have privilege. - """ +def start_ttl_merges( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Check that user is only able to execute `SYSTEM START TTL MERGES` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -87,8 +122,12 @@ def start_ttl_merges(self, privilege, on, grant_target_name, user_name, table_na node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't start merges"): - node.query(f"SYSTEM START TTL MERGES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START TTL MERGES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM START TTL MERGES with privilege"): @@ -96,7 +135,10 @@ def start_ttl_merges(self, privilege, on, grant_target_name, user_name, table_na node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can start merges"): - node.query(f"SYSTEM START TTL MERGES {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM START TTL MERGES {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM START TTL MERGES with revoked privilege"): @@ -107,13 +149,19 @@ def start_ttl_merges(self, privilege, on, grant_target_name, user_name, table_na node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't start merges"): - node.query(f"SYSTEM START TTL MERGES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM START TTL MERGES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestSuite -def stop_ttl_merges(self, privilege, on, grant_target_name, user_name, table_name, node=None): - """Check that user is only able to execute `SYSTEM STOP TTL MERGES` when they have privilege. 
- """ +def stop_ttl_merges( + self, privilege, on, grant_target_name, user_name, table_name, node=None +): + """Check that user is only able to execute `SYSTEM STOP TTL MERGES` when they have privilege.""" exitcode, message = errors.not_enough_privileges(name=user_name) if node is None: @@ -132,8 +180,12 @@ def stop_ttl_merges(self, privilege, on, grant_target_name, user_name, table_nam node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I check the user can't stop merges"): - node.query(f"SYSTEM STOP TTL MERGES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP TTL MERGES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with Scenario("SYSTEM STOP TTL MERGES with privilege"): @@ -141,7 +193,10 @@ def stop_ttl_merges(self, privilege, on, grant_target_name, user_name, table_nam node.query(f"GRANT {privilege} ON {on} TO {grant_target_name}") with Then("I check the user can stop merges"): - node.query(f"SYSTEM STOP TTL MERGES {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SYSTEM STOP TTL MERGES {table_name}", + settings=[("user", f"{user_name}")], + ) with Scenario("SYSTEM STOP TTL MERGES with revoked privilege"): @@ -152,19 +207,23 @@ def stop_ttl_merges(self, privilege, on, grant_target_name, user_name, table_nam node.query(f"REVOKE {privilege} ON {on} FROM {grant_target_name}") with Then("I check the user can't stop merges"): - node.query(f"SYSTEM STOP TTL MERGES {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SYSTEM STOP TTL MERGES {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("system ttl merges") @Requirements( RQ_SRS_006_RBAC_Privileges_System_TTLMerges("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) def feature(self, node="clickhouse1"): - """Check the RBAC functionality of SYSTEM TTL MERGES. - """ + """Check the RBAC functionality of SYSTEM TTL MERGES.""" self.context.node = self.context.cluster.node(node) Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log) diff --git a/tests/testflows/rbac/tests/privileges/truncate.py b/tests/testflows/rbac/tests/privileges/truncate.py index df81913f0a8..8e107da3c0f 100644 --- a/tests/testflows/rbac/tests/privileges/truncate.py +++ b/tests/testflows/rbac/tests/privileges/truncate.py @@ -2,10 +2,10 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite def privilege_granted_directly_or_via_role(self, table_type, node=None): - """Check that user is only able to execute TRUNCATE when they have required privilege, either directly or via role. 
- """ + """Check that user is only able to execute TRUNCATE when they have required privilege, either directly or via role.""" role_name = f"role_{getuid()}" user_name = f"user_{getuid()}" @@ -15,8 +15,15 @@ def privilege_granted_directly_or_via_role(self, table_type, node=None): with Suite("user with direct privilege"): with user(node, user_name): - with When(f"I run checks that {user_name} is only able to execute TRUNCATE with required privileges"): - privilege_check(grant_target_name=user_name, user_name=user_name, table_type=table_type, node=node) + with When( + f"I run checks that {user_name} is only able to execute TRUNCATE with required privileges" + ): + privilege_check( + grant_target_name=user_name, + user_name=user_name, + table_type=table_type, + node=node, + ) with Suite("user with privilege via role"): with user(node, user_name), role(node, role_name): @@ -24,12 +31,19 @@ def privilege_granted_directly_or_via_role(self, table_type, node=None): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - with And(f"I run checks that {user_name} with {role_name} is only able to execute TRUNCATE with required privileges"): - privilege_check(grant_target_name=role_name, user_name=user_name, table_type=table_type, node=node) + with And( + f"I run checks that {user_name} with {role_name} is only able to execute TRUNCATE with required privileges" + ): + privilege_check( + grant_target_name=role_name, + user_name=user_name, + table_type=table_type, + node=node, + ) + def privilege_check(grant_target_name, user_name, table_type, node=None): - """Run scenarios to check the user's access with different privileges. - """ + """Run scenarios to check the user's access with different privileges.""" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") with Scenario("user without privilege"): @@ -44,8 +58,12 @@ def privilege_check(grant_target_name, user_name, table_type, node=None): node.query(f"GRANT USAGE ON *.* TO {grant_target_name}") with Then("I attempt to truncate a table without privilege"): - node.query(f"TRUNCATE TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"TRUNCATE TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with privilege"): table_name = f"merge_tree_{getuid()}" @@ -56,7 +74,9 @@ def privilege_check(grant_target_name, user_name, table_type, node=None): node.query(f"GRANT TRUNCATE ON {table_name} TO {grant_target_name}") with Then("I attempt to truncate a table"): - node.query(f"TRUNCATE TABLE {table_name}", settings = [("user", user_name)]) + node.query( + f"TRUNCATE TABLE {table_name}", settings=[("user", user_name)] + ) with Scenario("user with revoked privilege"): table_name = f"merge_tree_{getuid()}" @@ -70,8 +90,12 @@ def privilege_check(grant_target_name, user_name, table_type, node=None): node.query(f"REVOKE TRUNCATE ON {table_name} FROM {grant_target_name}") with Then("I attempt to truncate a table"): - node.query(f"TRUNCATE TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"TRUNCATE TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("user with revoked ALL privilege"): table_name = f"merge_tree_{getuid()}" @@ -85,8 +109,12 @@ def privilege_check(grant_target_name, user_name, table_type, node=None): node.query(f"REVOKE ALL ON *.* FROM {grant_target_name}") with 
Then("I attempt to truncate a table"): - node.query(f"TRUNCATE TABLE {table_name}", settings = [("user", user_name)], - exitcode=exitcode, message=message) + node.query( + f"TRUNCATE TABLE {table_name}", + settings=[("user", user_name)], + exitcode=exitcode, + message=message, + ) with Scenario("execute on cluster"): table_name = f"merge_tree_{getuid()}" @@ -97,7 +125,10 @@ def privilege_check(grant_target_name, user_name, table_type, node=None): node.query(f"GRANT TRUNCATE ON {table_name} TO {grant_target_name}") with Then("I attempt to truncate a table"): - node.query(f"TRUNCATE TABLE IF EXISTS {table_name} ON CLUSTER sharded_cluster", settings = [("user", user_name)]) + node.query( + f"TRUNCATE TABLE IF EXISTS {table_name} ON CLUSTER sharded_cluster", + settings=[("user", user_name)], + ) with Scenario("user with ALL privilege"): table_name = f"merge_tree_{getuid()}" @@ -111,21 +142,21 @@ def privilege_check(grant_target_name, user_name, table_type, node=None): node.query(f"GRANT ALL ON *.* TO {grant_target_name}") with Then("I attempt to truncate a table"): - node.query(f"TRUNCATE TABLE {table_name}", settings = [("user", user_name)]) + node.query( + f"TRUNCATE TABLE {table_name}", settings=[("user", user_name)] + ) + @TestFeature @Requirements( RQ_SRS_006_RBAC_Privileges_Truncate("1.0"), RQ_SRS_006_RBAC_Privileges_All("1.0"), - RQ_SRS_006_RBAC_Privileges_None("1.0") + RQ_SRS_006_RBAC_Privileges_None("1.0"), ) -@Examples("table_type", [ - (key,) for key in table_types.keys() -]) +@Examples("table_type", [(key,) for key in table_types.keys()]) @Name("truncate") def feature(self, node="clickhouse1", stress=None, parallel=None): - """Check the RBAC functionality of TRUNCATE. - """ + """Check the RBAC functionality of TRUNCATE.""" self.context.node = self.context.cluster.node(node) if parallel is not None: @@ -134,11 +165,14 @@ def feature(self, node="clickhouse1", stress=None, parallel=None): self.context.stress = stress for example in self.examples: - table_type, = example + (table_type,) = example if table_type != "MergeTree" and not self.context.stress: continue with Example(str(example)): - with Suite(test=privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log): + with Suite( + test=privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ): privilege_granted_directly_or_via_role(table_type=table_type) diff --git a/tests/testflows/rbac/tests/syntax/alter_quota.py b/tests/testflows/rbac/tests/syntax/alter_quota.py index 6ccafc4dbcd..34ed1b00f8d 100755 --- a/tests/testflows/rbac/tests/syntax/alter_quota.py +++ b/tests/testflows/rbac/tests/syntax/alter_quota.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("alter quota") @Args(format_description=False) @@ -33,13 +34,17 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE USER user0") node.query(f"CREATE ROLE role0") - with Scenario("I alter quota with no options", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter("1.0")]): + with Scenario( + "I alter quota with no options", + requirements=[RQ_SRS_006_RBAC_Quota_Alter("1.0")], + ): with When("I alter quota"): node.query("ALTER QUOTA quota0") - with Scenario("I alter quota that does not exist, throws an exception", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter("1.0")]): + with Scenario( + "I alter quota that does not exist, throws an exception", + requirements=[RQ_SRS_006_RBAC_Quota_Alter("1.0")], + ): quota = "quota1" cleanup_quota(quota) with 
When(f"I alter quota {quota}, which does not exist"): @@ -47,24 +52,32 @@ def feature(self, node="clickhouse1"): node.query(f"ALTER QUOTA {quota}", exitcode=exitcode, message=message) del quota - with Scenario("I alter quota with if exists, quota does exist", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_IfExists("1.0")]): + with Scenario( + "I alter quota with if exists, quota does exist", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_IfExists("1.0")], + ): node.query("ALTER QUOTA IF EXISTS quota0") - with Scenario("I alter quota with if exists, quota does not exist", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_IfExists("1.0")]): + with Scenario( + "I alter quota with if exists, quota does not exist", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_IfExists("1.0")], + ): quota = "quota1" cleanup_quota(quota) with When(f"I alter quota {quota}, which does not exist, with IF EXISTS"): node.query(f"ALTER QUOTA IF EXISTS {quota}") del quota - with Scenario("I alter quota using rename, target available", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Rename("1.0")]): + with Scenario( + "I alter quota using rename, target available", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Rename("1.0")], + ): node.query("ALTER QUOTA quota0 RENAME TO quota0") - with Scenario("I alter quota using rename, target unavailable", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Rename("1.0")]): + with Scenario( + "I alter quota using rename, target unavailable", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Rename("1.0")], + ): new_quota = "quota1" try: @@ -72,39 +85,72 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE QUOTA IF NOT EXISTS {new_quota}") with When(f"I try to rename to {new_quota}"): - exitcode, message = errors.cannot_rename_quota(name="quota0", name_new=new_quota) - node.query(f"ALTER QUOTA quota0 RENAME TO {new_quota}", exitcode=exitcode, message=message) + exitcode, message = errors.cannot_rename_quota( + name="quota0", name_new=new_quota + ) + node.query( + f"ALTER QUOTA quota0 RENAME TO {new_quota}", + exitcode=exitcode, + message=message, + ) finally: with Finally(f"I cleanup target name {new_quota}"): node.query(f"DROP QUOTA IF EXISTS {new_quota}") del new_quota - keys = ['none', 'user name', 'ip address', 'client key', 'client key or user name', 'client key or ip address'] + keys = [ + "none", + "user name", + "ip address", + "client key", + "client key or user name", + "client key or ip address", + ] for key in keys: - with Scenario(f"I alter quota keyed by {key}", requirements=[ + with Scenario( + f"I alter quota keyed by {key}", + requirements=[ RQ_SRS_006_RBAC_Quota_Alter_KeyedBy("1.0"), - RQ_SRS_006_RBAC_Quota_Alter_KeyedByOptions("1.0")]): + RQ_SRS_006_RBAC_Quota_Alter_KeyedByOptions("1.0"), + ], + ): with When("I alter quota with a key"): node.query(f"ALTER QUOTA quota0 KEYED BY '{key}'") - with Scenario("I alter quota for randomized interval", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Interval_Randomized("1.0")]): + with Scenario( + "I alter quota for randomized interval", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Interval_Randomized("1.0")], + ): with When("I alter quota on a randomized interval"): node.query("ALTER QUOTA quota0 FOR RANDOMIZED INTERVAL 1 DAY NO LIMITS") - intervals = ['SECOND', 'MINUTE', 'HOUR', 'DAY', 'MONTH'] + intervals = ["SECOND", "MINUTE", "HOUR", "DAY", "MONTH"] for i, interval in enumerate(intervals): - with Scenario(f"I alter quota for interval {interval}", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Interval("1.0")]): + with Scenario( + f"I alter quota 
for interval {interval}", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Interval("1.0")], + ): with When(f"I alter quota for {interval}"): - node.query(f"ALTER QUOTA quota0 FOR INTERVAL 1 {interval} NO LIMITS") + node.query( + f"ALTER QUOTA quota0 FOR INTERVAL 1 {interval} NO LIMITS" + ) - constraints = ['MAX QUERIES', 'MAX ERRORS', 'MAX RESULT ROWS', - 'MAX RESULT BYTES', 'MAX READ ROWS', 'MAX READ BYTES', 'MAX EXECUTION TIME', - 'NO LIMITS', 'TRACKING ONLY'] + constraints = [ + "MAX QUERIES", + "MAX ERRORS", + "MAX RESULT ROWS", + "MAX RESULT BYTES", + "MAX READ ROWS", + "MAX READ BYTES", + "MAX EXECUTION TIME", + "NO LIMITS", + "TRACKING ONLY", + ] for i, constraint in enumerate(constraints): - with Scenario(f"I alter quota for {constraint.lower()}", requirements=[ + with Scenario( + f"I alter quota for {constraint.lower()}", + requirements=[ RQ_SRS_006_RBAC_Quota_Alter_Queries("1.0"), RQ_SRS_006_RBAC_Quota_Alter_Errors("1.0"), RQ_SRS_006_RBAC_Quota_Alter_ResultRows("1.0"), @@ -113,70 +159,106 @@ def feature(self, node="clickhouse1"): RQ_SRS_006_RBAC_Quota_Alter_ReadBytes("1.0"), RQ_SRS_006_RBAC_Quota_Alter_ExecutionTime("1.0"), RQ_SRS_006_RBAC_Quota_Alter_NoLimits("1.0"), - RQ_SRS_006_RBAC_Quota_Alter_TrackingOnly("1.0")]): + RQ_SRS_006_RBAC_Quota_Alter_TrackingOnly("1.0"), + ], + ): with When("I alter quota for a constraint"): - node.query(f"ALTER QUOTA quota0 FOR INTERVAL 1 DAY {constraint}{' 1024' if constraint.startswith('MAX') else ''}") + node.query( + f"ALTER QUOTA quota0 FOR INTERVAL 1 DAY {constraint}{' 1024' if constraint.startswith('MAX') else ''}" + ) - with Scenario("I create quota for multiple constraints", requirements=[ + with Scenario( + "I create quota for multiple constraints", + requirements=[ RQ_SRS_006_RBAC_Quota_Alter_Interval("1.0"), - RQ_SRS_006_RBAC_Quota_Alter_Queries("1.0")]): - node.query("ALTER QUOTA quota0 \ + RQ_SRS_006_RBAC_Quota_Alter_Queries("1.0"), + ], + ): + node.query( + "ALTER QUOTA quota0 \ FOR INTERVAL 1 DAY NO LIMITS, \ FOR INTERVAL 2 DAY MAX QUERIES 124, \ - FOR INTERVAL 1 MONTH TRACKING ONLY") + FOR INTERVAL 1 MONTH TRACKING ONLY" + ) - with Scenario("I alter quota to assign to one role", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")]): + with Scenario( + "I alter quota to assign to one role", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")], + ): with When("I alter quota to a role"): node.query("ALTER QUOTA quota0 TO role0") - with Scenario("I alter quota to assign to role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")]): + with Scenario( + "I alter quota to assign to role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")], + ): role = "role1" with Given(f"I drop {role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") with Then(f"I alter a quota, assign to role {role}, which does not exist"): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"ALTER QUOTA quota0 TO {role}", exitcode=exitcode, message=message) + node.query( + f"ALTER QUOTA quota0 TO {role}", exitcode=exitcode, message=message + ) del role - with Scenario("I alter quota to assign to all except role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")]): + with Scenario( + "I alter quota to assign to all except role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")], + ): role = "role1" with Given(f"I drop 
{role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") - with Then(f"I alter a quota, assign to all except role {role}, which does not exist"): + with Then( + f"I alter a quota, assign to all except role {role}, which does not exist" + ): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"ALTER QUOTA quota0 TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + node.query( + f"ALTER QUOTA quota0 TO ALL EXCEPT {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I alter quota to assign to one role and one user", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")]): + with Scenario( + "I alter quota to assign to one role and one user", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Assignment("1.0")], + ): with When("I alter quota to a role and a user"): node.query("ALTER QUOTA quota0 TO role0, user0") - with Scenario("I alter quota assigned to none", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Assignment_None("1.0")]): + with Scenario( + "I alter quota assigned to none", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Assignment_None("1.0")], + ): with When("I alter quota to none"): node.query("ALTER QUOTA quota0 TO NONE") - with Scenario("I alter quota to assign to all", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Assignment_All("1.0")]): + with Scenario( + "I alter quota to assign to all", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Assignment_All("1.0")], + ): with When("I alter quota to all"): node.query("ALTER QUOTA quota0 TO ALL") - with Scenario("I alter quota to assign to all except one role", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Assignment_Except("1.0")]): + with Scenario( + "I alter quota to assign to all except one role", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Assignment_Except("1.0")], + ): with When("I alter quota to all except one role"): node.query("ALTER QUOTA quota0 TO ALL EXCEPT role0") - with Scenario("I alter quota to assign to all except multiple roles", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Assignment_Except("1.0")]): + with Scenario( + "I alter quota to assign to all except multiple roles", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Assignment_Except("1.0")], + ): with When("I alter quota to all except one multiple roles"): node.query("ALTER QUOTA quota0 TO ALL EXCEPT role0, user0") - with Scenario("I alter quota on cluster", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Cluster("1.0")]): + with Scenario( + "I alter quota on cluster", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Cluster("1.0")], + ): try: with Given("I have a quota on a cluster"): node.query("CREATE QUOTA quota1 ON CLUSTER sharded_cluster") @@ -184,23 +266,33 @@ def feature(self, node="clickhouse1"): with When("I run alter quota command on a cluster"): node.query("ALTER QUOTA quota1 ON CLUSTER sharded_cluster") with And("I run alter quota command on a cluster with a key"): - node.query("ALTER QUOTA quota1 ON CLUSTER sharded_cluster KEYED BY 'none'") + node.query( + "ALTER QUOTA quota1 ON CLUSTER sharded_cluster KEYED BY 'none'" + ) with And("I run alter quota command on a cluster with an interval"): - node.query("ALTER QUOTA quota1 ON CLUSTER sharded_cluster FOR INTERVAL 1 DAY TRACKING ONLY") + node.query( + "ALTER QUOTA quota1 ON CLUSTER sharded_cluster FOR INTERVAL 1 DAY TRACKING ONLY" + ) with And("I run alter quota command on a cluster for all"): node.query("ALTER QUOTA quota1 ON CLUSTER sharded_cluster TO ALL") finally: with Finally("I drop the quota"): node.query("DROP QUOTA IF EXISTS quota1 ON CLUSTER 
sharded_cluster") - with Scenario("I alter quota on nonexistent cluster, throws exception", requirements=[ - RQ_SRS_006_RBAC_Quota_Alter_Cluster("1.0")]): + with Scenario( + "I alter quota on nonexistent cluster, throws exception", + requirements=[RQ_SRS_006_RBAC_Quota_Alter_Cluster("1.0")], + ): with When("I run alter quota on a cluster"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("ALTER QUOTA quota0 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + node.query( + "ALTER QUOTA quota0 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the quota and all the users and roles"): node.query(f"DROP QUOTA IF EXISTS quota0") node.query(f"DROP USER IF EXISTS user0") - node.query(f"DROP ROLE IF EXISTS role0") \ No newline at end of file + node.query(f"DROP ROLE IF EXISTS role0") diff --git a/tests/testflows/rbac/tests/syntax/alter_role.py b/tests/testflows/rbac/tests/syntax/alter_role.py index 5068302fc84..eb826e32a77 100755 --- a/tests/testflows/rbac/tests/syntax/alter_role.py +++ b/tests/testflows/rbac/tests/syntax/alter_role.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("alter role") def feature(self, node="clickhouse1"): @@ -23,7 +24,7 @@ def feature(self, node="clickhouse1"): try: with Given("I have a role"): node.query(f"CREATE ROLE OR REPLACE {role}") - if profile != None: #create profile when name is given + if profile != None: # create profile when name is given with Given("And I have a profile"): node.query(f"CREATE SETTINGS PROFILE OR REPLACE {profile}") yield @@ -38,14 +39,17 @@ def feature(self, node="clickhouse1"): with Given(f"I ensure that role {role} does not exist"): node.query(f"DROP ROLE IF EXISTS {role}") - with Scenario("I alter role with no options", requirements=[ - RQ_SRS_006_RBAC_Role_Alter("1.0")]): + with Scenario( + "I alter role with no options", requirements=[RQ_SRS_006_RBAC_Role_Alter("1.0")] + ): with setup("role0"): with When("I alter role"): node.query("ALTER ROLE role0") - with Scenario("I alter role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_Role_Alter("1.0")]): + with Scenario( + "I alter role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_Role_Alter("1.0")], + ): role = "role0" cleanup_role(role) with When(f"I alter role {role} that does not exist"): @@ -53,43 +57,61 @@ def feature(self, node="clickhouse1"): node.query(f"ALTER ROLE {role}", exitcode=exitcode, message=message) del role - with Scenario("I alter role if exists, role does exist", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_IfExists("1.0")]): + with Scenario( + "I alter role if exists, role does exist", + requirements=[RQ_SRS_006_RBAC_Role_Alter_IfExists("1.0")], + ): with setup("role1"): with When("I alter role with if exists"): node.query("ALTER ROLE IF EXISTS role1") - with Scenario("I alter role if exists, role does not exist", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_IfExists("1.0")]): + with Scenario( + "I alter role if exists, role does not exist", + requirements=[RQ_SRS_006_RBAC_Role_Alter_IfExists("1.0")], + ): role = "role0" cleanup_role(role) with When(f"I alter role {role} that does not exist"): node.query(f"ALTER ROLE IF EXISTS {role}") del role - with Scenario("I alter role on cluster", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Cluster("1.0")]): + with Scenario( + "I alter role on cluster", + 
requirements=[RQ_SRS_006_RBAC_Role_Alter_Cluster("1.0")], + ): try: with Given("I have a role on a cluster"): node.query("CREATE ROLE role1 ON CLUSTER sharded_cluster") with When("I run alter role on a cluster"): node.query("ALTER ROLE role1 ON CLUSTER sharded_cluster") with And("I rename role on a cluster"): - node.query("ALTER ROLE role1 ON CLUSTER sharded_cluster RENAME TO role2") + node.query( + "ALTER ROLE role1 ON CLUSTER sharded_cluster RENAME TO role2" + ) with And("I alter role with settings on a cluster"): - node.query("ALTER ROLE role2 ON CLUSTER sharded_cluster SETTINGS max_memory_usage=10000000 READONLY") + node.query( + "ALTER ROLE role2 ON CLUSTER sharded_cluster SETTINGS max_memory_usage=10000000 READONLY" + ) finally: with Finally("I drop the role"): node.query("DROP ROLE IF EXISTS role1,role2 ON CLUSTER sharded_cluster") - with Scenario("I alter role on nonexistent cluster, throws exception", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Cluster("1.0")]): + with Scenario( + "I alter role on nonexistent cluster, throws exception", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Cluster("1.0")], + ): with When("I run alter role on a cluster"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("ALTER ROLE role1 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + node.query( + "ALTER ROLE role1 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) - with Scenario("I alter role to rename, new name is available", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Rename("1.0")]): + with Scenario( + "I alter role to rename, new name is available", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Rename("1.0")], + ): with setup("role2"): new_role = "role3" try: @@ -102,95 +124,144 @@ def feature(self, node="clickhouse1"): node.query(f"DROP ROLE IF EXISTS {new_role}") del new_role - with Scenario("I alter role to rename, new name is not available, throws exception", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Rename("1.0")]): + with Scenario( + "I alter role to rename, new name is not available, throws exception", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Rename("1.0")], + ): with setup("role2a"): new_role = "role3a" try: with Given(f"Ensure target name {new_role} is NOT available"): node.query(f"CREATE ROLE IF NOT EXISTS {new_role}") with When(f"I try to rename to {new_role}"): - exitcode, message = errors.cannot_rename_role(name="role2a", name_new=new_role) - node.query(f"ALTER ROLE role2a RENAME TO {new_role}", exitcode=exitcode, message=message) + exitcode, message = errors.cannot_rename_role( + name="role2a", name_new=new_role + ) + node.query( + f"ALTER ROLE role2a RENAME TO {new_role}", + exitcode=exitcode, + message=message, + ) finally: with Finally(f"I cleanup target name {new_role}"): node.query(f"DROP ROLE IF EXISTS {new_role}") del new_role - with Scenario("I alter role settings profile", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with Scenario( + "I alter role settings profile", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")], + ): with setup("role4"): with When("I alter role with settings profile"): - node.query("ALTER ROLE role4 SETTINGS PROFILE default, max_memory_usage=10000000 READONLY") + node.query( + "ALTER ROLE role4 SETTINGS PROFILE default, max_memory_usage=10000000 READONLY" + ) - with Scenario("I alter role settings profile, profile does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with Scenario( + "I alter role settings profile, profile 
does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")], + ): with setup("role4a"): with Given("I ensure profile profile0 does not exist"): node.query("DROP SETTINGS PROFILE IF EXISTS profile0") with When("I alter role with settings profile that does not exist"): - exitcode, message = errors.settings_profile_not_found_in_disk("profile0") - node.query("ALTER ROLE role4a SETTINGS PROFILE profile0", exitcode=exitcode, message=message) + exitcode, message = errors.settings_profile_not_found_in_disk( + "profile0" + ) + node.query( + "ALTER ROLE role4a SETTINGS PROFILE profile0", + exitcode=exitcode, + message=message, + ) - with Scenario("I alter role settings profile multiple", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with Scenario( + "I alter role settings profile multiple", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")], + ): with setup("role4b", profile="profile0"): with When("I alter role with multiple profiles"): - node.query("ALTER ROLE role4b SETTINGS PROFILE default, PROFILE profile0, \ - max_memory_usage=10000000 READONLY") + node.query( + "ALTER ROLE role4b SETTINGS PROFILE default, PROFILE profile0, \ + max_memory_usage=10000000 READONLY" + ) - with Scenario("I alter role settings without profile", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with Scenario( + "I alter role settings without profile", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")], + ): with setup("role5"): with When("I alter role with settings and no profile"): - node.query("ALTER ROLE role5 SETTINGS max_memory_usage=10000000 READONLY") + node.query( + "ALTER ROLE role5 SETTINGS max_memory_usage=10000000 READONLY" + ) - with Scenario("I alter role settings, variable does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with Scenario( + "I alter role settings, variable does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")], + ): with setup("role5a"): with When("I alter role using settings and nonexistent value"): exitcode, message = errors.unknown_setting("fake_setting") - node.query("ALTER ROLE role5a SETTINGS fake_setting = 100000001", exitcode=exitcode, message=message) + node.query( + "ALTER ROLE role5a SETTINGS fake_setting = 100000001", + exitcode=exitcode, + message=message, + ) - - with Scenario("I alter role settings without profile multiple", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with Scenario( + "I alter role settings without profile multiple", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")], + ): with setup("role6"): with When("I alter role with multiple settings and no profile"): - node.query("ALTER ROLE role6 SETTINGS max_memory_usage=10000000 READONLY, \ - max_rows_to_read MIN 20 MAX 25") + node.query( + "ALTER ROLE role6 SETTINGS max_memory_usage=10000000 READONLY, \ + max_rows_to_read MIN 20 MAX 25" + ) - with Scenario("I alter role settings with multiple profiles multiple variables", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with Scenario( + "I alter role settings with multiple profiles multiple variables", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")], + ): with setup("role7", profile="profile1"): with When("I alter role with multiple settings and profiles"): - node.query("ALTER ROLE role7 SETTINGS PROFILE default, PROFILE profile1, \ - max_memory_usage=10000000 READONLY, max_rows_to_read MIN 20 MAX 25") + node.query( + "ALTER ROLE 
role7 SETTINGS PROFILE default, PROFILE profile1, \ + max_memory_usage=10000000 READONLY, max_rows_to_read MIN 20 MAX 25" + ) - with Scenario("I alter role settings readonly", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with Scenario( + "I alter role settings readonly", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")], + ): with setup("role8"): with When("I alter role with readonly"): node.query("ALTER ROLE role8 SETTINGS max_memory_usage READONLY") - with Scenario("I alter role settings writable", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with Scenario( + "I alter role settings writable", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")], + ): with setup("role9"): with When("I alter role with writable"): node.query("ALTER ROLE role9 SETTINGS max_memory_usage WRITABLE") - with Scenario("I alter role settings min, with and without = sign", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with Scenario( + "I alter role settings min, with and without = sign", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")], + ): with setup("role10"): with When("I set min, no equals"): node.query("ALTER ROLE role10 SETTINGS max_memory_usage MIN 200") with When("I set min, yes equals"): node.query("ALTER ROLE role10 SETTINGS max_memory_usage MIN = 200") - with Scenario("I alter role settings max, with and without = sign", requirements=[ - RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")]): + with Scenario( + "I alter role settings max, with and without = sign", + requirements=[RQ_SRS_006_RBAC_Role_Alter_Settings("1.0")], + ): with setup("role11"): with When("I set max, no equals"): node.query("ALTER ROLE role11 SETTINGS max_memory_usage MAX 2000") with When("I set max, yes equals"): - node.query("ALTER ROLE role11 SETTINGS max_memory_usage MAX = 200") \ No newline at end of file + node.query("ALTER ROLE role11 SETTINGS max_memory_usage MAX = 200") diff --git a/tests/testflows/rbac/tests/syntax/alter_row_policy.py b/tests/testflows/rbac/tests/syntax/alter_row_policy.py index 6422a81fec2..55eb3060699 100755 --- a/tests/testflows/rbac/tests/syntax/alter_row_policy.py +++ b/tests/testflows/rbac/tests/syntax/alter_row_policy.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("alter row policy") @Args(format_description=False) @@ -42,165 +43,279 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE ROLE role0") node.query(f"CREATE ROLE role1") - with Scenario("I alter row policy with no options", requirements=[ + with Scenario( + "I alter row policy with no options", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy0"): with When("I alter row policy"): node.query("ALTER ROW POLICY policy0 ON default.foo") - with Scenario("I alter row policy using short syntax with no options", requirements=[ + with Scenario( + "I alter row policy using short syntax with no options", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy1"): with When("I alter row policy short form"): node.query("ALTER POLICY policy1 ON default.foo") - with Scenario("I alter row policy, does not exist, throws exception", requirements=[ + with Scenario( + "I alter row policy, does not exist, throws exception", + 
requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): policy = "policy2" cleanup_policy(policy) with When(f"I alter row policy {policy} that doesn't exist"): - exitcode, message = errors.row_policy_not_found_in_disk(name=f"{policy} ON default.foo") - node.query(f"ALTER ROW POLICY {policy} ON default.foo", exitcode=exitcode, message=message) + exitcode, message = errors.row_policy_not_found_in_disk( + name=f"{policy} ON default.foo" + ) + node.query( + f"ALTER ROW POLICY {policy} ON default.foo", + exitcode=exitcode, + message=message, + ) del policy - with Scenario("I alter row policy if exists", requirements=[ + with Scenario( + "I alter row policy if exists", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_IfExists("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy2"): with When("I alter row policy using if exists"): node.query("ALTER ROW POLICY IF EXISTS policy2 ON default.foo") - with Scenario("I alter row policy if exists, policy does not exist", requirements=[ + with Scenario( + "I alter row policy if exists, policy does not exist", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_IfExists("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): policy = "policy2" cleanup_policy(policy) with When(f"I alter row policy {policy} that doesn't exist"): node.query(f"ALTER ROW POLICY IF EXISTS {policy} ON default.foo") del policy - with Scenario("I alter row policy to rename, target available", requirements=[ + with Scenario( + "I alter row policy to rename, target available", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Rename("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy3"): with When("I alter row policy with rename"): - node.query("ALTER ROW POLICY policy3 ON default.foo RENAME TO policy3") + node.query( + "ALTER ROW POLICY policy3 ON default.foo RENAME TO policy3" + ) - with Scenario("I alter row policy to rename, target unavailable", requirements=[ + with Scenario( + "I alter row policy to rename, target unavailable", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Rename("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy3"): new_policy = "policy4" try: with Given(f"Ensure target name {new_policy} is NOT available"): - node.query(f"CREATE ROW POLICY IF NOT EXISTS {new_policy} ON default.foo") + node.query( + f"CREATE ROW POLICY IF NOT EXISTS {new_policy} ON default.foo" + ) with When(f"I try to rename to {new_policy}"): - exitcode, message = errors.cannot_rename_row_policy(name="policy3 ON default.foo", - name_new=f"{new_policy} ON default.foo") - node.query(f"ALTER ROW POLICY policy3 ON default.foo RENAME TO {new_policy}", exitcode=exitcode, message=message) + exitcode, message = errors.cannot_rename_row_policy( + name="policy3 ON default.foo", + name_new=f"{new_policy} ON default.foo", + ) + node.query( + f"ALTER ROW POLICY policy3 ON default.foo RENAME TO {new_policy}", + exitcode=exitcode, + message=message, + ) finally: with Finally(f"I cleanup target name {new_policy}"): - node.query(f"DROP ROW POLICY IF EXISTS {new_policy} ON default.foo") + node.query( + f"DROP ROW POLICY IF EXISTS {new_policy} ON default.foo" + ) del new_policy - with Scenario("I alter row policy to 
permissive", requirements=[ + with Scenario( + "I alter row policy to permissive", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Permissive("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy4"): with When("I alter row policy as permissive"): node.query("ALTER ROW POLICY policy4 ON default.foo AS PERMISSIVE") - with Scenario("I alter row policy to restrictive", requirements=[ + with Scenario( + "I alter row policy to restrictive", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Access_Restrictive("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy5"): with When("I alter row policy as restrictive"): node.query("ALTER ROW POLICY policy5 ON default.foo AS RESTRICTIVE") - with Scenario("I alter row policy for select", requirements=[ + with Scenario( + "I alter row policy for select", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_ForSelect("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy6"): with When("I alter row policy using for select"): - node.query("ALTER ROW POLICY policy6 ON default.foo FOR SELECT USING x > 10") + node.query( + "ALTER ROW POLICY policy6 ON default.foo FOR SELECT USING x > 10" + ) - with Scenario("I alter row policy using condition", requirements=[ + with Scenario( + "I alter row policy using condition", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Condition("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy6"): with When("I alter row policy wtih condition"): node.query("ALTER ROW POLICY policy6 ON default.foo USING x > 10") - with Scenario("I alter row policy using condition none", requirements=[ + with Scenario( + "I alter row policy using condition none", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Condition_None("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy7"): with When("I alter row policy using no condition"): node.query("ALTER ROW POLICY policy7 ON default.foo USING NONE") - with Scenario("I alter row policy to one role", requirements=[ + with Scenario( + "I alter row policy to one role", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy8"): with When("I alter row policy to a role"): node.query("ALTER ROW POLICY policy8 ON default.foo TO role0") - with Scenario("I alter row policy to assign to role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0")]): + with Scenario( + "I alter row policy to assign to role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0")], + ): role = "role2" with cleanup("policy8a"): with Given(f"I drop {role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") - with Then(f"I alter a row policy, assign to role {role}, which does not exist"): + with Then( + f"I alter a row policy, assign to role {role}, which does not exist" + ): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"ALTER ROW POLICY policy8a ON default.foo TO {role}", exitcode=exitcode, message=message) + node.query( + f"ALTER ROW POLICY policy8a ON default.foo TO {role}", + 
exitcode=exitcode, + message=message, + ) del role - with Scenario("I alter row policy to assign to all excpet role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0")]): + with Scenario( + "I alter row policy to assign to all excpet role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0")], + ): role = "role2" with cleanup("policy8a"): with Given(f"I drop {role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") - with Then(f"I alter a row policy, assign to all except role {role}, which does not exist"): + with Then( + f"I alter a row policy, assign to all except role {role}, which does not exist" + ): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"ALTER ROW POLICY policy8a ON default.foo TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + node.query( + f"ALTER ROW POLICY policy8a ON default.foo TO ALL EXCEPT {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I alter row policy assigned to multiple roles", requirements=[ + with Scenario( + "I alter row policy assigned to multiple roles", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy9"): with When("I alter row policy to multiple roles"): - node.query("ALTER ROW POLICY policy9 ON default.foo TO role0, role1") + node.query( + "ALTER ROW POLICY policy9 ON default.foo TO role0, role1" + ) - with Scenario("I alter row policy assigned to all", requirements=[ + with Scenario( + "I alter row policy assigned to all", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_All("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy10"): with When("I alter row policy to all"): node.query("ALTER ROW POLICY policy10 ON default.foo TO ALL") - with Scenario("I alter row policy assigned to all except one role", requirements=[ + with Scenario( + "I alter row policy assigned to all except one role", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_AllExcept("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy11"): with When("I alter row policy to all except"): - node.query("ALTER ROW POLICY policy11 ON default.foo TO ALL EXCEPT role0") + node.query( + "ALTER ROW POLICY policy11 ON default.foo TO ALL EXCEPT role0" + ) - with Scenario("I alter row policy assigned to all except multiple roles", requirements=[ + with Scenario( + "I alter row policy assigned to all except multiple roles", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_AllExcept("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy12"): with When("I alter row policy to all except multiple roles"): - node.query("ALTER ROW POLICY policy12 ON default.foo TO ALL EXCEPT role0, role1") + node.query( + "ALTER ROW POLICY policy12 ON default.foo TO ALL EXCEPT role0, role1" + ) - with Scenario("I alter row policy assigned to none", requirements=[ + with Scenario( + "I alter row policy assigned to none", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_Assignment_None("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with cleanup("policy12"): with When("I alter row policy to 
no assignment"): node.query("ALTER ROW POLICY policy12 ON default.foo TO NONE") @@ -208,37 +323,65 @@ def feature(self, node="clickhouse1"): # Official syntax: ON CLUSTER cluster_name ON database.table # Working syntax: both orderings of ON CLUSTER and TABLE clauses work - with Scenario("I alter row policy on cluster", requirements=[ + with Scenario( + "I alter row policy on cluster", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): try: with Given("I have a row policy"): - node.query("CREATE ROW POLICY policy13 ON CLUSTER sharded_cluster ON default.foo") + node.query( + "CREATE ROW POLICY policy13 ON CLUSTER sharded_cluster ON default.foo" + ) with When("I run alter row policy command"): - node.query("ALTER ROW POLICY policy13 ON CLUSTER sharded_cluster ON default.foo") + node.query( + "ALTER ROW POLICY policy13 ON CLUSTER sharded_cluster ON default.foo" + ) finally: with Finally("I drop the row policy"): - node.query("DROP ROW POLICY IF EXISTS policy13 ON CLUSTER sharded_cluster ON default.foo") + node.query( + "DROP ROW POLICY IF EXISTS policy13 ON CLUSTER sharded_cluster ON default.foo" + ) - with Scenario("I alter row policy on fake cluster, throws exception", requirements=[ + with Scenario( + "I alter row policy on fake cluster, throws exception", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): with When("I run alter row policy command"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("ALTER ROW POLICY policy13 ON CLUSTER fake_cluster ON default.foo", exitcode=exitcode, message=message) + node.query( + "ALTER ROW POLICY policy13 ON CLUSTER fake_cluster ON default.foo", + exitcode=exitcode, + message=message, + ) - with Scenario("I alter row policy on cluster after table", requirements=[ + with Scenario( + "I alter row policy on cluster after table", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Alter_OnCluster("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Alter_On("1.0"), + ], + ): try: with Given("I have a row policy"): - node.query("CREATE ROW POLICY policy14 ON default.foo ON CLUSTER sharded_cluster") + node.query( + "CREATE ROW POLICY policy14 ON default.foo ON CLUSTER sharded_cluster" + ) with When("I run create row policy command"): - node.query("ALTER ROW POLICY policy14 ON default.foo ON CLUSTER sharded_cluster") + node.query( + "ALTER ROW POLICY policy14 ON default.foo ON CLUSTER sharded_cluster" + ) finally: with Finally("I drop the row policy"): - node.query("DROP ROW POLICY IF EXISTS policy14 ON default.foo ON CLUSTER sharded_cluster") + node.query( + "DROP ROW POLICY IF EXISTS policy14 ON default.foo ON CLUSTER sharded_cluster" + ) finally: with Finally("I drop the table and the roles"): node.query(f"DROP TABLE IF EXISTS default.foo") - node.query(f"DROP ROLE IF EXISTS role0, role1") \ No newline at end of file + node.query(f"DROP ROLE IF EXISTS role0, role1") diff --git a/tests/testflows/rbac/tests/syntax/alter_settings_profile.py b/tests/testflows/rbac/tests/syntax/alter_settings_profile.py index 4533f6aea65..bd78a76bd51 100755 --- a/tests/testflows/rbac/tests/syntax/alter_settings_profile.py +++ b/tests/testflows/rbac/tests/syntax/alter_settings_profile.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + 
@TestFeature @Name("alter settings profile") @Args(format_description=False) @@ -31,28 +32,49 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE USER user0") node.query(f"CREATE ROLE role0") - with Scenario("I alter settings profile with no options", requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter("1.0")]): + with Scenario( + "I alter settings profile with no options", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter("1.0")], + ): with When("I alter settings profile"): node.query("ALTER SETTINGS PROFILE profile0") - with Scenario("I alter settings profile short form", requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter("1.0")]): + with Scenario( + "I alter settings profile short form", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter("1.0")], + ): with When("I short form alter settings profile"): node.query("ALTER PROFILE profile0") - with Scenario("I alter settings profile that does not exist, throws exception", requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter("1.0")]): + with Scenario( + "I alter settings profile that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter("1.0")], + ): profile = "profile1" cleanup_profile(profile) with When(f"I alter settings profile {profile} that doesn't exist"): - exitcode, message = errors.settings_profile_not_found_in_disk(name=profile) - node.query(f"ALTER SETTINGS PROFILE {profile}", exitcode=exitcode, message=message) + exitcode, message = errors.settings_profile_not_found_in_disk( + name=profile + ) + node.query( + f"ALTER SETTINGS PROFILE {profile}", + exitcode=exitcode, + message=message, + ) del profile - with Scenario("I alter settings profile if exists", requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_IfExists("1.0")]): + with Scenario( + "I alter settings profile if exists", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_IfExists("1.0")], + ): with When("I alter settings profile using if exists"): node.query("ALTER SETTINGS PROFILE IF EXISTS profile0") - with Scenario("I alter settings profile if exists, profile does not exist", requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_IfExists("1.0")]): + with Scenario( + "I alter settings profile if exists, profile does not exist", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_IfExists("1.0")], + ): profile = "profile1" cleanup_profile(profile) @@ -61,11 +83,17 @@ def feature(self, node="clickhouse1"): del profile - with Scenario("I alter settings profile to rename, target available", requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Rename("1.0")]): + with Scenario( + "I alter settings profile to rename, target available", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Rename("1.0")], + ): with When("I alter settings profile by renaming it"): node.query("ALTER SETTINGS PROFILE profile0 RENAME TO profile0") - with Scenario("I alter settings profile to rename, target unavailable", requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Rename("1.0")]): + with Scenario( + "I alter settings profile to rename, target unavailable", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Rename("1.0")], + ): new_profile = "profile1" try: @@ -73,157 +101,293 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE SETTINGS PROFILE IF NOT EXISTS {new_profile}") with When(f"I try to rename to {new_profile}"): - exitcode, message = errors.cannot_rename_settings_profile(name="profile0", name_new=new_profile) - node.query(f"ALTER SETTINGS PROFILE profile0 RENAME TO {new_profile}", exitcode=exitcode, message=message) + 
exitcode, message = errors.cannot_rename_settings_profile( + name="profile0", name_new=new_profile + ) + node.query( + f"ALTER SETTINGS PROFILE profile0 RENAME TO {new_profile}", + exitcode=exitcode, + message=message, + ) finally: with Finally(f"I cleanup target name {new_profile}"): node.query(f"DROP SETTINGS PROFILE IF EXISTS {new_profile}") del new_profile - with Scenario("I alter settings profile with a setting value", requirements=[ + with Scenario( + "I alter settings profile with a setting value", + requirements=[ RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables("1.0"), - RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0")]): + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0"), + ], + ): with When("I alter settings profile using settings"): - node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage = 100000001") + node.query( + "ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage = 100000001" + ) - with Scenario("I alter settings profile with a setting value, does not exist, throws exception", requirements=[ + with Scenario( + "I alter settings profile with a setting value, does not exist, throws exception", + requirements=[ RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables("1.0"), - RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0")]): + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0"), + ], + ): with When("I alter settings profile using settings and nonexistent value"): exitcode, message = errors.unknown_setting("fake_setting") - node.query("ALTER SETTINGS PROFILE profile0 SETTINGS fake_setting = 100000001", exitcode=exitcode, message=message) + node.query( + "ALTER SETTINGS PROFILE profile0 SETTINGS fake_setting = 100000001", + exitcode=exitcode, + message=message, + ) - with Scenario("I alter settings profile with a min setting value", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0")]): + with Scenario( + "I alter settings profile with a min setting value", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0") + ], + ): with When("I alter settings profile using 2 minimum formats"): - node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MIN 100000001") - node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MIN = 100000001") + node.query( + "ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MIN 100000001" + ) + node.query( + "ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MIN = 100000001" + ) - with Scenario("I alter settings profile with a max setting value", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0")]): + with Scenario( + "I alter settings profile with a max setting value", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0") + ], + ): with When("I alter settings profile using 2 maximum formats"): - node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MAX 100000001") - node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MAX = 100000001") + node.query( + "ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MAX 100000001" + ) + node.query( + "ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MAX = 100000001" + ) - with Scenario("I alter settings profile with min and max setting values", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0")]): + with Scenario( + "I alter settings profile with min and max setting values", + requirements=[ + 
RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0") + ], + ): with When("I alter settings profile with both min and max"): - node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MIN 100000001 MAX 200000001") + node.query( + "ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage MIN 100000001 MAX 200000001" + ) - with Scenario("I alter settings profile with a readonly setting", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0")]): + with Scenario( + "I alter settings profile with a readonly setting", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0") + ], + ): with When("I alter settings profile with with readonly"): - node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage READONLY") + node.query( + "ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage READONLY" + ) - with Scenario("I alter settings profile with a writable setting", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0")]): + with Scenario( + "I alter settings profile with a writable setting", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Constraints("1.0") + ], + ): with When("I alter settings profile with writable"): - node.query("ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage WRITABLE") + node.query( + "ALTER SETTINGS PROFILE profile0 SETTINGS max_memory_usage WRITABLE" + ) - with Scenario("I alter settings profile with inherited settings", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_Inherit("1.0")]): + with Scenario( + "I alter settings profile with inherited settings", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_Inherit("1.0") + ], + ): with When("I alter settings profile with inherit"): node.query("ALTER SETTINGS PROFILE profile0 SETTINGS INHERIT 'default'") - with Scenario("I alter settings profile with inherit, parent profile does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_Inherit("1.0")]): + with Scenario( + "I alter settings profile with inherit, parent profile does not exist, throws exception", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_Inherit("1.0") + ], + ): profile = "profile3" with Given(f"I ensure that profile {profile} does not exist"): node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") with When("I alter settings profile inherit from nonexistant parent"): exitcode, message = errors.settings_profile_not_found_in_disk(profile) - node.query(f"ALTER PROFILE profile0 SETTINGS INHERIT {profile}", exitcode=exitcode, message=message) + node.query( + f"ALTER PROFILE profile0 SETTINGS INHERIT {profile}", + exitcode=exitcode, + message=message, + ) del profile - with Scenario("I alter settings profile with multiple settings", requirements=[ + with Scenario( + "I alter settings profile with multiple settings", + requirements=[ RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables("1.0"), - RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0")]): + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0"), + ], + ): with When("I alter settings profile with multiple settings"): - node.query("ALTER SETTINGS PROFILE profile0" - " SETTINGS max_memory_usage = 100000001" - " SETTINGS max_memory_usage_for_user = 100000001") + node.query( + "ALTER SETTINGS PROFILE profile0" + " SETTINGS max_memory_usage = 100000001" + " SETTINGS max_memory_usage_for_user = 100000001" + ) - with Scenario("I alter settings profile 
with multiple settings short form", requirements=[ + with Scenario( + "I alter settings profile with multiple settings short form", + requirements=[ RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables("1.0"), - RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0")]): + RQ_SRS_006_RBAC_SettingsProfile_Alter_Variables_Value("1.0"), + ], + ): with When("I alter settings profile with short form multiple settings"): - node.query("ALTER SETTINGS PROFILE profile0" - " SETTINGS max_memory_usage = 100000001," - " max_memory_usage_for_user = 100000001") + node.query( + "ALTER SETTINGS PROFILE profile0" + " SETTINGS max_memory_usage = 100000001," + " max_memory_usage_for_user = 100000001" + ) - with Scenario("I alter settings profile assigned to one role", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")]): + with Scenario( + "I alter settings profile assigned to one role", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")], + ): with When("I alter settings profile with assignment to role"): node.query("ALTER SETTINGS PROFILE profile0 TO role0") - with Scenario("I alter settings profile to assign to role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")]): + with Scenario( + "I alter settings profile to assign to role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")], + ): role = "role1" with Given(f"I drop {role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") - with Then(f"I alter a settings profile, assign to role {role}, which does not exist"): + with Then( + f"I alter a settings profile, assign to role {role}, which does not exist" + ): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"ALTER SETTINGS PROFILE profile0 TO {role}", exitcode=exitcode, message=message) + node.query( + f"ALTER SETTINGS PROFILE profile0 TO {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I alter settings profile to assign to all except role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")]): + with Scenario( + "I alter settings profile to assign to all except role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")], + ): role = "role1" with Given(f"I drop {role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") - with Then(f"I alter a settings profile, assign to all except role {role}, which does not exist"): + with Then( + f"I alter a settings profile, assign to all except role {role}, which does not exist" + ): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"ALTER SETTINGS PROFILE profile0 TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + node.query( + f"ALTER SETTINGS PROFILE profile0 TO ALL EXCEPT {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I alter settings profile assigned to multiple roles", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")]): + with Scenario( + "I alter settings profile assigned to multiple roles", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment("1.0")], + ): with When("I alter settings profile with assignment to multiple roles"): node.query("ALTER SETTINGS PROFILE profile0 TO role0, user0") - with Scenario("I alter settings profile assigned to all", requirements=[ - 
RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_All("1.0")]): + with Scenario( + "I alter settings profile assigned to all", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_All("1.0")], + ): with When("I alter settings profile with assignment to all"): node.query("ALTER SETTINGS PROFILE profile0 TO ALL") - with Scenario("I alter settings profile assigned to all except one role", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_AllExcept("1.0")]): + with Scenario( + "I alter settings profile assigned to all except one role", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_AllExcept("1.0") + ], + ): with When("I alter settings profile with assignment to all except a role"): node.query("ALTER SETTINGS PROFILE profile0 TO ALL EXCEPT role0") - with Scenario("I alter settings profile assigned to all except multiple roles", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_AllExcept("1.0")]): - with When("I alter settings profile with assignmentto all except multiple roles"): + with Scenario( + "I alter settings profile assigned to all except multiple roles", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_AllExcept("1.0") + ], + ): + with When( + "I alter settings profile with assignmentto all except multiple roles" + ): node.query("ALTER SETTINGS PROFILE profile0 TO ALL EXCEPT role0, user0") - with Scenario("I alter settings profile assigned to none", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_None("1.0")]): + with Scenario( + "I alter settings profile assigned to none", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_None("1.0")], + ): with When("I alter settings profile with assignment to none"): node.query("ALTER SETTINGS PROFILE profile0 TO NONE") - with Scenario("I alter settings profile on cluster", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_OnCluster("1.0")]): + with Scenario( + "I alter settings profile on cluster", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_OnCluster("1.0") + ], + ): try: with Given("I have a settings profile on cluster"): - node.query("CREATE SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster") + node.query( + "CREATE SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster" + ) with When("I run alter settings profile command"): - node.query("ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster") + node.query( + "ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster" + ) with And("I alter settings profile with settings"): - node.query("ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster SETTINGS max_memory_usage = 100000001") + node.query( + "ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster SETTINGS max_memory_usage = 100000001" + ) with And("I alter settings profile with inherit"): - node.query("ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster SETTINGS INHERIT 'default'") + node.query( + "ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster SETTINGS INHERIT 'default'" + ) with And("I alter settings profile to all"): - node.query("ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster TO ALL") + node.query( + "ALTER SETTINGS PROFILE profile1 ON CLUSTER sharded_cluster TO ALL" + ) finally: with Finally("I drop the settings profile"): - node.query("DROP SETTINGS PROFILE IF EXISTS profile1 ON CLUSTER sharded_cluster") + node.query( + "DROP SETTINGS PROFILE IF EXISTS profile1 ON CLUSTER sharded_cluster" + ) - with Scenario("I alter settings profile on 
fake cluster, throws exception", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_OnCluster("1.0")]): + with Scenario( + "I alter settings profile on fake cluster, throws exception", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Alter_Assignment_OnCluster("1.0") + ], + ): with When("I run alter settings profile command"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("ALTER SETTINGS PROFILE profile1 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + node.query( + "ALTER SETTINGS PROFILE profile1 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the profile and all the users and roles"): diff --git a/tests/testflows/rbac/tests/syntax/alter_user.py b/tests/testflows/rbac/tests/syntax/alter_user.py index cf8a13008c9..d022176a598 100755 --- a/tests/testflows/rbac/tests/syntax/alter_user.py +++ b/tests/testflows/rbac/tests/syntax/alter_user.py @@ -6,6 +6,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("alter user") @Args(format_description=False) @@ -33,26 +34,33 @@ def feature(self, node="clickhouse1"): with Finally("I drop the user"): node.query(f"DROP USER IF EXISTS {user}") - with Scenario("I alter user, base command", requirements=[ - RQ_SRS_006_RBAC_User_Alter("1.0")]): + with Scenario( + "I alter user, base command", requirements=[RQ_SRS_006_RBAC_User_Alter("1.0")] + ): with setup("user0"): with When("I alter user"): node.query("ALTER USER user0") - with Scenario("I alter user that does not exist without if exists, throws exception", requirements=[ - RQ_SRS_006_RBAC_User_Alter("1.0")]): + with Scenario( + "I alter user that does not exist without if exists, throws exception", + requirements=[RQ_SRS_006_RBAC_User_Alter("1.0")], + ): with When("I run alter user command, expecting error 192"): exitcode, message = errors.user_not_found_in_disk(name="user0") - node.query(f"ALTER USER user0",exitcode=exitcode, message=message) + node.query(f"ALTER USER user0", exitcode=exitcode, message=message) - with Scenario("I alter user with if exists", requirements=[ - RQ_SRS_006_RBAC_User_Alter_IfExists("1.0")]): + with Scenario( + "I alter user with if exists", + requirements=[RQ_SRS_006_RBAC_User_Alter_IfExists("1.0")], + ): with setup("user0"): with When(f"I alter user with if exists"): node.query(f"ALTER USER IF EXISTS user0") - with Scenario("I alter user that does not exist with if exists", requirements=[ - RQ_SRS_006_RBAC_User_Alter_IfExists("1.0")]): + with Scenario( + "I alter user that does not exist with if exists", + requirements=[RQ_SRS_006_RBAC_User_Alter_IfExists("1.0")], + ): user = "user0" with Given("I don't have a user"): node.query(f"DROP USER IF EXISTS {user}") @@ -60,8 +68,10 @@ def feature(self, node="clickhouse1"): node.query(f"ALTER USER IF EXISTS {user}") del user - with Scenario("I alter user on a cluster", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Cluster("1.0")]): + with Scenario( + "I alter user on a cluster", + requirements=[RQ_SRS_006_RBAC_User_Alter_Cluster("1.0")], + ): with Given("I have a user on a cluster"): node.query("CREATE USER OR REPLACE user0 ON CLUSTER sharded_cluster") with When("I alter user on a cluster"): @@ -69,106 +79,160 @@ def feature(self, node="clickhouse1"): with Finally("I drop user from cluster"): node.query("DROP USER IF EXISTS user0 ON CLUSTER sharded_cluster") - with Scenario("I alter user on a fake cluster, throws exception", requirements=[ - 
RQ_SRS_006_RBAC_User_Alter_Cluster("1.0")]): + with Scenario( + "I alter user on a fake cluster, throws exception", + requirements=[RQ_SRS_006_RBAC_User_Alter_Cluster("1.0")], + ): with When("I alter user on a fake cluster"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("ALTER USER user0 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + node.query( + "ALTER USER user0 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) - with Scenario("I alter user to rename, target available", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Rename("1.0")]): + with Scenario( + "I alter user to rename, target available", + requirements=[RQ_SRS_006_RBAC_User_Alter_Rename("1.0")], + ): with setup("user15"): with When("I alter user name"): node.query("ALTER USER user15 RENAME TO user15") - with Scenario("I alter user to rename, target unavailable", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Rename("1.0")]): + with Scenario( + "I alter user to rename, target unavailable", + requirements=[RQ_SRS_006_RBAC_User_Alter_Rename("1.0")], + ): with setup("user15"): new_user = "user16" try: with Given(f"Ensure target name {new_user} is NOT available"): node.query(f"CREATE USER IF NOT EXISTS {new_user}") with When(f"I try to rename to {new_user}"): - exitcode, message = errors.cannot_rename_user(name="user15", name_new=new_user) - node.query(f"ALTER USER user15 RENAME TO {new_user}", exitcode=exitcode, message=message) + exitcode, message = errors.cannot_rename_user( + name="user15", name_new=new_user + ) + node.query( + f"ALTER USER user15 RENAME TO {new_user}", + exitcode=exitcode, + message=message, + ) finally: with Finally(f"I cleanup target name {new_user}"): node.query(f"DROP USER IF EXISTS {new_user}") del new_user - with Scenario("I alter user password plaintext password", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Password_PlainText("1.0")]): + with Scenario( + "I alter user password plaintext password", + requirements=[RQ_SRS_006_RBAC_User_Alter_Password_PlainText("1.0")], + ): with setup("user1"): with When("I alter user with plaintext password"): - node.query("ALTER USER user1 IDENTIFIED WITH PLAINTEXT_PASSWORD BY 'mypassword'", step=When) + node.query( + "ALTER USER user1 IDENTIFIED WITH PLAINTEXT_PASSWORD BY 'mypassword'", + step=When, + ) - with Scenario("I alter user password to sha256", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Password_Sha256Password("1.0")]): + with Scenario( + "I alter user password to sha256", + requirements=[RQ_SRS_006_RBAC_User_Alter_Password_Sha256Password("1.0")], + ): with setup("user2"): with When("I alter user with sha256_password"): password = hashlib.sha256("mypassword".encode("utf-8")).hexdigest() - node.query(f"ALTER USER user2 IDENTIFIED WITH SHA256_PASSWORD BY '{password}'",step=When) + node.query( + f"ALTER USER user2 IDENTIFIED WITH SHA256_PASSWORD BY '{password}'", + step=When, + ) - with Scenario("I alter user password to double_sha1_password", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Password_DoubleSha1Password("1.0")]): + with Scenario( + "I alter user password to double_sha1_password", + requirements=[RQ_SRS_006_RBAC_User_Alter_Password_DoubleSha1Password("1.0")], + ): with setup("user3"): with When("I alter user with double_sha1_password"): + def hash(password): return hashlib.sha1(password.encode("utf-8")).hexdigest() - password = hash(hash("mypassword")) - node.query(f"ALTER USER user3 IDENTIFIED WITH DOUBLE_SHA1_PASSWORD BY '{password}'", step=When) - with Scenario("I alter user host local", 
requirements=[ - RQ_SRS_006_RBAC_User_Alter_Host_Local("1.0")]): + password = hash(hash("mypassword")) + node.query( + f"ALTER USER user3 IDENTIFIED WITH DOUBLE_SHA1_PASSWORD BY '{password}'", + step=When, + ) + + with Scenario( + "I alter user host local", + requirements=[RQ_SRS_006_RBAC_User_Alter_Host_Local("1.0")], + ): with setup("user4"): with When("I alter user with host local"): node.query("ALTER USER user4 HOST LOCAL") - with Scenario("I alter user host name", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Host_Name("1.0")]): + with Scenario( + "I alter user host name", + requirements=[RQ_SRS_006_RBAC_User_Alter_Host_Name("1.0")], + ): with setup("user5"): with When("I alter user with host name"): - node.query("ALTER USER user5 HOST NAME 'localhost', NAME 'clickhouse.com'") + node.query( + "ALTER USER user5 HOST NAME 'localhost', NAME 'clickhouse.com'" + ) - with Scenario("I alter user host regexp", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Host_Regexp("1.0")]): + with Scenario( + "I alter user host regexp", + requirements=[RQ_SRS_006_RBAC_User_Alter_Host_Regexp("1.0")], + ): with setup("user6"): with When("I alter user with host regexp"): node.query("ALTER USER user6 HOST REGEXP 'lo..*host', 'lo*host'") - with Scenario("I alter user host ip", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Host_IP("1.0")]): + with Scenario( + "I alter user host ip", requirements=[RQ_SRS_006_RBAC_User_Alter_Host_IP("1.0")] + ): with setup("user7"): with When("I alter user with host ip"): node.query("ALTER USER user7 HOST IP '127.0.0.1', IP '127.0.0.2'") - with Scenario("I alter user host like", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Host_Like("1.0")]): + with Scenario( + "I alter user host like", + requirements=[RQ_SRS_006_RBAC_User_Alter_Host_Like("1.0")], + ): with setup("user8"): with When("I alter user with host like"): node.query("ALTER USER user8 HOST LIKE '%.clickhouse.com'") - with Scenario("I alter user host any", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Host_Any("1.0")]): + with Scenario( + "I alter user host any", + requirements=[RQ_SRS_006_RBAC_User_Alter_Host_Any("1.0")], + ): with setup("user9"): with When("I alter user with host any"): node.query("ALTER USER user9 HOST ANY") - with Scenario("I alter user host many hosts", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Host_Like("1.0")]): + with Scenario( + "I alter user host many hosts", + requirements=[RQ_SRS_006_RBAC_User_Alter_Host_Like("1.0")], + ): with setup("user11"): with When("I alter user with multiple hosts"): - node.query("ALTER USER user11 HOST LIKE '%.clickhouse.com', \ - IP '127.0.0.2', NAME 'localhost', REGEXP 'lo*host'") + node.query( + "ALTER USER user11 HOST LIKE '%.clickhouse.com', \ + IP '127.0.0.2', NAME 'localhost', REGEXP 'lo*host'" + ) - with Scenario("I alter user default role set to none", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Host_None("1.0")]): + with Scenario( + "I alter user default role set to none", + requirements=[RQ_SRS_006_RBAC_User_Alter_Host_None("1.0")], + ): with setup("user12"): with When("I alter user with default role none"): node.query("ALTER USER user12 DEFAULT ROLE NONE") - with Scenario("I alter user default role set to all", requirements=[ - RQ_SRS_006_RBAC_User_Alter_DefaultRole_All("1.0")]): + with Scenario( + "I alter user default role set to all", + requirements=[RQ_SRS_006_RBAC_User_Alter_DefaultRole_All("1.0")], + ): with setup("user13"): with When("I alter user with all roles set to default"): node.query("ALTER USER user13 DEFAULT ROLE ALL") @@ -183,120 +247,178 @@ def 
feature(self, node="clickhouse1"): with Finally(f"I drop the role {role}", flags=TE): node.query(f"DROP ROLE IF EXISTS {role}") - with Scenario("I alter user default role", requirements=[ - RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]): + with Scenario( + "I alter user default role", + requirements=[RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")], + ): with setup("user14"), setup_role("role2"): with Given("I have a user with a role"): node.query("GRANT role2 TO user14") with When("I alter user default role"): node.query("ALTER USER user14 DEFAULT ROLE role2") - with Scenario("I alter user default role, setting default role", requirements=[ - RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]): + with Scenario( + "I alter user default role, setting default role", + requirements=[RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")], + ): with setup("user14a"), setup_role("default"): with Given("I grant default role to the user"): node.query("GRANT default TO user14a") with When("I alter user default role"): node.query("ALTER USER user14a DEFAULT ROLE default") - with Scenario("I alter user default role, role doesn't exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]): + with Scenario( + "I alter user default role, role doesn't exist, throws exception", + requirements=[RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")], + ): with setup("user12"): role = "role0" with Given(f"I ensure that role {role} does not exist"): node.query(f"DROP ROLE IF EXISTS {role}") with When(f"I alter user with default role {role}"): exitcode, message = errors.role_not_found_in_disk(role) - node.query(f"ALTER USER user12 DEFAULT ROLE {role}",exitcode=exitcode, message=message) + node.query( + f"ALTER USER user12 DEFAULT ROLE {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I alter user default role, all except role doesn't exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]): + with Scenario( + "I alter user default role, all except role doesn't exist, throws exception", + requirements=[RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")], + ): with setup("user12"): role = "role0" with Given(f"I ensure that role {role} does not exist"): node.query(f"DROP ROLE IF EXISTS {role}") with When(f"I alter user with default role {role}"): exitcode, message = errors.role_not_found_in_disk(role) - node.query(f"ALTER USER user12 DEFAULT ROLE ALL EXCEPT {role}",exitcode=exitcode, message=message) + node.query( + f"ALTER USER user12 DEFAULT ROLE ALL EXCEPT {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I alter user default role multiple", requirements=[ - RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")]): + with Scenario( + "I alter user default role multiple", + requirements=[RQ_SRS_006_RBAC_User_Alter_DefaultRole("1.0")], + ): with setup("user15"), setup_role("second"), setup_role("third"): with Given("I have a user with multiple roles"): node.query("GRANT second,third TO user15") with When("I alter user default role to second, third"): node.query("ALTER USER user15 DEFAULT ROLE second, third") - with Scenario("I alter user default role set to all except", requirements=[ - RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept("1.0")]): + with Scenario( + "I alter user default role set to all except", + requirements=[RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept("1.0")], + ): with setup("user16"), setup_role("second"): with Given("I have a user with a role"): node.query("GRANT second TO user16") with When("I alter 
user default role"): node.query("ALTER USER user16 DEFAULT ROLE ALL EXCEPT second") - with Scenario("I alter user default role multiple all except", requirements=[ - RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept("1.0")]): + with Scenario( + "I alter user default role multiple all except", + requirements=[RQ_SRS_006_RBAC_User_Alter_DefaultRole_AllExcept("1.0")], + ): with setup("user17"), setup_role("second"), setup_role("third"): with Given("I have a user with multiple roles"): node.query("GRANT second,third TO user17") with When("I alter user default role to all except second"): node.query("ALTER USER user17 DEFAULT ROLE ALL EXCEPT second") - with Scenario("I alter user settings profile", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Settings("1.0"), \ - RQ_SRS_006_RBAC_User_Alter_Settings_Profile("1.0")]): + with Scenario( + "I alter user settings profile", + requirements=[ + RQ_SRS_006_RBAC_User_Alter_Settings("1.0"), + RQ_SRS_006_RBAC_User_Alter_Settings_Profile("1.0"), + ], + ): with setup("user18"): try: with Given("I have a profile"): node.query(f"CREATE SETTINGS PROFILE profile10") with When("I alter user with settings and set profile to profile1"): - node.query("ALTER USER user18 SETTINGS PROFILE profile10, max_memory_usage = 100 MIN 0 MAX 1000 READONLY") + node.query( + "ALTER USER user18 SETTINGS PROFILE profile10, max_memory_usage = 100 MIN 0 MAX 1000 READONLY" + ) finally: with Finally("I drop the profile"): node.query(f"DROP SETTINGS PROFILE profile10") - with Scenario("I alter user settings profile, fake profile, throws exception", requirements=[ + with Scenario( + "I alter user settings profile, fake profile, throws exception", + requirements=[ RQ_SRS_006_RBAC_User_Alter_Settings("1.0"), - RQ_SRS_006_RBAC_User_Alter_Settings_Profile("1.0")]): + RQ_SRS_006_RBAC_User_Alter_Settings_Profile("1.0"), + ], + ): with setup("user18a"): profile = "profile0" with Given(f"I ensure that profile {profile} does not exist"): node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") - with When(f"I alter user with Settings and set profile to fake profile {profile}"): + with When( + f"I alter user with Settings and set profile to fake profile {profile}" + ): exitcode, message = errors.settings_profile_not_found_in_disk(profile) - node.query("ALTER USER user18a SETTINGS PROFILE profile0", exitcode=exitcode, message=message) + node.query( + "ALTER USER user18a SETTINGS PROFILE profile0", + exitcode=exitcode, + message=message, + ) del profile - with Scenario("I alter user settings with a fake setting, throws exception", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Settings("1.0")]): - with setup("user18b"): - with When("I alter settings profile using settings and nonexistent value"): - exitcode, message = errors.unknown_setting("fake_setting") - node.query("ALTER USER user18b SETTINGS fake_setting = 100000001", exitcode=exitcode, message=message) + with Scenario( + "I alter user settings with a fake setting, throws exception", + requirements=[RQ_SRS_006_RBAC_User_Alter_Settings("1.0")], + ): + with setup("user18b"): + with When("I alter settings profile using settings and nonexistent value"): + exitcode, message = errors.unknown_setting("fake_setting") + node.query( + "ALTER USER user18b SETTINGS fake_setting = 100000001", + exitcode=exitcode, + message=message, + ) - with Scenario("I alter user settings without profile (no equals)", requirements=[ + with Scenario( + "I alter user settings without profile (no equals)", + requirements=[ RQ_SRS_006_RBAC_User_Alter_Settings("1.0"), 
RQ_SRS_006_RBAC_User_Alter_Settings_Min("1.0"), - RQ_SRS_006_RBAC_User_Alter_Settings_Max("1.0")]): + RQ_SRS_006_RBAC_User_Alter_Settings_Max("1.0"), + ], + ): with setup("user19"): with When("I alter user with settings without profile using no equals"): - node.query("ALTER USER user19 SETTINGS max_memory_usage=10000000 MIN 100000 MAX 1000000000 READONLY") + node.query( + "ALTER USER user19 SETTINGS max_memory_usage=10000000 MIN 100000 MAX 1000000000 READONLY" + ) - #equals sign (=) syntax verify - with Scenario("I alter user settings without profile (yes equals)", requirements=[ + # equals sign (=) syntax verify + with Scenario( + "I alter user settings without profile (yes equals)", + requirements=[ RQ_SRS_006_RBAC_User_Alter_Settings("1.0"), RQ_SRS_006_RBAC_User_Alter_Settings_Min("1.0"), - RQ_SRS_006_RBAC_User_Alter_Settings_Max("1.0")]): + RQ_SRS_006_RBAC_User_Alter_Settings_Max("1.0"), + ], + ): with setup("user20"): with When("I alter user with settings without profile using equals"): - node.query("ALTER USER user20 SETTINGS max_memory_usage=10000000 MIN=100000 MAX=1000000000 READONLY") + node.query( + "ALTER USER user20 SETTINGS max_memory_usage=10000000 MIN=100000 MAX=1000000000 READONLY" + ) - #Add requirement to host: add/drop - with Scenario("I alter user to add host", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Host_AddDrop("1.0")]): + # Add requirement to host: add/drop + with Scenario( + "I alter user to add host", + requirements=[RQ_SRS_006_RBAC_User_Alter_Host_AddDrop("1.0")], + ): with setup("user21"): with When("I alter user by adding local host"): node.query("ALTER USER user21 ADD HOST LOCAL") @@ -309,8 +431,10 @@ def feature(self, node="clickhouse1"): with And("I alter user by adding host name"): node.query("ALTER USER user21 ADD HOST NAME 'localhost'") - with Scenario("I alter user to remove host", requirements=[ - RQ_SRS_006_RBAC_User_Alter_Host_AddDrop("1.0")]): + with Scenario( + "I alter user to remove host", + requirements=[RQ_SRS_006_RBAC_User_Alter_Host_AddDrop("1.0")], + ): with setup("user22"): with When("I alter user by removing local host"): node.query("ALTER USER user22 DROP HOST LOCAL") diff --git a/tests/testflows/rbac/tests/syntax/create_quota.py b/tests/testflows/rbac/tests/syntax/create_quota.py index 33dbbf9c153..8301d918c8c 100755 --- a/tests/testflows/rbac/tests/syntax/create_quota.py +++ b/tests/testflows/rbac/tests/syntax/create_quota.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("create quota") @Args(format_description=False) @@ -34,39 +35,51 @@ def feature(self, node="clickhouse1"): def create_quota(quota): with And(f"I ensure I do have quota {quota}"): - node.query(f"CREATE QUOTA OR REPLACE {quota}") + node.query(f"CREATE QUOTA OR REPLACE {quota}") try: with Given("I have a user and a role"): node.query(f"CREATE USER user0") node.query(f"CREATE ROLE role0") - with Scenario("I create quota with no options", requirements=[ - RQ_SRS_006_RBAC_Quota_Create("1.0")]): + with Scenario( + "I create quota with no options", + requirements=[RQ_SRS_006_RBAC_Quota_Create("1.0")], + ): with cleanup("quota0"): with When("I create a quota with no options"): node.query("CREATE QUOTA quota0") - with Scenario("I create quota that already exists, throws exception", requirements=[ - RQ_SRS_006_RBAC_Quota_Create("1.0")]): + with Scenario( + "I create quota that already exists, throws exception", + requirements=[RQ_SRS_006_RBAC_Quota_Create("1.0")], + ): quota = "quota0" with 
cleanup(quota): create_quota(quota) - with When(f"I create a quota {quota} that already exists without IF EXISTS, throws exception"): + with When( + f"I create a quota {quota} that already exists without IF EXISTS, throws exception" + ): exitcode, message = errors.cannot_insert_quota(name=quota) - node.query(f"CREATE QUOTA {quota}", exitcode=exitcode, message=message) + node.query( + f"CREATE QUOTA {quota}", exitcode=exitcode, message=message + ) del quota - with Scenario("I create quota if not exists, quota does not exist", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_IfNotExists("1.0")]): + with Scenario( + "I create quota if not exists, quota does not exist", + requirements=[RQ_SRS_006_RBAC_Quota_Create_IfNotExists("1.0")], + ): quota = "quota1" with cleanup(quota): with When(f"I create a quota {quota} with if not exists"): node.query(f"CREATE QUOTA IF NOT EXISTS {quota}") del quota - with Scenario("I create quota if not exists, quota does exist", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_IfNotExists("1.0")]): + with Scenario( + "I create quota if not exists, quota does exist", + requirements=[RQ_SRS_006_RBAC_Quota_Create_IfNotExists("1.0")], + ): quota = "quota1" with cleanup(quota): create_quota(quota) @@ -74,16 +87,20 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE QUOTA IF NOT EXISTS {quota}") del quota - with Scenario("I create quota or replace, quota does not exist", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Replace("1.0")]): + with Scenario( + "I create quota or replace, quota does not exist", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Replace("1.0")], + ): quota = "quota2" with cleanup(quota): with When(f"I create a quota {quota} with or replace"): node.query(f"CREATE QUOTA OR REPLACE {quota}") del quota - with Scenario("I create quota or replace, quota does exist", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Replace("1.0")]): + with Scenario( + "I create quota or replace, quota does exist", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Replace("1.0")], + ): quota = "quota2" with cleanup(quota): create_quota(quota) @@ -91,36 +108,65 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE QUOTA OR REPLACE {quota}") del quota - keys = ['none', 'user name', 'ip address', 'client key', 'client key or user name', 'client key or ip address'] + keys = [ + "none", + "user name", + "ip address", + "client key", + "client key or user name", + "client key or ip address", + ] for i, key in enumerate(keys): - with Scenario(f"I create quota keyed by {key}", requirements=[ + with Scenario( + f"I create quota keyed by {key}", + requirements=[ RQ_SRS_006_RBAC_Quota_Create_KeyedBy("1.0"), - RQ_SRS_006_RBAC_Quota_Create_KeyedByOptions("1.0")]): - name = f'quota{3 + i}' + RQ_SRS_006_RBAC_Quota_Create_KeyedByOptions("1.0"), + ], + ): + name = f"quota{3 + i}" with cleanup(name): with When(f"I create a quota with {key}"): node.query(f"CREATE QUOTA {name} KEYED BY '{key}'") - with Scenario("I create quota for randomized interval", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Interval_Randomized("1.0")]): + with Scenario( + "I create quota for randomized interval", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Interval_Randomized("1.0")], + ): with cleanup("quota9"): with When("I create a quota for randomized interval"): - node.query("CREATE QUOTA quota9 FOR RANDOMIZED INTERVAL 1 DAY NO LIMITS") + node.query( + "CREATE QUOTA quota9 FOR RANDOMIZED INTERVAL 1 DAY NO LIMITS" + ) - intervals = ['SECOND', 'MINUTE', 'HOUR', 'DAY', 'MONTH'] + intervals = ["SECOND", "MINUTE", 
"HOUR", "DAY", "MONTH"] for i, interval in enumerate(intervals): - with Scenario(f"I create quota for interval {interval}", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Interval("1.0")]): - name = f'quota{10 + i}' + with Scenario( + f"I create quota for interval {interval}", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Interval("1.0")], + ): + name = f"quota{10 + i}" with cleanup(name): with When(f"I create a quota for {interval} interval"): - node.query(f"CREATE QUOTA {name} FOR INTERVAL 1 {interval} NO LIMITS") + node.query( + f"CREATE QUOTA {name} FOR INTERVAL 1 {interval} NO LIMITS" + ) - constraints = ['MAX QUERIES', 'MAX ERRORS', 'MAX RESULT ROWS', - 'MAX RESULT BYTES', 'MAX READ ROWS', 'MAX READ BYTES', 'MAX EXECUTION TIME', - 'NO LIMITS', 'TRACKING ONLY'] + constraints = [ + "MAX QUERIES", + "MAX ERRORS", + "MAX RESULT ROWS", + "MAX RESULT BYTES", + "MAX READ ROWS", + "MAX READ BYTES", + "MAX EXECUTION TIME", + "NO LIMITS", + "TRACKING ONLY", + ] for i, constraint in enumerate(constraints): - with Scenario(f"I create quota for {constraint.lower()}", requirements=[ + with Scenario( + f"I create quota for {constraint.lower()}", + requirements=[ RQ_SRS_006_RBAC_Quota_Create_Queries("1.0"), RQ_SRS_006_RBAC_Quota_Create_Errors("1.0"), RQ_SRS_006_RBAC_Quota_Create_ResultRows("1.0"), @@ -129,99 +175,149 @@ def feature(self, node="clickhouse1"): RQ_SRS_006_RBAC_Quota_Create_ReadBytes("1.0"), RQ_SRS_006_RBAC_Quota_Create_ExecutionTime("1.0"), RQ_SRS_006_RBAC_Quota_Create_NoLimits("1.0"), - RQ_SRS_006_RBAC_Quota_Create_TrackingOnly("1.0")]): - name = f'quota{15 + i}' + RQ_SRS_006_RBAC_Quota_Create_TrackingOnly("1.0"), + ], + ): + name = f"quota{15 + i}" with cleanup(name): with When(f"I create quota for {constraint.lower()}"): - node.query(f"CREATE QUOTA {name} FOR INTERVAL 1 DAY {constraint}{' 1024' if constraint.startswith('MAX') else ''}") + node.query( + f"CREATE QUOTA {name} FOR INTERVAL 1 DAY {constraint}{' 1024' if constraint.startswith('MAX') else ''}" + ) - with Scenario("I create quota for multiple constraints", requirements=[ + with Scenario( + "I create quota for multiple constraints", + requirements=[ RQ_SRS_006_RBAC_Quota_Create_Interval("1.0"), - RQ_SRS_006_RBAC_Quota_Create_Queries("1.0")]): + RQ_SRS_006_RBAC_Quota_Create_Queries("1.0"), + ], + ): with cleanup("quota23"): with When(f"I create quota for multiple constraints"): - node.query('CREATE QUOTA quota23 \ + node.query( + "CREATE QUOTA quota23 \ FOR INTERVAL 1 DAY NO LIMITS, \ FOR INTERVAL 2 DAY MAX QUERIES 124, \ - FOR INTERVAL 1 HOUR TRACKING ONLY') + FOR INTERVAL 1 HOUR TRACKING ONLY" + ) - with Scenario("I create quota assigned to one role", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")]): + with Scenario( + "I create quota assigned to one role", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")], + ): with cleanup("quota24"): with When("I create quota for role"): node.query("CREATE QUOTA quota24 TO role0") - with Scenario("I create quota to assign to role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")]): + with Scenario( + "I create quota to assign to role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")], + ): role = "role1" with Given(f"I drop {role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") with Then(f"I create a quota, assign to role {role}, which does not exist"): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"CREATE QUOTA 
quota0 TO {role}", exitcode=exitcode, message=message) + node.query( + f"CREATE QUOTA quota0 TO {role}", exitcode=exitcode, message=message + ) del role - with Scenario("I create quota to assign to all except role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")]): + with Scenario( + "I create quota to assign to all except role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")], + ): role = "role1" with Given(f"I drop {role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") - with Then(f"I create a quota, assign to all except role {role}, which does not exist"): + with Then( + f"I create a quota, assign to all except role {role}, which does not exist" + ): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"CREATE QUOTA quota0 TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + node.query( + f"CREATE QUOTA quota0 TO ALL EXCEPT {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I create quota assigned to no role", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Assignment_None("1.0")]): + with Scenario( + "I create quota assigned to no role", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Assignment_None("1.0")], + ): with When("I create quota for no role"): node.query("CREATE QUOTA quota24 TO NONE") - with Scenario("I create quota assigned to multiple roles", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")]): + with Scenario( + "I create quota assigned to multiple roles", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Assignment("1.0")], + ): with cleanup("quota25"): with When("I create quota for multiple roles"): node.query("CREATE QUOTA quota25 TO role0, user0") - with Scenario("I create quota assigned to all", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Assignment_All("1.0")]): + with Scenario( + "I create quota assigned to all", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Assignment_All("1.0")], + ): with cleanup("quota26"): with When("I create quota for all"): node.query("CREATE QUOTA quota26 TO ALL") - with Scenario("I create quota assigned to all except one role", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Assignment_Except("1.0")]): + with Scenario( + "I create quota assigned to all except one role", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Assignment_Except("1.0")], + ): with cleanup("quota27"): with When("I create quota for all except one role"): node.query("CREATE QUOTA quota27 TO ALL EXCEPT role0") - with Scenario("I create quota assigned to all except multiple roles", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Assignment_Except("1.0")]): + with Scenario( + "I create quota assigned to all except multiple roles", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Assignment_Except("1.0")], + ): with cleanup("quota28"): with When("I create quota for all except multiple roles"): node.query("CREATE QUOTA quota28 TO ALL EXCEPT role0, user0") - with Scenario("I create quota on cluster", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Cluster("1.0")]): + with Scenario( + "I create quota on cluster", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Cluster("1.0")], + ): try: with When("I run create quota command on cluster"): node.query("CREATE QUOTA quota29 ON CLUSTER sharded_cluster") with When("I run create quota command on cluster, keyed"): - node.query("CREATE QUOTA OR REPLACE quota29 ON CLUSTER sharded_cluster KEYED BY 'none'") + node.query( + "CREATE QUOTA OR REPLACE quota29 ON CLUSTER 
sharded_cluster KEYED BY 'none'" + ) with When("I run create quota command on cluster, interval"): - node.query("CREATE QUOTA OR REPLACE quota29 ON CLUSTER sharded_cluster FOR INTERVAL 1 DAY TRACKING ONLY") + node.query( + "CREATE QUOTA OR REPLACE quota29 ON CLUSTER sharded_cluster FOR INTERVAL 1 DAY TRACKING ONLY" + ) with When("I run create quota command on cluster, assign"): - node.query("CREATE QUOTA OR REPLACE quota29 ON CLUSTER sharded_cluster TO ALL") + node.query( + "CREATE QUOTA OR REPLACE quota29 ON CLUSTER sharded_cluster TO ALL" + ) finally: with Finally("I drop the quota from cluster"): - node.query("DROP QUOTA IF EXISTS quota29 ON CLUSTER sharded_cluster") + node.query( + "DROP QUOTA IF EXISTS quota29 ON CLUSTER sharded_cluster" + ) - with Scenario("I create quota on nonexistent cluster, throws exception", requirements=[ - RQ_SRS_006_RBAC_Quota_Create_Cluster("1.0")]): + with Scenario( + "I create quota on nonexistent cluster, throws exception", + requirements=[RQ_SRS_006_RBAC_Quota_Create_Cluster("1.0")], + ): with When("I run create quota on a cluster"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("CREATE QUOTA quota0 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + node.query( + "CREATE QUOTA quota0 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop all the users and roles"): node.query(f"DROP USER IF EXISTS user0") - node.query(f"DROP ROLE IF EXISTS role0") \ No newline at end of file + node.query(f"DROP ROLE IF EXISTS role0") diff --git a/tests/testflows/rbac/tests/syntax/create_role.py b/tests/testflows/rbac/tests/syntax/create_role.py index 1cb10077570..993cdf822a5 100755 --- a/tests/testflows/rbac/tests/syntax/create_role.py +++ b/tests/testflows/rbac/tests/syntax/create_role.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("create role") @Args(format_description=False) @@ -30,16 +31,20 @@ def feature(self, node="clickhouse1"): def create_role(role): with Given(f"I ensure I do have role {role}"): - node.query(f"CREATE ROLE OR REPLACE {role}") + node.query(f"CREATE ROLE OR REPLACE {role}") - with Scenario("I create role with no options", requirements=[ - RQ_SRS_006_RBAC_Role_Create("1.0")]): + with Scenario( + "I create role with no options", + requirements=[RQ_SRS_006_RBAC_Role_Create("1.0")], + ): with cleanup("role0"): with When("I create role"): node.query("CREATE ROLE role0") - with Scenario("I create role that already exists, throws exception", requirements=[ - RQ_SRS_006_RBAC_Role_Create("1.0")]): + with Scenario( + "I create role that already exists, throws exception", + requirements=[RQ_SRS_006_RBAC_Role_Create("1.0")], + ): role = "role0" with cleanup(role): with Given(f"I have role {role}"): @@ -49,16 +54,20 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE ROLE {role}", exitcode=exitcode, message=message) del role - with Scenario("I create role if not exists, role does not exist", requirements=[ - RQ_SRS_006_RBAC_Role_Create_IfNotExists("1.0")]): + with Scenario( + "I create role if not exists, role does not exist", + requirements=[RQ_SRS_006_RBAC_Role_Create_IfNotExists("1.0")], + ): role = "role1" with cleanup(role): with When(f"I create role {role} with if not exists"): node.query(f"CREATE ROLE IF NOT EXISTS {role}") del role - with Scenario("I create role if not exists, role does exist", requirements=[ - RQ_SRS_006_RBAC_Role_Create_IfNotExists("1.0")]): + with 
Scenario( + "I create role if not exists, role does exist", + requirements=[RQ_SRS_006_RBAC_Role_Create_IfNotExists("1.0")], + ): role = "role1" with cleanup(role): create_role(role) @@ -66,16 +75,20 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE ROLE IF NOT EXISTS {role}") del role - with Scenario("I create role or replace, role does not exist", requirements=[ - RQ_SRS_006_RBAC_Role_Create_Replace("1.0")]): + with Scenario( + "I create role or replace, role does not exist", + requirements=[RQ_SRS_006_RBAC_Role_Create_Replace("1.0")], + ): role = "role2" with cleanup(role): with When(f"I create role {role} with or replace"): node.query(f"CREATE ROLE OR REPLACE {role}") del role - with Scenario("I create role or replace, role does exist", requirements=[ - RQ_SRS_006_RBAC_Role_Create_Replace("1.0")]): + with Scenario( + "I create role or replace, role does exist", + requirements=[RQ_SRS_006_RBAC_Role_Create_Replace("1.0")], + ): role = "role2" with cleanup(role): create_role(role) @@ -83,42 +96,67 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE ROLE OR REPLACE {role}") del role - with Scenario("I create role on cluster", requirements=[ - RQ_SRS_006_RBAC_Role_Create("1.0")]): + with Scenario( + "I create role on cluster", requirements=[RQ_SRS_006_RBAC_Role_Create("1.0")] + ): try: with When("I have a role on a cluster"): node.query("CREATE ROLE role1 ON CLUSTER sharded_cluster") with And("I run create role or replace on a cluster"): node.query("CREATE ROLE OR REPLACE role1 ON CLUSTER sharded_cluster") with And("I create role with settings on a cluster"): - node.query("CREATE ROLE role2 ON CLUSTER sharded_cluster SETTINGS max_memory_usage=10000000 READONLY") + node.query( + "CREATE ROLE role2 ON CLUSTER sharded_cluster SETTINGS max_memory_usage=10000000 READONLY" + ) finally: with Finally("I drop the role"): node.query("DROP ROLE IF EXISTS role1,role2 ON CLUSTER sharded_cluster") - with Scenario("I create role on nonexistent cluster, throws exception", requirements=[ - RQ_SRS_006_RBAC_Role_Create("1.0")]): + with Scenario( + "I create role on nonexistent cluster, throws exception", + requirements=[RQ_SRS_006_RBAC_Role_Create("1.0")], + ): with When("I run create role on a cluster"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("CREATE ROLE role1 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + node.query( + "CREATE ROLE role1 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) - with Scenario("I create role with settings profile", requirements=[ - RQ_SRS_006_RBAC_Role_Create_Settings("1.0")]): + with Scenario( + "I create role with settings profile", + requirements=[RQ_SRS_006_RBAC_Role_Create_Settings("1.0")], + ): with cleanup("role3"): with When("I create role with settings profile"): - node.query("CREATE ROLE role3 SETTINGS PROFILE default, max_memory_usage=10000000 WRITABLE") + node.query( + "CREATE ROLE role3 SETTINGS PROFILE default, max_memory_usage=10000000 WRITABLE" + ) - with Scenario("I create role settings profile, fake profile, throws exception", requirements=[ - RQ_SRS_006_RBAC_Role_Create_Settings("1.0")]): + with Scenario( + "I create role settings profile, fake profile, throws exception", + requirements=[RQ_SRS_006_RBAC_Role_Create_Settings("1.0")], + ): with cleanup("role4a"): with Given("I ensure profile profile0 does not exist"): node.query("DROP SETTINGS PROFILE IF EXISTS profile0") with When("I create role with settings profile that does not exist"): - exitcode, message = 
errors.settings_profile_not_found_in_disk("profile0") - node.query("CREATE ROLE role4a SETTINGS PROFILE profile0", exitcode=exitcode, message=message) + exitcode, message = errors.settings_profile_not_found_in_disk( + "profile0" + ) + node.query( + "CREATE ROLE role4a SETTINGS PROFILE profile0", + exitcode=exitcode, + message=message, + ) - with Scenario("I create role with settings without profile", requirements=[ - RQ_SRS_006_RBAC_Role_Create_Settings("1.0")]): + with Scenario( + "I create role with settings without profile", + requirements=[RQ_SRS_006_RBAC_Role_Create_Settings("1.0")], + ): with cleanup("role4"): with When("I create role with settings without profile"): - node.query("CREATE ROLE role4 SETTINGS max_memory_usage=10000000 READONLY") + node.query( + "CREATE ROLE role4 SETTINGS max_memory_usage=10000000 READONLY" + ) diff --git a/tests/testflows/rbac/tests/syntax/create_row_policy.py b/tests/testflows/rbac/tests/syntax/create_row_policy.py index 8bf83579dd5..cbc3b02a2e9 100755 --- a/tests/testflows/rbac/tests/syntax/create_row_policy.py +++ b/tests/testflows/rbac/tests/syntax/create_row_policy.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("create row policy") @Args(format_description=False) @@ -33,7 +34,7 @@ def feature(self, node="clickhouse1"): def create_policy(policy, on="default.foo"): with Given(f"I ensure I do have policy {policy} on {on}"): - node.query(f"CREATE ROW POLICY OR REPLACE {policy} ON {on}") + node.query(f"CREATE ROW POLICY OR REPLACE {policy} ON {on}") try: with Given("I have a table and some roles"): @@ -41,58 +42,94 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE ROLE role0") node.query(f"CREATE ROLE role1") - with Scenario("I create row policy with no options", requirements=[ + with Scenario( + "I create row policy with no options", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy0"): with When("I create row policy"): node.query("CREATE ROW POLICY policy0 ON default.foo") - with Scenario("I create row policy using short syntax with no options", requirements=[ + with Scenario( + "I create row policy using short syntax with no options", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy1"): with When("I create row policy short form"): node.query("CREATE POLICY policy1 ON default.foo") - with Scenario("I create row policy that already exists, throws exception", requirements=[ + with Scenario( + "I create row policy that already exists, throws exception", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): policy = "policy0" with cleanup(policy): create_policy(policy) with When(f"I create row policy {policy}"): - exitcode, message = errors.cannot_insert_row_policy(name=f"{policy} ON default.foo") - node.query(f"CREATE ROW POLICY {policy} ON default.foo", exitcode=exitcode, message=message) + exitcode, message = errors.cannot_insert_row_policy( + name=f"{policy} ON default.foo" + ) + node.query( + f"CREATE ROW POLICY {policy} ON default.foo", + exitcode=exitcode, + message=message, + ) del policy - with Scenario("I create row policy if not exists, policy does not exist", requirements=[ 
+ with Scenario( + "I create row policy if not exists, policy does not exist", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy2"): with When("I create row policy with if not exists"): node.query("CREATE ROW POLICY IF NOT EXISTS policy2 ON default.foo") - with Scenario("I create row policy if not exists, policy does exist", requirements=[ + with Scenario( + "I create row policy if not exists, policy does exist", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_IfNotExists("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): policy = "policy2" with cleanup(policy): create_policy(policy) with When(f"I create row policy {policy} with if not exists"): - node.query(f"CREATE ROW POLICY IF NOT EXISTS {policy} ON default.foo") + node.query( + f"CREATE ROW POLICY IF NOT EXISTS {policy} ON default.foo" + ) del policy - with Scenario("I create row policy or replace, policy does not exist", requirements=[ + with Scenario( + "I create row policy or replace, policy does not exist", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_Replace("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy3"): with When("I create row policy with or replace"): node.query("CREATE ROW POLICY OR REPLACE policy3 ON default.foo") - with Scenario("I create row policy or replace, policy does exist", requirements=[ + with Scenario( + "I create row policy or replace, policy does exist", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_Replace("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): policy = "policy3" with cleanup(policy): create_policy(policy) @@ -100,126 +137,216 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE ROW POLICY OR REPLACE {policy} ON default.foo") del policy - with Scenario("I create row policy as permissive", requirements=[ + with Scenario( + "I create row policy as permissive", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_Access_Permissive("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy4"): with When("I create row policy as permissive"): node.query("CREATE ROW POLICY policy4 ON default.foo AS PERMISSIVE") - with Scenario("I create row policy as restrictive", requirements=[ + with Scenario( + "I create row policy as restrictive", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_Access_Restrictive("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy5"): with When("I create row policy as restrictive"): - node.query("CREATE ROW POLICY policy5 ON default.foo AS RESTRICTIVE") + node.query( + "CREATE ROW POLICY policy5 ON default.foo AS RESTRICTIVE" + ) - with Scenario("I create row policy for select", requirements=[ + with Scenario( + "I create row policy for select", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_ForSelect("1.0"), RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_Condition("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_Condition("1.0"), + ], + ): with cleanup("policy6"): with When("I create row policy with for select"): - node.query("CREATE ROW POLICY policy6 ON default.foo FOR SELECT USING x > 10") + node.query( + "CREATE ROW POLICY policy6 
ON default.foo FOR SELECT USING x > 10" + ) - with Scenario("I create row policy using condition", requirements=[ + with Scenario( + "I create row policy using condition", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_Condition("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy6"): with When("I create row policy with condition"): node.query("CREATE ROW POLICY policy6 ON default.foo USING x > 10") - with Scenario("I create row policy assigned to one role", requirements=[ + with Scenario( + "I create row policy assigned to one role", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy7"): with When("I create row policy for one role"): node.query("CREATE ROW POLICY policy7 ON default.foo TO role0") - with Scenario("I create row policy to assign to role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0")]): + with Scenario( + "I create row policy to assign to role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0")], + ): role = "role2" with cleanup("policy8a"): with Given(f"I drop {role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") - with Then(f"I create a row policy, assign to role {role}, which does not exist"): + with Then( + f"I create a row policy, assign to role {role}, which does not exist" + ): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"CREATE ROW POLICY policy8a ON default.foo TO {role}", exitcode=exitcode, message=message) + node.query( + f"CREATE ROW POLICY policy8a ON default.foo TO {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I create row policy to assign to all excpet role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0")]): + with Scenario( + "I create row policy to assign to all excpet role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0")], + ): role = "role2" with cleanup("policy8a"): with Given(f"I drop {role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") - with Then(f"I create a row policy, assign to all except role {role}, which does not exist"): + with Then( + f"I create a row policy, assign to all except role {role}, which does not exist" + ): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"CREATE ROW POLICY policy8a ON default.foo TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + node.query( + f"CREATE ROW POLICY policy8a ON default.foo TO ALL EXCEPT {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I create row policy assigned to multiple roles", requirements=[ + with Scenario( + "I create row policy assigned to multiple roles", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_Assignment("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy8b"): with When("I create row policy for multiple roles"): - node.query("CREATE ROW POLICY policy8b ON default.foo TO role0, role1") + node.query( + "CREATE ROW POLICY policy8b ON default.foo TO role0, role1" + ) - with Scenario("I create row policy assigned to all", requirements=[ + with Scenario( + "I create row policy assigned to all", + 
requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_All("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy9"): with When("I create row policy for all"): node.query("CREATE ROW POLICY policy9 ON default.foo TO ALL") - with Scenario("I create row policy assigned to all except one role", requirements=[ + with Scenario( + "I create row policy assigned to all except one role", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy10"): with When("I create row policy for all except one"): - node.query("CREATE ROW POLICY policy10 ON default.foo TO ALL EXCEPT role0") + node.query( + "CREATE ROW POLICY policy10 ON default.foo TO ALL EXCEPT role0" + ) - with Scenario("I create row policy assigned to all except multiple roles", requirements=[ + with Scenario( + "I create row policy assigned to all except multiple roles", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_AllExcept("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy11"): with When("I create row policy for all except multiple roles"): - node.query("CREATE ROW POLICY policy11 ON default.foo TO ALL EXCEPT role0, role1") + node.query( + "CREATE ROW POLICY policy11 ON default.foo TO ALL EXCEPT role0, role1" + ) - with Scenario("I create row policy assigned to none", requirements=[ + with Scenario( + "I create row policy assigned to none", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_Assignment_None("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with cleanup("policy11"): with When("I create row policy for none"): node.query("CREATE ROW POLICY policy11 ON default.foo TO NONE") - with Scenario("I create row policy on cluster", requirements=[ + with Scenario( + "I create row policy on cluster", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): try: with When("I run create row policy command on cluster"): - node.query("CREATE ROW POLICY policy12 ON CLUSTER sharded_cluster ON default.foo") + node.query( + "CREATE ROW POLICY policy12 ON CLUSTER sharded_cluster ON default.foo" + ) finally: with Finally("I drop the row policy from cluster"): - node.query("DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster") + node.query( + "DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster" + ) - with Scenario("I create row policy on fake cluster, throws exception", requirements=[ + with Scenario( + "I create row policy on fake cluster, throws exception", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): with When("I run create row policy command"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("CREATE ROW POLICY policy13 ON CLUSTER fake_cluster ON default.foo", exitcode=exitcode, message=message) + node.query( + "CREATE ROW POLICY policy13 ON CLUSTER fake_cluster ON default.foo", + exitcode=exitcode, + message=message, + ) - with Scenario("I create row policy on cluster after table", requirements=[ + with Scenario( + "I create row policy on cluster after 
table", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Create_OnCluster("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Create_On("1.0"), + ], + ): try: with When("I run create row policy command on cluster"): - node.query("CREATE ROW POLICY policy12 ON default.foo ON CLUSTER sharded_cluster") + node.query( + "CREATE ROW POLICY policy12 ON default.foo ON CLUSTER sharded_cluster" + ) finally: with Finally("I drop the row policy from cluster"): - node.query("DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster") + node.query( + "DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster" + ) finally: with Finally("I drop the table and the roles"): node.query(f"DROP TABLE IF EXISTS default.foo") - node.query(f"DROP ROLE IF EXISTS role0, role1") \ No newline at end of file + node.query(f"DROP ROLE IF EXISTS role0, role1") diff --git a/tests/testflows/rbac/tests/syntax/create_settings_profile.py b/tests/testflows/rbac/tests/syntax/create_settings_profile.py index 8976ce6843a..dc04ea0eb4c 100755 --- a/tests/testflows/rbac/tests/syntax/create_settings_profile.py +++ b/tests/testflows/rbac/tests/syntax/create_settings_profile.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("create settings profile") @Args(format_description=False) @@ -32,37 +33,51 @@ def feature(self, node="clickhouse1"): def create_profile(profile): with Given(f"I ensure I do have profile {profile}"): - node.query(f"CREATE SETTINGS PROFILE OR REPLACE {profile}") + node.query(f"CREATE SETTINGS PROFILE OR REPLACE {profile}") try: with Given("I have a user and a role"): node.query(f"CREATE USER user0") node.query(f"CREATE ROLE role0") - with Scenario("I create settings profile with no options", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create("1.0")]): + with Scenario( + "I create settings profile with no options", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create("1.0")], + ): with cleanup("profile0"): with When("I create settings profile"): node.query("CREATE SETTINGS PROFILE profile0") - with Scenario("I create settings profile that already exists, throws exception", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create("1.0")]): + with Scenario( + "I create settings profile that already exists, throws exception", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create("1.0")], + ): profile = "profile0" with cleanup(profile): create_profile(profile) with When(f"I create settings profile {profile} that already exists"): - exitcode, message = errors.cannot_insert_settings_profile(name=profile) - node.query(f"CREATE SETTINGS PROFILE {profile}", exitcode=exitcode, message=message) + exitcode, message = errors.cannot_insert_settings_profile( + name=profile + ) + node.query( + f"CREATE SETTINGS PROFILE {profile}", + exitcode=exitcode, + message=message, + ) del profile - with Scenario("I create settings profile if not exists, profile does not exist", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_IfNotExists("1.0")]): + with Scenario( + "I create settings profile if not exists, profile does not exist", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_IfNotExists("1.0")], + ): with cleanup("profile1"): with When("I create settings profile with if not exists"): node.query("CREATE SETTINGS PROFILE IF NOT EXISTS profile1") - with Scenario("I create settings profile if not exists, profile does exist", requirements=[ - 
RQ_SRS_006_RBAC_SettingsProfile_Create_IfNotExists("1.0")]): + with Scenario( + "I create settings profile if not exists, profile does exist", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_IfNotExists("1.0")], + ): profile = "profile1" with cleanup(profile): create_profile(profile) @@ -70,184 +85,326 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE SETTINGS PROFILE IF NOT EXISTS {profile}") del profile - with Scenario("I create settings profile or replace, profile does not exist", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Replace("1.0")]): + with Scenario( + "I create settings profile or replace, profile does not exist", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_Replace("1.0")], + ): with cleanup("profile2"): with When("I create settings policy with or replace"): node.query("CREATE SETTINGS PROFILE OR REPLACE profile2") - with Scenario("I create settings profile or replace, profile does exist", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Replace("1.0")]): + with Scenario( + "I create settings profile or replace, profile does exist", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_Replace("1.0")], + ): with cleanup("profile2"): create_profile("profile2") with When("I create settings policy with or replace"): node.query("CREATE SETTINGS PROFILE OR REPLACE profile2") - with Scenario("I create settings profile short form", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create("1.0")]): + with Scenario( + "I create settings profile short form", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create("1.0")], + ): with cleanup("profile3"): with When("I create settings profile short form"): node.query("CREATE PROFILE profile3") - with Scenario("I create settings profile with a setting value", requirements=[ + with Scenario( + "I create settings profile with a setting value", + requirements=[ RQ_SRS_006_RBAC_SettingsProfile_Create_Variables("1.0"), - RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Value("1.0")]): + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Value("1.0"), + ], + ): with cleanup("profile4"): with When("I create settings profile with settings"): - node.query("CREATE SETTINGS PROFILE profile4 SETTINGS max_memory_usage = 100000001") + node.query( + "CREATE SETTINGS PROFILE profile4 SETTINGS max_memory_usage = 100000001" + ) - with Scenario("I create settings profile with a setting value, does not exist, throws exception", requirements=[ + with Scenario( + "I create settings profile with a setting value, does not exist, throws exception", + requirements=[ RQ_SRS_006_RBAC_SettingsProfile_Create_Variables("1.0"), - RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Value("1.0")]): + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Value("1.0"), + ], + ): with When("I create settings profile using settings and nonexistent value"): exitcode, message = errors.unknown_setting("fake_setting") - node.query("CREATE SETTINGS PROFILE profile0 SETTINGS fake_setting = 100000001", exitcode=exitcode, message=message) + node.query( + "CREATE SETTINGS PROFILE profile0 SETTINGS fake_setting = 100000001", + exitcode=exitcode, + message=message, + ) - with Scenario("I create settings profile with a min setting value", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with Scenario( + "I create settings profile with a min setting value", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0") + ], + ): with cleanup("profile5"), cleanup("profile6"): - with When("I 
create settings profile with min setting with and without equals"): - node.query("CREATE SETTINGS PROFILE profile5 SETTINGS max_memory_usage MIN 100000001") - node.query("CREATE SETTINGS PROFILE profile6 SETTINGS max_memory_usage MIN = 100000001") + with When( + "I create settings profile with min setting with and without equals" + ): + node.query( + "CREATE SETTINGS PROFILE profile5 SETTINGS max_memory_usage MIN 100000001" + ) + node.query( + "CREATE SETTINGS PROFILE profile6 SETTINGS max_memory_usage MIN = 100000001" + ) - with Scenario("I create settings profile with a max setting value", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with Scenario( + "I create settings profile with a max setting value", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0") + ], + ): with cleanup("profile7"), cleanup("profile8"): - with When("I create settings profile with max setting with and without equals"): - node.query("CREATE SETTINGS PROFILE profile7 SETTINGS max_memory_usage MAX 100000001") - node.query("CREATE SETTINGS PROFILE profile8 SETTINGS max_memory_usage MAX = 100000001") + with When( + "I create settings profile with max setting with and without equals" + ): + node.query( + "CREATE SETTINGS PROFILE profile7 SETTINGS max_memory_usage MAX 100000001" + ) + node.query( + "CREATE SETTINGS PROFILE profile8 SETTINGS max_memory_usage MAX = 100000001" + ) - with Scenario("I create settings profile with min and max setting values", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with Scenario( + "I create settings profile with min and max setting values", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0") + ], + ): with cleanup("profile9"): with When("I create settings profile with min and max setting"): - node.query("CREATE SETTINGS PROFILE profile9 SETTINGS max_memory_usage MIN 100000001 MAX 200000001") + node.query( + "CREATE SETTINGS PROFILE profile9 SETTINGS max_memory_usage MIN 100000001 MAX 200000001" + ) - with Scenario("I create settings profile with a readonly setting", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with Scenario( + "I create settings profile with a readonly setting", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0") + ], + ): with cleanup("profile10"): with When("I create settings profile with readonly"): - node.query("CREATE SETTINGS PROFILE profile10 SETTINGS max_memory_usage READONLY") + node.query( + "CREATE SETTINGS PROFILE profile10 SETTINGS max_memory_usage READONLY" + ) - with Scenario("I create settings profile with a writable setting", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with Scenario( + "I create settings profile with a writable setting", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0") + ], + ): with cleanup("profile21"): with When("I create settings profile with writable"): - node.query("CREATE SETTINGS PROFILE profile21 SETTINGS max_memory_usage WRITABLE") + node.query( + "CREATE SETTINGS PROFILE profile21 SETTINGS max_memory_usage WRITABLE" + ) - with Scenario("I create settings profile with inherited settings", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit("1.0")]): + with Scenario( + "I create settings profile with inherited settings", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit("1.0")], + ): 
with cleanup("profile11"): with When("I create settings profile with inherit"): - node.query("CREATE SETTINGS PROFILE profile11 SETTINGS INHERIT 'default'") + node.query( + "CREATE SETTINGS PROFILE profile11 SETTINGS INHERIT 'default'" + ) - with Scenario("I create settings profile with inherit/from profile, fake profile, throws exception", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit("1.0")]): + with Scenario( + "I create settings profile with inherit/from profile, fake profile, throws exception", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit("1.0")], + ): profile = "profile3" with Given(f"I ensure that profile {profile} does not exist"): node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") - sources = {"INHERIT","PROFILE"} + sources = {"INHERIT", "PROFILE"} for source in sources: - with When(f"I create settings profile {source} from nonexistant parent"): - exitcode, message = errors.settings_profile_not_found_in_disk(profile) - node.query(f"CREATE PROFILE profile0 SETTINGS {source} {profile}", exitcode=exitcode, message=message) + with When( + f"I create settings profile {source} from nonexistant parent" + ): + exitcode, message = errors.settings_profile_not_found_in_disk( + profile + ) + node.query( + f"CREATE PROFILE profile0 SETTINGS {source} {profile}", + exitcode=exitcode, + message=message, + ) del profile - with Scenario("I create settings profile with inherited settings other form", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit("1.0")]): + with Scenario( + "I create settings profile with inherited settings other form", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_Inherit("1.0")], + ): with cleanup("profile12"): with When("I create settings profile with inherit short form"): node.query("CREATE PROFILE profile12 SETTINGS PROFILE 'default'") - with Scenario("I create settings profile with multiple settings", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with Scenario( + "I create settings profile with multiple settings", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0") + ], + ): with cleanup("profile13"): with When("I create settings profile with multiple settings"): - node.query("CREATE SETTINGS PROFILE profile13" + node.query( + "CREATE SETTINGS PROFILE profile13" " SETTINGS max_memory_usage = 100000001" - " SETTINGS max_memory_usage_for_user = 100000001") + " SETTINGS max_memory_usage_for_user = 100000001" + ) - with Scenario("I create settings profile with multiple settings short form", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0")]): + with Scenario( + "I create settings profile with multiple settings short form", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Variables_Constraints("1.0") + ], + ): with cleanup("profile14"): - with When("I create settings profile with multiple settings short form"): - node.query("CREATE SETTINGS PROFILE profile14" + with When( + "I create settings profile with multiple settings short form" + ): + node.query( + "CREATE SETTINGS PROFILE profile14" " SETTINGS max_memory_usage = 100000001," - " max_memory_usage_for_user = 100000001") + " max_memory_usage_for_user = 100000001" + ) - with Scenario("I create settings profile assigned to one role", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")]): + with Scenario( + "I create settings profile assigned to one role", + 
requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")], + ): with cleanup("profile15"): with When("I create settings profile for a role"): node.query("CREATE SETTINGS PROFILE profile15 TO role0") - with Scenario("I create settings profile to assign to role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")]): + with Scenario( + "I create settings profile to assign to role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")], + ): role = "role1" with Given(f"I drop {role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") - with Then(f"I create a settings profile, assign to role {role}, which does not exist"): + with Then( + f"I create a settings profile, assign to role {role}, which does not exist" + ): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"CREATE SETTINGS PROFILE profile0 TO {role}", exitcode=exitcode, message=message) + node.query( + f"CREATE SETTINGS PROFILE profile0 TO {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I create settings profile to assign to all except role that does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")]): + with Scenario( + "I create settings profile to assign to all except role that does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")], + ): role = "role1" with Given(f"I drop {role} if it exists"): node.query(f"DROP ROLE IF EXISTS {role}") - with Then(f"I create a settings profile, assign to all except role {role}, which does not exist"): + with Then( + f"I create a settings profile, assign to all except role {role}, which does not exist" + ): exitcode, message = errors.role_not_found_in_disk(name=role) - node.query(f"CREATE SETTINGS PROFILE profile0 TO ALL EXCEPT {role}", exitcode=exitcode, message=message) + node.query( + f"CREATE SETTINGS PROFILE profile0 TO ALL EXCEPT {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I create settings profile assigned to multiple roles", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")]): + with Scenario( + "I create settings profile assigned to multiple roles", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment("1.0")], + ): with cleanup("profile16"): with When("I create settings profile for multiple roles"): node.query("CREATE SETTINGS PROFILE profile16 TO role0, user0") - with Scenario("I create settings profile assigned to all", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_All("1.0")]): + with Scenario( + "I create settings profile assigned to all", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_All("1.0")], + ): with cleanup("profile17"): with When("I create settings profile for all"): node.query("CREATE SETTINGS PROFILE profile17 TO ALL") - with Scenario("I create settings profile assigned to all except one role",requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_AllExcept("1.0")]): + with Scenario( + "I create settings profile assigned to all except one role", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_AllExcept("1.0") + ], + ): with cleanup("profile18"): with When("I create settings profile for all except one role"): node.query("CREATE SETTINGS PROFILE profile18 TO ALL EXCEPT role0") - with Scenario("I create settings profile assigned to all 
except multiple roles", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_AllExcept("1.0")]): + with Scenario( + "I create settings profile assigned to all except multiple roles", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_AllExcept("1.0") + ], + ): with cleanup("profile19"): with When("I create settings profile for all except multiple roles"): - node.query("CREATE SETTINGS PROFILE profile19 TO ALL EXCEPT role0, user0") + node.query( + "CREATE SETTINGS PROFILE profile19 TO ALL EXCEPT role0, user0" + ) - with Scenario("I create settings profile assigned to none", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_None("1.0")]): + with Scenario( + "I create settings profile assigned to none", + requirements=[ + RQ_SRS_006_RBAC_SettingsProfile_Create_Assignment_None("1.0") + ], + ): with cleanup("profile22"): with When("I create settings profile for none"): node.query("CREATE SETTINGS PROFILE profile22 TO NONE") - with Scenario("I create settings profile on cluster", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_OnCluster("1.0")]): + with Scenario( + "I create settings profile on cluster", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_OnCluster("1.0")], + ): try: with When("I run create settings profile command"): - node.query("CREATE SETTINGS PROFILE profile20 ON CLUSTER sharded_cluster") - node.query("CREATE SETTINGS PROFILE OR REPLACE profile20 ON CLUSTER sharded_cluster SETTINGS max_memory_usage = 100000001") - node.query("CREATE SETTINGS PROFILE OR REPLACE profile20 ON CLUSTER sharded_cluster SETTINGS INHERIT 'default'") - node.query("CREATE SETTINGS PROFILE OR REPLACE profile20 ON CLUSTER sharded_cluster TO ALL") + node.query( + "CREATE SETTINGS PROFILE profile20 ON CLUSTER sharded_cluster" + ) + node.query( + "CREATE SETTINGS PROFILE OR REPLACE profile20 ON CLUSTER sharded_cluster SETTINGS max_memory_usage = 100000001" + ) + node.query( + "CREATE SETTINGS PROFILE OR REPLACE profile20 ON CLUSTER sharded_cluster SETTINGS INHERIT 'default'" + ) + node.query( + "CREATE SETTINGS PROFILE OR REPLACE profile20 ON CLUSTER sharded_cluster TO ALL" + ) finally: with Finally("I drop the settings profile"): - node.query("DROP SETTINGS PROFILE IF EXISTS profile20 ON CLUSTER sharded_cluster") + node.query( + "DROP SETTINGS PROFILE IF EXISTS profile20 ON CLUSTER sharded_cluster" + ) - with Scenario("I create settings profile on fake cluster, throws exception", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Create_OnCluster("1.0")]): + with Scenario( + "I create settings profile on fake cluster, throws exception", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Create_OnCluster("1.0")], + ): with When("I run create settings profile command"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("CREATE SETTINGS PROFILE profile1 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + node.query( + "CREATE SETTINGS PROFILE profile1 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop all the users and roles"): node.query(f"DROP USER IF EXISTS user0") diff --git a/tests/testflows/rbac/tests/syntax/create_user.py b/tests/testflows/rbac/tests/syntax/create_user.py index 326446e4620..20916e2a171 100755 --- a/tests/testflows/rbac/tests/syntax/create_user.py +++ b/tests/testflows/rbac/tests/syntax/create_user.py @@ -6,6 +6,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("create user") 
@Args(format_description=False) @@ -34,37 +35,51 @@ def feature(self, node="clickhouse1"): def create_user(user): with Given(f"I ensure I do have user {user}"): - node.query(f"CREATE USER OR REPLACE {user}") + node.query(f"CREATE USER OR REPLACE {user}") - with Scenario("I create user with no options", requirements=[ + with Scenario( + "I create user with no options", + requirements=[ RQ_SRS_006_RBAC_User_Create("1.0"), - RQ_SRS_006_RBAC_User_Create_Host_Default("1.0")]): + RQ_SRS_006_RBAC_User_Create_Host_Default("1.0"), + ], + ): with cleanup("user0"): with When("I create a user with no options"): node.query("CREATE USER user0") - with Scenario("I create user that already exists, throws exception", requirements=[ + with Scenario( + "I create user that already exists, throws exception", + requirements=[ RQ_SRS_006_RBAC_User_Create("1.0"), - RQ_SRS_006_RBAC_User_Create_Host_Default("1.0")]): + RQ_SRS_006_RBAC_User_Create_Host_Default("1.0"), + ], + ): user = "user0" with cleanup(user): create_user(user) - with When(f"I create a user {user} that already exists without IF EXISTS, throws exception"): + with When( + f"I create a user {user} that already exists without IF EXISTS, throws exception" + ): exitcode, message = errors.cannot_insert_user(name=user) node.query(f"CREATE USER {user}", exitcode=exitcode, message=message) del user - with Scenario("I create user with if not exists, user does not exist", requirements=[ - RQ_SRS_006_RBAC_User_Create_IfNotExists("1.0")]): + with Scenario( + "I create user with if not exists, user does not exist", + requirements=[RQ_SRS_006_RBAC_User_Create_IfNotExists("1.0")], + ): user = "user0" with cleanup(user): with When(f"I create a user {user} with if not exists"): node.query(f"CREATE USER IF NOT EXISTS {user}") del user - #Bug exists, mark as xfail - with Scenario("I create user with if not exists, user does exist", requirements=[ - RQ_SRS_006_RBAC_User_Create_IfNotExists("1.0")]): + # Bug exists, mark as xfail + with Scenario( + "I create user with if not exists, user does exist", + requirements=[RQ_SRS_006_RBAC_User_Create_IfNotExists("1.0")], + ): user = "user0" with cleanup(user): create_user(user) @@ -72,16 +87,20 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE USER IF NOT EXISTS {user}") del user - with Scenario("I create user or replace, user does not exist", requirements=[ - RQ_SRS_006_RBAC_User_Create_Replace("1.0")]): + with Scenario( + "I create user or replace, user does not exist", + requirements=[RQ_SRS_006_RBAC_User_Create_Replace("1.0")], + ): user = "user0" with cleanup(user): with When(f"I create a user {user} with or replace"): node.query(f"CREATE USER OR REPLACE {user}") del user - with Scenario("I create user or replace, user does exist", requirements=[ - RQ_SRS_006_RBAC_User_Create_Replace("1.0")]): + with Scenario( + "I create user or replace, user does exist", + requirements=[RQ_SRS_006_RBAC_User_Create_Replace("1.0")], + ): user = "user0" with cleanup(user): create_user(user) @@ -89,106 +108,156 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE USER OR REPLACE {user}") del user - with Scenario("I create user with no password", requirements=[ - RQ_SRS_006_RBAC_User_Create_Password_NoPassword("1.0")]): + with Scenario( + "I create user with no password", + requirements=[RQ_SRS_006_RBAC_User_Create_Password_NoPassword("1.0")], + ): with cleanup("user1"): with When("I create a user with no password"): node.query("CREATE USER user1 IDENTIFIED WITH NO_PASSWORD") - with Scenario("I create user with plaintext 
password", requirements=[ - RQ_SRS_006_RBAC_User_Create_Password_PlainText("1.0")]): + with Scenario( + "I create user with plaintext password", + requirements=[RQ_SRS_006_RBAC_User_Create_Password_PlainText("1.0")], + ): with cleanup("user1"): with When("I create a user with plaintext password"): - node.query("CREATE USER user1 IDENTIFIED WITH PLAINTEXT_PASSWORD BY 'mypassword'") + node.query( + "CREATE USER user1 IDENTIFIED WITH PLAINTEXT_PASSWORD BY 'mypassword'" + ) - with Scenario("I create user with sha256 password", requirements=[ - RQ_SRS_006_RBAC_User_Create_Password_Sha256Password("1.0")]): + with Scenario( + "I create user with sha256 password", + requirements=[RQ_SRS_006_RBAC_User_Create_Password_Sha256Password("1.0")], + ): with cleanup("user2"): with When("I create a user with sha256 password"): password = hashlib.sha256("mypassword".encode("utf-8")).hexdigest() - node.query(f"CREATE USER user2 IDENTIFIED WITH SHA256_PASSWORD BY '{password}'") + node.query( + f"CREATE USER user2 IDENTIFIED WITH SHA256_PASSWORD BY '{password}'" + ) - with Scenario("I create user with sha256 password using IDENTIFIED BY", requirements=[ - RQ_SRS_006_RBAC_User_Create_Password_Sha256Password("1.0")]): + with Scenario( + "I create user with sha256 password using IDENTIFIED BY", + requirements=[RQ_SRS_006_RBAC_User_Create_Password_Sha256Password("1.0")], + ): with cleanup("user2"): with When("I create a user with sha256 password using short form"): password = hashlib.sha256("mypassword".encode("utf-8")).hexdigest() node.query(f"CREATE USER user2 IDENTIFIED BY '{password}'") - with Scenario("I create user with sha256_hash password", requirements=[ - RQ_SRS_006_RBAC_User_Create_Password_Sha256Hash("1.0")]): + with Scenario( + "I create user with sha256_hash password", + requirements=[RQ_SRS_006_RBAC_User_Create_Password_Sha256Hash("1.0")], + ): with cleanup("user3"): with When("I create a user with sha256_hash"): + def hash(password): return hashlib.sha256(password.encode("utf-8")).hexdigest() - password = hash(hash("mypassword")) - node.query(f"CREATE USER user3 IDENTIFIED WITH SHA256_HASH BY '{password}'") - with Scenario("I create user with double sha1 password", requirements=[ - RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Password("1.0")]): + password = hash(hash("mypassword")) + node.query( + f"CREATE USER user3 IDENTIFIED WITH SHA256_HASH BY '{password}'" + ) + + with Scenario( + "I create user with double sha1 password", + requirements=[RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Password("1.0")], + ): with cleanup("user3"): with When("I create a user with double_sha1_password"): - node.query(f"CREATE USER user3 IDENTIFIED WITH DOUBLE_SHA1_PASSWORD BY 'mypassword'") + node.query( + f"CREATE USER user3 IDENTIFIED WITH DOUBLE_SHA1_PASSWORD BY 'mypassword'" + ) - with Scenario("I create user with double sha1 hash", requirements=[ - RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Hash("1.0")]): + with Scenario( + "I create user with double sha1 hash", + requirements=[RQ_SRS_006_RBAC_User_Create_Password_DoubleSha1Hash("1.0")], + ): with cleanup("user3"): with When("I create a user with double_sha1_hash"): + def hash(password): return hashlib.sha1(password.encode("utf-8")).hexdigest() - password = hash(hash("mypassword")) - node.query(f"CREATE USER user3 IDENTIFIED WITH DOUBLE_SHA1_HASH BY '{password}'") - with Scenario("I create user with host name", requirements=[ - RQ_SRS_006_RBAC_User_Create_Host_Name("1.0")]): + password = hash(hash("mypassword")) + node.query( + f"CREATE USER user3 
IDENTIFIED WITH DOUBLE_SHA1_HASH BY '{password}'" + ) + + with Scenario( + "I create user with host name", + requirements=[RQ_SRS_006_RBAC_User_Create_Host_Name("1.0")], + ): with cleanup("user4"): with When("I create a user with host name"): - node.query("CREATE USER user4 HOST NAME 'localhost', NAME 'clickhouse.com'") + node.query( + "CREATE USER user4 HOST NAME 'localhost', NAME 'clickhouse.com'" + ) - with Scenario("I create user with host regexp", requirements=[ - RQ_SRS_006_RBAC_User_Create_Host_Regexp("1.0")]): + with Scenario( + "I create user with host regexp", + requirements=[RQ_SRS_006_RBAC_User_Create_Host_Regexp("1.0")], + ): with cleanup("user5"): with When("I create a user with host regexp"): - node.query("CREATE USER user5 HOST REGEXP 'lo.?*host', REGEXP 'lo*host'") + node.query( + "CREATE USER user5 HOST REGEXP 'lo.?*host', REGEXP 'lo*host'" + ) - with Scenario("I create user with host ip", requirements=[ - RQ_SRS_006_RBAC_User_Create_Host_IP("1.0")]): + with Scenario( + "I create user with host ip", + requirements=[RQ_SRS_006_RBAC_User_Create_Host_IP("1.0")], + ): with cleanup("user6"): with When("I create a user with host ip"): node.query("CREATE USER user6 HOST IP '127.0.0.1', IP '127.0.0.2'") - with Scenario("I create user with host like", requirements=[ - RQ_SRS_006_RBAC_User_Create_Host_Like("1.0")]): + with Scenario( + "I create user with host like", + requirements=[RQ_SRS_006_RBAC_User_Create_Host_Like("1.0")], + ): with cleanup("user7"): with When("I create a user with host like"): node.query("CREATE USER user7 HOST LIKE 'local%'") - with Scenario("I create user with host none", requirements=[ - RQ_SRS_006_RBAC_User_Create_Host_None("1.0")]): + with Scenario( + "I create user with host none", + requirements=[RQ_SRS_006_RBAC_User_Create_Host_None("1.0")], + ): with cleanup("user7"): with When("I create a user with host none"): node.query("CREATE USER user7 HOST NONE") - with Scenario("I create user with host local", requirements=[ - RQ_SRS_006_RBAC_User_Create_Host_Local("1.0")]): + with Scenario( + "I create user with host local", + requirements=[RQ_SRS_006_RBAC_User_Create_Host_Local("1.0")], + ): with cleanup("user7"): with When("I create a user with host local"): node.query("CREATE USER user7 HOST LOCAL") - with Scenario("I create user with host any", requirements=[ - RQ_SRS_006_RBAC_User_Create_Host_Any("1.0")]): + with Scenario( + "I create user with host any", + requirements=[RQ_SRS_006_RBAC_User_Create_Host_Any("1.0")], + ): with cleanup("user7"): with When("I create a user with host any"): node.query("CREATE USER user7 HOST ANY") - with Scenario("I create user with default role set to none", requirements=[ - RQ_SRS_006_RBAC_User_Create_DefaultRole_None("1.0")]): + with Scenario( + "I create user with default role set to none", + requirements=[RQ_SRS_006_RBAC_User_Create_DefaultRole_None("1.0")], + ): with cleanup("user8"): with When("I create a user with no default role"): node.query("CREATE USER user8 DEFAULT ROLE NONE") - with Scenario("I create user with default role", requirements=[ - RQ_SRS_006_RBAC_User_Create_DefaultRole("1.0")]): + with Scenario( + "I create user with default role", + requirements=[RQ_SRS_006_RBAC_User_Create_DefaultRole("1.0")], + ): with Given("I have a role"): node.query("CREATE ROLE default") with cleanup("user9"): @@ -197,66 +266,104 @@ def feature(self, node="clickhouse1"): with Finally("I drop the role"): node.query("DROP ROLE default") - with Scenario("I create user default role, role doesn't exist, throws exception", 
requirements=[ - RQ_SRS_006_RBAC_User_Create_DefaultRole("1.0")]): + with Scenario( + "I create user default role, role doesn't exist, throws exception", + requirements=[RQ_SRS_006_RBAC_User_Create_DefaultRole("1.0")], + ): with cleanup("user12"): role = "role0" with Given(f"I ensure that role {role} does not exist"): node.query(f"DROP ROLE IF EXISTS {role}") with When(f"I create user with default role {role}"): exitcode, message = errors.role_not_found_in_disk(role) - node.query(f"CREATE USER user12 DEFAULT ROLE {role}",exitcode=exitcode, message=message) + node.query( + f"CREATE USER user12 DEFAULT ROLE {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I create user default role, all except role doesn't exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_User_Create_DefaultRole("1.0")]): + with Scenario( + "I create user default role, all except role doesn't exist, throws exception", + requirements=[RQ_SRS_006_RBAC_User_Create_DefaultRole("1.0")], + ): with cleanup("user12"): role = "role0" with Given(f"I ensure that role {role} does not exist"): node.query(f"DROP ROLE IF EXISTS {role}") with When(f"I create user with default role {role}"): exitcode, message = errors.role_not_found_in_disk(role) - node.query(f"CREATE USER user12 DEFAULT ROLE ALL EXCEPT {role}",exitcode=exitcode, message=message) + node.query( + f"CREATE USER user12 DEFAULT ROLE ALL EXCEPT {role}", + exitcode=exitcode, + message=message, + ) del role - with Scenario("I create user with all roles set to default", requirements=[ - RQ_SRS_006_RBAC_User_Create_DefaultRole_All("1.0")]): + with Scenario( + "I create user with all roles set to default", + requirements=[RQ_SRS_006_RBAC_User_Create_DefaultRole_All("1.0")], + ): with cleanup("user10"): with When("I create a user with all roles as default"): node.query("CREATE USER user10 DEFAULT ROLE ALL") - with Scenario("I create user with settings profile", requirements=[ - RQ_SRS_006_RBAC_User_Create_Settings("1.0")]): + with Scenario( + "I create user with settings profile", + requirements=[RQ_SRS_006_RBAC_User_Create_Settings("1.0")], + ): with cleanup("user11"): with When("I create a user with a settings profile"): - node.query("CREATE USER user11 SETTINGS PROFILE default, max_memory_usage=10000000 READONLY") + node.query( + "CREATE USER user11 SETTINGS PROFILE default, max_memory_usage=10000000 READONLY" + ) - with Scenario("I create user settings profile, fake profile, throws exception", requirements=[ - RQ_SRS_006_RBAC_User_Create_Settings("1.0")]): + with Scenario( + "I create user settings profile, fake profile, throws exception", + requirements=[RQ_SRS_006_RBAC_User_Create_Settings("1.0")], + ): with cleanup("user18a"): profile = "profile0" with Given(f"I ensure that profile {profile} does not exist"): node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") - with When(f"I create user with Settings and set profile to fake profile {profile}"): + with When( + f"I create user with Settings and set profile to fake profile {profile}" + ): exitcode, message = errors.settings_profile_not_found_in_disk(profile) - node.query("CREATE USER user18a SETTINGS PROFILE profile0", exitcode=exitcode, message=message) + node.query( + "CREATE USER user18a SETTINGS PROFILE profile0", + exitcode=exitcode, + message=message, + ) del profile - with Scenario("I create user settings with a fake setting, throws exception", requirements=[ - RQ_SRS_006_RBAC_User_Create_Settings("1.0")]): + with Scenario( + "I create user settings with a fake setting, throws 
exception", + requirements=[RQ_SRS_006_RBAC_User_Create_Settings("1.0")], + ): with cleanup("user18b"): with When("I create settings profile using settings and nonexistent value"): exitcode, message = errors.unknown_setting("fake_setting") - node.query("CREATE USER user18b SETTINGS fake_setting = 100000001", exitcode=exitcode, message=message) + node.query( + "CREATE USER user18b SETTINGS fake_setting = 100000001", + exitcode=exitcode, + message=message, + ) - with Scenario("I create user with settings without profile", requirements=[ - RQ_SRS_006_RBAC_User_Create_Settings("1.0")]): + with Scenario( + "I create user with settings without profile", + requirements=[RQ_SRS_006_RBAC_User_Create_Settings("1.0")], + ): with cleanup("user12"): with When("I create a user with settings and no profile"): - node.query("CREATE USER user12 SETTINGS max_memory_usage=10000000 READONLY") + node.query( + "CREATE USER user12 SETTINGS max_memory_usage=10000000 READONLY" + ) - with Scenario("I create user on cluster", requirements=[ - RQ_SRS_006_RBAC_User_Create_OnCluster("1.0")]): + with Scenario( + "I create user on cluster", + requirements=[RQ_SRS_006_RBAC_User_Create_OnCluster("1.0")], + ): try: with When("I create user on cluster"): node.query("CREATE USER user13 ON CLUSTER sharded_cluster") @@ -264,8 +371,14 @@ def feature(self, node="clickhouse1"): with Finally("I drop the user"): node.query("DROP USER user13 ON CLUSTER sharded_cluster") - with Scenario("I create user on fake cluster, throws exception", requirements=[ - RQ_SRS_006_RBAC_User_Create_OnCluster("1.0")]): - with When("I create user on fake cluster"): - exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("CREATE USER user14 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + with Scenario( + "I create user on fake cluster, throws exception", + requirements=[RQ_SRS_006_RBAC_User_Create_OnCluster("1.0")], + ): + with When("I create user on fake cluster"): + exitcode, message = errors.cluster_not_found("fake_cluster") + node.query( + "CREATE USER user14 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) diff --git a/tests/testflows/rbac/tests/syntax/drop_quota.py b/tests/testflows/rbac/tests/syntax/drop_quota.py index 879964e46fb..9692bdaddcb 100755 --- a/tests/testflows/rbac/tests/syntax/drop_quota.py +++ b/tests/testflows/rbac/tests/syntax/drop_quota.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("drop quota") def feature(self, node="clickhouse1"): @@ -30,14 +31,17 @@ def feature(self, node="clickhouse1"): with Given(f"I ensure that quota {quota} does not exist"): node.query(f"DROP QUOTA IF EXISTS {quota}") - with Scenario("I drop quota with no options", requirements=[ - RQ_SRS_006_RBAC_Quota_Drop("1.0")]): + with Scenario( + "I drop quota with no options", requirements=[RQ_SRS_006_RBAC_Quota_Drop("1.0")] + ): with cleanup("quota0"): with When("I run drop quota command"): node.query("DROP QUOTA quota0") - with Scenario("I drop quota, does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_Quota_Drop("1.0")]): + with Scenario( + "I drop quota, does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_Quota_Drop("1.0")], + ): quota = "quota0" cleanup_quota(quota) with When("I run drop quota command, throws exception"): @@ -45,32 +49,41 @@ def feature(self, node="clickhouse1"): node.query(f"DROP QUOTA {quota}", exitcode=exitcode, message=message) del quota - with Scenario("I drop 
quota if exists, quota exists", requirements=[ - RQ_SRS_006_RBAC_Quota_Drop_IfExists("1.0")]): + with Scenario( + "I drop quota if exists, quota exists", + requirements=[RQ_SRS_006_RBAC_Quota_Drop_IfExists("1.0")], + ): with cleanup("quota1"): with When("I run drop quota command"): node.query("DROP QUOTA IF EXISTS quota1") - with Scenario("I drop quota if exists, quota does not exist", requirements=[ - RQ_SRS_006_RBAC_Quota_Drop_IfExists("1.0")]): + with Scenario( + "I drop quota if exists, quota does not exist", + requirements=[RQ_SRS_006_RBAC_Quota_Drop_IfExists("1.0")], + ): cleanup_quota("quota2") with When("I run drop quota command, quota does not exist"): node.query("DROP QUOTA IF EXISTS quota2") - with Scenario("I drop default quota, throws error", requirements=[ - RQ_SRS_006_RBAC_Quota_Drop("1.0")]): + with Scenario( + "I drop default quota, throws error", + requirements=[RQ_SRS_006_RBAC_Quota_Drop("1.0")], + ): with When("I drop default quota"): exitcode, message = errors.cannot_remove_quota_default() node.query("DROP QUOTA default", exitcode=exitcode, message=message) - with Scenario("I drop multiple quotas", requirements=[ - RQ_SRS_006_RBAC_Quota_Drop("1.0")]): + with Scenario( + "I drop multiple quotas", requirements=[RQ_SRS_006_RBAC_Quota_Drop("1.0")] + ): with cleanup("quota2"), cleanup("quota3"): with When("I run drop quota command"): node.query("DROP QUOTA quota2, quota3") - with Scenario("I drop quota on cluster", requirements=[ - RQ_SRS_006_RBAC_Quota_Drop_Cluster("1.0")]): + with Scenario( + "I drop quota on cluster", + requirements=[RQ_SRS_006_RBAC_Quota_Drop_Cluster("1.0")], + ): try: with Given("I have a quota"): node.query("CREATE QUOTA quota4 ON CLUSTER sharded_cluster") @@ -80,8 +93,14 @@ def feature(self, node="clickhouse1"): with Finally("I drop the quota in case it still exists"): node.query("DROP QUOTA IF EXISTS quota4 ON CLUSTER sharded_cluster") - with Scenario("I drop quota on fake cluster", requirements=[ - RQ_SRS_006_RBAC_Quota_Drop_Cluster("1.0")]): + with Scenario( + "I drop quota on fake cluster", + requirements=[RQ_SRS_006_RBAC_Quota_Drop_Cluster("1.0")], + ): with When("I run drop quota command"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("DROP QUOTA quota5 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + node.query( + "DROP QUOTA quota5 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) diff --git a/tests/testflows/rbac/tests/syntax/drop_role.py b/tests/testflows/rbac/tests/syntax/drop_role.py index 87810dc0184..7824b6509c6 100755 --- a/tests/testflows/rbac/tests/syntax/drop_role.py +++ b/tests/testflows/rbac/tests/syntax/drop_role.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("drop role") def feature(self, node="clickhouse1"): @@ -28,17 +29,19 @@ def feature(self, node="clickhouse1"): def cleanup_role(role): with Given(f"I ensure that role {role} does not exist"): - node.query(f"DROP ROLE IF EXISTS {role}") + node.query(f"DROP ROLE IF EXISTS {role}") - - with Scenario("I drop role with no options", requirements=[ - RQ_SRS_006_RBAC_Role_Drop("1.0")]): + with Scenario( + "I drop role with no options", requirements=[RQ_SRS_006_RBAC_Role_Drop("1.0")] + ): with setup("role0"): with When("I drop role"): node.query("DROP ROLE role0") - with Scenario("I drop role that doesn't exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_Role_Drop("1.0")]): + with Scenario( + "I drop role that doesn't 
exist, throws exception", + requirements=[RQ_SRS_006_RBAC_Role_Drop("1.0")], + ): role = "role0" cleanup_role(role) with When(f"I drop role {role}"): @@ -46,39 +49,54 @@ def feature(self, node="clickhouse1"): node.query(f"DROP ROLE {role}", exitcode=exitcode, message=message) del role - with Scenario("I drop multiple roles", requirements=[ - RQ_SRS_006_RBAC_Role_Drop("1.0")]): + with Scenario( + "I drop multiple roles", requirements=[RQ_SRS_006_RBAC_Role_Drop("1.0")] + ): with setup("role1"), setup("role2"): with When("I drop multiple roles"): node.query("DROP ROLE role1, role2") - with Scenario("I drop role that does not exist, using if exists", requirements=[ - RQ_SRS_006_RBAC_Role_Drop_IfExists("1.0")]): + with Scenario( + "I drop role that does not exist, using if exists", + requirements=[RQ_SRS_006_RBAC_Role_Drop_IfExists("1.0")], + ): with When("I drop role if exists"): node.query("DROP ROLE IF EXISTS role3") - with Scenario("I drop multiple roles where one does not exist", requirements=[ - RQ_SRS_006_RBAC_Role_Drop_IfExists("1.0")]): + with Scenario( + "I drop multiple roles where one does not exist", + requirements=[RQ_SRS_006_RBAC_Role_Drop_IfExists("1.0")], + ): with setup("role5"): with When("I drop multiple roles where one doesnt exist"): node.query("DROP ROLE IF EXISTS role3, role5") - with Scenario("I drop multiple roles where both do not exist", requirements=[ - RQ_SRS_006_RBAC_Role_Drop_IfExists("1.0")]): + with Scenario( + "I drop multiple roles where both do not exist", + requirements=[RQ_SRS_006_RBAC_Role_Drop_IfExists("1.0")], + ): with Given("I ensure role does not exist"): node.query("DROP ROLE IF EXISTS role6") with When("I drop the nonexistant roles"): node.query("DROP USER IF EXISTS role5, role6") - with Scenario("I drop role on cluster", requirements=[ - RQ_SRS_006_RBAC_Role_Drop_Cluster("1.0")]): + with Scenario( + "I drop role on cluster", + requirements=[RQ_SRS_006_RBAC_Role_Drop_Cluster("1.0")], + ): with Given("I have a role on cluster"): node.query("CREATE ROLE OR REPLACE role0 ON CLUSTER sharded_cluster") with When("I drop the role from the cluster"): node.query("DROP ROLE IF EXISTS role0 ON CLUSTER sharded_cluster") - with Scenario("I drop role on fake cluster", requirements=[ - RQ_SRS_006_RBAC_Role_Drop_Cluster("1.0")]): + with Scenario( + "I drop role on fake cluster", + requirements=[RQ_SRS_006_RBAC_Role_Drop_Cluster("1.0")], + ): with When("I run drop role command"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("DROP ROLE role2 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + node.query( + "DROP ROLE role2 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) diff --git a/tests/testflows/rbac/tests/syntax/drop_row_policy.py b/tests/testflows/rbac/tests/syntax/drop_row_policy.py index 357f5084bb3..7efda97b721 100755 --- a/tests/testflows/rbac/tests/syntax/drop_row_policy.py +++ b/tests/testflows/rbac/tests/syntax/drop_row_policy.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("drop row policy") def feature(self, node="clickhouse1"): @@ -39,96 +40,163 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE TABLE default.foo (x UInt64, y String) Engine=Memory") node.query(f"CREATE TABLE default.foo2 (x UInt64, y String) Engine=Memory") - with Scenario("I drop row policy with no options", requirements=[ + with Scenario( + "I drop row policy with no options", + requirements=[ 
RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), + ], + ): with cleanup(["policy1"]): with When("I drop row policy"): node.query("DROP ROW POLICY policy1 ON default.foo") - with Scenario("I drop row policy using short syntax with no options", requirements=[ + with Scenario( + "I drop row policy using short syntax with no options", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), + ], + ): with cleanup(["policy2"]): with When("I drop row policy short form"): node.query("DROP POLICY policy2 ON default.foo") - with Scenario("I drop row policy, does not exist, throws exception", requirements=[ + with Scenario( + "I drop row policy, does not exist, throws exception", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), + ], + ): policy = "policy1" cleanup_policy(policy) with When("I drop row policy"): - exitcode, message = errors.row_policy_not_found_in_disk(name=f"{policy} ON default.foo") - node.query(f"DROP ROW POLICY {policy} ON default.foo", exitcode=exitcode, message=message) + exitcode, message = errors.row_policy_not_found_in_disk( + name=f"{policy} ON default.foo" + ) + node.query( + f"DROP ROW POLICY {policy} ON default.foo", + exitcode=exitcode, + message=message, + ) del policy - with Scenario("I drop row policy if exists, policy does exist", requirements=[ + with Scenario( + "I drop row policy if exists, policy does exist", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Drop_IfExists("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), + ], + ): with cleanup(["policy3"]): with When("I drop row policy if exists"): node.query("DROP ROW POLICY IF EXISTS policy3 ON default.foo") - with Scenario("I drop row policy if exists, policy doesn't exist", requirements=[ + with Scenario( + "I drop row policy if exists, policy doesn't exist", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Drop_IfExists("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), + ], + ): cleanup_policy("policy3") with When("I drop row policy if exists"): node.query("DROP ROW POLICY IF EXISTS policy3 ON default.foo") - with Scenario("I drop multiple row policies", requirements=[ + with Scenario( + "I drop multiple row policies", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), + ], + ): with cleanup(["policy3", "policy4"]): with When("I drop multiple row policies"): node.query("DROP ROW POLICY policy3, policy4 ON default.foo") - with Scenario("I drop row policy on multiple tables", requirements=[ + with Scenario( + "I drop row policy on multiple tables", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): - with cleanup(["policy3"], ["default.foo","default.foo2"]): + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), + ], + ): + with cleanup(["policy3"], ["default.foo", "default.foo2"]): with When("I drop row policy on multiple tables"): node.query("DROP ROW POLICY policy3 ON default.foo, default.foo2") - with Scenario("I drop multiple row policies on multiple tables", requirements=[ + with Scenario( + "I drop multiple row policies on multiple tables", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Drop("1.0"), - 
RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): - with cleanup(["policy3", "policy4"], ["default.foo","default.foo2"]): + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), + ], + ): + with cleanup(["policy3", "policy4"], ["default.foo", "default.foo2"]): with When("I drop the row policies from the tables"): - node.query("DROP ROW POLICY policy3 ON default.foo, policy4 ON default.foo2") + node.query( + "DROP ROW POLICY policy3 ON default.foo, policy4 ON default.foo2" + ) - with Scenario("I drop row policy on cluster", requirements=[ + with Scenario( + "I drop row policy on cluster", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Drop_OnCluster("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), + ], + ): try: with Given("I have a row policy"): - node.query("CREATE ROW POLICY policy13 ON default.foo ON CLUSTER sharded_cluster") + node.query( + "CREATE ROW POLICY policy13 ON default.foo ON CLUSTER sharded_cluster" + ) with When("I run drop row policy command"): - node.query("DROP ROW POLICY IF EXISTS policy13 ON CLUSTER sharded_cluster ON default.foo") + node.query( + "DROP ROW POLICY IF EXISTS policy13 ON CLUSTER sharded_cluster ON default.foo" + ) finally: with Finally("I drop the row policy in case it still exists"): - node.query("DROP ROW POLICY IF EXISTS policy13 ON default.foo ON CLUSTER sharded_cluster") + node.query( + "DROP ROW POLICY IF EXISTS policy13 ON default.foo ON CLUSTER sharded_cluster" + ) - with Scenario("I drop row policy on cluster after table", requirements=[ + with Scenario( + "I drop row policy on cluster after table", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Drop_OnCluster("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), + ], + ): try: with Given("I have a row policy"): - node.query("CREATE ROW POLICY policy12 ON default.foo ON CLUSTER sharded_cluster") + node.query( + "CREATE ROW POLICY policy12 ON default.foo ON CLUSTER sharded_cluster" + ) with When("I run drop row policy command"): - node.query("DROP ROW POLICY IF EXISTS policy13 ON default.foo ON CLUSTER sharded_cluster") + node.query( + "DROP ROW POLICY IF EXISTS policy13 ON default.foo ON CLUSTER sharded_cluster" + ) finally: with Finally("I drop the row policy in case it still exists"): - node.query("DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster") + node.query( + "DROP ROW POLICY IF EXISTS policy12 ON default.foo ON CLUSTER sharded_cluster" + ) - with Scenario("I drop row policy on fake cluster throws exception", requirements=[ + with Scenario( + "I drop row policy on fake cluster throws exception", + requirements=[ RQ_SRS_006_RBAC_RowPolicy_Drop_OnCluster("1.0"), - RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0")]): + RQ_SRS_006_RBAC_RowPolicy_Drop_On("1.0"), + ], + ): with When("I run drop row policy command"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("DROP ROW POLICY IF EXISTS policy14 ON default.foo ON CLUSTER fake_cluster", - exitcode=exitcode, message=message) + node.query( + "DROP ROW POLICY IF EXISTS policy14 ON default.foo ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the tables"): node.query(f"DROP TABLE IF EXISTS default.foo") diff --git a/tests/testflows/rbac/tests/syntax/drop_settings_profile.py b/tests/testflows/rbac/tests/syntax/drop_settings_profile.py index 514c3042679..de69bc7e0a7 100755 --- a/tests/testflows/rbac/tests/syntax/drop_settings_profile.py +++ 
b/tests/testflows/rbac/tests/syntax/drop_settings_profile.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("drop settings profile") def feature(self, node="clickhouse1"): @@ -30,64 +31,94 @@ def feature(self, node="clickhouse1"): with Given(f"I ensure that profile {profile} does not exist"): node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") - with Scenario("I drop settings profile with no options", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")]): + with Scenario( + "I drop settings profile with no options", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")], + ): with cleanup("profile0"): with When("I drop settings profile"): node.query("DROP SETTINGS PROFILE profile0") - with Scenario("I drop settings profile, does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")]): + with Scenario( + "I drop settings profile, does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")], + ): profile = "profile0" cleanup_profile(profile) with When("I drop settings profile"): exitcode, message = errors.settings_profile_not_found_in_disk(name=profile) - node.query("DROP SETTINGS PROFILE profile0", exitcode=exitcode, message=message) + node.query( + "DROP SETTINGS PROFILE profile0", exitcode=exitcode, message=message + ) del profile - with Scenario("I drop settings profile short form", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")]): + with Scenario( + "I drop settings profile short form", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")], + ): with cleanup("profile1"): with When("I drop settings profile short form"): node.query("DROP PROFILE profile1") - with Scenario("I drop settings profile if exists, profile does exist", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Drop_IfExists("1.0")]): + with Scenario( + "I drop settings profile if exists, profile does exist", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Drop_IfExists("1.0")], + ): with cleanup("profile2"): with When("I drop settings profile if exists"): node.query("DROP SETTINGS PROFILE IF EXISTS profile2") - with Scenario("I drop settings profile if exists, profile does not exist", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Drop_IfExists("1.0")]): + with Scenario( + "I drop settings profile if exists, profile does not exist", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Drop_IfExists("1.0")], + ): cleanup_profile("profile2") with When("I drop settings profile if exists"): node.query("DROP SETTINGS PROFILE IF EXISTS profile2") - with Scenario("I drop default settings profile, throws error", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")]): + with Scenario( + "I drop default settings profile, throws error", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")], + ): with When("I drop default profile"): exitcode, message = errors.cannot_remove_settings_profile_default() - node.query("DROP SETTINGS PROFILE default", exitcode=exitcode, message=message) + node.query( + "DROP SETTINGS PROFILE default", exitcode=exitcode, message=message + ) - with Scenario("I drop multiple settings profiles", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")]): + with Scenario( + "I drop multiple settings profiles", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Drop("1.0")], + ): with cleanup("profile3"), cleanup("profile4"): with When("I drop multiple settings profiles"): node.query("DROP SETTINGS 
PROFILE profile3, profile4") - with Scenario("I drop settings profile on cluster", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Drop_OnCluster("1.0")]): + with Scenario( + "I drop settings profile on cluster", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Drop_OnCluster("1.0")], + ): try: with Given("I have a settings profile"): - node.query("CREATE SETTINGS PROFILE profile5 ON CLUSTER sharded_cluster") + node.query( + "CREATE SETTINGS PROFILE profile5 ON CLUSTER sharded_cluster" + ) with When("I run drop settings profile command"): node.query("DROP SETTINGS PROFILE profile5 ON CLUSTER sharded_cluster") finally: with Finally("I drop the profile in case it still exists"): - node.query("DROP SETTINGS PROFILE IF EXISTS profile5 ON CLUSTER sharded_cluster") + node.query( + "DROP SETTINGS PROFILE IF EXISTS profile5 ON CLUSTER sharded_cluster" + ) - with Scenario("I drop settings profile on fake cluster, throws exception", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_Drop_OnCluster("1.0")]): + with Scenario( + "I drop settings profile on fake cluster, throws exception", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_Drop_OnCluster("1.0")], + ): with When("I run drop settings profile command"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("DROP SETTINGS PROFILE profile6 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + node.query( + "DROP SETTINGS PROFILE profile6 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) diff --git a/tests/testflows/rbac/tests/syntax/drop_user.py b/tests/testflows/rbac/tests/syntax/drop_user.py index 9bd2433d487..287d61fdbe0 100755 --- a/tests/testflows/rbac/tests/syntax/drop_user.py +++ b/tests/testflows/rbac/tests/syntax/drop_user.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("drop user") def feature(self, node="clickhouse1"): @@ -30,69 +31,91 @@ def feature(self, node="clickhouse1"): with Given(f"I ensure that user {user} does not exist"): node.query(f"DROP USER IF EXISTS {user}") - with Scenario("I drop user with no options", requirements=[ - RQ_SRS_006_RBAC_User_Drop("1.0")]): + with Scenario( + "I drop user with no options", requirements=[RQ_SRS_006_RBAC_User_Drop("1.0")] + ): with setup("user0"): with When("I drop user"): node.query("DROP USER user0") - with Scenario("I drop user, does not exist, throws exception", requirements=[ - RQ_SRS_006_RBAC_User_Drop("1.0")]): - user = "user0" - cleanup_user(user) - with When(f"I drop user {user}"): - exitcode, message = errors.user_not_found_in_disk(name=user) - node.query(f"DROP USER {user}", exitcode=exitcode, message=message) - del user + with Scenario( + "I drop user, does not exist, throws exception", + requirements=[RQ_SRS_006_RBAC_User_Drop("1.0")], + ): + user = "user0" + cleanup_user(user) + with When(f"I drop user {user}"): + exitcode, message = errors.user_not_found_in_disk(name=user) + node.query(f"DROP USER {user}", exitcode=exitcode, message=message) + del user - with Scenario("I drop multiple users", requirements=[ - RQ_SRS_006_RBAC_User_Drop("1.0")]): + with Scenario( + "I drop multiple users", requirements=[RQ_SRS_006_RBAC_User_Drop("1.0")] + ): with setup("user1"), setup("user2"): with When("I drop multiple users"): node.query("DROP USER user1, user2") - with Scenario("I drop user if exists, user does exist", requirements=[ - RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")]): + with Scenario( + "I drop user if exists, user does exist", + 
requirements=[RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")], + ): with setup("user3"): with When("I drop user that exists"): node.query("DROP USER IF EXISTS user3") - with Scenario("I drop user if exists, user does not exist", requirements=[ - RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")]): + with Scenario( + "I drop user if exists, user does not exist", + requirements=[RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")], + ): cleanup_user("user3") with When("I drop nonexistant user"): node.query("DROP USER IF EXISTS user3") - with Scenario("I drop default user, throws error", requirements=[ - RQ_SRS_006_RBAC_User_Drop("1.0")]): + with Scenario( + "I drop default user, throws error", + requirements=[RQ_SRS_006_RBAC_User_Drop("1.0")], + ): with When("I drop user"): exitcode, message = errors.cannot_remove_user_default() node.query("DROP USER default", exitcode=exitcode, message=message) - with Scenario("I drop multiple users where one does not exist", requirements=[ - RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")]): + with Scenario( + "I drop multiple users where one does not exist", + requirements=[RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")], + ): with setup("user3"): with When("I drop multiple users where one does not exist"): node.query("DROP USER IF EXISTS user3, user4") - with Scenario("I drop multiple users where both do not exist", requirements=[ - RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")]): + with Scenario( + "I drop multiple users where both do not exist", + requirements=[RQ_SRS_006_RBAC_User_Drop_IfExists("1.0")], + ): with When("I drop the nonexistant users"): node.query("DROP USER IF EXISTS user5, user6") - with Scenario("I drop user from specific cluster", requirements=[ - RQ_SRS_006_RBAC_User_Drop_OnCluster("1.0")]): - try: + with Scenario( + "I drop user from specific cluster", + requirements=[RQ_SRS_006_RBAC_User_Drop_OnCluster("1.0")], + ): + try: with Given("I have a user on cluster"): node.query("CREATE USER user4 ON CLUSTER sharded_cluster") with When("I drop a user from the cluster"): node.query("DROP USER user4 ON CLUSTER sharded_cluster") - finally: - with Finally("I make sure the user is dropped"): - node.query("DROP USER IF EXISTS user4 ON CLUSTER sharded_cluster") + finally: + with Finally("I make sure the user is dropped"): + node.query("DROP USER IF EXISTS user4 ON CLUSTER sharded_cluster") - with Scenario("I drop user from fake cluster", requirements=[ - RQ_SRS_006_RBAC_User_Drop_OnCluster("1.0")]): + with Scenario( + "I drop user from fake cluster", + requirements=[RQ_SRS_006_RBAC_User_Drop_OnCluster("1.0")], + ): with When("I drop a user from the fake cluster"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("DROP USER user5 ON CLUSTER fake_cluster", exitcode=exitcode, message=message) + node.query( + "DROP USER user5 ON CLUSTER fake_cluster", + exitcode=exitcode, + message=message, + ) diff --git a/tests/testflows/rbac/tests/syntax/feature.py b/tests/testflows/rbac/tests/syntax/feature.py index b7c23f8d7ee..0e8ea921d43 100755 --- a/tests/testflows/rbac/tests/syntax/feature.py +++ b/tests/testflows/rbac/tests/syntax/feature.py @@ -1,5 +1,6 @@ from testflows.core import * + @TestFeature @Name("syntax") def feature(self): @@ -12,10 +13,10 @@ def feature(self): Feature(run=load("rbac.tests.syntax.drop_role", "feature")) Feature(run=load("rbac.tests.syntax.show_create_role", "feature")) Feature(run=load("rbac.tests.syntax.grant_role", "feature")) - Feature(run=load("rbac.tests.syntax.grant_privilege","feature")) + 
Feature(run=load("rbac.tests.syntax.grant_privilege", "feature")) Feature(run=load("rbac.tests.syntax.show_grants", "feature")) Feature(run=load("rbac.tests.syntax.revoke_role", "feature")) - Feature(run=load("rbac.tests.syntax.revoke_privilege","feature")) + Feature(run=load("rbac.tests.syntax.revoke_privilege", "feature")) Feature(run=load("rbac.tests.syntax.create_row_policy", "feature")) Feature(run=load("rbac.tests.syntax.alter_row_policy", "feature")) Feature(run=load("rbac.tests.syntax.drop_row_policy", "feature")) @@ -31,4 +32,4 @@ def feature(self): Feature(run=load("rbac.tests.syntax.drop_settings_profile", "feature")) Feature(run=load("rbac.tests.syntax.show_create_settings_profile", "feature")) Feature(run=load("rbac.tests.syntax.set_default_role", "feature")) - Feature(run=load("rbac.tests.syntax.set_role","feature")) \ No newline at end of file + Feature(run=load("rbac.tests.syntax.set_role", "feature")) diff --git a/tests/testflows/rbac/tests/syntax/grant_privilege.py b/tests/testflows/rbac/tests/syntax/grant_privilege.py index 817a70498f4..ab422f38eec 100755 --- a/tests/testflows/rbac/tests/syntax/grant_privilege.py +++ b/tests/testflows/rbac/tests/syntax/grant_privilege.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @contextmanager def setup(node): try: @@ -19,31 +20,142 @@ def setup(node): node.query("DROP USER IF EXISTS user1") node.query("DROP ROLE IF EXISTS role1") + @TestOutline(Scenario) -@Examples("privilege on allow_column allow_introspection", [ - ("dictGet", ("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_DictGet("1.0"))), - ("INTROSPECTION", ("*.*",), False, True, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Introspection("1.0"))), - ("SELECT", ("db0.table0","db0.*","*.*","tb0","*"), True, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Select("1.0"))), - ("INSERT",("db0.table0","db0.*","*.*","tb0","*"), True, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Insert("1.0"))), - ("ALTER",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Alter("1.0"))), - ("CREATE",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Create("1.0"))), - ("DROP",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Drop("1.0"))), - ("TRUNCATE",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Truncate("1.0"))), - ("OPTIMIZE",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Optimize("1.0"))), - ("SHOW",("db0.table0","db0.*","*.*","tb0","*"), True, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Show("1.0"))), - ("KILL QUERY",("*.*",), False, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_KillQuery("1.0"))), - ("ACCESS MANAGEMENT",("*.*",), False, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_AccessManagement("1.0"))), - ("SYSTEM",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_System("1.0"))), - ("SOURCES",("*.*",), False, False, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Sources("1.0"))), - ("ALL",("*.*",), True, True, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_All("1.0"))), - ("ALL PRIVILEGES",("*.*",), True, True, Requirements(RQ_SRS_006_RBAC_Grant_Privilege_All("1.0"))), #alias for all - ],) -def grant_privileges(self, privilege, on, allow_column, 
allow_introspection, node="clickhouse1"): - grant_privilege(privilege=privilege, on=on, allow_column=allow_column, allow_introspection=allow_introspection, node=node) +@Examples( + "privilege on allow_column allow_introspection", + [ + ( + "dictGet", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_DictGet("1.0")), + ), + ( + "INTROSPECTION", + ("*.*",), + False, + True, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Introspection("1.0")), + ), + ( + "SELECT", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + True, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Select("1.0")), + ), + ( + "INSERT", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + True, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Insert("1.0")), + ), + ( + "ALTER", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Alter("1.0")), + ), + ( + "CREATE", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Create("1.0")), + ), + ( + "DROP", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Drop("1.0")), + ), + ( + "TRUNCATE", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Truncate("1.0")), + ), + ( + "OPTIMIZE", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Optimize("1.0")), + ), + ( + "SHOW", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + True, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Show("1.0")), + ), + ( + "KILL QUERY", + ("*.*",), + False, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_KillQuery("1.0")), + ), + ( + "ACCESS MANAGEMENT", + ("*.*",), + False, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_AccessManagement("1.0")), + ), + ( + "SYSTEM", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_System("1.0")), + ), + ( + "SOURCES", + ("*.*",), + False, + False, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_Sources("1.0")), + ), + ( + "ALL", + ("*.*",), + True, + True, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_All("1.0")), + ), + ( + "ALL PRIVILEGES", + ("*.*",), + True, + True, + Requirements(RQ_SRS_006_RBAC_Grant_Privilege_All("1.0")), + ), # alias for all + ], +) +def grant_privileges( + self, privilege, on, allow_column, allow_introspection, node="clickhouse1" +): + grant_privilege( + privilege=privilege, + on=on, + allow_column=allow_column, + allow_introspection=allow_introspection, + node=node, + ) + @TestOutline(Scenario) @Requirements(RQ_SRS_006_RBAC_Grant_Privilege_GrantOption("1.0")) -def grant_privilege(self, privilege, on, allow_column, allow_introspection, node="clickhouse1"): +def grant_privilege( + self, privilege, on, allow_column, allow_introspection, node="clickhouse1" +): node = self.context.cluster.node(node) for on_ in on: @@ -54,14 +166,22 @@ def grant_privilege(self, privilege, on, allow_column, allow_introspection, node settings.append(("allow_introspection_functions", 1)) node.query("SET allow_introspection_functions = 1") with When("I grant privilege without grant option"): - node.query(f"GRANT {privilege} ON {on_} TO user0", settings=settings) + node.query( + f"GRANT {privilege} ON {on_} TO user0", settings=settings + ) with When("I grant privilege with grant option"): - node.query(f"GRANT {privilege} ON {on_} TO 
user1 WITH GRANT OPTION", settings=settings) + node.query( + f"GRANT {privilege} ON {on_} TO user1 WITH GRANT OPTION", + settings=settings, + ) - if allow_column and ('*' not in on_): + if allow_column and ("*" not in on_): # Grant column specific for some column 'x' with When("I grant privilege with columns"): - node.query(f"GRANT {privilege}(x) ON {on_} TO user0", settings=settings) + node.query( + f"GRANT {privilege}(x) ON {on_} TO user0", settings=settings + ) + @TestFeature @Name("grant privilege") @@ -82,53 +202,83 @@ def feature(self, node="clickhouse1"): Scenario(run=grant_privileges) # with nonexistant object name, GRANT assumes type role - with Scenario("I grant privilege to role that does not exist", requirements=[ - RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]): + with Scenario( + "I grant privilege to role that does not exist", + requirements=[RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")], + ): with Given("I ensure that role does not exist"): node.query("DROP ROLE IF EXISTS role0") with When("I grant privilege ON CLUSTER"): exitcode, message = errors.role_not_found_in_disk(name="role0") node.query("GRANT NONE TO role0", exitcode=exitcode, message=message) - with Scenario("I grant privilege ON CLUSTER", requirements=[ + with Scenario( + "I grant privilege ON CLUSTER", + requirements=[ RQ_SRS_006_RBAC_Grant_Privilege_OnCluster("1.0"), - RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Grant_Privilege_None("1.0"), + ], + ): with setup(node): with When("I grant privilege ON CLUSTER"): node.query("GRANT ON CLUSTER sharded_cluster NONE TO user0") - with Scenario("I grant privilege on fake cluster, throws exception", requirements=[ - RQ_SRS_006_RBAC_Grant_Privilege_OnCluster("1.0")]): + with Scenario( + "I grant privilege on fake cluster, throws exception", + requirements=[RQ_SRS_006_RBAC_Grant_Privilege_OnCluster("1.0")], + ): with setup(node): with When("I grant privilege ON CLUSTER"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("GRANT ON CLUSTER fake_cluster NONE TO user0", exitcode=exitcode, message=message) + node.query( + "GRANT ON CLUSTER fake_cluster NONE TO user0", + exitcode=exitcode, + message=message, + ) - with Scenario("I grant privilege to multiple users and roles", requirements=[ + with Scenario( + "I grant privilege to multiple users and roles", + requirements=[ RQ_SRS_006_RBAC_Grant_Privilege_To("1.0"), - RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Grant_Privilege_None("1.0"), + ], + ): with setup(node): with When("I grant privilege to several users"): node.query("GRANT NONE TO user0, user1, role1") - with Scenario("I grant privilege to current user", requirements=[ + with Scenario( + "I grant privilege to current user", + requirements=[ RQ_SRS_006_RBAC_Grant_Privilege_ToCurrentUser("1.0"), - RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Grant_Privilege_None("1.0"), + ], + ): with setup(node): with When("I grant privilege to current user"): - node.query("GRANT NONE TO CURRENT_USER", settings = [("user","user0")]) + node.query("GRANT NONE TO CURRENT_USER", settings=[("user", "user0")]) - with Scenario("I grant privilege NONE to default user, throws exception", requirements=[ + with Scenario( + "I grant privilege NONE to default user, throws exception", + requirements=[ RQ_SRS_006_RBAC_Grant_Privilege_ToCurrentUser("1.0"), - RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Grant_Privilege_None("1.0"), + ], + ): with setup(node): with When("I grant privilege to current 
user"): exitcode, message = errors.cannot_update_default() - node.query("GRANT NONE TO CURRENT_USER", exitcode=exitcode, message=message) + node.query( + "GRANT NONE TO CURRENT_USER", exitcode=exitcode, message=message + ) - with Scenario("I grant privilege with grant option", requirements=[ + with Scenario( + "I grant privilege with grant option", + requirements=[ RQ_SRS_006_RBAC_Grant_Privilege_GrantOption("1.0"), - RQ_SRS_006_RBAC_Grant_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Grant_Privilege_None("1.0"), + ], + ): with setup(node): with When("I grant privilege with grant option"): - node.query("GRANT NONE ON *.* TO user0 WITH GRANT OPTION") \ No newline at end of file + node.query("GRANT NONE ON *.* TO user0 WITH GRANT OPTION") diff --git a/tests/testflows/rbac/tests/syntax/grant_role.py b/tests/testflows/rbac/tests/syntax/grant_role.py index 66db2635b27..321513cb8b7 100755 --- a/tests/testflows/rbac/tests/syntax/grant_role.py +++ b/tests/testflows/rbac/tests/syntax/grant_role.py @@ -6,6 +6,7 @@ import rbac.helper.errors as errors from rbac.requirements import * from helpers.common import check_clickhouse_version + @TestFeature @Name("grant role") @Args(format_description=False) @@ -19,7 +20,7 @@ def feature(self, node="clickhouse1"): node = self.context.cluster.node(node) @contextmanager - def setup(users=0,roles=0): + def setup(users=0, roles=0): try: with Given("I have some users and roles"): for i in range(users): @@ -34,69 +35,94 @@ def feature(self, node="clickhouse1"): for j in range(roles): node.query(f"DROP ROLE IF EXISTS role{j}") - with Scenario("I grant a role to a user", requirements=[ - RQ_SRS_006_RBAC_Grant_Role("1.0")]): - with setup(1,1): + with Scenario( + "I grant a role to a user", requirements=[RQ_SRS_006_RBAC_Grant_Role("1.0")] + ): + with setup(1, 1): with When("I grant a role"): node.query("GRANT role0 TO user0") - with Scenario("I grant a nonexistent role to user", requirements=[ - RQ_SRS_006_RBAC_Grant_Role("1.0")]): - with setup(1,0): + with Scenario( + "I grant a nonexistent role to user", + requirements=[RQ_SRS_006_RBAC_Grant_Role("1.0")], + ): + with setup(1, 0): with When("I grant nonexistent role to a user"): exitcode, message = errors.role_not_found_in_disk(name="role0") node.query("GRANT role0 TO user0", exitcode=exitcode, message=message) # with nonexistent object name, GRANT assumes type role (treats user0 as role) - with Scenario("I grant a role to a nonexistent user", requirements=[ - RQ_SRS_006_RBAC_Grant_Role("1.0")]): - with setup(0,1): + with Scenario( + "I grant a role to a nonexistent user", + requirements=[RQ_SRS_006_RBAC_Grant_Role("1.0")], + ): + with setup(0, 1): with When("I grant role to a nonexistent user"): exitcode, message = errors.role_not_found_in_disk(name="user0") node.query("GRANT role0 TO user0", exitcode=exitcode, message=message) - with Scenario("I grant a nonexistent role to a nonexistent user", requirements=[ - RQ_SRS_006_RBAC_Grant_Role("1.0")]): - with setup(0,0): + with Scenario( + "I grant a nonexistent role to a nonexistent user", + requirements=[RQ_SRS_006_RBAC_Grant_Role("1.0")], + ): + with setup(0, 0): with When("I grant nonexistent role to a nonexistent user"): - exitcode, message = errors.role_not_found_in_disk(name="user0") if check_clickhouse_version(">=21.09")(self) else errors.role_not_found_in_disk(name="role0") + exitcode, message = ( + errors.role_not_found_in_disk(name="user0") + if check_clickhouse_version(">=21.09")(self) + else errors.role_not_found_in_disk(name="role0") + ) node.query("GRANT role0 
TO user0", exitcode=exitcode, message=message) - with Scenario("I grant a role to multiple users", requirements=[ - RQ_SRS_006_RBAC_Grant_Role("1.0")]): - with setup(2,1): + with Scenario( + "I grant a role to multiple users", + requirements=[RQ_SRS_006_RBAC_Grant_Role("1.0")], + ): + with setup(2, 1): with When("I grant role to a multiple users"): node.query("GRANT role0 TO user0, user1") - with Scenario("I grant multiple roles to multiple users", requirements=[ - RQ_SRS_006_RBAC_Grant_Role("1.0")]): - with setup(2,2): + with Scenario( + "I grant multiple roles to multiple users", + requirements=[RQ_SRS_006_RBAC_Grant_Role("1.0")], + ): + with setup(2, 2): with When("I grant multiple roles to multiple users"): node.query("GRANT role0, role1 TO user0, user1") - with Scenario("I grant role to current user", requirements=[ - RQ_SRS_006_RBAC_Grant_Role_CurrentUser("1.0")]): - with setup(1,1): + with Scenario( + "I grant role to current user", + requirements=[RQ_SRS_006_RBAC_Grant_Role_CurrentUser("1.0")], + ): + with setup(1, 1): with Given("I have a user with access management privilege"): node.query("GRANT ACCESS MANAGEMENT ON *.* TO user0") with When("I grant role to current user"): - node.query("GRANT role0 TO CURRENT_USER", settings = [("user","user0")]) + node.query("GRANT role0 TO CURRENT_USER", settings=[("user", "user0")]) - with Scenario("I grant role to default user, throws exception", requirements=[ - RQ_SRS_006_RBAC_Grant_Role_CurrentUser("1.0")]): - with setup(1,1): + with Scenario( + "I grant role to default user, throws exception", + requirements=[RQ_SRS_006_RBAC_Grant_Role_CurrentUser("1.0")], + ): + with setup(1, 1): with When("I grant role to default user"): exitcode, message = errors.cannot_update_default() - node.query("GRANT role0 TO CURRENT_USER", exitcode=exitcode, message=message) + node.query( + "GRANT role0 TO CURRENT_USER", exitcode=exitcode, message=message + ) - with Scenario("I grant role to user with admin option", requirements=[ - RQ_SRS_006_RBAC_Grant_Role_AdminOption("1.0")]): - with setup(1,1): + with Scenario( + "I grant role to user with admin option", + requirements=[RQ_SRS_006_RBAC_Grant_Role_AdminOption("1.0")], + ): + with setup(1, 1): with When("I grant role to a user with admin option"): node.query("GRANT role0 TO user0 WITH ADMIN OPTION") - with Scenario("I grant role to user on cluster", requirements=[ - RQ_SRS_006_RBAC_Grant_Role_OnCluster("1.0")]): + with Scenario( + "I grant role to user on cluster", + requirements=[RQ_SRS_006_RBAC_Grant_Role_OnCluster("1.0")], + ): try: with Given("I have a user and a role on a cluster"): node.query("CREATE USER OR REPLACE user0 ON CLUSTER sharded_cluster") @@ -108,9 +134,15 @@ def feature(self, node="clickhouse1"): node.query("DROP USER IF EXISTS user0 ON CLUSTER sharded_cluster") node.query("DROP ROLE IF EXISTS role0 ON CLUSTER sharded_cluster") - with Scenario("I grant role to user on fake cluster, throws exception", requirements=[ - RQ_SRS_006_RBAC_Grant_Role_OnCluster("1.0")]): - with setup(1,1): + with Scenario( + "I grant role to user on fake cluster, throws exception", + requirements=[RQ_SRS_006_RBAC_Grant_Role_OnCluster("1.0")], + ): + with setup(1, 1): with When("I grant the role to the user"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("GRANT ON CLUSTER fake_cluster role0 TO user0", exitcode=exitcode, message=message) + node.query( + "GRANT ON CLUSTER fake_cluster role0 TO user0", + exitcode=exitcode, + message=message, + ) diff --git 
a/tests/testflows/rbac/tests/syntax/revoke_privilege.py b/tests/testflows/rbac/tests/syntax/revoke_privilege.py index 3e23f2ddfc9..023dd803018 100755 --- a/tests/testflows/rbac/tests/syntax/revoke_privilege.py +++ b/tests/testflows/rbac/tests/syntax/revoke_privilege.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @contextmanager def setup(node): try: @@ -21,32 +22,142 @@ def setup(node): @TestOutline(Scenario) -@Examples("privilege on allow_column allow_introspection", [ - ("dictGet", ("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_DictGet("1.0"))), - ("INTROSPECTION", ("*.*",), False, True, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Introspection("1.0"))), - ("SELECT", ("db0.table0","db0.*","*.*","tb0","*"), True, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Select("1.0"))), - ("INSERT",("db0.table0","db0.*","*.*","tb0","*"), True, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Insert("1.0"))), - ("ALTER",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Alter("1.0"))), - ("CREATE",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Create("1.0"))), - ("DROP",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Drop("1.0"))), - ("TRUNCATE",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Truncate("1.0"))), - ("OPTIMIZE",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Optimize("1.0"))), - ("SHOW",("db0.table0","db0.*","*.*","tb0","*"), True, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Show("1.0"))), - ("KILL QUERY",("*.*",), False, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_KillQuery("1.0"))), - ("ACCESS MANAGEMENT",("*.*",), False, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_AccessManagement("1.0"))), - ("SYSTEM",("db0.table0","db0.*","*.*","tb0","*"), False, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_System("1.0"))), - ("SOURCES",("*.*",), False, False, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Sources("1.0"))), - ("ALL",("*.*",), True, True, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_All("1.0"))), - ("ALL PRIVILEGES",("*.*",), True, True, Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_All("1.0"))), #alias for all - ],) -def revoke_privileges(self, privilege, on, allow_column, allow_introspection, node="clickhouse1"): - revoke_privilege(privilege=privilege, on=on, allow_column=allow_column, allow_introspection=allow_introspection, node=node) +@Examples( + "privilege on allow_column allow_introspection", + [ + ( + "dictGet", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_DictGet("1.0")), + ), + ( + "INTROSPECTION", + ("*.*",), + False, + True, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Introspection("1.0")), + ), + ( + "SELECT", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + True, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Select("1.0")), + ), + ( + "INSERT", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + True, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Insert("1.0")), + ), + ( + "ALTER", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Alter("1.0")), + ), + ( + "CREATE", + ("db0.table0", "db0.*", "*.*", 
"tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Create("1.0")), + ), + ( + "DROP", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Drop("1.0")), + ), + ( + "TRUNCATE", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Truncate("1.0")), + ), + ( + "OPTIMIZE", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Optimize("1.0")), + ), + ( + "SHOW", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + True, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Show("1.0")), + ), + ( + "KILL QUERY", + ("*.*",), + False, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_KillQuery("1.0")), + ), + ( + "ACCESS MANAGEMENT", + ("*.*",), + False, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_AccessManagement("1.0")), + ), + ( + "SYSTEM", + ("db0.table0", "db0.*", "*.*", "tb0", "*"), + False, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_System("1.0")), + ), + ( + "SOURCES", + ("*.*",), + False, + False, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_Sources("1.0")), + ), + ( + "ALL", + ("*.*",), + True, + True, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_All("1.0")), + ), + ( + "ALL PRIVILEGES", + ("*.*",), + True, + True, + Requirements(RQ_SRS_006_RBAC_Revoke_Privilege_All("1.0")), + ), # alias for all + ], +) +def revoke_privileges( + self, privilege, on, allow_column, allow_introspection, node="clickhouse1" +): + revoke_privilege( + privilege=privilege, + on=on, + allow_column=allow_column, + allow_introspection=allow_introspection, + node=node, + ) + @TestOutline(Scenario) @Requirements( RQ_SRS_006_RBAC_Revoke_Privilege_PrivilegeColumns("1.0"), ) -def revoke_privilege(self, privilege, on, allow_column, allow_introspection, node="clickhouse1"): +def revoke_privilege( + self, privilege, on, allow_column, allow_introspection, node="clickhouse1" +): node = self.context.cluster.node(node) for on_ in on: with When(f"I revoke {privilege} privilege from user on {on_}"): @@ -56,12 +167,18 @@ def revoke_privilege(self, privilege, on, allow_column, allow_introspection, nod settings.append(("allow_introspection_functions", 1)) node.query("SET allow_introspection_functions = 1") with When("I revoke privilege without columns"): - node.query(f"REVOKE {privilege} ON {on_} FROM user0", settings=settings) + node.query( + f"REVOKE {privilege} ON {on_} FROM user0", settings=settings + ) - if allow_column and ('*' not in on_): + if allow_column and ("*" not in on_): # Revoke column specific for some column 'x' with When("I revoke privilege with columns"): - node.query(f"REVOKE {privilege}(x) ON {on_} FROM user0", settings=settings) + node.query( + f"REVOKE {privilege}(x) ON {on_} FROM user0", + settings=settings, + ) + @TestFeature @Name("revoke privilege") @@ -80,83 +197,134 @@ def feature(self, node="clickhouse1"): Scenario(run=revoke_privileges) - with Scenario("I revoke privilege ON CLUSTER", requirements=[ + with Scenario( + "I revoke privilege ON CLUSTER", + requirements=[ RQ_SRS_006_RBAC_Revoke_Privilege_Cluster("1.0"), - RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0"), + ], + ): with setup(node): with When("I revoke privilege ON CLUSTER"): node.query("REVOKE ON CLUSTER sharded_cluster NONE FROM user0") - with Scenario("I revoke privilege ON fake CLUSTER, throws exception", requirements=[ + with Scenario( + "I revoke 
privilege ON fake CLUSTER, throws exception", + requirements=[ RQ_SRS_006_RBAC_Revoke_Privilege_Cluster("1.0"), - RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0"), + ], + ): with setup(node): with When("I revoke privilege ON CLUSTER"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("REVOKE ON CLUSTER fake_cluster NONE FROM user0", - exitcode=exitcode, message=message) + node.query( + "REVOKE ON CLUSTER fake_cluster NONE FROM user0", + exitcode=exitcode, + message=message, + ) - with Scenario("I revoke privilege from multiple users and roles", requirements=[ + with Scenario( + "I revoke privilege from multiple users and roles", + requirements=[ RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), - RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0"), + ], + ): with setup(node): with When("I revoke privilege from multiple users"): node.query("REVOKE NONE FROM user0, user1, role1") - with Scenario("I revoke privilege from current user", requirements=[ + with Scenario( + "I revoke privilege from current user", + requirements=[ RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), - RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0"), + ], + ): with setup(node): with When("I revoke privilege from current user"): - node.query("REVOKE NONE FROM CURRENT_USER", settings = [("user","user0")]) + node.query( + "REVOKE NONE FROM CURRENT_USER", settings=[("user", "user0")] + ) - with Scenario("I revoke privilege from all users", requirements=[ + with Scenario( + "I revoke privilege from all users", + requirements=[ RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), - RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0"), + ], + ): with setup(node): with When("I revoke privilege from all users"): exitcode, message = errors.cannot_update_default() - node.query("REVOKE NONE FROM ALL", exitcode=exitcode,message=message) + node.query("REVOKE NONE FROM ALL", exitcode=exitcode, message=message) - with Scenario("I revoke privilege from default user", requirements=[ + with Scenario( + "I revoke privilege from default user", + requirements=[ RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), - RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0"), + ], + ): with setup(node): with When("I revoke privilege from default user"): exitcode, message = errors.cannot_update_default() - node.query("REVOKE NONE FROM default", exitcode=exitcode,message=message) + node.query( + "REVOKE NONE FROM default", exitcode=exitcode, message=message + ) - #By default, ClickHouse treats unnamed object as role - with Scenario("I revoke privilege from nonexistent role, throws exception", requirements=[ + # By default, ClickHouse treats unnamed object as role + with Scenario( + "I revoke privilege from nonexistent role, throws exception", + requirements=[ RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), - RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0"), + ], + ): role = "role5" with Given(f"I ensure that role {role} does not exist"): node.query(f"DROP ROLE IF EXISTS {role}") with When(f"I revoke privilege from nonexistent role {role}"): exitcode, message = errors.role_not_found_in_disk(role) - node.query(f"REVOKE NONE FROM {role}", exitcode=exitcode,message=message) + node.query(f"REVOKE NONE FROM {role}", exitcode=exitcode, message=message) - with Scenario("I revoke privilege from 
ALL EXCEPT nonexistent role, throws exception", requirements=[ + with Scenario( + "I revoke privilege from ALL EXCEPT nonexistent role, throws exception", + requirements=[ RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), - RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0"), + ], + ): role = "role5" with Given(f"I ensure that role {role} does not exist"): node.query(f"DROP ROLE IF EXISTS {role}") with When(f"I revoke privilege from nonexistent role {role}"): exitcode, message = errors.role_not_found_in_disk(role) - node.query(f"REVOKE NONE FROM ALL EXCEPT {role}", exitcode=exitcode,message=message) + node.query( + f"REVOKE NONE FROM ALL EXCEPT {role}", + exitcode=exitcode, + message=message, + ) - with Scenario("I revoke privilege from all except some users and roles", requirements=[ + with Scenario( + "I revoke privilege from all except some users and roles", + requirements=[ RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), - RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0"), + ], + ): with setup(node): with When("I revoke privilege all except some users"): node.query("REVOKE NONE FROM ALL EXCEPT default, user0, role1") - with Scenario("I revoke privilege from all except current user", requirements=[ + with Scenario( + "I revoke privilege from all except current user", + requirements=[ RQ_SRS_006_RBAC_Revoke_Privilege_From("1.0"), - RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0")]): + RQ_SRS_006_RBAC_Revoke_Privilege_None("1.0"), + ], + ): with setup(node): with When("I revoke privilege from all except current user"): - node.query("REVOKE NONE FROM ALL EXCEPT CURRENT_USER") \ No newline at end of file + node.query("REVOKE NONE FROM ALL EXCEPT CURRENT_USER") diff --git a/tests/testflows/rbac/tests/syntax/revoke_role.py b/tests/testflows/rbac/tests/syntax/revoke_role.py index f1264c5fc4b..4e18ad13652 100755 --- a/tests/testflows/rbac/tests/syntax/revoke_role.py +++ b/tests/testflows/rbac/tests/syntax/revoke_role.py @@ -6,6 +6,7 @@ import rbac.helper.errors as errors from rbac.requirements import * from helpers.common import check_clickhouse_version + @TestFeature @Name("revoke role") @Args(format_description=False) @@ -21,7 +22,7 @@ def feature(self, node="clickhouse1"): node = self.context.cluster.node(node) @contextmanager - def setup(users=2,roles=2): + def setup(users=2, roles=2): try: with Given("I have some users"): for i in range(users): @@ -38,124 +39,194 @@ def feature(self, node="clickhouse1"): for i in range(roles): node.query(f"DROP ROLE IF EXISTS role{i}") - with Scenario("I revoke a role from a user", requirements=[ - RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with Scenario( + "I revoke a role from a user", requirements=[RQ_SRS_006_RBAC_Revoke_Role("1.0")] + ): with setup(): with When("I revoke a role"): node.query("REVOKE role0 FROM user0") - with Scenario("I revoke a nonexistent role from user", requirements=[ - RQ_SRS_006_RBAC_Revoke_Role("1.0")]): - with setup(1,0): + with Scenario( + "I revoke a nonexistent role from user", + requirements=[RQ_SRS_006_RBAC_Revoke_Role("1.0")], + ): + with setup(1, 0): with When("I revoke nonexistent role from a user"): exitcode, message = errors.role_not_found_in_disk(name="role0") - node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message) + node.query( + "REVOKE role0 FROM user0", exitcode=exitcode, message=message + ) # with nonexistent object name, REVOKE assumes type role (treats user0 as role) - with Scenario("I revoke a role from a 
nonexistent user", requirements=[ - RQ_SRS_006_RBAC_Revoke_Role("1.0")]): - with setup(0,1): + with Scenario( + "I revoke a role from a nonexistent user", + requirements=[RQ_SRS_006_RBAC_Revoke_Role("1.0")], + ): + with setup(0, 1): with When("I revoke role from a nonexistent user"): exitcode, message = errors.role_not_found_in_disk(name="user0") - node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message) + node.query( + "REVOKE role0 FROM user0", exitcode=exitcode, message=message + ) # with nonexistent object name, REVOKE assumes type role (treats user0 as role) - with Scenario("I revoke a role from ALL EXCEPT nonexistent user", requirements=[ - RQ_SRS_006_RBAC_Revoke_Role("1.0")]): - with setup(0,1): + with Scenario( + "I revoke a role from ALL EXCEPT nonexistent user", + requirements=[RQ_SRS_006_RBAC_Revoke_Role("1.0")], + ): + with setup(0, 1): with When("I revoke role from a nonexistent user"): exitcode, message = errors.role_not_found_in_disk(name="user0") - node.query("REVOKE role0 FROM ALL EXCEPT user0", exitcode=exitcode, message=message) + node.query( + "REVOKE role0 FROM ALL EXCEPT user0", + exitcode=exitcode, + message=message, + ) - with Scenario("I revoke a nonexistent role from a nonexistent user", requirements=[ - RQ_SRS_006_RBAC_Revoke_Role("1.0")]): - with setup(0,0): + with Scenario( + "I revoke a nonexistent role from a nonexistent user", + requirements=[RQ_SRS_006_RBAC_Revoke_Role("1.0")], + ): + with setup(0, 0): with When("I revoke nonexistent role from a nonexistent user"): - exitcode, message = errors.role_not_found_in_disk(name="user0") if check_clickhouse_version(">=21.09")(self) else errors.role_not_found_in_disk(name="role0") - node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message) + exitcode, message = ( + errors.role_not_found_in_disk(name="user0") + if check_clickhouse_version(">=21.09")(self) + else errors.role_not_found_in_disk(name="role0") + ) + node.query( + "REVOKE role0 FROM user0", exitcode=exitcode, message=message + ) - with Scenario("I revoke a role from multiple users", requirements=[ - RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with Scenario( + "I revoke a role from multiple users", + requirements=[RQ_SRS_006_RBAC_Revoke_Role("1.0")], + ): with setup(): with When("I revoke a role from multiple users"): node.query("REVOKE role0 FROM user0, user1") - with Scenario("I revoke multiple roles from multiple users", requirements=[ - RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with Scenario( + "I revoke multiple roles from multiple users", + requirements=[RQ_SRS_006_RBAC_Revoke_Role("1.0")], + ): with setup(): node.query("REVOKE role0, role1 FROM user0, user1") - #user is default, expect exception - with Scenario("I revoke a role from default user", requirements=[ + # user is default, expect exception + with Scenario( + "I revoke a role from default user", + requirements=[ RQ_SRS_006_RBAC_Revoke_Role("1.0"), - RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0"), + ], + ): with setup(): with When("I revoke a role from default user"): exitcode, message = errors.cannot_update_default() - node.query("REVOKE role0 FROM CURRENT_USER", exitcode=exitcode, message=message) + node.query( + "REVOKE role0 FROM CURRENT_USER", exitcode=exitcode, message=message + ) - #user is user0 - with Scenario("I revoke a role from current user", requirements=[ + # user is user0 + with Scenario( + "I revoke a role from current user", + requirements=[ RQ_SRS_006_RBAC_Revoke_Role("1.0"), - 
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0"), + ], + ): with setup(): with When("I revoke a role from current user"): - node.query("REVOKE role0 FROM CURRENT_USER", settings = [("user","user0")]) + node.query( + "REVOKE role0 FROM CURRENT_USER", settings=[("user", "user0")] + ) - #user is default, expect exception - with Scenario("I revoke a role from all", requirements=[ + # user is default, expect exception + with Scenario( + "I revoke a role from all", + requirements=[ RQ_SRS_006_RBAC_Revoke_Role("1.0"), - RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0"), + ], + ): with setup(): with When("I revoke a role from all"): exitcode, message = errors.cannot_update_default() node.query("REVOKE role0 FROM ALL", exitcode=exitcode, message=message) - #user is default, expect exception - with Scenario("I revoke multiple roles from all", requirements=[ + # user is default, expect exception + with Scenario( + "I revoke multiple roles from all", + requirements=[ RQ_SRS_006_RBAC_Revoke_Role("1.0"), - RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0"), + ], + ): with setup(): with When("I revoke multiple roles from all"): exitcode, message = errors.cannot_update_default() - node.query("REVOKE role0, role1 FROM ALL", exitcode=exitcode, message=message) + node.query( + "REVOKE role0, role1 FROM ALL", exitcode=exitcode, message=message + ) - with Scenario("I revoke a role from all but current user", requirements=[ + with Scenario( + "I revoke a role from all but current user", + requirements=[ RQ_SRS_006_RBAC_Revoke_Role("1.0"), - RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0"), + ], + ): with setup(): with When("I revoke a role from all except current"): node.query("REVOKE role0 FROM ALL EXCEPT CURRENT_USER") - with Scenario("I revoke a role from all but default user", requirements=[ + with Scenario( + "I revoke a role from all but default user", + requirements=[ RQ_SRS_006_RBAC_Revoke_Role("1.0"), - RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0"), + ], + ): with setup(): with When("I revoke a role from all except default"): - node.query("REVOKE role0 FROM ALL EXCEPT default", - settings = [("user","user0")]) + node.query( + "REVOKE role0 FROM ALL EXCEPT default", settings=[("user", "user0")] + ) - with Scenario("I revoke multiple roles from all but default user", requirements=[ + with Scenario( + "I revoke multiple roles from all but default user", + requirements=[ RQ_SRS_006_RBAC_Revoke_Role("1.0"), - RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]): + RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0"), + ], + ): with setup(): with When("I revoke multiple roles from all except default"): - node.query("REVOKE role0, role1 FROM ALL EXCEPT default", settings = [("user","user0")]) + node.query( + "REVOKE role0, role1 FROM ALL EXCEPT default", + settings=[("user", "user0")], + ) - with Scenario("I revoke a role from a role", requirements=[ - RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with Scenario( + "I revoke a role from a role", requirements=[RQ_SRS_006_RBAC_Revoke_Role("1.0")] + ): with setup(): with When("I revoke a role from a role"): node.query("REVOKE role0 FROM role1") - with Scenario("I revoke a role from a role and a user", requirements=[ - RQ_SRS_006_RBAC_Revoke_Role("1.0")]): + with Scenario( + "I revoke a role from a role and a user", + requirements=[RQ_SRS_006_RBAC_Revoke_Role("1.0")], + 
): with setup(): with When("I revoke a role from multiple roles"): node.query("REVOKE role0 FROM role1, user0") - with Scenario("I revoke a role from a user on cluster", requirements=[ - RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]): + with Scenario( + "I revoke a role from a user on cluster", + requirements=[RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")], + ): with Given("I have a role and a user on a cluster"): node.query("CREATE USER OR REPLACE user0 ON CLUSTER sharded_cluster") node.query("CREATE ROLE OR REPLACE role0 ON CLUSTER sharded_cluster") @@ -165,41 +236,59 @@ def feature(self, node="clickhouse1"): node.query("DROP USER IF EXISTS user0 ON CLUSTER sharded_cluster") node.query("DROP ROLE IF EXISTS role0 ON CLUSTER sharded_cluster") - with Scenario("I revoke a role on fake cluster, throws exception", requirements=[ - RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]): + with Scenario( + "I revoke a role on fake cluster, throws exception", + requirements=[RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")], + ): with Given("I have a role and a user on a cluster"): node.query("CREATE USER OR REPLACE user0") node.query("CREATE ROLE OR REPLACE role0") with When("I revoke a role from user on a cluster"): exitcode, message = errors.cluster_not_found("fake_cluster") - node.query("REVOKE ON CLUSTER fake_cluster role0 FROM user0", exitcode=exitcode, message=message) + node.query( + "REVOKE ON CLUSTER fake_cluster role0 FROM user0", + exitcode=exitcode, + message=message, + ) with Finally("I drop the user and role"): node.query("DROP USER IF EXISTS user0") node.query("DROP ROLE IF EXISTS role0") - with Scenario("I revoke multiple roles from multiple users on cluster", requirements=[ + with Scenario( + "I revoke multiple roles from multiple users on cluster", + requirements=[ RQ_SRS_006_RBAC_Revoke_Role("1.0"), - RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]): + RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0"), + ], + ): with Given("I have multiple roles and multiple users on a cluster"): for i in range(2): node.query(f"CREATE USER OR REPLACE user{i} ON CLUSTER sharded_cluster") node.query(f"CREATE ROLE OR REPLACE role{i} ON CLUSTER sharded_cluster") with When("I revoke multiple roles from multiple users on cluster"): - node.query("REVOKE ON CLUSTER sharded_cluster role0, role1 FROM user0, user1") + node.query( + "REVOKE ON CLUSTER sharded_cluster role0, role1 FROM user0, user1" + ) with Finally("I drop the roles and users"): for i in range(2): node.query(f"DROP USER IF EXISTS user{i} ON CLUSTER sharded_cluster") node.query(f"DROP ROLE IF EXISTS role{i} ON CLUSTER sharded_cluster") - with Scenario("I revoke admin option for role from a user", requirements=[ - RQ_SRS_006_RBAC_Revoke_AdminOption("1.0")]): + with Scenario( + "I revoke admin option for role from a user", + requirements=[RQ_SRS_006_RBAC_Revoke_AdminOption("1.0")], + ): with setup(): with When("I revoke admin option for role from a user"): node.query("REVOKE ADMIN OPTION FOR role0 FROM user0") - with Scenario("I revoke admin option for multiple roles from multiple users", requirements=[ + with Scenario( + "I revoke admin option for multiple roles from multiple users", + requirements=[ RQ_SRS_006_RBAC_Revoke_Role("1.0"), - RQ_SRS_006_RBAC_Revoke_AdminOption("1.0")]): + RQ_SRS_006_RBAC_Revoke_AdminOption("1.0"), + ], + ): with setup(): with When("I revoke admin option for multiple roles from multiple users"): node.query("REVOKE ADMIN OPTION FOR role0, role1 FROM user0, user1") diff --git a/tests/testflows/rbac/tests/syntax/set_default_role.py 
b/tests/testflows/rbac/tests/syntax/set_default_role.py index ed50810eba7..6fec27dea61 100755 --- a/tests/testflows/rbac/tests/syntax/set_default_role.py +++ b/tests/testflows/rbac/tests/syntax/set_default_role.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("set default role") @Args(format_description=False) @@ -18,7 +19,7 @@ def feature(self, node="clickhouse1"): node = self.context.cluster.node(node) @contextmanager - def setup(users=2,roles=2): + def setup(users=2, roles=2): try: with Given("I have some users"): for i in range(users): @@ -35,34 +36,58 @@ def feature(self, node="clickhouse1"): for i in range(roles): node.query(f"DROP ROLE IF EXISTS role{i}") - with Scenario("I set default a nonexistent role to user", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole("1.0")]): - with setup(1,0): + with Scenario( + "I set default a nonexistent role to user", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole("1.0")], + ): + with setup(1, 0): with When("I set default nonexistent role to a user"): exitcode, message = errors.role_not_found_in_disk(name="role0") - node.query("SET DEFAULT ROLE role0 TO user0", exitcode=exitcode, message=message) + node.query( + "SET DEFAULT ROLE role0 TO user0", + exitcode=exitcode, + message=message, + ) - with Scenario("I set default ALL EXCEPT a nonexistent role to user", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole("1.0")]): - with setup(1,0): + with Scenario( + "I set default ALL EXCEPT a nonexistent role to user", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole("1.0")], + ): + with setup(1, 0): with When("I set default nonexistent role to a user"): exitcode, message = errors.role_not_found_in_disk(name="role0") - node.query("SET DEFAULT ROLE ALL EXCEPT role0 TO user0", exitcode=exitcode, message=message) + node.query( + "SET DEFAULT ROLE ALL EXCEPT role0 TO user0", + exitcode=exitcode, + message=message, + ) - with Scenario("I set default a role to a nonexistent user", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole("1.0")]): - with setup(0,1): + with Scenario( + "I set default a role to a nonexistent user", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole("1.0")], + ): + with setup(0, 1): with When("I set default role to a nonexistent user"): exitcode, message = errors.user_not_found_in_disk(name="user0") - node.query("SET DEFAULT ROLE role0 TO user0", exitcode=exitcode, message=message) + node.query( + "SET DEFAULT ROLE role0 TO user0", + exitcode=exitcode, + message=message, + ) - #in SET DEFAULT ROLE, the nonexistent user is noticed first and becomes the thrown exception - with Scenario("I set default a nonexistent role to a nonexistent user", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole("1.0")]): - with setup(0,0): + # in SET DEFAULT ROLE, the nonexistent user is noticed first and becomes the thrown exception + with Scenario( + "I set default a nonexistent role to a nonexistent user", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole("1.0")], + ): + with setup(0, 0): with When("I set default nonexistent role to a nonexistent user"): exitcode, message = errors.user_not_found_in_disk(name="user0") - node.query("SET DEFAULT ROLE role0 TO user0", exitcode=exitcode, message=message) + node.query( + "SET DEFAULT ROLE role0 TO user0", + exitcode=exitcode, + message=message, + ) try: with Given("I have some roles and some users"): @@ -71,47 +96,70 @@ def feature(self, node="clickhouse1"): node.query(f"CREATE USER user{i}") node.query(f"GRANT role0, role1 TO user0, user1") - 
with Scenario("I set default role for a user to none", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole_None("1.0")]): + with Scenario( + "I set default role for a user to none", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole_None("1.0")], + ): with When("I set no roles default for user"): node.query("SET DEFAULT ROLE NONE TO user0") - with Scenario("I set one default role for a user", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole("1.0")]): + with Scenario( + "I set one default role for a user", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole("1.0")], + ): with When("I set a default role for user "): node.query("SET DEFAULT ROLE role0 TO user0") - with Scenario("I set one default role for user default, throws exception", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole("1.0")]): + with Scenario( + "I set one default role for user default, throws exception", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole("1.0")], + ): with When("I set a default role for default"): exitcode, message = errors.cannot_update_default() - node.query("SET DEFAULT ROLE role0 TO default", exitcode=exitcode, message=message) + node.query( + "SET DEFAULT ROLE role0 TO default", + exitcode=exitcode, + message=message, + ) - with Scenario("I set multiple default roles for a user", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole("1.0")]): + with Scenario( + "I set multiple default roles for a user", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole("1.0")], + ): with When("I set multiple default roles to user"): node.query("SET DEFAULT ROLE role0, role1 TO user0") - with Scenario("I set multiple default roles for multiple users", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole("1.0")]): + with Scenario( + "I set multiple default roles for multiple users", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole("1.0")], + ): with When("I set multiple default roles to multiple users"): node.query("SET DEFAULT ROLE role0, role1 TO user0, user1") - with Scenario("I set all roles as default for a user", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole_All("1.0")]): + with Scenario( + "I set all roles as default for a user", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole_All("1.0")], + ): with When("I set all roles default to user"): node.query("SET DEFAULT ROLE ALL TO user0") - with Scenario("I set all roles except one for a user", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole_AllExcept("1.0")]): + with Scenario( + "I set all roles except one for a user", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole_AllExcept("1.0")], + ): with When("I set all except one role default to user"): node.query("SET DEFAULT ROLE ALL EXCEPT role0 TO user0") - with Scenario("I set default role for current user", requirements=[ - RQ_SRS_006_RBAC_SetDefaultRole_CurrentUser("1.0")]): + with Scenario( + "I set default role for current user", + requirements=[RQ_SRS_006_RBAC_SetDefaultRole_CurrentUser("1.0")], + ): with When("I set default role to current user"): node.query("GRANT ACCESS MANAGEMENT ON *.* TO user0") - node.query("SET DEFAULT ROLE role0 TO CURRENT_USER", settings = [("user","user0")]) + node.query( + "SET DEFAULT ROLE role0 TO CURRENT_USER", + settings=[("user", "user0")], + ) finally: with Finally("I drop the roles and users"): diff --git a/tests/testflows/rbac/tests/syntax/set_role.py b/tests/testflows/rbac/tests/syntax/set_role.py index 3d3d4d00fac..bcf8db96ea7 100755 --- a/tests/testflows/rbac/tests/syntax/set_role.py +++ b/tests/testflows/rbac/tests/syntax/set_role.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as 
errors from rbac.requirements import * + @TestFeature @Name("set role") @Args(format_description=False) @@ -29,63 +30,78 @@ def feature(self, node="clickhouse1"): for i in range(roles): node.query(f"DROP ROLE IF EXISTS role{i}") - with Scenario("I set default role for current user", requirements=[ - RQ_SRS_006_RBAC_SetRole_Default("1.0")]): + with Scenario( + "I set default role for current user", + requirements=[RQ_SRS_006_RBAC_SetRole_Default("1.0")], + ): with When("I set default role for current user"): node.query("SET ROLE DEFAULT") - with Scenario("I set no role for current user", requirements=[ - RQ_SRS_006_RBAC_SetRole_None("1.0")]): - with When("I set no role for current user"): - node.query("SET ROLE NONE") + with Scenario( + "I set no role for current user", + requirements=[RQ_SRS_006_RBAC_SetRole_None("1.0")], + ): + with When("I set no role for current user"): + node.query("SET ROLE NONE") - with Scenario("I set nonexistent role, throws exception", requirements=[ - RQ_SRS_006_RBAC_SetRole_None("1.0")]): - with Given("I ensure that role role5 does not exist"): - node.query("DROP ROLE IF EXISTS role5") - with When("I set nonexistent role for current user"): - exitcode, message = errors.role_not_found_in_disk("role5") - node.query("SET ROLE role5", exitcode=exitcode, message=message) + with Scenario( + "I set nonexistent role, throws exception", + requirements=[RQ_SRS_006_RBAC_SetRole_None("1.0")], + ): + with Given("I ensure that role role5 does not exist"): + node.query("DROP ROLE IF EXISTS role5") + with When("I set nonexistent role for current user"): + exitcode, message = errors.role_not_found_in_disk("role5") + node.query("SET ROLE role5", exitcode=exitcode, message=message) - with Scenario("I set nonexistent role, throws exception", requirements=[ - RQ_SRS_006_RBAC_SetRole_None("1.0")]): - with Given("I ensure that role role5 does not exist"): - node.query("DROP ROLE IF EXISTS role5") - with When("I set nonexistent role for current user"): - exitcode, message = errors.role_not_found_in_disk("role5") - node.query("SET ROLE ALL EXCEPT role5", exitcode=exitcode, message=message) + with Scenario( + "I set nonexistent role, throws exception", + requirements=[RQ_SRS_006_RBAC_SetRole_None("1.0")], + ): + with Given("I ensure that role role5 does not exist"): + node.query("DROP ROLE IF EXISTS role5") + with When("I set nonexistent role for current user"): + exitcode, message = errors.role_not_found_in_disk("role5") + node.query("SET ROLE ALL EXCEPT role5", exitcode=exitcode, message=message) - with Scenario("I set one role for current user", requirements=[ - RQ_SRS_006_RBAC_SetRole("1.0")]): + with Scenario( + "I set one role for current user", requirements=[RQ_SRS_006_RBAC_SetRole("1.0")] + ): with setup(1): with Given("I have a user"): node.query("CREATE USER OR REPLACE user0") with And("I grant user a role"): node.query("GRANT role0 TO user0") with When("I set role for the user"): - node.query("SET ROLE role0", settings = [("user","user0")]) + node.query("SET ROLE role0", settings=[("user", "user0")]) with Finally("I drop the user"): node.query("DROP USER user0") - with Scenario("I set multiple roles for current user", requirements=[ - RQ_SRS_006_RBAC_SetRole("1.0")]): + with Scenario( + "I set multiple roles for current user", + requirements=[RQ_SRS_006_RBAC_SetRole("1.0")], + ): with setup(2): with Given("I have a user"): node.query("CREATE USER OR REPLACE user0") with And("I grant user a role"): node.query("GRANT role0, role1 TO user0") with When("I set roles for the user"): 
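# A standalone sketch (not from this patch): the calls in these syntax tests all
# follow one pattern -- run a statement as a specific user via
# node.query(..., settings=[("user", ...)]) and, for negative cases, check an
# expected exit code and error-message fragment from rbac.helper.errors.
# A minimal stand-in using clickhouse-client; the helper name, host and the
# expect_* parameters are assumptions, not the testflows API.
import subprocess

def run_as_user(sql, user="default", host="localhost",
                expect_exitcode=0, expect_message=None):
    """Run `sql` as `user` through clickhouse-client and verify the outcome."""
    proc = subprocess.run(
        ["clickhouse-client", "--host", host, "--user", user, "--query", sql],
        capture_output=True, text=True,
    )
    assert proc.returncode == expect_exitcode, (proc.returncode, proc.stderr)
    if expect_message is not None:
        assert expect_message in proc.stderr, proc.stderr
    return proc.stdout.strip()

# usage (positive case only; the concrete exit code / message pairs for the
# negative cases come from rbac.helper.errors in the real tests):
# run_as_user("SET ROLE DEFAULT", user="user0")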
- node.query("SET ROLE role0, role1", settings = [("user","user0")]) + node.query("SET ROLE role0, role1", settings=[("user", "user0")]) with Finally("I drop the user"): node.query("DROP USER user0") - with Scenario("I set all roles for current user", requirements=[ - RQ_SRS_006_RBAC_SetRole_All("1.0")]): + with Scenario( + "I set all roles for current user", + requirements=[RQ_SRS_006_RBAC_SetRole_All("1.0")], + ): with When("I set all roles for current user"): node.query("SET ROLE ALL") - with Scenario("I set all roles except one for current user", requirements=[ - RQ_SRS_006_RBAC_SetRole_AllExcept("1.0")]): + with Scenario( + "I set all roles except one for current user", + requirements=[RQ_SRS_006_RBAC_SetRole_AllExcept("1.0")], + ): with setup(1): with When("I run set role command"): - node.query("SET ROLE ALL EXCEPT role0") \ No newline at end of file + node.query("SET ROLE ALL EXCEPT role0") diff --git a/tests/testflows/rbac/tests/syntax/show_create_quota.py b/tests/testflows/rbac/tests/syntax/show_create_quota.py index f29b3f5bcc6..632e34f760c 100755 --- a/tests/testflows/rbac/tests/syntax/show_create_quota.py +++ b/tests/testflows/rbac/tests/syntax/show_create_quota.py @@ -4,6 +4,7 @@ from testflows.core import * from rbac.requirements import * + @TestFeature @Name("show create quota") def feature(self, node="clickhouse1"): @@ -25,20 +26,26 @@ def feature(self, node="clickhouse1"): with Finally("I drop the quota"): node.query(f"DROP QUOTA IF EXISTS {quota}") - with Scenario("I show create quota", requirements=[ - RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Name("1.0")]): + with Scenario( + "I show create quota", + requirements=[RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Name("1.0")], + ): with cleanup("quota0"): with When("I run show create quota command"): node.query("SHOW CREATE QUOTA quota0") - with Scenario("I show create quota current", requirements=[ - RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Current("1.0")]): + with Scenario( + "I show create quota current", + requirements=[RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Current("1.0")], + ): with cleanup("quota1"): with When("I run show create quota command"): node.query("SHOW CREATE QUOTA CURRENT") - with Scenario("I show create quota current short form", requirements=[ - RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Current("1.0")]): + with Scenario( + "I show create quota current short form", + requirements=[RQ_SRS_006_RBAC_Quota_ShowCreateQuota_Current("1.0")], + ): with cleanup("quota2"): with When("I run show create quota command"): node.query("SHOW CREATE QUOTA") diff --git a/tests/testflows/rbac/tests/syntax/show_create_role.py b/tests/testflows/rbac/tests/syntax/show_create_role.py index 0b2adba96e2..f587285d85e 100755 --- a/tests/testflows/rbac/tests/syntax/show_create_role.py +++ b/tests/testflows/rbac/tests/syntax/show_create_role.py @@ -5,6 +5,7 @@ from testflows.core import * import rbac.helper.errors as errors from rbac.requirements import * + @TestFeature @Name("show create role") def feature(self, node="clickhouse1"): @@ -26,14 +27,17 @@ def feature(self, node="clickhouse1"): with Finally("I drop the role"): node.query(f"DROP ROLE IF EXISTS {role}") - with Scenario("I show create role", requirements=[ - RQ_SRS_006_RBAC_Role_ShowCreate("1.0")]): + with Scenario( + "I show create role", requirements=[RQ_SRS_006_RBAC_Role_ShowCreate("1.0")] + ): with setup("role0"): with When("I run show create role command"): node.query("SHOW CREATE ROLE role0") - with Scenario("I show create role, role doesn't exist, exception", requirements=[ - 
RQ_SRS_006_RBAC_Role_ShowCreate("1.0")]): + with Scenario( + "I show create role, role doesn't exist, exception", + requirements=[RQ_SRS_006_RBAC_Role_ShowCreate("1.0")], + ): with When("I run show create role to catch an exception"): exitcode, message = errors.role_not_found_in_disk(name="role0") - node.query("SHOW CREATE ROLE role0", exitcode=exitcode, message=message) \ No newline at end of file + node.query("SHOW CREATE ROLE role0", exitcode=exitcode, message=message) diff --git a/tests/testflows/rbac/tests/syntax/show_create_row_policy.py b/tests/testflows/rbac/tests/syntax/show_create_row_policy.py index cf43c0f2b41..70fd37ac58b 100755 --- a/tests/testflows/rbac/tests/syntax/show_create_row_policy.py +++ b/tests/testflows/rbac/tests/syntax/show_create_row_policy.py @@ -4,6 +4,7 @@ from testflows.core import * from rbac.requirements import * + @TestFeature @Name("show create row policy") def feature(self, node="clickhouse1"): @@ -29,21 +30,27 @@ def feature(self, node="clickhouse1"): with Given("I have a table"): node.query(f"CREATE TABLE default.foo (x UInt64, y String) Engine=Memory") - with Scenario("I show create row policy", requirements=[ - RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy("1.0")]): + with Scenario( + "I show create row policy", + requirements=[RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy("1.0")], + ): with cleanup("policy0"): with When("I run show create row policy command"): node.query("SHOW CREATE ROW POLICY policy0 ON default.foo") - with Scenario("I show create row policy on a table", requirements=[ - RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_On("1.0")]): + with Scenario( + "I show create row policy on a table", + requirements=[RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_On("1.0")], + ): with cleanup("policy0"): with When("I run show create row policy command"): node.query("SHOW CREATE ROW POLICY policy0 ON default.foo") - with Scenario("I show create row policy using short syntax on a table", requirements=[ - RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_On("1.0")]): - with cleanup("policy1",on="foo"): + with Scenario( + "I show create row policy using short syntax on a table", + requirements=[RQ_SRS_006_RBAC_RowPolicy_ShowCreateRowPolicy_On("1.0")], + ): + with cleanup("policy1", on="foo"): with When("I run show create row policy command"): node.query("SHOW CREATE POLICY policy1 ON foo") finally: diff --git a/tests/testflows/rbac/tests/syntax/show_create_settings_profile.py b/tests/testflows/rbac/tests/syntax/show_create_settings_profile.py index 4af4e37951a..a4361b3531a 100755 --- a/tests/testflows/rbac/tests/syntax/show_create_settings_profile.py +++ b/tests/testflows/rbac/tests/syntax/show_create_settings_profile.py @@ -4,6 +4,7 @@ from testflows.core import * from rbac.requirements import * + @TestFeature @Name("show create settings profile") def feature(self, node="clickhouse1"): @@ -25,14 +26,18 @@ def feature(self, node="clickhouse1"): with Finally("I drop the settings profile"): node.query(f"DROP SETTINGS PROFILE IF EXISTS {profile}") - with Scenario("I show create settings profile", requirements=[ - RQ_SRS_006_RBAC_SettingsProfile_ShowCreateSettingsProfile("1.0")]): + with Scenario( + "I show create settings profile", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_ShowCreateSettingsProfile("1.0")], + ): with cleanup("profile0"): with When("I run show create settings profile command"): node.query("SHOW CREATE SETTINGS PROFILE profile0") - with Scenario("I show create settings profile short form", requirements=[ - 
RQ_SRS_006_RBAC_SettingsProfile_ShowCreateSettingsProfile("1.0")]): + with Scenario( + "I show create settings profile short form", + requirements=[RQ_SRS_006_RBAC_SettingsProfile_ShowCreateSettingsProfile("1.0")], + ): with cleanup("profile1"): with When("I run show create settings profile command"): node.query("SHOW CREATE PROFILE profile1") diff --git a/tests/testflows/rbac/tests/syntax/show_create_user.py b/tests/testflows/rbac/tests/syntax/show_create_user.py index 963e0d5d193..99da205b1a7 100755 --- a/tests/testflows/rbac/tests/syntax/show_create_user.py +++ b/tests/testflows/rbac/tests/syntax/show_create_user.py @@ -4,6 +4,7 @@ from testflows.core import * from rbac.requirements import * + @TestFeature @Name("show create user") def feature(self, node="clickhouse1"): @@ -25,13 +26,17 @@ def feature(self, node="clickhouse1"): with Finally("I drop the user"): node.query(f"DROP USER IF EXISTS {user}") - with Scenario("I run show create on user with no options", requirements=[ - RQ_SRS_006_RBAC_User_ShowCreateUser_For("1.0")]): + with Scenario( + "I run show create on user with no options", + requirements=[RQ_SRS_006_RBAC_User_ShowCreateUser_For("1.0")], + ): with setup("user0"): with When("I run show create user command"): node.query("SHOW CREATE USER user0") - with Scenario("I run show create on current user", requirements=[ - RQ_SRS_006_RBAC_User_ShowCreateUser("1.0")]): + with Scenario( + "I run show create on current user", + requirements=[RQ_SRS_006_RBAC_User_ShowCreateUser("1.0")], + ): with When("I show create the current user"): - node.query("SHOW CREATE USER CURRENT_USER") \ No newline at end of file + node.query("SHOW CREATE USER CURRENT_USER") diff --git a/tests/testflows/rbac/tests/syntax/show_grants.py b/tests/testflows/rbac/tests/syntax/show_grants.py index 18165ba98a5..256f8aef527 100755 --- a/tests/testflows/rbac/tests/syntax/show_grants.py +++ b/tests/testflows/rbac/tests/syntax/show_grants.py @@ -4,6 +4,7 @@ from testflows.core import * from rbac.requirements import * + @TestFeature @Name("show grants") def feature(self, node="clickhouse1"): @@ -25,13 +26,16 @@ def feature(self, node="clickhouse1"): with Finally("I drop the user"): node.query(f"DROP USER IF EXISTS {user}") - with Scenario("I show grants for user", requirements=[ - RQ_SRS_006_RBAC_Show_Grants_For("1.0")]): + with Scenario( + "I show grants for user", requirements=[RQ_SRS_006_RBAC_Show_Grants_For("1.0")] + ): with setup("user0"): with When("I run show grants command"): node.query("SHOW GRANTS FOR user0") - with Scenario("I show grants for current user", requirements=[ - RQ_SRS_006_RBAC_Show_Grants("1.0")]): + with Scenario( + "I show grants for current user", + requirements=[RQ_SRS_006_RBAC_Show_Grants("1.0")], + ): with When("I show grants"): - node.query("SHOW GRANTS") \ No newline at end of file + node.query("SHOW GRANTS") diff --git a/tests/testflows/rbac/tests/syntax/show_quotas.py b/tests/testflows/rbac/tests/syntax/show_quotas.py index 5fbae718a29..ec6b6edacec 100755 --- a/tests/testflows/rbac/tests/syntax/show_quotas.py +++ b/tests/testflows/rbac/tests/syntax/show_quotas.py @@ -4,6 +4,7 @@ from testflows.core import * from rbac.requirements import * + @TestFeature @Name("show quotas") def feature(self, node="clickhouse1"): @@ -25,26 +26,33 @@ def feature(self, node="clickhouse1"): with Finally("I drop the quota"): node.query(f"DROP QUOTA IF EXISTS {quota}") - with Scenario("I show quotas", requirements=[ - RQ_SRS_006_RBAC_Quota_ShowQuotas("1.0")]): + with Scenario( + "I show quotas", 
requirements=[RQ_SRS_006_RBAC_Quota_ShowQuotas("1.0")] + ): with cleanup("quota0"), cleanup("quota1"): with When("I run show quota command"): node.query("SHOW QUOTAS") - with Scenario("I show quotas into outfile", requirements=[ - RQ_SRS_006_RBAC_Quota_ShowQuotas_IntoOutfile("1.0")]): + with Scenario( + "I show quotas into outfile", + requirements=[RQ_SRS_006_RBAC_Quota_ShowQuotas_IntoOutfile("1.0")], + ): with cleanup("quota0"), cleanup("quota1"): with When("I run show quota command"): node.query("SHOW QUOTAS INTO OUTFILE 'quotas.txt'") - with Scenario("I show quotas with format", requirements=[ - RQ_SRS_006_RBAC_Quota_ShowQuotas_Format("1.0")]): + with Scenario( + "I show quotas with format", + requirements=[RQ_SRS_006_RBAC_Quota_ShowQuotas_Format("1.0")], + ): with cleanup("quota0"), cleanup("quota1"): with When("I run show quota command"): node.query("SHOW QUOTAS FORMAT TabSeparated") - with Scenario("I show quotas with settings", requirements=[ - RQ_SRS_006_RBAC_Quota_ShowQuotas("1.0")]): + with Scenario( + "I show quotas with settings", + requirements=[RQ_SRS_006_RBAC_Quota_ShowQuotas("1.0")], + ): with cleanup("quota0"), cleanup("quota1"): with When("I run show quota command"): node.query("SHOW QUOTAS SETTINGS max_memory_usage=5") diff --git a/tests/testflows/rbac/tests/syntax/show_row_policies.py b/tests/testflows/rbac/tests/syntax/show_row_policies.py index 0dc7f7f1d1a..81d59bd914b 100755 --- a/tests/testflows/rbac/tests/syntax/show_row_policies.py +++ b/tests/testflows/rbac/tests/syntax/show_row_policies.py @@ -4,6 +4,7 @@ from testflows.core import * from rbac.requirements import * + @TestFeature @Name("show row policies") def feature(self, node="clickhouse1"): @@ -29,26 +30,34 @@ def feature(self, node="clickhouse1"): with Given("I have a table"): node.query(f"CREATE TABLE default.foo (x UInt64, y String) Engine=Memory") - with Scenario("I show row policies", requirements=[ - RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies("1.0")]): + with Scenario( + "I show row policies", + requirements=[RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies("1.0")], + ): with cleanup("policy0"): with When("I run drop row policy command"): node.query("SHOW ROW POLICIES") - with Scenario("I show row policies using short syntax", requirements=[ - RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies("1.0")]): + with Scenario( + "I show row policies using short syntax", + requirements=[RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies("1.0")], + ): with cleanup("policy1"): with When("I run drop row policy command"): node.query("SHOW POLICIES") - with Scenario("I show row policies on a database table", requirements=[ - RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_On("1.0")]): + with Scenario( + "I show row policies on a database table", + requirements=[RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_On("1.0")], + ): with cleanup("policy0"): with When("I run drop row policy command"): node.query("SHOW ROW POLICIES ON default.foo") - with Scenario("I show row policies on a table", requirements=[ - RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_On("1.0")]): + with Scenario( + "I show row policies on a table", + requirements=[RQ_SRS_006_RBAC_RowPolicy_ShowRowPolicies_On("1.0")], + ): with cleanup("policy0"): with When("I run drop row policy command"): node.query("SHOW ROW POLICIES ON foo") diff --git a/tests/testflows/rbac/tests/views/feature.py b/tests/testflows/rbac/tests/views/feature.py index 67f0dadb862..4595339a4a6 100755 --- a/tests/testflows/rbac/tests/views/feature.py +++ b/tests/testflows/rbac/tests/views/feature.py @@ -2,14 +2,27 @@ from 
testflows.core import * from rbac.helper.common import * + @TestFeature @Name("views") def feature(self): with Pool(3) as pool: try: - Feature(test=load("rbac.tests.views.view", "feature"), parallel=True, executor=pool) - Feature(test=load("rbac.tests.views.live_view", "feature"), parallel=True, executor=pool) - Feature(test=load("rbac.tests.views.materialized_view", "feature"), parallel=True, executor=pool) + Feature( + test=load("rbac.tests.views.view", "feature"), + parallel=True, + executor=pool, + ) + Feature( + test=load("rbac.tests.views.live_view", "feature"), + parallel=True, + executor=pool, + ) + Feature( + test=load("rbac.tests.views.materialized_view", "feature"), + parallel=True, + executor=pool, + ) finally: join() diff --git a/tests/testflows/rbac/tests/views/live_view.py b/tests/testflows/rbac/tests/views/live_view.py index edda654d949..d4ee8f13c61 100755 --- a/tests/testflows/rbac/tests/views/live_view.py +++ b/tests/testflows/rbac/tests/views/live_view.py @@ -5,6 +5,7 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @contextmanager def allow_experimental_live_view(node): setting = ("allow_experimental_live_view", 1) @@ -12,38 +13,68 @@ def allow_experimental_live_view(node): try: with Given("I add allow_experimental_live_view to the default query settings"): - default_query_settings = getsattr(current().context, "default_query_settings", []) + default_query_settings = getsattr( + current().context, "default_query_settings", [] + ) default_query_settings.append(setting) yield finally: - with Finally("I remove allow_experimental_live_view from the default query settings"): + with Finally( + "I remove allow_experimental_live_view from the default query settings" + ): if default_query_settings: try: default_query_settings.pop(default_query_settings.index(setting)) except ValueError: pass + @TestSuite @Requirements( RQ_SRS_006_RBAC_LiveView_Create("1.0"), ) def create(self, node=None): - """Test the RBAC functionality of the `CREATE LIVE VIEW` command. 
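# A standalone sketch (not from this patch): the allow_experimental_live_view
# context manager above appends a setting to the context's default query
# settings and removes it again on exit. The same add/remove-on-exit pattern,
# simplified to a plain list instead of the testflows context object:
from contextlib import contextmanager

@contextmanager
def temporary_setting(default_query_settings, name, value):
    setting = (name, value)
    default_query_settings.append(setting)
    try:
        yield default_query_settings
    finally:
        try:
            default_query_settings.remove(setting)
        except ValueError:
            pass  # already removed elsewhere; nothing left to clean up

# usage:
settings = []
with temporary_setting(settings, "allow_experimental_live_view", 1):
    assert ("allow_experimental_live_view", 1) in settings
assert settings == []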
- """ - Scenario(run=create_without_create_view_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_create_view_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_revoked_create_view_privilege_revoked_directly_or_from_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_without_source_table_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_source_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_join_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_join_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_nested_views_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) + """Test the RBAC functionality of the `CREATE LIVE VIEW` command.""" + Scenario( + run=create_without_create_view_privilege, setup=instrument_clickhouse_server_log + ) + Scenario( + run=create_with_create_view_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_revoked_create_view_privilege_revoked_directly_or_from_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_without_source_table_privilege, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_source_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_join_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_join_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_nested_views_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario def create_without_create_view_privilege(self, node=None): - """Check that user is unable to create a live view without CREATE VIEW privilege. - """ + """Check that user is unable to create a live view without CREATE VIEW privilege.""" user_name = f"user_{getuid()}" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -51,34 +82,44 @@ def create_without_create_view_privilege(self, node=None): if node is None: node = self.context.node with user(node, f"{user_name}"): - with When("I try to create a live view without CREATE VIEW privilege as the user"): + with When( + "I try to create a live view without CREATE VIEW privilege as the user" + ): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT 1", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT 1", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestScenario def create_with_create_view_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to create a live view with CREATE VIEW privilege, either granted directly or through a role. 
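# A standalone sketch (not from this patch): the *_granted_directly_or_via_role
# scenarios below all share one shape -- run the same outline twice, once with
# the privilege granted straight to the user and once granted to a role the
# user holds. A simplified version of that dispatch; run_query and the object
# names are assumptions, not the real outlines.
def check_create_live_view(run_query, grant_target, user, view="view_x"):
    # grant the privilege to either the user or the role, then create as the user
    run_query(f"GRANT CREATE VIEW ON {view} TO {grant_target}")
    run_query(f"CREATE LIVE VIEW {view} AS SELECT 1", user=user)
    run_query(f"DROP VIEW IF EXISTS {view}")

def check_directly_and_via_role(run_query, user="user0", role="role0"):
    # 1) privilege granted directly to the user
    check_create_live_view(run_query, grant_target=user, user=user)
    # 2) privilege granted to a role, and the role granted to the user
    run_query(f"GRANT {role} TO {user}")
    check_create_live_view(run_query, grant_target=role, user=user)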
- """ + """Check that user is able to create a live view with CREATE VIEW privilege, either granted directly or through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_create_view_privilege, - name="create with create view privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_create_view_privilege, + name="create with create view privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_create_view_privilege, - name="create with create view privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_create_view_privilege, + name="create with create view privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_create_view_privilege(self, grant_target_name, user_name, node=None): - """Check that user is able to create a live view with the granted privileges. - """ + """Check that user is able to create a live view with the granted privileges.""" view_name = f"view_{getuid()}" if node is None: @@ -89,35 +130,46 @@ def create_with_create_view_privilege(self, grant_target_name, user_name, node=N node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I try to create a live view without privilege as the user"): - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT 1", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT 1", + settings=[("user", f"{user_name}")], + ) finally: with Then("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def create_with_revoked_create_view_privilege_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to create live view after the CREATE VIEW privilege is revoked, either directly or from a role. 
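# A standalone sketch (not from this patch): every outline here wraps its work
# in try/finally with "DROP VIEW IF EXISTS" so a failed assertion never leaks
# test objects into the next scenario. The same idea as a reusable context
# manager; run_query and the names are illustrative only.
from contextlib import contextmanager

@contextmanager
def temporary_view(run_query, view_name, create_sql):
    run_query(f"DROP VIEW IF EXISTS {view_name}")  # start from a clean slate
    try:
        run_query(create_sql)
        yield view_name
    finally:
        run_query(f"DROP VIEW IF EXISTS {view_name}")  # always clean up

# usage:
# with temporary_view(run_query, "view_x", "CREATE LIVE VIEW view_x AS SELECT 1"):
#     run_query("SELECT count(*) FROM view_x")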
- """ +def create_with_revoked_create_view_privilege_revoked_directly_or_from_role( + self, node=None +): + """Check that user is unable to create live view after the CREATE VIEW privilege is revoked, either directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_revoked_create_view_privilege, - name="create with create view privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_revoked_create_view_privilege, + name="create with create view privilege revoked directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_revoked_create_view_privilege, - name="create with create view privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_revoked_create_view_privilege, + name="create with create view privilege revoked from a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def create_with_revoked_create_view_privilege(self, grant_target_name, user_name, node=None): - """Revoke CREATE VIEW privilege and check the user is unable to create a live view. - """ +def create_with_revoked_create_view_privilege( + self, grant_target_name, user_name, node=None +): + """Revoke CREATE VIEW privilege and check the user is unable to create a live view.""" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -130,8 +182,13 @@ def create_with_revoked_create_view_privilege(self, grant_target_name, user_name node.query(f"REVOKE CREATE VIEW ON {view_name} FROM {grant_target_name}") with Then("I try to create a live view on the table as the user"): - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT 1", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT 1", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestScenario def create_without_source_table_privilege(self, node=None): @@ -150,9 +207,16 @@ def create_without_source_table_privilege(self, node=None): with When("I grant CREATE VIEW privilege to a user"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {user_name}") - with Then("I try to create a live view without select privilege on the table"): - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + with Then( + "I try to create a live view without select privilege on the table" + ): + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestScenario def create_with_source_table_privilege_granted_directly_or_via_role(self, node=None): @@ -165,19 +229,23 @@ def create_with_source_table_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_source_table_privilege, - name="create with create view and select privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_source_table_privilege, + name="create with create view and select 
privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_source_table_privilege, - name="create with create view and select privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_source_table_privilege, + name="create with create view and select privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_source_table_privilege(self, user_name, grant_target_name, node=None): - """Check that user is unable to create a live view without SELECT privilege on the source table. - """ + """Check that user is unable to create a live view without SELECT privilege on the source table.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" @@ -193,16 +261,20 @@ def create_with_source_table_privilege(self, user_name, grant_target_name, node= with And("I try to create a live view on the table as the user"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + ) with Then("I check the view"): output = node.query(f"SELECT count(*) FROM {view_name}").output - assert output == '0', error() + assert output == "0", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_subquery_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a live view where the stored query has two subqueries @@ -215,14 +287,19 @@ def create_with_subquery_privilege_granted_directly_or_via_role(self, node=None) if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_subquery, - name="create with subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_subquery, + name="create with subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_subquery, - name="create with subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_subquery, + name="create with subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_subquery(self, user_name, grant_target_name, node=None): @@ -243,29 +320,72 @@ def create_with_subquery(self, user_name, grant_target_name, node=None): with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to CREATE VIEW as the user with create privilege"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + 
settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=3): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a live view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=3))+1, grant_target_name, table0_name, table1_name, table2_name): + with grant_select_on_table( + node, + max(permutations(table_count=3)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a live view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_join_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a live view where the stored query includes a `JOIN` statement @@ -278,14 +398,19 @@ def create_with_join_query_privilege_granted_directly_or_via_role(self, node=Non if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_join_query, - name="create with join query, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_join_query, + name="create with join query, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_join_query, - name="create with join query, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_join_query, + name="create with join query, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_join_query(self, grant_target_name, user_name, node=None): @@ -305,29 +430,63 @@ def create_with_join_query(self, grant_target_name, user_name, node=None): with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to 
create view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a live view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a live view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Then("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_join_subquery_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a live view with a stored query that includes `JOIN` and two subqueries @@ -339,14 +498,19 @@ def create_with_join_subquery_privilege_granted_directly_or_via_role(self, node= if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_join_subquery, - name="create with join subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_join_subquery, + name="create with join subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_join_subquery, - name="create with join subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_join_subquery, + name="create with join subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_join_subquery(self, grant_target_name, user_name, node=None): @@ -367,31 +531,79 @@ def 
create_with_join_subquery(self, grant_target_name, user_name, node=None): try: with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") - with Then("I attempt to create view as the user with CREATE VIEW privilege"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + with Then( + "I attempt to create view as the user with CREATE VIEW privilege" + ): + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=4): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table3_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table3_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a live view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=4))+1, grant_target_name, table0_name, table1_name, table2_name, table3_name): + with grant_select_on_table( + node, + max(permutations(table_count=4)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a live view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name), - settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_nested_views_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a live view with a stored query that includes other views if and only if @@ -403,14 +615,19 @@ def create_with_nested_views_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_nested_views, - name="create with nested views, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + 
test=create_with_nested_views, + name="create with nested views, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_nested_views, - name="create with nested views, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_nested_views, + name="create with nested views, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_nested_views(self, grant_target_name, user_name, node=None): @@ -432,30 +649,79 @@ def create_with_nested_views(self, grant_target_name, user_name, node=None): try: with Given("I have some views"): node.query(f"CREATE VIEW {view0_name} AS SELECT y FROM {table0_name}") - node.query(f"CREATE VIEW {view1_name} AS SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {view0_name} WHERE y<2)") + node.query( + f"CREATE VIEW {view1_name} AS SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {view0_name} WHERE y<2)" + ) with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view2_name} TO {grant_target_name}") - with Then("I attempt to create view as the user with CREATE VIEW privilege"): - node.query(create_view_query.format(view2_name=view2_name, view1_name=view1_name, table2_name=table2_name), - settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + with Then( + "I attempt to create view as the user with CREATE VIEW privilege" + ): + node.query( + create_view_query.format( + view2_name=view2_name, + view1_name=view1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) - for permutation in ([0,1,2,3,10,14,15,26,27,30],permutations(table_count=5))[self.context.stress]: - with grant_select_on_table(node, permutation, grant_target_name, view1_name, table2_name, view0_name, table1_name, table0_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + for permutation in ( + [0, 1, 2, 3, 10, 14, 15, 26, 27, 30], + permutations(table_count=5), + )[self.context.stress]: + with grant_select_on_table( + node, + permutation, + grant_target_name, + view1_name, + table2_name, + view0_name, + table1_name, + table0_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view2_name}") with Then("I attempt to create a live view as the user"): - node.query(create_view_query.format(view2_name=view2_name, view1_name=view1_name, table2_name=table2_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view2_name=view2_name, + view1_name=view1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all views"): - with grant_select_on_table(node, max(permutations(table_count=5))+1, grant_target_name, view0_name, view1_name, table0_name, table1_name, table2_name): + with grant_select_on_table( + node, + max(permutations(table_count=5)) + 1, + grant_target_name, + view0_name, + view1_name, + table0_name, + table1_name, + table2_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view2_name}") with 
Then("I attempt to create a live view as the user"): - node.query(create_view_query.format(view2_name=view2_name, view1_name=view1_name, table2_name=table2_name), - settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view2_name=view2_name, + view1_name=view1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the views"): @@ -466,27 +732,53 @@ def create_with_nested_views(self, grant_target_name, user_name, node=None): with And("I drop view2", flags=TE): node.query(f"DROP VIEW IF EXISTS {view0_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_LiveView_Select("1.0"), ) def select(self, node=None): - """Test the RBAC functionality of the `SELECT FROM live view` command. - """ - Scenario(run=select_without_select_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_select_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_select_privilege_revoked_directly_or_from_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_without_source_table_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_source_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_join_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_join_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_nested_views_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) + """Test the RBAC functionality of the `SELECT FROM live view` command.""" + Scenario( + run=select_without_select_privilege, setup=instrument_clickhouse_server_log + ) + Scenario( + run=select_with_select_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_select_privilege_revoked_directly_or_from_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_without_source_table_privilege, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_source_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_join_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_join_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_nested_views_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario def select_without_select_privilege(self, node=None): - """Check that user is unable to select on a view without view SELECT privilege. 
- """ + """Check that user is unable to select on a view without view SELECT privilege.""" user_name = f"user_{getuid()}" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -500,36 +792,44 @@ def select_without_select_privilege(self, node=None): node.query(f"CREATE LIVE VIEW {view_name} AS SELECT 1") with Then("I try to select from view without privilege as the user"): - node.query(f"SELECT * FROM {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_select_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to select from a view if and only if they have select privilege on that view, either directly or from a role. - """ + """Check that user is able to select from a view if and only if they have select privilege on that view, either directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_select_privilege, - name="select with select privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_select_privilege, + name="select with select privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_select_privilege, - name="select with select privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_select_privilege, + name="select with select privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_select_privilege(self, user_name, grant_target_name, node=None): - """Grant SELECT privilege on a view and check the user is able to SELECT from it. - """ + """Grant SELECT privilege on a view and check the user is able to SELECT from it.""" view_name = f"view_{getuid()}" if node is None: @@ -543,36 +843,42 @@ def select_with_select_privilege(self, user_name, grant_target_name, node=None): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from view with privilege as the user"): - output = node.query(f"SELECT count(*) FROM {view_name}", settings = [("user",f"{user_name}")]).output - assert output == '1', error() + output = node.query( + f"SELECT count(*) FROM {view_name}", settings=[("user", f"{user_name}")] + ).output + assert output == "1", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_select_privilege_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to select from a view if their SELECT privilege is revoked, either directly or from a role. 
- """ + """Check that user is unable to select from a view if their SELECT privilege is revoked, either directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_revoked_select_privilege, - name="select with select privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_revoked_select_privilege, + name="select with select privilege revoked directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_revoked_select_privilege, - name="select with select privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_revoked_select_privilege, + name="select with select privilege revoked from a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_revoked_select_privilege(self, user_name, grant_target_name, node=None): - """Grant and revoke SELECT privilege on a view and check the user is unable to SELECT from it. - """ + """Grant and revoke SELECT privilege on a view and check the user is unable to SELECT from it.""" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -589,17 +895,21 @@ def select_with_revoked_select_privilege(self, user_name, grant_target_name, nod node.query(f"REVOKE SELECT ON {view_name} FROM {grant_target_name}") with Then("I attempt to select from view with privilege as the user"): - node.query(f"SELECT count(*) FROM {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT count(*) FROM {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_without_source_table_privilege(self, node=None): - """Check that user is unable to select from a view without SELECT privilege for the source table. 
- """ + """Check that user is unable to select from a view without SELECT privilege for the source table.""" user_name = f"user_{getuid()}" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" @@ -608,7 +918,7 @@ def select_without_source_table_privilege(self, node=None): if node is None: node = self.context.node with table(node, f"{table_name}"): - with user(node, f"{user_name}"): + with user(node, f"{user_name}"): try: with When("I create a live view from the source table"): node.query(f"DROP VIEW IF EXISTS {view_name}") @@ -616,14 +926,21 @@ def select_without_source_table_privilege(self, node=None): with And("I grant view select privilege to the user"): node.query(f"GRANT SELECT ON {view_name} TO {user_name}") - with Then("I attempt to select from view without privilege on the source table"): - node.query(f"SELECT count(*) FROM {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + with Then( + "I attempt to select from view without privilege on the source table" + ): + node.query( + f"SELECT count(*) FROM {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_source_table_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view, with source table in the stored query, if and only if @@ -635,19 +952,23 @@ def select_with_source_table_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_source_table_privilege, - name="select with source table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_source_table_privilege, + name="select with source table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_source_table_privilege, - name="select with source table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_source_table_privilege, + name="select with source table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_source_table_privilege(self, user_name, grant_target_name, node=None): - """Grant SELECT privilege on view and the source table for that view and check the user is able to SELECT from the view. 
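# A standalone sketch (not from this patch): the rule exercised by the two
# scenarios around this point is that SELECT from a live view needs SELECT on
# the view *and* on its source table; with only one of the two grants the query
# is expected to be rejected. A compact driver for that expectation; run_query,
# the expect_error flag and the object names are assumptions.
STEPS = [
    ("GRANT SELECT ON view_x TO user0",  "admin", True),
    ("SELECT count(*) FROM view_x",      "user0", False),  # source table not granted yet
    ("GRANT SELECT ON table_x TO user0", "admin", True),
    ("SELECT count(*) FROM view_x",      "user0", True),   # both grants present
]

def drive(run_query):
    for sql, as_user, should_succeed in STEPS:
        run_query(sql, user=as_user, expect_error=not should_succeed)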
- """ + """Grant SELECT privilege on view and the source table for that view and check the user is able to SELECT from the view.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" @@ -657,20 +978,26 @@ def select_with_source_table_privilege(self, user_name, grant_target_name, node= try: with Given("I have a view with a source table"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}") + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table_name}" + ) with And("I grant select privileges"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") node.query(f"GRANT SELECT ON {table_name} TO {grant_target_name}") with Then("I check the user is able to select from the view"): - output = node.query(f"SELECT count(*) FROM {view_name}", settings = [("user", f"{user_name}")]).output - assert output == '0', error() + output = node.query( + f"SELECT count(*) FROM {view_name}", + settings=[("user", f"{user_name}")], + ).output + assert output == "0", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_subquery_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view where the stored query has two subqueries if and only if @@ -682,19 +1009,23 @@ def select_with_subquery_privilege_granted_directly_or_via_role(self, node=None) if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_subquery, - name="select with subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_subquery, + name="select with subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_subquery, - name="select with subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_subquery, + name="select with subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_subquery(self, user_name, grant_target_name, node=None): - """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. 
- """ + """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -708,29 +1039,61 @@ def select_with_subquery(self, user_name, grant_target_name, node=None): try: with Given("I have a view with a subquery"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table2_name} WHERE y<2))") + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table2_name} WHERE y<2))" + ) with When("I grant SELECT privilege on view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from the view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=3): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=3))+1, grant_target_name, table0_name, table1_name, table2_name): + with grant_select_on_table( + node, + max(permutations(table_count=3)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + ): with Then("I attempt to select from a view as the user"): - output = node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")]).output - assert output == '0', error() + output = node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + ).output + assert output == "0", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_join_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view where the stored query includes a `JOIN` statement if and only if @@ -742,19 +1105,23 @@ def select_with_join_query_privilege_granted_directly_or_via_role(self, node=Non if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_join_query, - name="select with join, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_join_query, + name="select with join, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, 
f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_join_query, - name="select with join, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_join_query, + name="select with join, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_join_query(self, user_name, grant_target_name, node=None): - """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. - """ + """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -767,28 +1134,54 @@ def select_with_join_query(self, user_name, grant_target_name, node=None): try: with Given("I have a view with a JOIN statement"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table0_name} JOIN {table1_name} USING d") + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT * FROM {table0_name} JOIN {table1_name} USING d" + ) with When("I grant SELECT privilege on view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from the view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")]) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_join_subquery_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view with a stored query that includes `JOIN` and two subqueries @@ -800,19 +1193,23 @@ def 
select_with_join_subquery_privilege_granted_directly_or_via_role(self, node= if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_join_subquery, - name="select with join subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_join_subquery, + name="select with join subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_join_subquery, - name="select with join subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_join_subquery, + name="select with join subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_join_subquery(self, grant_target_name, user_name, node=None): - """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. - """ + """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -827,28 +1224,62 @@ def select_with_join_subquery(self, grant_target_name, user_name, node=None): try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE LIVE VIEW {view_name} AS SELECT y FROM {table0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table2_name} WHERE y<2)) JOIN {table3_name} USING y") + node.query( + f"CREATE LIVE VIEW {view_name} AS SELECT y FROM {table0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table2_name} WHERE y<2)) JOIN {table3_name} USING y" + ) with When("I grant SELECT privilege on view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from the view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=4): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name, table3_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=4))+1, grant_target_name, table0_name, table1_name, table2_name, table3_name): + with 
grant_select_on_table( + node, + max(permutations(table_count=4)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")]) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_nested_views_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view with a stored query that includes other views if and only if @@ -860,19 +1291,23 @@ def select_with_nested_views_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_nested_views, - name="select with nested views, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_nested_views, + name="select with nested views, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_nested_views, - name="select with nested views, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_nested_views, + name="select with nested views, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_nested_views(self, grant_target_name, user_name, node=None): - """Grant SELECT on views and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. 
- """ + """Grant SELECT on views and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view0_name = f"view0_{getuid()}" view1_name = f"view1_{getuid()}" view2_name = f"view2_{getuid()}" @@ -887,25 +1322,67 @@ def select_with_nested_views(self, grant_target_name, user_name, node=None): with table(node, f"{table0_name},{table1_name},{table2_name}"): try: with Given("I have some views"): - node.query(f"CREATE LIVE VIEW {view0_name} AS SELECT y FROM {table0_name}") - node.query(f"CREATE LIVE VIEW {view1_name} AS SELECT y FROM {view0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y<2)") - node.query(f"CREATE LIVE VIEW {view2_name} AS SELECT y FROM {view1_name} JOIN {table2_name} USING y") + node.query( + f"CREATE LIVE VIEW {view0_name} AS SELECT y FROM {table0_name}" + ) + node.query( + f"CREATE LIVE VIEW {view1_name} AS SELECT y FROM {view0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y<2)" + ) + node.query( + f"CREATE LIVE VIEW {view2_name} AS SELECT y FROM {view1_name} JOIN {table2_name} USING y" + ) with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view2_name=view2_name), - settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view2_name=view2_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) - for permutation in ([0,1,3,5,7,21,29,31,53,55,61],permutations(table_count=6))[self.context.stress]: - with grant_select_on_table(node, permutation, grant_target_name, view2_name, view1_name, table2_name, view0_name, table1_name, table0_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + for permutation in ( + [0, 1, 3, 5, 7, 21, 29, 31, 53, 55, 61], + permutations(table_count=6), + )[self.context.stress]: + with grant_select_on_table( + node, + permutation, + grant_target_name, + view2_name, + view1_name, + table2_name, + view0_name, + table1_name, + table0_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view2_name=view2_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view2_name=view2_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all views"): - with grant_select_on_table(node, max(permutations(table_count=6))+1, grant_target_name, view0_name, view1_name, view2_name, table0_name, table1_name, table2_name): + with grant_select_on_table( + node, + max(permutations(table_count=6)) + 1, + grant_target_name, + view0_name, + view1_name, + view2_name, + table0_name, + table1_name, + table2_name, + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view2_name=view2_name), settings = [("user", f"{user_name}")]) + node.query( + select_view_query.format(view2_name=view2_name), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the views"): @@ -916,39 +1393,47 @@ def select_with_nested_views(self, grant_target_name, user_name, node=None): with And("I drop view2", flags=TE): node.query(f"DROP VIEW IF EXISTS {view0_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_LiveView_Drop("1.0"), ) def drop(self, node=None): - """Test the RBAC functionality of the 
`DROP VIEW` command. - """ - Scenario(run=drop_with_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=drop_with_revoked_privilege_revoked_directly_or_from_role, setup=instrument_clickhouse_server_log) + """Test the RBAC functionality of the `DROP VIEW` command.""" + Scenario( + run=drop_with_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=drop_with_revoked_privilege_revoked_directly_or_from_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario def drop_with_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to drop view with DROP VIEW privilege if the user has privilege directly or through a role. - """ + """Check that user is able to drop view with DROP VIEW privilege if the user has privilege directly or through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=drop_with_privilege, - name="drop privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario(test=drop_with_privilege, name="drop privilege granted directly")( + grant_target_name=user_name, user_name=user_name + ) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=drop_with_privilege, - name="drop privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=drop_with_privilege, name="drop privilege granted through a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def drop_with_privilege(self, grant_target_name, user_name, node=None): - """Grant DROP VIEW privilege and check the user is able to successfully drop a view. - """ + """Grant DROP VIEW privilege and check the user is able to successfully drop a view.""" view_name = f"view_{getuid()}" exitcode, message = errors.table_does_not_exist(name=f"default.{view_name}") @@ -963,7 +1448,7 @@ def drop_with_privilege(self, grant_target_name, user_name, node=None): node.query(f"GRANT DROP VIEW ON {view_name} TO {grant_target_name}") with And("I drop the view as the user"): - node.query(f"DROP VIEW {view_name}", settings = [("user",f"{user_name}")]) + node.query(f"DROP VIEW {view_name}", settings=[("user", f"{user_name}")]) with Then("I check the table does not exist"): node.query(f"SELECT * FROM {view_name}", exitcode=exitcode, message=message) @@ -972,29 +1457,31 @@ def drop_with_privilege(self, grant_target_name, user_name, node=None): with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def drop_with_revoked_privilege_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to drop view with DROP VIEW privilege revoked directly or from a role. 
- """ + """Check that user is unable to drop view with DROP VIEW privilege revoked directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=drop_with_revoked_privilege, - name="drop privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=drop_with_revoked_privilege, name="drop privilege revoked directly" + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=drop_with_revoked_privilege, - name="drop privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=drop_with_revoked_privilege, name="drop privilege revoked from a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def drop_with_revoked_privilege(self, grant_target_name, user_name, node=None): - """Revoke DROP VIEW privilege and check the user is unable to DROP a view. - """ + """Revoke DROP VIEW privilege and check the user is unable to DROP a view.""" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -1011,22 +1498,33 @@ def drop_with_revoked_privilege(self, grant_target_name, user_name, node=None): node.query(f"REVOKE DROP VIEW ON {view_name} FROM {grant_target_name}") with Then("I drop the view as the user"): - node.query(f"DROP VIEW {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"DROP VIEW {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_LiveView_Refresh("1.0"), ) def refresh(self, node=None): - """Test the RBAC functionality of the `ALTER LIVE VIEW REFRESH` command. 
- """ - Scenario(run=refresh_with_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=refresh_with_privilege_revoked_directly_or_from_role, setup=instrument_clickhouse_server_log) + """Test the RBAC functionality of the `ALTER LIVE VIEW REFRESH` command.""" + Scenario( + run=refresh_with_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=refresh_with_privilege_revoked_directly_or_from_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario def refresh_with_privilege_granted_directly_or_via_role(self, node=None): @@ -1039,19 +1537,21 @@ def refresh_with_privilege_granted_directly_or_via_role(self, node=None): if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=refresh_with_privilege, - name="refresh privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=refresh_with_privilege, name="refresh privilege granted directly" + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=refresh_with_privilege, - name="refresh privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=refresh_with_privilege, name="refresh privilege revoked from a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def refresh_with_privilege(self, grant_target_name, user_name, node=None): - """Grant REFRESH privilege and check that user is able to refresh a live view. - """ + """Grant REFRESH privilege and check that user is able to refresh a live view.""" view_name = f"view_{getuid()}" if node is None: @@ -1062,15 +1562,21 @@ def refresh_with_privilege(self, grant_target_name, user_name, node=None): node.query(f"CREATE LIVE VIEW {view_name} AS SELECT 1") with When("I grant REFRESH privilege"): - node.query(f"GRANT ALTER VIEW REFRESH ON {view_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER VIEW REFRESH ON {view_name} TO {grant_target_name}" + ) with Then("I attempt to refresh as the user"): - node.query(f"ALTER LIVE VIEW {view_name} REFRESH", settings = [("user",f"{user_name}")]) + node.query( + f"ALTER LIVE VIEW {view_name} REFRESH", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def refresh_with_privilege_revoked_directly_or_from_role(self, node=None): """Check that user is unable to refresh a live view with REFRESH privilege @@ -1082,19 +1588,23 @@ def refresh_with_privilege_revoked_directly_or_from_role(self, node=None): if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=refresh_with_revoked_privilege, - name="refresh privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=refresh_with_revoked_privilege, + name="refresh privilege revoked directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=refresh_with_revoked_privilege, - name="refresh privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=refresh_with_revoked_privilege, + name="refresh privilege revoked from a role", + 
)(grant_target_name=role_name, user_name=user_name) + @TestOutline def refresh_with_revoked_privilege(self, grant_target_name, user_name, node=None): - """Revoke REFRESH privilege and check that user is unable to refresh a live view. - """ + """Revoke REFRESH privilege and check that user is unable to refresh a live view.""" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -1106,17 +1616,27 @@ def refresh_with_revoked_privilege(self, grant_target_name, user_name, node=None node.query(f"CREATE LIVE VIEW {view_name} AS SELECT 1") with When("I grant REFRESH privilege"): - node.query(f"GRANT ALTER VIEW REFRESH ON {view_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER VIEW REFRESH ON {view_name} TO {grant_target_name}" + ) with And("I revoke REFRESH privilege"): - node.query(f"REVOKE ALTER VIEW REFRESH ON {view_name} FROM {grant_target_name}") + node.query( + f"REVOKE ALTER VIEW REFRESH ON {view_name} FROM {grant_target_name}" + ) with Then("I attempt to refresh as the user"): - node.query(f"ALTER LIVE VIEW {view_name} REFRESH", settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"ALTER LIVE VIEW {view_name} REFRESH", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_LiveView("1.0"), diff --git a/tests/testflows/rbac/tests/views/materialized_view.py b/tests/testflows/rbac/tests/views/materialized_view.py index 0464332d327..6aedbd18c7a 100755 --- a/tests/testflows/rbac/tests/views/materialized_view.py +++ b/tests/testflows/rbac/tests/views/materialized_view.py @@ -5,49 +5,94 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @contextmanager def allow_experimental_alter_materialized_view_structure(node): setting = ("allow_experimental_alter_materialized_view_structure", 1) default_query_settings = None try: - with Given("I add allow_experimental_alter_materialized_view_structure to the default query settings"): - default_query_settings = getsattr(current().context, "default_query_settings", []) + with Given( + "I add allow_experimental_alter_materialized_view_structure to the default query settings" + ): + default_query_settings = getsattr( + current().context, "default_query_settings", [] + ) default_query_settings.append(setting) yield finally: - with Finally("I remove allow_experimental_alter_materialized_view_structure from the default query settings"): + with Finally( + "I remove allow_experimental_alter_materialized_view_structure from the default query settings" + ): if default_query_settings: try: default_query_settings.pop(default_query_settings.index(setting)) except ValueError: pass + @TestSuite @Requirements( RQ_SRS_006_RBAC_MaterializedView_Create("1.0"), ) def create(self, node=None): - """Test the RBAC functionality of the `CREATE MATERIALIZED VIEW` command. 
- """ - Scenario(run=create_without_create_view_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_create_view_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_revoked_create_view_privilege_revoked_directly_or_from_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_without_source_table_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_source_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_join_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_union_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_join_union_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_nested_views_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_target_table_privilege_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_populate_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_populate_source_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) + """Test the RBAC functionality of the `CREATE MATERIALIZED VIEW` command.""" + Scenario( + run=create_without_create_view_privilege, setup=instrument_clickhouse_server_log + ) + Scenario( + run=create_with_create_view_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_revoked_create_view_privilege_revoked_directly_or_from_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_without_source_table_privilege, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_source_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_join_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_union_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_join_union_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_nested_views_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_target_table_privilege_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_populate_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_populate_source_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario def create_without_create_view_privilege(self, node=None): - """Check that user is unable to create a view without CREATE VIEW privilege. 
- """ + """Check that user is unable to create a view without CREATE VIEW privilege.""" user_name = f"user_{getuid()}" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -57,32 +102,40 @@ def create_without_create_view_privilege(self, node=None): with user(node, f"{user_name}"): with When("I try to create a view without CREATE VIEW privilege as the user"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestScenario def create_with_create_view_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to create a view with CREATE VIEW privilege, either granted directly or through a role. - """ + """Check that user is able to create a view with CREATE VIEW privilege, either granted directly or through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_create_view_privilege, - name="create with create view privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_create_view_privilege, + name="create with create view privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_create_view_privilege, - name="create with create view privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_create_view_privilege, + name="create with create view privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_create_view_privilege(self, grant_target_name, user_name, node=None): - """Check that user is able to create a view with the granted privileges. - """ + """Check that user is able to create a view with the granted privileges.""" view_name = f"view_{getuid()}" if node is None: @@ -93,35 +146,46 @@ def create_with_create_view_privilege(self, grant_target_name, user_name, node=N node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I try to create a view without privilege as the user"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1", + settings=[("user", f"{user_name}")], + ) finally: with Then("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def create_with_revoked_create_view_privilege_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to create view after the CREATE VIEW privilege is revoked, either directly or from a role. 
- """ +def create_with_revoked_create_view_privilege_revoked_directly_or_from_role( + self, node=None +): + """Check that user is unable to create view after the CREATE VIEW privilege is revoked, either directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_revoked_create_view_privilege, - name="create with create view privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_revoked_create_view_privilege, + name="create with create view privilege revoked directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_revoked_create_view_privilege, - name="create with create view privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_revoked_create_view_privilege, + name="create with create view privilege revoked from a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def create_with_revoked_create_view_privilege(self, grant_target_name, user_name, node=None): - """Revoke CREATE VIEW privilege and check the user is unable to create a view. - """ +def create_with_revoked_create_view_privilege( + self, grant_target_name, user_name, node=None +): + """Revoke CREATE VIEW privilege and check the user is unable to create a view.""" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -134,8 +198,13 @@ def create_with_revoked_create_view_privilege(self, grant_target_name, user_name node.query(f"REVOKE CREATE VIEW ON {view_name} FROM {grant_target_name}") with Then("I try to create a view on the table as the user"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestScenario def create_without_source_table_privilege(self, node=None): @@ -155,8 +224,13 @@ def create_without_source_table_privilege(self, node=None): node.query(f"GRANT CREATE VIEW ON {view_name} TO {user_name}") with Then("I try to create a view without select privilege on the table"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestScenario def create_with_source_table_privilege_granted_directly_or_via_role(self, node=None): @@ -169,19 +243,23 @@ def create_with_source_table_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_source_table_privilege, - name="create with create view and select privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_source_table_privilege, + name="create with create view and select privilege granted directly", + )(grant_target_name=user_name, 
user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_source_table_privilege, - name="create with create view and select privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_source_table_privilege, + name="create with create view and select privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_source_table_privilege(self, user_name, grant_target_name, node=None): - """Check that user is unable to create a view without SELECT privilege on the source table. - """ + """Check that user is unable to create a view without SELECT privilege on the source table.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" @@ -197,16 +275,20 @@ def create_with_source_table_privilege(self, user_name, grant_target_name, node= with And("I try to create a view on the table as the user"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + ) with Then("I check the view"): output = node.query(f"SELECT count(*) FROM {view_name}").output - assert output == '0', error() + assert output == "0", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_subquery_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a view where the stored query has two subqueries @@ -219,14 +301,19 @@ def create_with_subquery_privilege_granted_directly_or_via_role(self, node=None) if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_subquery, - name="create with subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_subquery, + name="create with subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_subquery, - name="create with subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_subquery, + name="create with subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_subquery(self, user_name, grant_target_name, node=None): @@ -247,29 +334,72 @@ def create_with_subquery(self, user_name, grant_target_name, node=None): with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to CREATE VIEW as the user with create privilege"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + 
exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=3): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=3))+1, grant_target_name, table0_name, table1_name, table2_name): + with grant_select_on_table( + node, + max(permutations(table_count=3)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_join_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a view where the stored query includes a `JOIN` statement @@ -282,14 +412,19 @@ def create_with_join_query_privilege_granted_directly_or_via_role(self, node=Non if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_join_query, - name="create with join query, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_join_query, + name="create with join query, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_join_query, - name="create with join query, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_join_query, + name="create with join query, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_join_query(self, grant_target_name, user_name, node=None): @@ -309,29 +444,63 @@ def create_with_join_query(self, grant_target_name, user_name, node=None): with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to create view as the user"): - 
node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Then("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_union_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a view where the stored query includes a `UNION ALL` statement @@ -344,14 +513,19 @@ def create_with_union_query_privilege_granted_directly_or_via_role(self, node=No if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_union_query, - name="create with union query, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_union_query, + name="create with union query, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_union_query, - name="create with union query, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_union_query, + name="create with union query, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_union_query(self, grant_target_name, user_name, node=None): @@ -371,31 +545,67 @@ def create_with_union_query(self, grant_target_name, user_name, node=None): with 
When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to create view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def create_with_join_union_subquery_privilege_granted_directly_or_via_role(self, node=None): +def create_with_join_union_subquery_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to create a view with a stored query that includes `UNION ALL`, `JOIN` and two subqueries if and only if the user has SELECT privilege on all of the tables, either granted directly or through a role. 
""" @@ -405,14 +615,19 @@ def create_with_join_union_subquery_privilege_granted_directly_or_via_role(self, if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_join_union_subquery, - name="create with join union subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_join_union_subquery, + name="create with join union subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_join_union_subquery, - name="create with join union subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_join_union_subquery, + name="create with join union subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_join_union_subquery(self, grant_target_name, user_name, node=None): @@ -430,38 +645,94 @@ def create_with_join_union_subquery(self, grant_target_name, user_name, node=Non if node is None: node = self.context.node - with table(node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}"): + with table( + node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}" + ): with user(node, f"{user_name}"): try: with When("I grant CREATE VIEW privilege"): - node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") - with Then("I attempt to create view as the user with CREATE VIEW privilege"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name, table4_name=table4_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}" + ) + with Then( + "I attempt to create view as the user with CREATE VIEW privilege" + ): + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=5): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table3_name, table4_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table3_name, + table4_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name, table4_name=table4_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", 
f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=5))+1, grant_target_name, table0_name, table1_name, table2_name, table3_name, table4_name): + with grant_select_on_table( + node, + max(permutations(table_count=5)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + table4_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name, table4_name=table4_name), - settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") -@TestScenario +@TestScenario def create_with_nested_views_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a view with a stored query that includes other views if and only if they have SELECT privilege on all the views and the source tables for those views. @@ -472,14 +743,19 @@ def create_with_nested_views_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_nested_views, - name="create with nested views, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_nested_views, + name="create with nested views, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_nested_views, - name="create with nested views, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_nested_views, + name="create with nested views, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_nested_views(self, grant_target_name, user_name, node=None): @@ -502,32 +778,89 @@ def create_with_nested_views(self, grant_target_name, user_name, node=None): with table(node, f"{table0_name},{table1_name},{table2_name},{table3_name}"): try: with Given("I have some views"): - node.query(f"CREATE MATERIALIZED VIEW {view0_name} ENGINE = Memory AS SELECT y FROM {table0_name}") - node.query(f"CREATE MATERIALIZED VIEW {view1_name} ENGINE = Memory AS SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {view0_name} WHERE y<2)") - node.query(f"CREATE MATERIALIZED VIEW {view2_name} ENGINE = Memory AS SELECT y FROM {table2_name} JOIN {view1_name} USING y") + node.query( + f"CREATE MATERIALIZED VIEW {view0_name} ENGINE = Memory AS SELECT y FROM {table0_name}" + ) + node.query( + f"CREATE MATERIALIZED VIEW {view1_name} ENGINE = Memory AS SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {view0_name} WHERE y<2)" + ) + node.query( + f"CREATE MATERIALIZED VIEW {view2_name} ENGINE = Memory AS SELECT y FROM {table2_name} JOIN {view1_name} USING y" + ) with When("I grant CREATE VIEW privilege"): 
node.query(f"GRANT CREATE VIEW ON {view3_name} TO {grant_target_name}") - with Then("I attempt to create view as the user with CREATE VIEW privilege"): - node.query(create_view_query.format(view3_name=view3_name, view2_name=view2_name, table3_name=table3_name), - settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + with Then( + "I attempt to create view as the user with CREATE VIEW privilege" + ): + node.query( + create_view_query.format( + view3_name=view3_name, + view2_name=view2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) - for permutation in ([0,1,2,3,7,11,15,31,39,79,95],permutations(table_count=7))[self.context.stress]: - with grant_select_on_table(node, permutation, grant_target_name, view2_name, table3_name, view1_name, table2_name, view0_name, table1_name, table0_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + for permutation in ( + [0, 1, 2, 3, 7, 11, 15, 31, 39, 79, 95], + permutations(table_count=7), + )[self.context.stress]: + with grant_select_on_table( + node, + permutation, + grant_target_name, + view2_name, + table3_name, + view1_name, + table2_name, + view0_name, + table1_name, + table0_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view3_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view3_name=view3_name, view2_name=view2_name, table3_name=table3_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view3_name=view3_name, + view2_name=view2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all views"): - with grant_select_on_table(node, max(permutations(table_count=7))+1, grant_target_name, view0_name, view1_name, view2_name, table0_name, table1_name, table2_name, table3_name): + with grant_select_on_table( + node, + max(permutations(table_count=7)) + 1, + grant_target_name, + view0_name, + view1_name, + view2_name, + table0_name, + table1_name, + table2_name, + table3_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view3_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view3_name=view3_name, view2_name=view2_name, table3_name=table3_name), - settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view3_name=view3_name, + view2_name=view2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the views"): @@ -540,6 +873,7 @@ def create_with_nested_views(self, grant_target_name, user_name, node=None): with And("I drop view3", flags=TE): node.query(f"DROP VIEW IF EXISTS {view0_name}") + @TestScenario def create_with_target_table_privilege_directly_or_via_role(self, node=None): """Check that user is able to create a materialized view with a target table if and only if @@ -551,19 +885,23 @@ def create_with_target_table_privilege_directly_or_via_role(self, node=None): if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_target_table, - name="create with target table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( 
+ test=create_with_target_table, + name="create with target table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_target_table, - name="create with target table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_target_table, + name="create with target table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_target_table(self, grant_target_name, user_name, node=None): - """Check that user is unable to create a view without INSERT and SELECT privileges and is able to once both are granted. - """ + """Check that user is unable to create a view without INSERT and SELECT privileges and is able to once both are granted.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -575,33 +913,48 @@ def create_with_target_table(self, grant_target_name, user_name, node=None): with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to create a view as the user"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant SELECT on the target table"): node.query(f"GRANT SELECT ON {table_name} TO {grant_target_name}") with Then("I attempt to create a view as the user"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I revoke SELECT on the target table"): node.query(f"REVOKE SELECT ON {table_name} FROM {grant_target_name}") with And("I grant INSERT privilege on the target table"): node.query(f"GRANT INSERT ON {table_name} TO {grant_target_name}") with Then("I attempt to create a view as the user"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant SELECT on the target table"): node.query(f"GRANT SELECT ON {table_name} TO {grant_target_name}") with Then("I successfully create a view as the user"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1", - settings = [("user", f"{user_name}")]) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_populate_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a view with POPULATE specified if and only if @@ -615,21 +968,25 @@ def 
create_with_populate_privilege_granted_directly_or_via_role(self, node=None) with user(node, f"{user_name}"): - Scenario(test=create_with_populate, - name="create with populate privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_populate, + name="create with populate privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_populate, - name="create with populate privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_populate, + name="create with populate privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_populate(self, user_name, grant_target_name, node=None): - """Check that user is only able to create the view after INSERT privilege is granted. - """ + """Check that user is only able to create the view after INSERT privilege is granted.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -643,8 +1000,12 @@ def create_with_populate(self, user_name, grant_target_name, node=None): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to create a view as the user"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT 1", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT 1", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant INSERT privilege on the view"): node.query(f"GRANT INSERT ON {view_name} TO {grant_target_name}") @@ -653,15 +1014,20 @@ def create_with_populate(self, user_name, grant_target_name, node=None): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT 1", - settings = [("user", f"{user_name}")]) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT 1", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def create_with_populate_source_table_privilege_granted_directly_or_via_role(self, node=None): +def create_with_populate_source_table_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to create a view with POPULATE and a source table specified if and only if they have CREATE VIEW and INSERT privileges for the view, either directly or from a role. 
""" @@ -671,19 +1037,23 @@ def create_with_populate_source_table_privilege_granted_directly_or_via_role(sel if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_populate_source_table, - name="create with populate and source table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_populate_source_table, + name="create with populate and source table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_populate_source_table, - name="create with populate and source table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_populate_source_table, + name="create with populate and source table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_populate_source_table(self, user_name, grant_target_name, node=None): - """Check that user is only able to create the view after INSERT privilege is granted. - """ + """Check that user is only able to create the view after INSERT privilege is granted.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -695,50 +1065,92 @@ def create_with_populate_source_table(self, user_name, grant_target_name, node=N with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to create a view as the user"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT * FROM {table_name}", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant SELECT privilege on the source table"): node.query(f"GRANT SELECT ON {table_name} TO {user_name}") with Then("I attempt to create a view as the user"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT * FROM {table_name}", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant INSERT privilege on the view"): node.query(f"GRANT INSERT ON {view_name} TO {grant_target_name}") with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT * FROM {table_name}", - settings = [("user", f"{user_name}")]) + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory POPULATE AS SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_MaterializedView_Select("1.0"), ) def select(self, node=None): - """Test the RBAC functionality of the `SELECT FROM materialized view` command - """ - 
Scenario(run=select_without_select_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_select_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_select_privilege_revoked_directly_or_from_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_without_source_table_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_source_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_join_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_union_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_join_union_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_nested_views_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_privilege_granted_directly_or_via_role_without_target_table_privilege, setup=instrument_clickhouse_server_log) + """Test the RBAC functionality of the `SELECT FROM materialized view` command""" + Scenario( + run=select_without_select_privilege, setup=instrument_clickhouse_server_log + ) + Scenario( + run=select_with_select_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_select_privilege_revoked_directly_or_from_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_without_source_table_privilege, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_source_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_join_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_union_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_join_union_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_nested_views_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_privilege_granted_directly_or_via_role_without_target_table_privilege, + setup=instrument_clickhouse_server_log, + ) + @TestScenario def select_without_select_privilege(self, node=None): - """Check that user is unable to select on a view without view SELECT privilege. 
- """ + """Check that user is unable to select on a view without view SELECT privilege.""" user_name = f"user_{getuid()}" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -749,39 +1161,49 @@ def select_without_select_privilege(self, node=None): try: with When("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with Then("I try to select from view without privilege as the user"): - node.query(f"SELECT * FROM {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_select_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to select from a view if and only if they have select privilege on that view, either directly or from a role. - """ + """Check that user is able to select from a view if and only if they have select privilege on that view, either directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_select_privilege, - name="select with select privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_select_privilege, + name="select with select privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_select_privilege, - name="select with select privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_select_privilege, + name="select with select privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_select_privilege(self, user_name, grant_target_name, node=None): - """Grant SELECT privilege on a view and check the user is able to SELECT from it. 
- """ + """Grant SELECT privilege on a view and check the user is able to SELECT from it.""" view_name = f"view_{getuid()}" if node is None: @@ -789,42 +1211,50 @@ def select_with_select_privilege(self, user_name, grant_target_name, node=None): try: with When("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with And("I grant SELECT privilege for the view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from view with privilege as the user"): - output = node.query(f"SELECT count(*) FROM {view_name}", settings = [("user",f"{user_name}")]).output - assert output == '1', error() + output = node.query( + f"SELECT count(*) FROM {view_name}", settings=[("user", f"{user_name}")] + ).output + assert output == "1", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_select_privilege_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to select from a view if their SELECT privilege is revoked, either directly or from a role. - """ + """Check that user is unable to select from a view if their SELECT privilege is revoked, either directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_select_privilege, - name="select with select privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_select_privilege, + name="select with select privilege revoked directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_select_privilege, - name="select with select privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_select_privilege, + name="select with select privilege revoked from a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_revoked_select_privilege(self, user_name, grant_target_name, node=None): - """Grant and revoke SELECT privilege on a view and check the user is unable to SELECT from it. - """ + """Grant and revoke SELECT privilege on a view and check the user is unable to SELECT from it.""" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -841,17 +1271,21 @@ def select_with_revoked_select_privilege(self, user_name, grant_target_name, nod node.query(f"REVOKE SELECT ON {view_name} FROM {grant_target_name}") with Then("I attempt to select from view with privilege as the user"): - node.query(f"SELECT count(*) FROM {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT count(*) FROM {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_without_source_table_privilege(self, node=None): - """Check that user is unable to select from a view without SELECT privilege for the source table. 
- """ + """Check that user is unable to select from a view without SELECT privilege for the source table.""" user_name = f"user_{getuid()}" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" @@ -860,22 +1294,31 @@ def select_without_source_table_privilege(self, node=None): if node is None: node = self.context.node with table(node, f"{table_name}"): - with user(node, f"{user_name}"): + with user(node, f"{user_name}"): try: with When("I create a view from the source table"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}" + ) with And("I grant view select privilege to the user"): node.query(f"GRANT SELECT ON {view_name} TO {user_name}") - with Then("I attempt to select from view without privilege on the source table"): - node.query(f"SELECT count(*) FROM {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + with Then( + "I attempt to select from view without privilege on the source table" + ): + node.query( + f"SELECT count(*) FROM {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_source_table_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view, with source table in the stored query, if and only if @@ -887,19 +1330,23 @@ def select_with_source_table_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_source_table_privilege, - name="select with source table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_source_table_privilege, + name="select with source table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_source_table_privilege, - name="select with source table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_source_table_privilege, + name="select with source table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_source_table_privilege(self, user_name, grant_target_name, node=None): - """Grant SELECT privilege on view and the source table for that view and check the user is able to SELECT from the view. 
- """ + """Grant SELECT privilege on view and the source table for that view and check the user is able to SELECT from the view.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" @@ -909,20 +1356,26 @@ def select_with_source_table_privilege(self, user_name, grant_target_name, node= try: with Given("I have a view with a source table"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}" + ) with And("I grant select privileges"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") node.query(f"GRANT SELECT ON {table_name} TO {grant_target_name}") with Then("I check the user is able to select from the view"): - output = node.query(f"SELECT count(*) FROM {view_name}", settings = [("user", f"{user_name}")]).output - assert output == '0', error() + output = node.query( + f"SELECT count(*) FROM {view_name}", + settings=[("user", f"{user_name}")], + ).output + assert output == "0", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_subquery_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view where the stored query has two subqueries if and only if @@ -934,19 +1387,23 @@ def select_with_subquery_privilege_granted_directly_or_via_role(self, node=None) if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_subquery, - name="select with subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_subquery, + name="select with subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_subquery, - name="select with subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_subquery, + name="select with subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_subquery(self, user_name, grant_target_name, node=None): - """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. 
- """ + """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -960,29 +1417,61 @@ def select_with_subquery(self, user_name, grant_target_name, node=None): try: with Given("I have a view with a subquery"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table2_name} WHERE y<2))") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table2_name} WHERE y<2))" + ) with When("I grant SELECT privilege on view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from the view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=3): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=3))+1, grant_target_name, table0_name, table1_name, table2_name): + with grant_select_on_table( + node, + max(permutations(table_count=3)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + ): with Then("I attempt to select from a view as the user"): - output = node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")]).output - assert output == '0', error() + output = node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + ).output + assert output == "0", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_join_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view where the stored query includes a `JOIN` statement if and only if @@ -994,19 +1483,23 @@ def select_with_join_query_privilege_granted_directly_or_via_role(self, node=Non if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_join_query, - name="select with join, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_join_query, + name="select with join, privilege granted directly", + 
)(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_join_query, - name="select with join, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_join_query, + name="select with join, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_join_query(self, user_name, grant_target_name, node=None): - """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. - """ + """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -1019,28 +1512,54 @@ def select_with_join_query(self, user_name, grant_target_name, node=None): try: with Given("I have a view with a JOIN statement"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table0_name} JOIN {table1_name} USING d") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table0_name} JOIN {table1_name} USING d" + ) with When("I grant SELECT privilege on view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from the view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")]) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_union_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view where the stored query includes a 
`UNION ALL` statement if and only if @@ -1052,19 +1571,23 @@ def select_with_union_query_privilege_granted_directly_or_via_role(self, node=No if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_union_query, - name="select with union, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_union_query, + name="select with union, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_union_query, - name="select with union, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_union_query, + name="select with union, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_union_query(self, user_name, grant_target_name, node=None): - """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. - """ + """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -1077,30 +1600,58 @@ def select_with_union_query(self, user_name, grant_target_name, node=None): try: with Given("I have a view with a UNION statement"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table0_name} UNION ALL SELECT * FROM {table1_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table0_name} UNION ALL SELECT * FROM {table1_name}" + ) with When("I grant SELECT privilege on view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from the view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Then("I attempt to select from a view 
as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")]) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def select_with_join_union_subquery_privilege_granted_directly_or_via_role(self, node=None): +def select_with_join_union_subquery_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to select from a view with a stored query that includes `UNION ALL`, `JOIN` and two subqueries if and only if the user has SELECT privilege on all the tables and the view, either directly or through a role. """ @@ -1110,19 +1661,23 @@ def select_with_join_union_subquery_privilege_granted_directly_or_via_role(self, if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_join_union_subquery, - name="select with join union subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_join_union_subquery, + name="select with join union subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_join_union_subquery, - name="select with join union subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_join_union_subquery, + name="select with join union subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_join_union_subquery(self, grant_target_name, user_name, node=None): - """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. 
- """ + """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -1134,32 +1689,70 @@ def select_with_join_union_subquery(self, grant_target_name, user_name, node=Non if node is None: node = self.context.node - with table(node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}"): + with table( + node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}" + ): try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT y FROM {table0_name} JOIN {table1_name} USING y UNION ALL SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table3_name} WHERE y IN (SELECT y FROM {table4_name} WHERE y<2))") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT y FROM {table0_name} JOIN {table1_name} USING y UNION ALL SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table3_name} WHERE y IN (SELECT y FROM {table4_name} WHERE y<2))" + ) with When("I grant SELECT privilege on view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from the view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=5): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name, table3_name, table4_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + table4_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=5))+1, grant_target_name, table0_name, table1_name, table2_name, table3_name, table4_name): + with grant_select_on_table( + node, + max(permutations(table_count=5)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + table4_name, + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")]) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_nested_views_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view with a stored query that includes other views if and only if @@ -1171,19 +1764,23 @@ def 
select_with_nested_views_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_nested_views, - name="select with nested views, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_nested_views, + name="select with nested views, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_nested_views, - name="select with nested views, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_nested_views, + name="select with nested views, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_nested_views(self, grant_target_name, user_name, node=None): - """Grant SELECT on views and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. - """ + """Grant SELECT on views and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view0_name = f"view0_{getuid()}" view1_name = f"view1_{getuid()}" view2_name = f"view2_{getuid()}" @@ -1201,25 +1798,71 @@ def select_with_nested_views(self, grant_target_name, user_name, node=None): try: with Given("I have some views"): node.query(f"CREATE VIEW {view0_name} AS SELECT y FROM {table0_name}") - node.query(f"CREATE VIEW {view1_name} AS SELECT y FROM {view0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y<2)") - node.query(f"CREATE VIEW {view2_name} AS SELECT y FROM {view1_name} JOIN {table2_name} USING y") - node.query(f"CREATE VIEW {view3_name} AS SELECT y FROM {view2_name} UNION ALL SELECT y FROM {table3_name}") + node.query( + f"CREATE VIEW {view1_name} AS SELECT y FROM {view0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y<2)" + ) + node.query( + f"CREATE VIEW {view2_name} AS SELECT y FROM {view1_name} JOIN {table2_name} USING y" + ) + node.query( + f"CREATE VIEW {view3_name} AS SELECT y FROM {view2_name} UNION ALL SELECT y FROM {table3_name}" + ) with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view3_name=view3_name), - settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view3_name=view3_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) - for permutation in ([0,1,3,5,7,13,15,23,31,45,63,95,127,173,237,247,253],permutations(table_count=8))[self.context.stress]: - with grant_select_on_table(node, permutation, grant_target_name, view3_name, table3_name, view2_name, view1_name, table2_name, view0_name, table1_name, table0_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + for permutation in ( + [0, 1, 3, 5, 7, 13, 15, 23, 31, 45, 63, 95, 127, 173, 237, 247, 253], + permutations(table_count=8), + )[self.context.stress]: + with grant_select_on_table( + node, + permutation, + grant_target_name, + view3_name, + table3_name, + view2_name, + view1_name, + table2_name, + view0_name, + table1_name, + table0_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to 
select from a view as the user"): - node.query(select_view_query.format(view3_name=view3_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view3_name=view3_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all views"): - with grant_select_on_table(node, max(permutations(table_count=8))+1, grant_target_name, view0_name, view1_name, view2_name, view3_name, table0_name, table1_name, table2_name, table3_name): + with grant_select_on_table( + node, + max(permutations(table_count=8)) + 1, + grant_target_name, + view0_name, + view1_name, + view2_name, + view3_name, + table0_name, + table1_name, + table2_name, + table3_name, + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view3_name=view3_name), settings = [("user", f"{user_name}")]) + node.query( + select_view_query.format(view3_name=view3_name), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the views"): @@ -1232,40 +1875,52 @@ def select_with_nested_views(self, grant_target_name, user_name, node=None): with And("I drop view3", flags=TE): node.query(f"DROP VIEW IF EXISTS {view0_name}") + @TestScenario -def select_with_privilege_granted_directly_or_via_role_without_target_table_privilege(self, node=None): - """Check that user is able to select from a materialized view without target table SELECT privilege. - """ +def select_with_privilege_granted_directly_or_via_role_without_target_table_privilege( + self, node=None +): + """Check that user is able to select from a materialized view without target table SELECT privilege.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_without_target_table_privilege, - name="select without target table privilege, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_without_target_table_privilege, + name="select without target table privilege, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_without_target_table_privilege, - name="select without target table privilege, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_without_target_table_privilege, + name="select without target table privilege, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def select_without_target_table_privilege(self, grant_target_name, user_name, node=None): - """GRANT the user SELECT privilege on the view and check the user is able to successfully SELECT from the view without target table privilege. 
- """ +def select_without_target_table_privilege( + self, grant_target_name, user_name, node=None +): + """GRANT the user SELECT privilege on the view and check the user is able to successfully SELECT from the view without target table privilege.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" if node is None: node = self.context.node try: with Given("I have a view"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1" + ) with When("I grant SELECT privilege on the view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from a view as the user"): - node.query(f"SELECT * FROM {view_name}", settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {view_name}", settings=[("user", f"{user_name}")] + ) finally: with Finally("I drop the view"): @@ -1274,17 +1929,26 @@ def select_without_target_table_privilege(self, grant_target_name, user_name, no @TestSuite def select_from_tables(self, node=None): - """Testing RBAC functionality of SELECT for tables related to materialized views - target tables, source tables. - """ - Scenario(run=select_from_implicit_target_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_from_explicit_target_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_from_source_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) + """Testing RBAC functionality of SELECT for tables related to materialized views - target tables, source tables.""" + Scenario( + run=select_from_implicit_target_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_from_explicit_target_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_from_source_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_MaterializedView_Select_TargetTable("1.0") -) -def select_from_implicit_target_table_privilege_granted_directly_or_via_role(self, node=None): +@Requirements(RQ_SRS_006_RBAC_MaterializedView_Select_TargetTable("1.0")) +def select_from_implicit_target_table_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to SELECT from the implicit target table created from a materialized view if they have SELECT privilege on that table. 
""" @@ -1294,53 +1958,70 @@ def select_from_implicit_target_table_privilege_granted_directly_or_via_role(sel if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_from_implicit_target_table, - name="select from implicit target table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_from_implicit_target_table, + name="select from implicit target table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_from_implicit_target_table, - name="select from implicit target table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_from_implicit_target_table, + name="select from implicit target table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_from_implicit_target_table(self, grant_target_name, user_name, node=None): - """Grant SELECT on the implicit target table and check the user is able to SELECT only if they have SELECT privilege on the table. - """ + """Grant SELECT on the implicit target table and check the user is able to SELECT only if they have SELECT privilege on the table.""" view_name = f"view_{getuid()}" - implicit_table_name = f"\\\".inner.{view_name}\\\"" + implicit_table_name = f'\\".inner.{view_name}\\"' exitcode, message = errors.not_enough_privileges(name=f"{user_name}") if node is None: node = self.context.node try: with Given("I have a view"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with Then("I attempt to SELECT from the implicit target table as the user"): - node.query(f"SELECT * FROM {implicit_table_name}", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {implicit_table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant SELECT privilege on the view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to SELECT from the implicit target table as the user"): - node.query(f"SELECT * FROM {implicit_table_name}", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {implicit_table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant SELECT privilege on the target table"): node.query(f"GRANT SELECT ON {implicit_table_name} TO {grant_target_name}") with Then("I attempt to SELECT from the implicit target table as the user"): - node.query(f"SELECT * FROM {implicit_table_name}", - settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {implicit_table_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_MaterializedView_Select_TargetTable("1.0") -) -def select_from_explicit_target_table_privilege_granted_directly_or_via_role(self, node=None): +@Requirements(RQ_SRS_006_RBAC_MaterializedView_Select_TargetTable("1.0")) +def select_from_explicit_target_table_privilege_granted_directly_or_via_role( + self, node=None +): 
"""Check that user is able to SELECT from the explicit target table created from a materialized view if they have SELECT privilege on that table. """ @@ -1350,19 +2031,23 @@ def select_from_explicit_target_table_privilege_granted_directly_or_via_role(sel if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_from_explicit_target_table, - name="select from explicit target table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_from_explicit_target_table, + name="select from explicit target table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_from_explicit_target_table, - name="select from explicit target table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_from_explicit_target_table, + name="select from explicit target table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_from_explicit_target_table(self, grant_target_name, user_name, node=None): - """Grant SELECT on the explicit target table and check the user is able to SELECT only if they have SELECT privilege on the table. - """ + """Grant SELECT on the explicit target table and check the user is able to SELECT only if they have SELECT privilege on the table.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -1371,32 +2056,42 @@ def select_from_explicit_target_table(self, grant_target_name, user_name, node=N with table(node, f"{table_name}"): try: with Given("I have a view"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} TO {table_name} AS SELECT 1" + ) with Then("I attempt to SELECT from the explicit target table as the user"): - node.query(f"SELECT * FROM {table_name}", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant SELECT privilege on the view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to SELECT from the explicit target table as the user"): - node.query(f"SELECT * FROM {table_name}", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant SELECT privilege on the target table"): node.query(f"GRANT SELECT ON {table_name} TO {grant_target_name}") with Then("I attempt to SELECT from the explicit target table as the user"): - node.query(f"SELECT * FROM {table_name}", - settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table_name}", settings=[("user", f"{user_name}")] + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -@Requirements( - RQ_SRS_006_RBAC_MaterializedView_Select_SourceTable("1.0") -) +@Requirements(RQ_SRS_006_RBAC_MaterializedView_Select_SourceTable("1.0")) def select_from_source_table_privilege_granted_directly_or_via_role(self, node=None): 
"""Check that user is able to SELECT from the source table of a materialized view if they have SELECT privilege on that table. @@ -1407,19 +2102,23 @@ def select_from_source_table_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_from_source_table, - name="select from source table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_from_source_table, + name="select from source table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_from_source_table, - name="select from source table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_from_source_table, + name="select from source table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_from_source_table(self, grant_target_name, user_name, node=None): - """Grant SELECT on the source table and check the user is able to SELECT only if they have SELECT privilege on the table. - """ + """Grant SELECT on the source table and check the user is able to SELECT only if they have SELECT privilege on the table.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -1428,61 +2127,80 @@ def select_from_source_table(self, grant_target_name, user_name, node=None): with table(node, f"{table_name}"): try: with Given("I have a view"): - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}" + ) with Then("I attempt to SELECT from the source table as the user"): - node.query(f"SELECT * FROM {table_name}", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant SELECT privilege on the view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to SELECT from the implicit target table as the user"): - node.query(f"SELECT * FROM {table_name}", - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant SELECT privilege on the target table"): node.query(f"GRANT SELECT ON {table_name} TO {grant_target_name}") with Then("I attempt to SELECT from the implicit target table as the user"): - node.query(f"SELECT * FROM {table_name}", - settings = [("user", f"{user_name}")]) + node.query( + f"SELECT * FROM {table_name}", settings=[("user", f"{user_name}")] + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_MaterializedView_Drop("1.0"), ) def drop(self, node=None): - """Test the RBAC functionality of the `DROP VIEW` command. 
- """ - Scenario(run=drop_with_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=drop_with_revoked_privilege_revoked_directly_or_from_role, setup=instrument_clickhouse_server_log) + """Test the RBAC functionality of the `DROP VIEW` command.""" + Scenario( + run=drop_with_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=drop_with_revoked_privilege_revoked_directly_or_from_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario def drop_with_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to drop view with DROP VIEW privilege if the user has privilege directly or through a role. - """ + """Check that user is able to drop view with DROP VIEW privilege if the user has privilege directly or through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=drop_with_privilege, - name="drop privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario(test=drop_with_privilege, name="drop privilege granted directly")( + grant_target_name=user_name, user_name=user_name + ) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=drop_with_privilege, - name="drop privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=drop_with_privilege, name="drop privilege granted through a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def drop_with_privilege(self, grant_target_name, user_name, node=None): - """Grant DROP VIEW privilege and check the user is able to successfully drop a view. - """ + """Grant DROP VIEW privilege and check the user is able to successfully drop a view.""" view_name = f"view_{getuid()}" exitcode, message = errors.table_does_not_exist(name=f"default.{view_name}") @@ -1491,13 +2209,15 @@ def drop_with_privilege(self, grant_target_name, user_name, node=None): try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with When("I grant DROP VIEW privilege"): node.query(f"GRANT DROP VIEW ON {view_name} TO {grant_target_name}") with And("I drop the view as the user"): - node.query(f"DROP VIEW {view_name}", settings = [("user",f"{user_name}")]) + node.query(f"DROP VIEW {view_name}", settings=[("user", f"{user_name}")]) with Then("I check the table does not exist"): node.query(f"SELECT * FROM {view_name}", exitcode=exitcode, message=message) @@ -1506,29 +2226,31 @@ def drop_with_privilege(self, grant_target_name, user_name, node=None): with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def drop_with_revoked_privilege_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to drop view with DROP VIEW privilege revoked directly or from a role. 
- """ + """Check that user is unable to drop view with DROP VIEW privilege revoked directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=drop_with_revoked_privilege, - name="drop privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=drop_with_revoked_privilege, name="drop privilege revoked directly" + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=drop_with_revoked_privilege, - name="drop privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=drop_with_revoked_privilege, name="drop privilege revoked from a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def drop_with_revoked_privilege(self, grant_target_name, user_name, node=None): - """Revoke DROP VIEW privilege and check the user is unable to DROP a view. - """ + """Revoke DROP VIEW privilege and check the user is unable to DROP a view.""" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -1537,7 +2259,9 @@ def drop_with_revoked_privilege(self, grant_target_name, user_name, node=None): try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with When("I grant DROP VIEW privilege"): node.query(f"GRANT DROP VIEW ON {view_name} TO {grant_target_name}") @@ -1546,57 +2270,92 @@ def drop_with_revoked_privilege(self, grant_target_name, user_name, node=None): node.query(f"REVOKE DROP VIEW ON {view_name} FROM {grant_target_name}") with Then("I drop the view as the user"): - node.query(f"DROP VIEW {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"DROP VIEW {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_MaterializedView_ModifyQuery("1.0"), ) def modify_query(self, node=None): - """Test the RBAC functionality of the `MODIFY QUERY` command. 
- """ + """Test the RBAC functionality of the `MODIFY QUERY` command.""" if node is None: node = self.context.node with allow_experimental_alter_materialized_view_structure(node): - Scenario(run=modify_query_with_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=modify_query_with_privilege_revoked_directly_or_from_role, setup=instrument_clickhouse_server_log) - Scenario(run=modify_query_without_source_table_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=modify_query_with_source_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=modify_query_with_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=modify_query_with_join_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=modify_query_with_union_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=modify_query_with_join_union_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=modify_query_with_nested_views_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) + Scenario( + run=modify_query_with_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=modify_query_with_privilege_revoked_directly_or_from_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=modify_query_without_source_table_privilege, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=modify_query_with_source_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=modify_query_with_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=modify_query_with_join_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=modify_query_with_union_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=modify_query_with_join_union_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=modify_query_with_nested_views_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario def modify_query_with_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to modify view with MODIFY QUERY if the user has privilege directly or through a role. 
- """ + """Check that user is able to modify view with MODIFY QUERY if the user has privilege directly or through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=modify_query_with_privilege, - name="modify query privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=modify_query_with_privilege, + name="modify query privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=modify_query_with_privilege, - name="modify query privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=modify_query_with_privilege, + name="modify query privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestScenario def modify_query_with_privilege(self, grant_target_name, user_name, node=None): - """Grant MODIFY QUERY and check that user is able to execute it. - """ + """Grant MODIFY QUERY and check that user is able to execute it.""" view_name = f"view_{getuid()}" if node is None: @@ -1604,41 +2363,52 @@ def modify_query_with_privilege(self, grant_target_name, user_name, node=None): try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with When("I grant MODIFY QUERY privilege"): - node.query(f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}" + ) with Then("I modify the view query as the user"): - node.query(f"ALTER TABLE {view_name} MODIFY QUERY SELECT 2", settings = [("user",f"{user_name}")]) + node.query( + f"ALTER TABLE {view_name} MODIFY QUERY SELECT 2", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def modify_query_with_privilege_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to modify the view query with MODIFY QUERY if the privilege has been revoked, directly or from a role. 
- """ + """Check that user is unable to modify the view query with MODIFY QUERY if the privilege has been revoked, directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=modify_query_with_revoked_privilege, - name="modify query privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=modify_query_with_revoked_privilege, + name="modify query privilege revoked directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=modify_query_with_revoked_privilege, - name="modify query privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=modify_query_with_revoked_privilege, + name="modify query privilege revoked from a role", + )(grant_target_name=role_name, user_name=user_name) + @TestScenario def modify_query_with_revoked_privilege(self, grant_target_name, user_name, node=None): - """Revoke MODIFY QUERY and check that user is unable to modify the view query. - """ + """Revoke MODIFY QUERY and check that user is unable to modify the view query.""" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -1647,21 +2417,32 @@ def modify_query_with_revoked_privilege(self, grant_target_name, user_name, node try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with When("I grant MODIFY QUERY privilege"): - node.query(f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}" + ) with And("I revoke MODIFY QUERY privilege"): - node.query(f"REVOKE ALTER VIEW MODIFY QUERY ON {view_name} FROM {grant_target_name}") + node.query( + f"REVOKE ALTER VIEW MODIFY QUERY ON {view_name} FROM {grant_target_name}" + ) with Then("I modify the view query as the user"): - node.query(f"ALTER TABLE {view_name} MODIFY QUERY SELECT 2", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"ALTER TABLE {view_name} MODIFY QUERY SELECT 2", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def modify_query_without_source_table_privilege(self, node=None): """Check that user is unable to modify the view query to have a source table in the stored query @@ -1675,24 +2456,37 @@ def modify_query_without_source_table_privilege(self, node=None): if node is None: node = self.context.node with table(node, f"{table_name}"): - with user(node, f"{user_name}"): + with user(node, f"{user_name}"): try: with When("I create a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with And("I grant view MODIFY QUERY privilege to the user"): - node.query(f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {user_name}") - with Then("I attempt to use MODIFY QUERY on the view without privilege on 
the source table"): - node.query(f"ALTER TABLE {view_name} MODIFY QUERY SELECT * FROM {table_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {user_name}" + ) + with Then( + "I attempt to use MODIFY QUERY on the view without privilege on the source table" + ): + node.query( + f"ALTER TABLE {view_name} MODIFY QUERY SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def modify_query_with_source_table_privilege_granted_directly_or_via_role(self, node=None): +def modify_query_with_source_table_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to modify the view query to have a source table in the stored query, if and only if the user has SELECT privilege for the view and the source table, either directly or from a role. """ @@ -1702,19 +2496,25 @@ def modify_query_with_source_table_privilege_granted_directly_or_via_role(self, if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=modify_query_with_source_table_privilege, - name="modify query with source table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=modify_query_with_source_table_privilege, + name="modify query with source table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=modify_query_with_source_table_privilege, - name="modify query with source table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=modify_query_with_source_table_privilege, + name="modify query with source table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def modify_query_with_source_table_privilege(self, user_name, grant_target_name, node=None): - """Grant MODIFY QUERY privilege on view and SELECT privilege on the new source table and check the user is able to modify the view query. 
- """ +def modify_query_with_source_table_privilege( + self, user_name, grant_target_name, node=None +): + """Grant MODIFY QUERY privilege on view and SELECT privilege on the new source table and check the user is able to modify the view query.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" @@ -1724,20 +2524,28 @@ def modify_query_with_source_table_privilege(self, user_name, grant_target_name, try: with Given("I have a view with a source table"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with And("I grant view MODIFY QUERY privilege"): - node.query(f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}" + ) with And("I grant table SELECT privilege"): node.query(f"GRANT SELECT ON {table_name} TO {grant_target_name}") with Then("I check the user is able to modify the view query"): - node.query(f"ALTER TABLE {view_name} MODIFY QUERY SELECT * FROM {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"ALTER TABLE {view_name} MODIFY QUERY SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def modify_query_with_subquery_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to modify the view query to use a query with two subqueries if and only if @@ -1749,14 +2557,19 @@ def modify_query_with_subquery_privilege_granted_directly_or_via_role(self, node if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=modify_query_with_subquery, - name="modify query with subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=modify_query_with_subquery, + name="modify query with subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=modify_query_with_subquery, - name="modify query with subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=modify_query_with_subquery, + name="modify query with subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def modify_query_with_subquery(self, user_name, grant_target_name, node=None): @@ -1776,31 +2589,82 @@ def modify_query_with_subquery(self, user_name, grant_target_name, node=None): try: with Given("I have a view with a subquery"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with When("I grant MODIFY QUERY privilege on view"): - node.query(f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}" + ) with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = 
[("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=3): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=3))+1, grant_target_name, table0_name, table1_name, table2_name): + with grant_select_on_table( + node, + max(permutations(table_count=3)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + ): with Then("I attempt to modify the view query as the user"): - output = node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")]).output - assert output == '0', error() + output = node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + ).output + assert output == "0", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def modify_query_with_join_query_privilege_granted_directly_or_via_role(self, node=None): +def modify_query_with_join_query_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to modify the view query to use a query that includes a `JOIN` statement if and only if the user has SELECT privilege on all the tables and MODIFY QUERY privilege on the view, either directly or through a role. 
""" @@ -1810,14 +2674,19 @@ def modify_query_with_join_query_privilege_granted_directly_or_via_role(self, no if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=modify_query_with_join_query, - name="modify query with join, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=modify_query_with_join_query, + name="modify query with join, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=modify_query_with_join_query, - name="modify query with join, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=modify_query_with_join_query, + name="modify query with join, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def modify_query_with_join_query(self, user_name, grant_target_name, node=None): @@ -1836,30 +2705,72 @@ def modify_query_with_join_query(self, user_name, grant_target_name, node=None): try: with Given("I have a view with a JOIN statement"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with When("I grant MODIFY QUERY privilege on view"): - node.query(f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}" + ) with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", 
f"{user_name}")]) + node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def modify_query_with_union_query_privilege_granted_directly_or_via_role(self, node=None): +def modify_query_with_union_query_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to modify the view query to include a `UNION ALL` statement if and only if the user has SELECT privilege on all the tables and MODIFY QUERY on the view, either directly or through a role. """ @@ -1869,19 +2780,23 @@ def modify_query_with_union_query_privilege_granted_directly_or_via_role(self, n if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=modify_query_with_union_query, - name="modify query with union, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=modify_query_with_union_query, + name="modify query with union, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=modify_query_with_union_query, - name="modify query with union, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=modify_query_with_union_query, + name="modify query with union, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def modify_query_with_union_query(self, user_name, grant_target_name, node=None): - """Grant MODIFY QUERY on the view and SELECT on the tables in the stored query and check the user is able modify the view query if and only if they have SELECT privilege on all of them. 
- """ + """Grant MODIFY QUERY on the view and SELECT on the tables in the stored query and check the user is able modify the view query if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -1894,31 +2809,72 @@ def modify_query_with_union_query(self, user_name, grant_target_name, node=None) try: with Given("I have a view with a UNION statement"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with When("I grant MODIFY QUERY privilege on view"): - node.query(f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}" + ) with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")]) + node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def modify_query_with_join_union_subquery_privilege_granted_directly_or_via_role(self, node=None): +def modify_query_with_join_union_subquery_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to modify the view query to include `UNION ALL`, `JOIN` and two subqueries if and only if the user has SELECT privilege on all the tables and the view, either directly or through a role. 
""" @@ -1928,17 +2884,24 @@ def modify_query_with_join_union_subquery_privilege_granted_directly_or_via_role if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=modify_query_with_join_union_subquery, - name="modify query with join union subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=modify_query_with_join_union_subquery, + name="modify query with join union subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=modify_query_with_join_union_subquery, - name="modify query with join union subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=modify_query_with_join_union_subquery, + name="modify query with join union subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def modify_query_with_join_union_subquery(self, grant_target_name, user_name, node=None): +def modify_query_with_join_union_subquery( + self, grant_target_name, user_name, node=None +): """Grant MODIFY QUERY on the view and SELECT on tables in the modify query and check the user is able modify the view query if and only if they have SELECT privilege on all of them. """ @@ -1953,36 +2916,97 @@ def modify_query_with_join_union_subquery(self, grant_target_name, user_name, no if node is None: node = self.context.node - with table(node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}"): + with table( + node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}" + ): try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT 1" + ) with When("I grant MODIFY QUERY privilege on view"): - node.query(f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER VIEW MODIFY QUERY ON {view_name} TO {grant_target_name}" + ) with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name, table4_name=table4_name), settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=5): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name, table3_name, table4_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + table4_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to modify the view query as the user"): - 
node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name, table4_name=table4_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=5))+1, grant_target_name, table0_name, table1_name, table2_name, table3_name, table4_name): + with grant_select_on_table( + node, + max(permutations(table_count=5)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + table4_name, + ): with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name, table4_name=table4_name), settings = [("user", f"{user_name}")]) + node.query( + modify_query_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def modify_query_with_nested_views_privilege_granted_directly_or_via_role(self, node=None): +def modify_query_with_nested_views_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to modify the view query to include other views if and only if the user has MODIFY QUERY privilege on the view SELECT privilege on all of the views and the source tables for those views, either directly or through a role. 
""" @@ -1992,14 +3016,19 @@ def modify_query_with_nested_views_privilege_granted_directly_or_via_role(self, if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=modify_query_with_nested_views, - name="modify query with nested views, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=modify_query_with_nested_views, + name="modify query with nested views, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=modify_query_with_nested_views, - name="modify query with nested views, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=modify_query_with_nested_views, + name="modify query with nested views, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def modify_query_with_nested_views(self, grant_target_name, user_name, node=None): @@ -2023,26 +3052,83 @@ def modify_query_with_nested_views(self, grant_target_name, user_name, node=None try: with Given("I have some views"): node.query(f"CREATE VIEW {view0_name} AS SELECT y FROM {table0_name}") - node.query(f"CREATE VIEW {view1_name} AS SELECT y FROM {view0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y<2)") - node.query(f"CREATE VIEW {view2_name} AS SELECT y FROM {view1_name} JOIN {table2_name} USING y") + node.query( + f"CREATE VIEW {view1_name} AS SELECT y FROM {view0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y<2)" + ) + node.query( + f"CREATE VIEW {view2_name} AS SELECT y FROM {view1_name} JOIN {table2_name} USING y" + ) node.query(f"CREATE VIEW {view3_name} AS SELECT 1") with When("I grant MODIFY QUERY privilege on view"): - node.query(f"GRANT ALTER VIEW MODIFY QUERY ON {view3_name} TO {grant_target_name}") + node.query( + f"GRANT ALTER VIEW MODIFY QUERY ON {view3_name} TO {grant_target_name}" + ) with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view3_name=view3_name, view2_name=view2_name, table3_name=table3_name), settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + modify_query_view_query.format( + view3_name=view3_name, + view2_name=view2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) - for permutation in ([0,1,2,3,7,11,15,31,39,79,95],permutations(table_count=7))[self.context.stress]: - with grant_select_on_table(node, permutation, grant_target_name, view2_name, table3_name, view1_name, table2_name, view0_name, table1_name, table0_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + for permutation in ( + [0, 1, 2, 3, 7, 11, 15, 31, 39, 79, 95], + permutations(table_count=7), + )[self.context.stress]: + with grant_select_on_table( + node, + permutation, + grant_target_name, + view2_name, + table3_name, + view1_name, + table2_name, + view0_name, + table1_name, + table0_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view3_name=view3_name, view2_name=view2_name, table3_name=table3_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, 
message=message) + node.query( + modify_query_view_query.format( + view3_name=view3_name, + view2_name=view2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all views"): - with grant_select_on_table(node, max(permutations(table_count=7))+1, grant_target_name, view0_name, view1_name, view2_name, table0_name, table1_name, table2_name, table3_name): + with grant_select_on_table( + node, + max(permutations(table_count=7)) + 1, + grant_target_name, + view0_name, + view1_name, + view2_name, + table0_name, + table1_name, + table2_name, + table3_name, + ): with Then("I attempt to modify the view query as the user"): - node.query(modify_query_view_query.format(view3_name=view3_name, view2_name=view2_name, table3_name=table3_name), settings = [("user", f"{user_name}")]) + node.query( + modify_query_view_query.format( + view3_name=view3_name, + view2_name=view2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the views"): @@ -2055,40 +3141,53 @@ def modify_query_with_nested_views(self, grant_target_name, user_name, node=None with And("I drop view3", flags=TE): node.query(f"DROP VIEW IF EXISTS {view0_name}") + @TestSuite def insert(self, node=None): - """Check RBAC functionality of INSERT with materialized views. - """ - Scenario(run=insert_on_source_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=insert_with_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=insert_on_target_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) + """Check RBAC functionality of INSERT with materialized views.""" + Scenario( + run=insert_on_source_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=insert_with_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=insert_on_target_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario @Requirements( RQ_SRS_006_RBAC_MaterializedView_Insert_SourceTable("1.0"), ) def insert_on_source_table_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to INSERT on the source table of the materialized view with only INSERT privilege on the source table. 
- """ + """Check that user is able to INSERT on the source table of the materialized view with only INSERT privilege on the source table.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=insert_on_source_table, - name="insert on source table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=insert_on_source_table, + name="insert on source table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=insert_on_source_table, - name="insert on source table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=insert_on_source_table, + name="insert on source table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def insert_on_source_table(self, grant_target_name, user_name, node=None): - """Grant SELECT on the source table to the user and check they are able to SELECT from it. - """ + """Grant SELECT on the source table to the user and check they are able to SELECT from it.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -2099,17 +3198,23 @@ def insert_on_source_table(self, grant_target_name, user_name, node=None): try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} TO {table0_name} AS SELECT * FROM {table1_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} TO {table0_name} AS SELECT * FROM {table1_name}" + ) with When("I grant INSERT on the source table"): node.query(f"GRANT INSERT ON {table1_name} TO {grant_target_name}") with Then("I attempt to insert into the source table"): - node.query(f"INSERT INTO {table1_name}(d) VALUES ('2020-01-01')", settings = [("user",f"{user_name}")]) + node.query( + f"INSERT INTO {table1_name}(d) VALUES ('2020-01-01')", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_MaterializedView_Insert("1.0"), @@ -2124,19 +3229,23 @@ def insert_with_privilege_granted_directly_or_via_role(self, node=None): if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=insert_with_insert_privilege, - name="insert on view, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=insert_with_insert_privilege, + name="insert on view, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=insert_with_insert_privilege, - name="insert on view, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=insert_with_insert_privilege, + name="insert on view, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def insert_with_insert_privilege(self, grant_target_name, user_name, node=None): - """Grant INSERT and check user is able to INSERT into the materialized view only if they have INSERT privilege 
for the view. - """ + """Grant INSERT and check user is able to INSERT into the materialized view only if they have INSERT privilege for the view.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -2148,18 +3257,23 @@ def insert_with_insert_privilege(self, grant_target_name, user_name, node=None): try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} TO {table0_name} AS SELECT * FROM {table1_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} TO {table0_name} AS SELECT * FROM {table1_name}" + ) with When("I grant INSERT on the view"): node.query(f"GRANT INSERT ON {view_name} TO {grant_target_name}") with Then("I attempt to insert into the view"): - node.query(f"INSERT INTO {view_name}(d) VALUES ('2020-01-01')", - settings = [("user",f"{user_name}")]) + node.query( + f"INSERT INTO {view_name}(d) VALUES ('2020-01-01')", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario @Requirements( RQ_SRS_006_RBAC_MaterializedView_Insert_TargetTable("1.0"), @@ -2174,19 +3288,23 @@ def insert_on_target_table_privilege_granted_directly_or_via_role(self, node=Non if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=insert_on_target_table, - name="insert on target table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=insert_on_target_table, + name="insert on target table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=insert_on_target_table, - name="insert on target table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=insert_on_target_table, + name="insert on target table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def insert_on_target_table(self, grant_target_name, user_name, node=None): - """Grant INSERT and check user is able to INSERT into target table. 
- """ + """Grant INSERT and check user is able to INSERT into target table.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -2197,21 +3315,30 @@ def insert_on_target_table(self, grant_target_name, user_name, node=None): try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} TO {table0_name} AS SELECT * FROM {table1_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} TO {table0_name} AS SELECT * FROM {table1_name}" + ) with When("I grant INSERT on the target table"): node.query(f"GRANT INSERT ON {table0_name} TO {grant_target_name}") with Then("I attempt to insert into the target table"): - node.query(f"INSERT INTO {table0_name}(d) VALUES ('2020-01-01')", settings = [("user",f"{user_name}")]) + node.query( + f"INSERT INTO {table0_name}(d) VALUES ('2020-01-01')", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + + @TestScenario @Requirements( RQ_SRS_006_RBAC_MaterializedView_Insert_TargetTable("1.0"), ) -def insert_on_implicit_target_table_privilege_granted_directly_or_via_role(self, node=None): +def insert_on_implicit_target_table_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to INSERT into the implicit target table of a materialized view if and only if they have INSERT privilege for the table, either directly or through a role. """ @@ -2221,22 +3348,26 @@ def insert_on_implicit_target_table_privilege_granted_directly_or_via_role(self, if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=insert_on_target_table, - name="insert on implicit target table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=insert_on_target_table, + name="insert on implicit target table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=insert_on_target_table, - name="insert on implicit target table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=insert_on_target_table, + name="insert on implicit target table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def insert_on_target_table(self, grant_target_name, user_name, node=None): - """Grant INSERT and check user is able to INSERT into implicit target table. 
- """ + """Grant INSERT and check user is able to INSERT into implicit target table.""" view_name = f"view_{getuid()}" table_name = f"table0_{getuid()}" - implicit_table_name = f"\\\".inner.{view_name}\\\"" + implicit_table_name = f'\\".inner.{view_name}\\"' if node is None: node = self.context.node @@ -2244,17 +3375,25 @@ def insert_on_target_table(self, grant_target_name, user_name, node=None): try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}") + node.query( + f"CREATE MATERIALIZED VIEW {view_name} ENGINE = Memory AS SELECT * FROM {table_name}" + ) with When("I grant INSERT on the target table"): - node.query(f"GRANT INSERT ON {implicit_table_name} TO {grant_target_name}") + node.query( + f"GRANT INSERT ON {implicit_table_name} TO {grant_target_name}" + ) with Then("I attempt to insert into the target table"): - node.query(f"INSERT INTO {implicit_table_name}(d) VALUES ('2020-01-01')", settings = [("user",f"{user_name}")]) + node.query( + f"INSERT INTO {implicit_table_name}(d) VALUES ('2020-01-01')", + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_MaterializedView("1.0"), diff --git a/tests/testflows/rbac/tests/views/view.py b/tests/testflows/rbac/tests/views/view.py index f4fb4550a75..66788747934 100755 --- a/tests/testflows/rbac/tests/views/view.py +++ b/tests/testflows/rbac/tests/views/view.py @@ -5,28 +5,57 @@ from rbac.requirements import * from rbac.helper.common import * import rbac.helper.errors as errors + @TestSuite @Requirements( RQ_SRS_006_RBAC_View_Create("1.0"), ) def create(self, node=None): - """Test the RBAC functionality of the `CREATE VIEW` command. 
- """ - Scenario(run=create_without_create_view_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_create_view_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_revoked_create_view_privilege_revoked_directly_or_from_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_without_source_table_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_source_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_join_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_union_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_join_union_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=create_with_nested_views_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) + """Test the RBAC functionality of the `CREATE VIEW` command.""" + Scenario( + run=create_without_create_view_privilege, setup=instrument_clickhouse_server_log + ) + Scenario( + run=create_with_create_view_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_revoked_create_view_privilege_revoked_directly_or_from_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_without_source_table_privilege, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_source_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_join_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_union_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_join_union_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=create_with_nested_views_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario def create_without_create_view_privilege(self, node=None): - """Check that user is unable to create a view without CREATE VIEW privilege. 
- """ + """Check that user is unable to create a view without CREATE VIEW privilege.""" user_name = f"user_{getuid()}" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -36,32 +65,40 @@ def create_without_create_view_privilege(self, node=None): with user(node, f"{user_name}"): with When("I try to create a view without CREATE VIEW privilege as the user"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE VIEW {view_name} AS SELECT 1", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE VIEW {view_name} AS SELECT 1", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestScenario def create_with_create_view_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to create a view with CREATE VIEW privilege, either granted directly or through a role. - """ + """Check that user is able to create a view with CREATE VIEW privilege, either granted directly or through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_create_view_privilege, - name="create with create view privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_create_view_privilege, + name="create with create view privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_create_view_privilege, - name="create with create view privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_create_view_privilege, + name="create with create view privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_create_view_privilege(self, grant_target_name, user_name, node=None): - """Check that user is able to create a view with the granted privileges. - """ + """Check that user is able to create a view with the granted privileges.""" view_name = f"view_{getuid()}" if node is None: @@ -72,35 +109,46 @@ def create_with_create_view_privilege(self, grant_target_name, user_name, node=N node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I try to create a view without privilege as the user"): - node.query(f"CREATE VIEW {view_name} AS SELECT 1", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE VIEW {view_name} AS SELECT 1", + settings=[("user", f"{user_name}")], + ) finally: with Then("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def create_with_revoked_create_view_privilege_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to create view after the CREATE VIEW privilege is revoked, either directly or from a role. 
- """ +def create_with_revoked_create_view_privilege_revoked_directly_or_from_role( + self, node=None +): + """Check that user is unable to create view after the CREATE VIEW privilege is revoked, either directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_revoked_create_view_privilege, - name="create with create view privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_revoked_create_view_privilege, + name="create with create view privilege revoked directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_revoked_create_view_privilege, - name="create with create view privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_revoked_create_view_privilege, + name="create with create view privilege revoked from a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline -def create_with_revoked_create_view_privilege(self, grant_target_name, user_name, node=None): - """Revoke CREATE VIEW privilege and check the user is unable to create a view. - """ +def create_with_revoked_create_view_privilege( + self, grant_target_name, user_name, node=None +): + """Revoke CREATE VIEW privilege and check the user is unable to create a view.""" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -113,8 +161,13 @@ def create_with_revoked_create_view_privilege(self, grant_target_name, user_name node.query(f"REVOKE CREATE VIEW ON {view_name} FROM {grant_target_name}") with Then("I try to create a view on the table as the user"): - node.query(f"CREATE VIEW {view_name} AS SELECT 1", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE VIEW {view_name} AS SELECT 1", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestScenario def create_without_source_table_privilege(self, node=None): @@ -134,8 +187,13 @@ def create_without_source_table_privilege(self, node=None): node.query(f"GRANT CREATE VIEW ON {view_name} TO {user_name}") with Then("I try to create a view without select privilege on the table"): - node.query(f"CREATE VIEW {view_name} AS SELECT * FROM {table_name}", settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"CREATE VIEW {view_name} AS SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) + @TestScenario def create_with_source_table_privilege_granted_directly_or_via_role(self, node=None): @@ -148,19 +206,23 @@ def create_with_source_table_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_source_table_privilege, - name="create with create view and select privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_source_table_privilege, + name="create with create view and select privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): 
node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_source_table_privilege, - name="create with create view and select privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_source_table_privilege, + name="create with create view and select privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_source_table_privilege(self, user_name, grant_target_name, node=None): - """Check that user is unable to create a view without SELECT privilege on the source table. - """ + """Check that user is unable to create a view without SELECT privilege on the source table.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" @@ -176,16 +238,20 @@ def create_with_source_table_privilege(self, user_name, grant_target_name, node= with And("I try to create a view on the table as the user"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE VIEW {view_name} AS SELECT * FROM {table_name}", settings = [("user", f"{user_name}")]) + node.query( + f"CREATE VIEW {view_name} AS SELECT * FROM {table_name}", + settings=[("user", f"{user_name}")], + ) with Then("I check the view"): output = node.query(f"SELECT count(*) FROM {view_name}").output - assert output == '0', error() + assert output == "0", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_subquery_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a view where the stored query has two subqueries @@ -198,14 +264,19 @@ def create_with_subquery_privilege_granted_directly_or_via_role(self, node=None) if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_subquery, - name="create with subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_subquery, + name="create with subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_subquery, - name="create with subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_subquery, + name="create with subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_subquery(self, user_name, grant_target_name, node=None): @@ -226,29 +297,72 @@ def create_with_subquery(self, user_name, grant_target_name, node=None): with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to CREATE VIEW as the user with create privilege"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=3): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, 
table2_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=3))+1, grant_target_name, table0_name, table1_name, table2_name): + with grant_select_on_table( + node, + max(permutations(table_count=3)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name), settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_join_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a view where the stored query includes a `JOIN` statement @@ -261,14 +375,19 @@ def create_with_join_query_privilege_granted_directly_or_via_role(self, node=Non if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_join_query, - name="create with join query, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_join_query, + name="create with join query, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_join_query, - name="create with join query, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_join_query, + name="create with join query, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_join_query(self, grant_target_name, user_name, node=None): @@ -288,29 +407,63 @@ def create_with_join_query(self, grant_target_name, user_name, node=None): with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to create view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + 
create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Then("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_union_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a view where the stored query includes a `UNION ALL` statement @@ -323,14 +476,19 @@ def create_with_union_query_privilege_granted_directly_or_via_role(self, node=No if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_union_query, - name="create with union query, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_union_query, + name="create with union query, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_union_query, - name="create with union query, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_union_query, + name="create with union query, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_union_query(self, grant_target_name, user_name, node=None): @@ -350,31 +508,67 @@ def create_with_union_query(self, grant_target_name, user_name, node=None): with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") with Then("I attempt to create view as the user"): - 
node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name), settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def create_with_join_union_subquery_privilege_granted_directly_or_via_role(self, node=None): +def create_with_join_union_subquery_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to create a view with a stored query that includes `UNION ALL`, `JOIN` and two subqueries if and only if the user has SELECT privilege on all of the tables, either granted directly or through a role. 
""" @@ -384,14 +578,19 @@ def create_with_join_union_subquery_privilege_granted_directly_or_via_role(self, if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_join_union_subquery, - name="create with join union subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_join_union_subquery, + name="create with join union subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_join_union_subquery, - name="create with join union subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_join_union_subquery, + name="create with join union subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_join_union_subquery(self, grant_target_name, user_name, node=None): @@ -409,36 +608,93 @@ def create_with_join_union_subquery(self, grant_target_name, user_name, node=Non if node is None: node = self.context.node - with table(node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}"): + with table( + node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}" + ): with user(node, f"{user_name}"): try: with When("I grant CREATE VIEW privilege"): - node.query(f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}") - with Then("I attempt to create view as the user with CREATE VIEW privilege"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name, table4_name=table4_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + f"GRANT CREATE VIEW ON {view_name} TO {grant_target_name}" + ) + with Then( + "I attempt to create view as the user with CREATE VIEW privilege" + ): + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=5): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table3_name, table4_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table3_name, + table4_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name, table4_name=table4_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", 
f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=5))+1, grant_target_name, table0_name, table1_name, table2_name, table3_name, table4_name): + with grant_select_on_table( + node, + max(permutations(table_count=5)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + table4_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view_name=view_name, table0_name=table0_name, table1_name=table1_name, table2_name=table2_name, table3_name=table3_name, table4_name=table4_name), - settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view_name=view_name, + table0_name=table0_name, + table1_name=table1_name, + table2_name=table2_name, + table3_name=table3_name, + table4_name=table4_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def create_with_nested_views_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to create a view with a stored query that includes other views if and only if @@ -450,14 +706,19 @@ def create_with_nested_views_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=create_with_nested_views, - name="create with nested views, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=create_with_nested_views, + name="create with nested views, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=create_with_nested_views, - name="create with nested views, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=create_with_nested_views, + name="create with nested views, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def create_with_nested_views(self, grant_target_name, user_name, node=None): @@ -481,31 +742,86 @@ def create_with_nested_views(self, grant_target_name, user_name, node=None): try: with Given("I have some views"): node.query(f"CREATE VIEW {view0_name} AS SELECT y FROM {table0_name}") - node.query(f"CREATE VIEW {view1_name} AS SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {view0_name} WHERE y<2)") - node.query(f"CREATE VIEW {view2_name} AS SELECT y FROM {table2_name} JOIN {view1_name} USING y") + node.query( + f"CREATE VIEW {view1_name} AS SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {view0_name} WHERE y<2)" + ) + node.query( + f"CREATE VIEW {view2_name} AS SELECT y FROM {table2_name} JOIN {view1_name} USING y" + ) with When("I grant CREATE VIEW privilege"): node.query(f"GRANT CREATE VIEW ON {view3_name} TO {grant_target_name}") - with Then("I attempt to create view as the user with CREATE VIEW privilege"): - node.query(create_view_query.format(view3_name=view3_name, view2_name=view2_name, table3_name=table3_name), - settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + with Then( + "I attempt to create view as the user with CREATE VIEW privilege" + ): + 
node.query( + create_view_query.format( + view3_name=view3_name, + view2_name=view2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) - for permutation in ([0,1,2,3,7,11,15,31,39,79,95],permutations(table_count=7))[self.context.stress]: - with grant_select_on_table(node, permutation, grant_target_name, view2_name, table3_name, view1_name, table2_name, view0_name, table1_name, table0_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + for permutation in ( + [0, 1, 2, 3, 7, 11, 15, 31, 39, 79, 95], + permutations(table_count=7), + )[self.context.stress]: + with grant_select_on_table( + node, + permutation, + grant_target_name, + view2_name, + table3_name, + view1_name, + table2_name, + view0_name, + table1_name, + table0_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view3_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view3_name=view3_name, view2_name=view2_name, table3_name=table3_name), - settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + create_view_query.format( + view3_name=view3_name, + view2_name=view2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all views"): - with grant_select_on_table(node, max(permutations(table_count=7))+1, grant_target_name, view0_name, view1_name, view2_name, table0_name, table1_name, table2_name, table3_name): + with grant_select_on_table( + node, + max(permutations(table_count=7)) + 1, + grant_target_name, + view0_name, + view1_name, + view2_name, + table0_name, + table1_name, + table2_name, + table3_name, + ): with Given("I don't have a view"): node.query(f"DROP VIEW IF EXISTS {view3_name}") with Then("I attempt to create a view as the user"): - node.query(create_view_query.format(view3_name=view3_name, view2_name=view2_name, table3_name=table3_name), - settings = [("user", f"{user_name}")]) + node.query( + create_view_query.format( + view3_name=view3_name, + view2_name=view2_name, + table3_name=table3_name, + ), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the views"): @@ -518,28 +834,57 @@ def create_with_nested_views(self, grant_target_name, user_name, node=None): with And("I drop view3", flags=TE): node.query(f"DROP VIEW IF EXISTS {view0_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_View_Select("1.0"), ) def select(self, node=None): - """Test the RBAC functionality of the `SELECT FROM view` command. 
- """ - Scenario(run=select_without_select_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_select_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_select_privilege_revoked_directly_or_from_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_without_source_table_privilege, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_source_table_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_join_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_union_query_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_join_union_subquery_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=select_with_nested_views_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) + """Test the RBAC functionality of the `SELECT FROM view` command.""" + Scenario( + run=select_without_select_privilege, setup=instrument_clickhouse_server_log + ) + Scenario( + run=select_with_select_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_select_privilege_revoked_directly_or_from_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_without_source_table_privilege, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_source_table_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_join_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_union_query_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_join_union_subquery_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=select_with_nested_views_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario def select_without_select_privilege(self, node=None): - """Check that user is unable to select on a view without view SELECT privilege. 
- """ + """Check that user is unable to select on a view without view SELECT privilege.""" user_name = f"user_{getuid()}" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -553,36 +898,44 @@ def select_without_select_privilege(self, node=None): node.query(f"CREATE VIEW {view_name} AS SELECT 1") with Then("I try to select from view without privilege as the user"): - node.query(f"SELECT * FROM {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT * FROM {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_select_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to select from a view if and only if they have select privilege on that view, either directly or from a role. - """ + """Check that user is able to select from a view if and only if they have select privilege on that view, either directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_select_privilege, - name="select with select privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_select_privilege, + name="select with select privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_select_privilege, - name="select with select privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_select_privilege, + name="select with select privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_select_privilege(self, user_name, grant_target_name, node=None): - """Grant SELECT privilege on a view and check the user is able to SELECT from it. - """ + """Grant SELECT privilege on a view and check the user is able to SELECT from it.""" view_name = f"view_{getuid()}" if node is None: @@ -596,36 +949,42 @@ def select_with_select_privilege(self, user_name, grant_target_name, node=None): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from view with privilege as the user"): - output = node.query(f"SELECT count(*) FROM {view_name}", settings = [("user",f"{user_name}")]).output - assert output == '1', error() + output = node.query( + f"SELECT count(*) FROM {view_name}", settings=[("user", f"{user_name}")] + ).output + assert output == "1", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_select_privilege_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to select from a view if their SELECT privilege is revoked, either directly or from a role. 
- """ + """Check that user is unable to select from a view if their SELECT privilege is revoked, either directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_select_privilege, - name="select with select privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_select_privilege, + name="select with select privilege revoked directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_select_privilege, - name="select with select privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_select_privilege, + name="select with select privilege revoked from a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_revoked_select_privilege(self, user_name, grant_target_name, node=None): - """Grant and revoke SELECT privilege on a view and check the user is unable to SELECT from it. - """ + """Grant and revoke SELECT privilege on a view and check the user is unable to SELECT from it.""" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -642,17 +1001,21 @@ def select_with_revoked_select_privilege(self, user_name, grant_target_name, nod node.query(f"REVOKE SELECT ON {view_name} FROM {grant_target_name}") with Then("I attempt to select from view with privilege as the user"): - node.query(f"SELECT count(*) FROM {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"SELECT count(*) FROM {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_without_source_table_privilege(self, node=None): - """Check that user is unable to select from a view without SELECT privilege for the source table. 
- """ + """Check that user is unable to select from a view without SELECT privilege for the source table.""" user_name = f"user_{getuid()}" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" @@ -661,7 +1024,7 @@ def select_without_source_table_privilege(self, node=None): if node is None: node = self.context.node with table(node, f"{table_name}"): - with user(node, f"{user_name}"): + with user(node, f"{user_name}"): try: with When("I create a view from the source table"): node.query(f"DROP VIEW IF EXISTS {view_name}") @@ -669,14 +1032,21 @@ def select_without_source_table_privilege(self, node=None): with And("I grant view select privilege to the user"): node.query(f"GRANT SELECT ON {view_name} TO {user_name}") - with Then("I attempt to select from view without privilege on the source table"): - node.query(f"SELECT count(*) FROM {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + with Then( + "I attempt to select from view without privilege on the source table" + ): + node.query( + f"SELECT count(*) FROM {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_source_table_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view, with source table in the stored query, if and only if @@ -688,19 +1058,23 @@ def select_with_source_table_privilege_granted_directly_or_via_role(self, node=N if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_source_table_privilege, - name="select with source table, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_source_table_privilege, + name="select with source table, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_source_table_privilege, - name="select with source table, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_source_table_privilege, + name="select with source table, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_source_table_privilege(self, user_name, grant_target_name, node=None): - """Grant SELECT privilege on view and the source table for that view and check the user is able to SELECT from the view. 
- """ + """Grant SELECT privilege on view and the source table for that view and check the user is able to SELECT from the view.""" view_name = f"view_{getuid()}" table_name = f"table_{getuid()}" @@ -717,13 +1091,17 @@ def select_with_source_table_privilege(self, user_name, grant_target_name, node= node.query(f"GRANT SELECT ON {table_name} TO {grant_target_name}") with Then("I check the user is able to select from the view"): - output = node.query(f"SELECT count(*) FROM {view_name}", settings = [("user", f"{user_name}")]).output - assert output == '0', error() + output = node.query( + f"SELECT count(*) FROM {view_name}", + settings=[("user", f"{user_name}")], + ).output + assert output == "0", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_subquery_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view where the stored query has two subqueries if and only if @@ -735,19 +1113,23 @@ def select_with_subquery_privilege_granted_directly_or_via_role(self, node=None) if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_subquery, - name="select with subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_subquery, + name="select with subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_subquery, - name="select with subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_subquery, + name="select with subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_subquery(self, user_name, grant_target_name, node=None): - """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. 
- """ + """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -761,29 +1143,61 @@ def select_with_subquery(self, user_name, grant_target_name, node=None): try: with Given("I have a view with a subquery"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE VIEW {view_name} AS SELECT * FROM {table0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table2_name} WHERE y<2))") + node.query( + f"CREATE VIEW {view_name} AS SELECT * FROM {table0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table2_name} WHERE y<2))" + ) with When("I grant SELECT privilege on view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from the view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=3): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=3))+1, grant_target_name, table0_name, table1_name, table2_name): + with grant_select_on_table( + node, + max(permutations(table_count=3)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + ): with Then("I attempt to select from a view as the user"): - output = node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")]).output - assert output == '0', error() + output = node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + ).output + assert output == "0", error() finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_join_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view where the stored query includes a `JOIN` statement if and only if @@ -795,19 +1209,23 @@ def select_with_join_query_privilege_granted_directly_or_via_role(self, node=Non if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_join_query, - name="select with join, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_join_query, + name="select with join, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, 
f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_join_query, - name="select with join, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_join_query, + name="select with join, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_join_query(self, user_name, grant_target_name, node=None): - """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. - """ + """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -820,28 +1238,54 @@ def select_with_join_query(self, user_name, grant_target_name, node=None): try: with Given("I have a view with a JOIN statement"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE VIEW {view_name} AS SELECT * FROM {table0_name} JOIN {table1_name} USING d") + node.query( + f"CREATE VIEW {view_name} AS SELECT * FROM {table0_name} JOIN {table1_name} USING d" + ) with When("I grant SELECT privilege on view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from the view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")]) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_union_query_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view where the stored query includes a `UNION ALL` statement if and only if @@ -853,19 +1297,23 @@ def 
select_with_union_query_privilege_granted_directly_or_via_role(self, node=No if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_union_query, - name="select with union, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_union_query, + name="select with union, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_union_query, - name="select with union, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_union_query, + name="select with union, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_union_query(self, user_name, grant_target_name, node=None): - """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. - """ + """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -878,30 +1326,58 @@ def select_with_union_query(self, user_name, grant_target_name, node=None): try: with Given("I have a view with a UNION statement"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE VIEW {view_name} AS SELECT * FROM {table0_name} UNION ALL SELECT * FROM {table1_name}") + node.query( + f"CREATE VIEW {view_name} AS SELECT * FROM {table0_name} UNION ALL SELECT * FROM {table1_name}" + ) with When("I grant SELECT privilege on view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from the view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=2): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, permutation, grant_target_name, table0_name, table1_name + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=2))+1, grant_target_name, table0_name, table1_name): + with grant_select_on_table( + node, + max(permutations(table_count=2)) + 1, + grant_target_name, + table0_name, + table1_name, + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")]) + 
node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario -def select_with_join_union_subquery_privilege_granted_directly_or_via_role(self, node=None): +def select_with_join_union_subquery_privilege_granted_directly_or_via_role( + self, node=None +): """Check that user is able to select from a view with a stored query that includes `UNION ALL`, `JOIN` and two subqueries if and only if the user has SELECT privilege on all the tables and the view, either directly or through a role. """ @@ -911,19 +1387,23 @@ def select_with_join_union_subquery_privilege_granted_directly_or_via_role(self, if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_join_union_subquery, - name="select with join union subquery, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_join_union_subquery, + name="select with join union subquery, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_join_union_subquery, - name="select with join union subquery, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_join_union_subquery, + name="select with join union subquery, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_join_union_subquery(self, grant_target_name, user_name, node=None): - """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. 
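The JOIN and UNION scenarios above drive a `permutation` integer through `grant_select_on_table`, so every combination of granted tables is exercised before the final all-tables grant. The sketch below, which sits outside the patch itself, shows one way such a bitmask-style permutation can be decoded; it assumes `permutations(table_count=n)` enumerates the proper subsets of `n` tables as integers `0 .. 2**n - 2` (the real helpers live in the suite's common code).

```python
# Illustrative sketch of the grant-permutation idea used by these scenarios.
# Assumption: permutations(table_count=n) enumerates proper subsets of n tables
# as bitmasks 0 .. 2**n - 2; the real helpers live in the suite's common code.
from itertools import compress


def permutations(table_count=1):
    """All proper subsets of `table_count` tables, encoded as bitmask integers."""
    return range(2**table_count - 1)


def tables_for_permutation(permutation, *table_names):
    """Return the table names whose bit is set in the permutation bitmask."""
    bits = ((permutation >> i) & 1 for i in range(len(table_names)))
    return list(compress(table_names, bits))


# max(permutations(table_count=2)) + 1 == 0b11 selects every table, which is
# why the final check grants SELECT on all tables at once.
print(tables_for_permutation(0b01, "table0", "table1"))  # ['table0']
print(tables_for_permutation(0b11, "table0", "table1"))  # ['table0', 'table1']
```

With the view itself granted directly, the SELECT is expected to succeed only once the permutation covers every table the stored query references.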
- """ + """Grant SELECT on the view and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view_name = f"view_{getuid()}" table0_name = f"table0_{getuid()}" table1_name = f"table1_{getuid()}" @@ -935,32 +1415,70 @@ def select_with_join_union_subquery(self, grant_target_name, user_name, node=Non if node is None: node = self.context.node - with table(node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}"): + with table( + node, f"{table0_name},{table1_name},{table2_name},{table3_name},{table4_name}" + ): try: with Given("I have a view"): node.query(f"DROP VIEW IF EXISTS {view_name}") - node.query(f"CREATE VIEW {view_name} AS SELECT y FROM {table0_name} JOIN {table1_name} USING y UNION ALL SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table3_name} WHERE y IN (SELECT y FROM {table4_name} WHERE y<2))") + node.query( + f"CREATE VIEW {view_name} AS SELECT y FROM {table0_name} JOIN {table1_name} USING y UNION ALL SELECT y FROM {table1_name} WHERE y IN (SELECT y FROM {table3_name} WHERE y IN (SELECT y FROM {table4_name} WHERE y<2))" + ) with When("I grant SELECT privilege on view"): node.query(f"GRANT SELECT ON {view_name} TO {grant_target_name}") with Then("I attempt to select from the view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) for permutation in permutations(table_count=5): - with grant_select_on_table(node, permutation, grant_target_name, table0_name, table1_name, table2_name, table3_name, table4_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + with grant_select_on_table( + node, + permutation, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + table4_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all tables"): - with grant_select_on_table(node, max(permutations(table_count=5))+1, grant_target_name, table0_name, table1_name, table2_name, table3_name, table4_name): + with grant_select_on_table( + node, + max(permutations(table_count=5)) + 1, + grant_target_name, + table0_name, + table1_name, + table2_name, + table3_name, + table4_name, + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view_name=view_name), settings = [("user", f"{user_name}")]) + node.query( + select_view_query.format(view_name=view_name), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def select_with_nested_views_privilege_granted_directly_or_via_role(self, node=None): """Check that user is able to select from a view with a stored query that includes other views if and only if @@ -972,19 +1490,23 @@ def select_with_nested_views_privilege_granted_directly_or_via_role(self, node=N if node is None: node = 
self.context.node with user(node, f"{user_name}"): - Scenario(test=select_with_nested_views, - name="select with nested views, privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=select_with_nested_views, + name="select with nested views, privilege granted directly", + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=select_with_nested_views, - name="select with nested views, privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=select_with_nested_views, + name="select with nested views, privilege granted through a role", + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def select_with_nested_views(self, grant_target_name, user_name, node=None): - """Grant SELECT on views and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them. - """ + """Grant SELECT on views and tables in the stored query and check the user is able to SELECT if and only if they have SELECT privilege on all of them.""" view0_name = f"view0_{getuid()}" view1_name = f"view1_{getuid()}" view2_name = f"view2_{getuid()}" @@ -1002,25 +1524,71 @@ def select_with_nested_views(self, grant_target_name, user_name, node=None): try: with Given("I have some views"): node.query(f"CREATE VIEW {view0_name} AS SELECT y FROM {table0_name}") - node.query(f"CREATE VIEW {view1_name} AS SELECT y FROM {view0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y<2)") - node.query(f"CREATE VIEW {view2_name} AS SELECT y FROM {view1_name} JOIN {table2_name} USING y") - node.query(f"CREATE VIEW {view3_name} AS SELECT y FROM {view2_name} UNION ALL SELECT y FROM {table3_name}") + node.query( + f"CREATE VIEW {view1_name} AS SELECT y FROM {view0_name} WHERE y IN (SELECT y FROM {table1_name} WHERE y<2)" + ) + node.query( + f"CREATE VIEW {view2_name} AS SELECT y FROM {view1_name} JOIN {table2_name} USING y" + ) + node.query( + f"CREATE VIEW {view3_name} AS SELECT y FROM {view2_name} UNION ALL SELECT y FROM {table3_name}" + ) with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view3_name=view3_name), - settings = [("user",f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view3_name=view3_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) - for permutation in ([0,1,3,5,7,13,15,23,31,45,63,95,127,173,237,247,253],permutations(table_count=8))[self.context.stress]: - with grant_select_on_table(node, permutation, grant_target_name, view3_name, table3_name, view2_name, view1_name, table2_name, view0_name, table1_name, table0_name) as tables_granted: - with When(f"permutation={permutation}, tables granted = {tables_granted}"): + for permutation in ( + [0, 1, 3, 5, 7, 13, 15, 23, 31, 45, 63, 95, 127, 173, 237, 247, 253], + permutations(table_count=8), + )[self.context.stress]: + with grant_select_on_table( + node, + permutation, + grant_target_name, + view3_name, + table3_name, + view2_name, + view1_name, + table2_name, + view0_name, + table1_name, + table0_name, + ) as tables_granted: + with When( + f"permutation={permutation}, tables granted = {tables_granted}" + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view3_name=view3_name), - 
settings = [("user", f"{user_name}")], exitcode=exitcode, message=message) + node.query( + select_view_query.format(view3_name=view3_name), + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) with When("I grant select on all views"): - with grant_select_on_table(node, max(permutations(table_count=8))+1, grant_target_name, view0_name, view1_name, view2_name, view3_name, table0_name, table1_name, table2_name, table3_name): + with grant_select_on_table( + node, + max(permutations(table_count=8)) + 1, + grant_target_name, + view0_name, + view1_name, + view2_name, + view3_name, + table0_name, + table1_name, + table2_name, + table3_name, + ): with Then("I attempt to select from a view as the user"): - node.query(select_view_query.format(view3_name=view3_name), settings = [("user", f"{user_name}")]) + node.query( + select_view_query.format(view3_name=view3_name), + settings=[("user", f"{user_name}")], + ) finally: with Finally("I drop the views"): @@ -1033,40 +1601,47 @@ def select_with_nested_views(self, grant_target_name, user_name, node=None): with And("I drop view3", flags=TE): node.query(f"DROP VIEW IF EXISTS {view0_name}") + @TestSuite @Requirements( RQ_SRS_006_RBAC_View_Drop("1.0"), ) def drop(self, node=None): - """Test the RBAC functionality of the `DROP VIEW` command. - """ - Scenario(run=drop_with_privilege_granted_directly_or_via_role, setup=instrument_clickhouse_server_log) - Scenario(run=drop_with_revoked_privilege_revoked_directly_or_from_role, setup=instrument_clickhouse_server_log) + """Test the RBAC functionality of the `DROP VIEW` command.""" + Scenario( + run=drop_with_privilege_granted_directly_or_via_role, + setup=instrument_clickhouse_server_log, + ) + Scenario( + run=drop_with_revoked_privilege_revoked_directly_or_from_role, + setup=instrument_clickhouse_server_log, + ) + @TestScenario - def drop_with_privilege_granted_directly_or_via_role(self, node=None): - """Check that user is able to drop view with DROP VIEW privilege if the user has privilege directly or through a role. - """ + """Check that user is able to drop view with DROP VIEW privilege if the user has privilege directly or through a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=drop_with_privilege, - name="drop privilege granted directly")(grant_target_name=user_name, user_name=user_name) + Scenario(test=drop_with_privilege, name="drop privilege granted directly")( + grant_target_name=user_name, user_name=user_name + ) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=drop_with_privilege, - name="drop privilege granted through a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=drop_with_privilege, name="drop privilege granted through a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def drop_with_privilege(self, grant_target_name, user_name, node=None): - """Grant DROP VIEW privilege and check the user is able to successfully drop a view. 
- """ + """Grant DROP VIEW privilege and check the user is able to successfully drop a view.""" view_name = f"view_{getuid()}" exitcode, message = errors.table_does_not_exist(name=f"default.{view_name}") @@ -1081,7 +1656,7 @@ def drop_with_privilege(self, grant_target_name, user_name, node=None): node.query(f"GRANT DROP VIEW ON {view_name} TO {grant_target_name}") with And("I drop the view as the user"): - node.query(f"DROP VIEW {view_name}", settings = [("user",f"{user_name}")]) + node.query(f"DROP VIEW {view_name}", settings=[("user", f"{user_name}")]) with Then("I check the table does not exist"): node.query(f"SELECT * FROM {view_name}", exitcode=exitcode, message=message) @@ -1090,29 +1665,31 @@ def drop_with_privilege(self, grant_target_name, user_name, node=None): with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestScenario def drop_with_revoked_privilege_revoked_directly_or_from_role(self, node=None): - """Check that user is unable to drop view with DROP VIEW privilege revoked directly or from a role. - """ + """Check that user is unable to drop view with DROP VIEW privilege revoked directly or from a role.""" user_name = f"user_{getuid()}" role_name = f"role_{getuid()}" if node is None: node = self.context.node with user(node, f"{user_name}"): - Scenario(test=drop_with_revoked_privilege, - name="drop privilege revoked directly")(grant_target_name=user_name, user_name=user_name) + Scenario( + test=drop_with_revoked_privilege, name="drop privilege revoked directly" + )(grant_target_name=user_name, user_name=user_name) with user(node, f"{user_name}"), role(node, f"{role_name}"): with When("I grant the role to the user"): node.query(f"GRANT {role_name} TO {user_name}") - Scenario(test=drop_with_revoked_privilege, - name="drop privilege revoked from a role")(grant_target_name=role_name, user_name=user_name) + Scenario( + test=drop_with_revoked_privilege, name="drop privilege revoked from a role" + )(grant_target_name=role_name, user_name=user_name) + @TestOutline def drop_with_revoked_privilege(self, grant_target_name, user_name, node=None): - """Revoke DROP VIEW privilege and check the user is unable to DROP a view. - """ + """Revoke DROP VIEW privilege and check the user is unable to DROP a view.""" view_name = f"view_{getuid()}" exitcode, message = errors.not_enough_privileges(name=f"{user_name}") @@ -1130,13 +1707,18 @@ def drop_with_revoked_privilege(self, grant_target_name, user_name, node=None): node.query(f"REVOKE DROP VIEW ON {view_name} FROM {grant_target_name}") with Then("I drop the view as the user"): - node.query(f"DROP VIEW {view_name}", settings = [("user",f"{user_name}")], - exitcode=exitcode, message=message) + node.query( + f"DROP VIEW {view_name}", + settings=[("user", f"{user_name}")], + exitcode=exitcode, + message=message, + ) finally: with Finally("I drop the view"): node.query(f"DROP VIEW IF EXISTS {view_name}") + @TestFeature @Requirements( RQ_SRS_006_RBAC_View("1.0"), diff --git a/tests/testflows/regression.py b/tests/testflows/regression.py index 8922ef7a2bb..bce8274c5cc 100755 --- a/tests/testflows/regression.py +++ b/tests/testflows/regression.py @@ -6,30 +6,72 @@ append_path(sys.path, ".") from helpers.argparser import argparser + @TestModule @Name("clickhouse") @ArgumentParser(argparser) -def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None): - """ClickHouse regression. 
- """ - args = {"local": local, "clickhouse_binary_path": clickhouse_binary_path, "clickhouse_version": clickhouse_version, "stress": stress} +def regression( + self, local, clickhouse_binary_path, clickhouse_version=None, stress=None +): + """ClickHouse regression.""" + args = { + "local": local, + "clickhouse_binary_path": clickhouse_binary_path, + "clickhouse_version": clickhouse_version, + "stress": stress, + } self.context.stress = stress self.context.clickhouse_version = clickhouse_version with Pool(8) as pool: try: - Feature(test=load("example.regression", "regression"), parallel=True, executor=pool)(**args) - Feature(test=load("ldap.regression", "regression"), parallel=True, executor=pool)(**args) - Feature(test=load("rbac.regression", "regression"), parallel=True, executor=pool)(**args) - Feature(test=load("aes_encryption.regression", "regression"), parallel=True, executor=pool)(**args) # TODO: fix it! + Feature( + test=load("example.regression", "regression"), + parallel=True, + executor=pool, + )(**args) + Feature( + test=load("ldap.regression", "regression"), parallel=True, executor=pool + )(**args) + Feature( + test=load("rbac.regression", "regression"), parallel=True, executor=pool + )(**args) + Feature( + test=load("aes_encryption.regression", "regression"), + parallel=True, + executor=pool, + )( + **args + ) # TODO: fix it! # Feature(test=load("map_type.regression", "regression"), parallel=True, executor=pool)(**args) # TODO: fix it! - Feature(test=load("window_functions.regression", "regression"), parallel=True, executor=pool)(**args) # TODO: fix it! - Feature(test=load("datetime64_extended_range.regression", "regression"), parallel=True, executor=pool)(**args) - Feature(test=load("kerberos.regression", "regression"), parallel=True, executor=pool)(**args) - Feature(test=load("extended_precision_data_types.regression", "regression"), parallel=True, executor=pool)(**args) # TODO: fix it! + Feature( + test=load("window_functions.regression", "regression"), + parallel=True, + executor=pool, + )( + **args + ) # TODO: fix it! + Feature( + test=load("datetime64_extended_range.regression", "regression"), + parallel=True, + executor=pool, + )(**args) + Feature( + test=load("kerberos.regression", "regression"), + parallel=True, + executor=pool, + )(**args) + Feature( + test=load("extended_precision_data_types.regression", "regression"), + parallel=True, + executor=pool, + )( + **args + ) # TODO: fix it! 
finally: join() + if main(): regression() diff --git a/tests/testflows/window_functions/regression.py b/tests/testflows/window_functions/regression.py index 130977cc050..f7fa116ead8 100755 --- a/tests/testflows/window_functions/regression.py +++ b/tests/testflows/window_functions/regression.py @@ -8,106 +8,146 @@ append_path(sys.path, "..") from helpers.cluster import Cluster from helpers.argparser import argparser -from window_functions.requirements import SRS019_ClickHouse_Window_Functions, RQ_SRS_019_ClickHouse_WindowFunctions +from window_functions.requirements import ( + SRS019_ClickHouse_Window_Functions, + RQ_SRS_019_ClickHouse_WindowFunctions, +) xfails = { - "tests/:/frame clause/range frame/between expr following and expr following without order by error": - [(Fail, "invalid error message")], - "tests/:/frame clause/range frame/between expr following and expr preceding without order by error": - [(Fail, "invalid error message")], - "tests/:/frame clause/range frame/between expr following and current row without order by error": - [(Fail, "invalid error message")], - "tests/:/frame clause/range frame/between expr following and current row zero special case": - [(Fail, "known bug")], - "tests/:/frame clause/range frame/between expr following and expr preceding with order by zero special case": - [(Fail, "known bug")], - "tests/:/funcs/lag/anyOrNull with column value as offset": - [(Fail, "column values are not supported as offset")], - "tests/:/funcs/lead/subquery as offset": - [(Fail, "subquery is not supported as offset")], - "tests/:/frame clause/range frame/between current row and unbounded following modifying named window": - [(Fail, "range with named window is not supported")], - "tests/:/frame clause/range overflow/negative overflow with Int16": - [(Fail, "exception on conversion")], - "tests/:/frame clause/range overflow/positive overflow with Int16": - [(Fail, "exception on conversion")], - "tests/:/misc/subquery expr preceding": - [(Fail, "subquery is not supported as offset")], - "tests/:/frame clause/range errors/error negative preceding offset": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22442")], - "tests/:/frame clause/range errors/error negative following offset": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22442")], - "tests/:/misc/window functions in select expression": - [(Fail, "not supported, https://github.com/ClickHouse/ClickHouse/issues/19857")], - "tests/:/misc/window functions in subquery": - [(Fail, "not supported, https://github.com/ClickHouse/ClickHouse/issues/19857")], - "tests/:/misc/in view": - [(Fail, "bug, https://github.com/ClickHouse/ClickHouse/issues/26001")], - "tests/:/frame clause/range frame/order by decimal": - [(Fail, "Exception: The RANGE OFFSET frame for 'DB::ColumnDecimal >' ORDER BY column is not implemented")], - "tests/:/frame clause/range frame/with nulls": - [(Fail, "DB::Exception: The RANGE OFFSET frame for 'DB::ColumnNullable' ORDER BY column is not implemented")], - "tests/:/aggregate funcs/aggregate funcs over rows frame/func='mannWhitneyUTest(salary, 1)'": - [(Fail, "need to investigate")], - "tests/:/aggregate funcs/aggregate funcs over rows frame/func='rankCorr(salary, 0.5)'": - [(Fail, "need to investigate")], - "tests/distributed/misc/query with order by and one window": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")], - "tests/distributed/over clause/empty named window": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")], - "tests/distributed/over 
clause/empty": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")], - "tests/distributed/over clause/adhoc window": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")], - "tests/distributed/frame clause/range datetime/:": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")], - "tests/distributed/frame clause/range frame/between expr preceding and expr following with partition by same column twice": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")], - "tests/:/funcs/leadInFrame/explicit default value": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/25057")], - "tests/:/funcs/leadInFrame/with nulls": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/25057")], - "tests/:/funcs/leadInFrame/default offset": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")], - "tests/:/funcs/lagInFrame/explicit default value": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/25057")], - "tests/:/funcs/lagInFrame/with nulls": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/25057")], - "tests/:/funcs/lagInFrame/default offset": - [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")] + "tests/:/frame clause/range frame/between expr following and expr following without order by error": [ + (Fail, "invalid error message") + ], + "tests/:/frame clause/range frame/between expr following and expr preceding without order by error": [ + (Fail, "invalid error message") + ], + "tests/:/frame clause/range frame/between expr following and current row without order by error": [ + (Fail, "invalid error message") + ], + "tests/:/frame clause/range frame/between expr following and current row zero special case": [ + (Fail, "known bug") + ], + "tests/:/frame clause/range frame/between expr following and expr preceding with order by zero special case": [ + (Fail, "known bug") + ], + "tests/:/funcs/lag/anyOrNull with column value as offset": [ + (Fail, "column values are not supported as offset") + ], + "tests/:/funcs/lead/subquery as offset": [ + (Fail, "subquery is not supported as offset") + ], + "tests/:/frame clause/range frame/between current row and unbounded following modifying named window": [ + (Fail, "range with named window is not supported") + ], + "tests/:/frame clause/range overflow/negative overflow with Int16": [ + (Fail, "exception on conversion") + ], + "tests/:/frame clause/range overflow/positive overflow with Int16": [ + (Fail, "exception on conversion") + ], + "tests/:/misc/subquery expr preceding": [ + (Fail, "subquery is not supported as offset") + ], + "tests/:/frame clause/range errors/error negative preceding offset": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22442") + ], + "tests/:/frame clause/range errors/error negative following offset": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/22442") + ], + "tests/:/misc/window functions in select expression": [ + (Fail, "not supported, https://github.com/ClickHouse/ClickHouse/issues/19857") + ], + "tests/:/misc/window functions in subquery": [ + (Fail, "not supported, https://github.com/ClickHouse/ClickHouse/issues/19857") + ], + "tests/:/misc/in view": [ + (Fail, "bug, https://github.com/ClickHouse/ClickHouse/issues/26001") + ], + "tests/:/frame clause/range frame/order by decimal": [ + ( + Fail, + "Exception: The RANGE OFFSET frame for 'DB::ColumnDecimal >' ORDER BY column is not implemented", + ) + ], + "tests/:/frame clause/range frame/with nulls": [ + ( + Fail, + "DB::Exception: The RANGE 
OFFSET frame for 'DB::ColumnNullable' ORDER BY column is not implemented", + ) + ], + "tests/:/aggregate funcs/aggregate funcs over rows frame/func='mannWhitneyUTest(salary, 1)'": [ + (Fail, "need to investigate") + ], + "tests/:/aggregate funcs/aggregate funcs over rows frame/func='rankCorr(salary, 0.5)'": [ + (Fail, "need to investigate") + ], + "tests/distributed/misc/query with order by and one window": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902") + ], + "tests/distributed/over clause/empty named window": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902") + ], + "tests/distributed/over clause/empty": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902") + ], + "tests/distributed/over clause/adhoc window": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902") + ], + "tests/distributed/frame clause/range datetime/:": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902") + ], + "tests/distributed/frame clause/range frame/between expr preceding and expr following with partition by same column twice": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902") + ], + "tests/:/funcs/leadInFrame/explicit default value": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/25057") + ], + "tests/:/funcs/leadInFrame/with nulls": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/25057") + ], + "tests/:/funcs/leadInFrame/default offset": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902") + ], + "tests/:/funcs/lagInFrame/explicit default value": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/25057") + ], + "tests/:/funcs/lagInFrame/with nulls": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/25057") + ], + "tests/:/funcs/lagInFrame/default offset": [ + (Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902") + ], } -xflags = { -} +xflags = {} + @TestModule @ArgumentParser(argparser) @XFails(xfails) @XFlags(xflags) @Name("window functions") -@Specifications( - SRS019_ClickHouse_Window_Functions -) -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions("1.0") -) -def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None): - """Window functions regression. 
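The `xfails` map reformatted above pairs test-path patterns with the result and reason expected for known failures, and `@XFails` applies it when the module runs. A minimal sketch of its shape, outside the patch, assuming `Fail` is imported from `testflows.core` and that `:` in a pattern matches a single path component as used throughout the suite:

```python
# Shape of an xfails map as consumed by @XFails: pattern -> [(result, reason)].
# Assumption: Fail comes from testflows.core; ":" matches one path level.
from testflows.core import Fail

xfails = {
    "tests/:/frame clause/range overflow/negative overflow with Int16": [
        (Fail, "exception on conversion")
    ],
    "tests/distributed/over clause/empty named window": [
        (Fail, "https://github.com/ClickHouse/ClickHouse/issues/23902")
    ],
}
```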
- """ - nodes = { - "clickhouse": - ("clickhouse1", "clickhouse2", "clickhouse3") - } +@Specifications(SRS019_ClickHouse_Window_Functions) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions("1.0")) +def regression( + self, local, clickhouse_binary_path, clickhouse_version=None, stress=None +): + """Window functions regression.""" + nodes = {"clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3")} if stress is not None: self.context.stress = stress self.context.clickhouse_version = clickhouse_version - with Cluster(local, clickhouse_binary_path, nodes=nodes, - docker_compose_project_dir=os.path.join(current_dir(), "window_functions_env")) as cluster: + with Cluster( + local, + clickhouse_binary_path, + nodes=nodes, + docker_compose_project_dir=os.path.join(current_dir(), "window_functions_env"), + ) as cluster: self.context.cluster = cluster Feature(run=load("window_functions.tests.feature", "feature"), flags=TE) + if main(): regression() diff --git a/tests/testflows/window_functions/requirements/requirements.py b/tests/testflows/window_functions/requirements/requirements.py index e453b1728e1..3e9a8a46719 100644 --- a/tests/testflows/window_functions/requirements/requirements.py +++ b/tests/testflows/window_functions/requirements/requirements.py @@ -9,3334 +9,3467 @@ from testflows.core import Requirement Heading = Specification.Heading RQ_SRS_019_ClickHouse_WindowFunctions = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [window functions] that produce a result for each row inside the window.\n' - '\n' - ), + "[ClickHouse] SHALL support [window functions] that produce a result for each row inside the window.\n" + "\n" + ), link=None, level=3, - num='3.1.1') + num="3.1.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_NonDistributedTables = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.NonDistributedTables', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.NonDistributedTables", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of [window functions] on non-distributed \n' - 'table engines such as `MergeTree`.\n' - '\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of [window functions] on non-distributed \n" + "table engines such as `MergeTree`.\n" + "\n" + "\n" + ), link=None, level=3, - num='3.1.2') + num="3.1.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_DistributedTables = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.DistributedTables', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.DistributedTables", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support correct operation of [window functions] on\n' - '[Distributed](https://clickhouse.com/docs/en/engines/table-engines/special/distributed/) table engine.\n' - '\n' - ), + "[ClickHouse] SHALL support correct operation of [window functions] on\n" + "[Distributed](https://clickhouse.com/docs/en/engines/table-engines/special/distributed/) table engine.\n" + "\n" + ), link=None, level=3, - num='3.1.3') + num="3.1.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_WindowSpec = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.WindowSpec', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.WindowSpec", + version="1.0", priority=None, group=None, 
type=None, uid=None, description=( - '[ClickHouse] SHALL support defining a window using window specification clause.\n' - 'The [window_spec] SHALL be defined as\n' - '\n' - '```\n' - 'window_spec:\n' - ' [partition_clause] [order_clause] [frame_clause]\n' - '```\n' - '\n' - 'that SHALL specify how to partition query rows into groups for processing by the window function.\n' - '\n' - ), + "[ClickHouse] SHALL support defining a window using window specification clause.\n" + "The [window_spec] SHALL be defined as\n" + "\n" + "```\n" + "window_spec:\n" + " [partition_clause] [order_clause] [frame_clause]\n" + "```\n" + "\n" + "that SHALL specify how to partition query rows into groups for processing by the window function.\n" + "\n" + ), link=None, level=3, - num='3.2.1') + num="3.2.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [partition_clause] that indicates how to divide the query rows into groups.\n' - 'The [partition_clause] SHALL be defined as\n' - '\n' - '```\n' - 'partition_clause:\n' - ' PARTITION BY expr [, expr] ...\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support [partition_clause] that indicates how to divide the query rows into groups.\n" + "The [partition_clause] SHALL be defined as\n" + "\n" + "```\n" + "partition_clause:\n" + " PARTITION BY expr [, expr] ...\n" + "```\n" + "\n" + ), link=None, level=3, - num='3.3.1') + num="3.3.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause_MultipleExpr = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.MultipleExpr', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.MultipleExpr", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support partitioning by more than one `expr` in the [partition_clause] definition.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL support partitioning by more than one `expr` in the [partition_clause] definition.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT x,s, sum(x) OVER (PARTITION BY x,s) FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))\n" - '```\n' - '\n' - '```bash\n' - '┌─x─┬─s─┬─sum(x) OVER (PARTITION BY x, s)─┐\n' - '│ 1 │ a │ 1 │\n' - '│ 1 │ b │ 1 │\n' - '│ 2 │ b │ 2 │\n' - '└───┴───┴─────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─x─┬─s─┬─sum(x) OVER (PARTITION BY x, s)─┐\n" + "│ 1 │ a │ 1 │\n" + "│ 1 │ b │ 1 │\n" + "│ 2 │ b │ 2 │\n" + "└───┴───┴─────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=3, - num='3.3.2') + num="3.3.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause_MissingExpr_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.MissingExpr.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.MissingExpr.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if `expr` is missing in the [partition_clause] definition.\n' - '\n' - '```sql\n' - 'SELECT sum(number) OVER (PARTITION BY) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error if `expr` is missing in the [partition_clause] definition.\n" + "\n" + 
"```sql\n" + "SELECT sum(number) OVER (PARTITION BY) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=3, - num='3.3.3') + num="3.3.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause_InvalidExpr_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.InvalidExpr.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.InvalidExpr.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if `expr` is invalid in the [partition_clause] definition.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if `expr` is invalid in the [partition_clause] definition.\n" + "\n" + ), link=None, level=3, - num='3.3.4') + num="3.3.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [order_clause] that indicates how to sort rows in each window.\n' - '\n' - ), + "[ClickHouse] SHALL support [order_clause] that indicates how to sort rows in each window.\n" + "\n" + ), link=None, level=3, - num='3.4.1') + num="3.4.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MultipleExprs = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.MultipleExprs', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.MultipleExprs", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return support using more than one `expr` in the [order_clause] definition.\n' - '\n' - 'For example, \n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return support using more than one `expr` in the [order_clause] definition.\n" + "\n" + "For example, \n" + "\n" + "```sql\n" "SELECT x,s, sum(x) OVER (ORDER BY x DESC, s DESC) FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))\n" - '```\n' - '\n' - '```bash\n' - '┌─x─┬─s─┬─sum(x) OVER (ORDER BY x DESC, s DESC)─┐\n' - '│ 2 │ b │ 2 │\n' - '│ 1 │ b │ 3 │\n' - '│ 1 │ a │ 4 │\n' - '└───┴───┴───────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─x─┬─s─┬─sum(x) OVER (ORDER BY x DESC, s DESC)─┐\n" + "│ 2 │ b │ 2 │\n" + "│ 1 │ b │ 3 │\n" + "│ 1 │ a │ 4 │\n" + "└───┴───┴───────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=3, - num='3.4.2') + num="3.4.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MissingExpr_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.MissingExpr.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.MissingExpr.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if `expr` is missing in the [order_clause] definition.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if `expr` is missing in the [order_clause] definition.\n" + "\n" + ), link=None, level=3, - num='3.4.3') + num="3.4.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_InvalidExpr_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.InvalidExpr.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.InvalidExpr.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if `expr` is invalid 
in the [order_clause] definition.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if `expr` is invalid in the [order_clause] definition.\n" + "\n" + ), link=None, level=3, - num='3.4.4') + num="3.4.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_FrameClause = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.FrameClause', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.FrameClause", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [frame_clause] that SHALL specify a subset of the current window.\n' - '\n' - 'The `frame_clause` SHALL be defined as\n' - '\n' - '```\n' - 'frame_clause:\n' - ' {ROWS | RANGE } frame_extent\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support [frame_clause] that SHALL specify a subset of the current window.\n" + "\n" + "The `frame_clause` SHALL be defined as\n" + "\n" + "```\n" + "frame_clause:\n" + " {ROWS | RANGE } frame_extent\n" + "```\n" + "\n" + ), link=None, level=3, - num='3.5.1') + num="3.5.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_FrameClause_DefaultFrame = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.FrameClause.DefaultFrame', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.FrameClause.DefaultFrame", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the default `frame_clause` to be `RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW`. \n' - '\n' - 'If the `ORDER BY` clause is specified then this SHALL set the frame to be all rows from \n' - 'the partition start up to and including current row and its peers. \n' - '\n' - 'If the `ORDER BY` clause is not specified then this SHALL set the frame to include all rows\n' - 'in the partition because all the rows are considered to be the peers of the current row.\n' - '\n' - ), + "[ClickHouse] SHALL support the default `frame_clause` to be `RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW`. \n" + "\n" + "If the `ORDER BY` clause is specified then this SHALL set the frame to be all rows from \n" + "the partition start up to and including current row and its peers. 
\n" + "\n" + "If the `ORDER BY` clause is not specified then this SHALL set the frame to include all rows\n" + "in the partition because all the rows are considered to be the peers of the current row.\n" + "\n" + ), link=None, level=3, - num='3.5.2') + num="3.5.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `ROWS` frame to define beginning and ending row positions.\n' - 'Offsets SHALL be differences in row numbers from the current row number.\n' - '\n' - '```sql\n' - 'ROWS frame_extent\n' - '```\n' - '\n' - 'See [frame_extent] definition.\n' - '\n' - ), + "[ClickHouse] SHALL support `ROWS` frame to define beginning and ending row positions.\n" + "Offsets SHALL be differences in row numbers from the current row number.\n" + "\n" + "```sql\n" + "ROWS frame_extent\n" + "```\n" + "\n" + "See [frame_extent] definition.\n" + "\n" + ), link=None, level=4, - num='3.5.3.1') + num="3.5.3.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_MissingFrameExtent_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.MissingFrameExtent.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.MissingFrameExtent.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `ROWS` frame clause is defined without [frame_extent].\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ORDER BY number ROWS) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `ROWS` frame clause is defined without [frame_extent].\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ORDER BY number ROWS) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.5.3.2') + num="3.5.3.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_InvalidFrameExtent_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.InvalidFrameExtent.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.InvalidFrameExtent.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `ROWS` frame clause has invalid [frame_extent].\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return an error if the `ROWS` frame clause has invalid [frame_extent].\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number ROWS '1') FROM numbers(1,3)\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=4, - num='3.5.3.3') + num="3.5.3.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_CurrentRow = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.CurrentRow', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.CurrentRow", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include only the current row in the window partition\n' - 'when `ROWS CURRENT ROW` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS CURRENT ROW) FROM numbers(1,2)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN CURRENT ROW AND 
CURRENT ROW)─┐\n' - '│ 1 │ 1 │\n' - '│ 2 │ 2 │\n' - '└────────┴─────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include only the current row in the window partition\n" + "when `ROWS CURRENT ROW` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS CURRENT ROW) FROM numbers(1,2)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN CURRENT ROW AND CURRENT ROW)─┐\n" + "│ 1 │ 1 │\n" + "│ 2 │ 2 │\n" + "└────────┴─────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.4.1') + num="3.5.3.4.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_UnboundedPreceding = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.UnboundedPreceding', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.UnboundedPreceding", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows before and including the current row in the window partition\n' - 'when `ROWS UNBOUNDED PRECEDING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS UNBOUNDED PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n' - '│ 1 │ 1 │\n' - '│ 2 │ 3 │\n' - '│ 3 │ 6 │\n' - '└────────┴─────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include all rows before and including the current row in the window partition\n" + "when `ROWS UNBOUNDED PRECEDING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS UNBOUNDED PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n" + "│ 1 │ 1 │\n" + "│ 2 │ 3 │\n" + "│ 3 │ 6 │\n" + "└────────┴─────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.5.1') + num="3.5.3.5.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_ExprPreceding = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.ExprPreceding', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.ExprPreceding", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include `expr` rows before and including the current row in the window partition \n' - 'when `ROWS expr PRECEDING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS 1 PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)─┐\n' - '│ 1 │ 1 │\n' - '│ 2 │ 3 │\n' - '│ 3 │ 5 │\n' - '└────────┴─────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include `expr` rows before and including the current row in the window partition \n" + "when `ROWS expr PRECEDING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS 1 PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)─┐\n" + "│ 1 │ 1 │\n" + "│ 2 │ 3 │\n" + "│ 3 │ 5 │\n" 
+ "└────────┴─────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.6.1') + num="3.5.3.6.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_UnboundedFollowing_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.UnboundedFollowing.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.UnboundedFollowing.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `ROWS UNBOUNDED FOLLOWING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS UNBOUNDED FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `ROWS UNBOUNDED FOLLOWING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS UNBOUNDED FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.7.1') + num="3.5.3.7.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_ExprFollowing_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.ExprFollowing.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.ExprFollowing.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `ROWS expr FOLLOWING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS 1 FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `ROWS expr FOLLOWING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS 1 FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.8.1') + num="3.5.3.8.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_CurrentRow = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.CurrentRow', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.CurrentRow", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include only the current row in the window partition\n' - 'when `ROWS BETWEEN CURRENT ROW AND CURRENT ROW` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN CURRENT ROW AND CURRENT ROW) FROM numbers(1,2)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN CURRENT ROW AND CURRENT ROW)─┐\n' - '│ 1 │ 1 │\n' - '│ 2 │ 2 │\n' - '└────────┴─────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include only the current row in the window partition\n" + "when `ROWS BETWEEN CURRENT ROW AND CURRENT ROW` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN CURRENT ROW AND CURRENT ROW) FROM numbers(1,2)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN CURRENT ROW AND CURRENT ROW)─┐\n" + "│ 1 │ 1 │\n" + "│ 2 │ 2 │\n" + "└────────┴─────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.9.1') + num="3.5.3.9.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_UnboundedPreceding_Error = 
Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.UnboundedPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.UnboundedPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `ROWS BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING` frame is specified.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `ROWS BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING` frame is specified.\n" + "\n" + ), link=None, level=5, - num='3.5.3.9.2') + num="3.5.3.9.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_ExprPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.ExprPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.ExprPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `ROWS BETWEEN CURRENT ROW AND expr PRECEDING` frame is specified.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `ROWS BETWEEN CURRENT ROW AND expr PRECEDING` frame is specified.\n" + "\n" + ), link=None, level=5, - num='3.5.3.9.3') + num="3.5.3.9.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_UnboundedFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.UnboundedFollowing', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.UnboundedFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include the current row and all the following rows in the window partition\n' - 'when `ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)─┐\n' - '│ 1 │ 6 │\n' - '│ 2 │ 5 │\n' - '│ 3 │ 3 │\n' - '└────────┴─────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include the current row and all the following rows in the window partition\n" + "when `ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)─┐\n" + "│ 1 │ 6 │\n" + "│ 2 │ 5 │\n" + "│ 3 │ 3 │\n" + "└────────┴─────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.9.4') + num="3.5.3.9.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_ExprFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.ExprFollowing', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.ExprFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include the current row and the `expr` rows that are following the current row in the window partition\n' - 'when `ROWS BETWEEN CURRENT ROW AND expr 
FOLLOWING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING)─┐\n' - '│ 1 │ 3 │\n' - '│ 2 │ 5 │\n' - '│ 3 │ 3 │\n' - '└────────┴─────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include the current row and the `expr` rows that are following the current row in the window partition\n" + "when `ROWS BETWEEN CURRENT ROW AND expr FOLLOWING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING)─┐\n" + "│ 1 │ 3 │\n" + "│ 2 │ 5 │\n" + "│ 3 │ 3 │\n" + "└────────┴─────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.9.5') + num="3.5.3.9.5", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_CurrentRow = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.CurrentRow', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.CurrentRow", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all the rows before and including the current row in the window partition\n' - 'when `ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n' - '│ 1 │ 1 │\n' - '│ 2 │ 3 │\n' - '│ 3 │ 6 │\n' - '└────────┴─────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include all the rows before and including the current row in the window partition\n" + "when `ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n" + "│ 1 │ 1 │\n" + "│ 2 │ 3 │\n" + "│ 3 │ 6 │\n" + "└────────┴─────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.10.1') + num="3.5.3.10.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_UnboundedPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.UnboundedPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.UnboundedPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL 
return an error when `ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.10.2') + num="3.5.3.10.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_ExprPreceding = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.ExprPreceding', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.ExprPreceding", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all the rows until and including the current row minus `expr` rows preceding it\n' - 'when `ROWS BETWEEN UNBOUNDED PRECEDING AND expr PRECEDING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING)─┐\n' - '│ 1 │ 0 │\n' - '│ 2 │ 1 │\n' - '│ 3 │ 3 │\n' - '└────────┴─────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include all the rows until and including the current row minus `expr` rows preceding it\n" + "when `ROWS BETWEEN UNBOUNDED PRECEDING AND expr PRECEDING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING)─┐\n" + "│ 1 │ 0 │\n" + "│ 2 │ 1 │\n" + "│ 3 │ 3 │\n" + "└────────┴─────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.10.3') + num="3.5.3.10.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_UnboundedFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.UnboundedFollowing', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.UnboundedFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows in the window partition \n' - 'when `ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)─┐\n' - '│ 1 │ 6 │\n' - '│ 2 │ 6 │\n' - '│ 3 │ 6 │\n' - '└────────┴─────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include all rows in the window partition \n" + "when `ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)─┐\n" + "│ 1 │ 6 │\n" + "│ 2 │ 6 │\n" 
+ "│ 3 │ 6 │\n" + "└────────┴─────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.10.4') + num="3.5.3.10.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_ExprFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.ExprFollowing', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.ExprFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all the rows until and including the current row plus `expr` rows following it\n' - 'when `ROWS BETWEEN UNBOUNDED PRECEDING AND expr FOLLOWING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING)─┐\n' - '│ 1 │ 3 │\n' - '│ 2 │ 6 │\n' - '│ 3 │ 6 │\n' - '└────────┴─────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include all the rows until and including the current row plus `expr` rows following it\n" + "when `ROWS BETWEEN UNBOUNDED PRECEDING AND expr FOLLOWING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING)─┐\n" + "│ 1 │ 3 │\n" + "│ 2 │ 6 │\n" + "│ 3 │ 6 │\n" + "└────────┴─────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.10.5') + num="3.5.3.10.5", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedFollowing_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedFollowing.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedFollowing.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `UNBOUNDED FOLLOWING` is specified as the start of the frame, including\n' - '\n' - '* `ROWS BETWEEN UNBOUNDED FOLLOWING AND CURRENT ROW`\n' - '* `ROWS BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING`\n' - '* `ROWS BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING`\n' - '* `ROWS BETWEEN UNBOUNDED FOLLOWING AND expr PRECEDING`\n' - '* `ROWS BETWEEN UNBOUNDED FOLLOWING AND expr FOLLOWING`\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED FOLLOWING AND CURRENT ROW) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `UNBOUNDED FOLLOWING` is specified as the start of the frame, including\n" + "\n" + "* `ROWS BETWEEN UNBOUNDED FOLLOWING AND CURRENT ROW`\n" + "* `ROWS BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING`\n" + "* `ROWS BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING`\n" + "* `ROWS BETWEEN UNBOUNDED FOLLOWING AND expr PRECEDING`\n" + "* `ROWS BETWEEN UNBOUNDED FOLLOWING AND expr FOLLOWING`\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED FOLLOWING AND CURRENT ROW) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.11.1') + 
num="3.5.3.11.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `expr FOLLOWING` is specified as the start of the frame\n' - 'and it points to a row that is after the start of the frame inside the window partition such\n' - 'as the following cases\n' - '\n' - '* `ROWS BETWEEN expr FOLLOWING AND CURRENT ROW`\n' - '* `ROWS BETWEEN expr FOLLOWING AND UNBOUNDED PRECEDING`\n' - '* `ROWS BETWEEN expr FOLLOWING AND expr PRECEDING`\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND CURRENT ROW) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `expr FOLLOWING` is specified as the start of the frame\n" + "and it points to a row that is after the start of the frame inside the window partition such\n" + "as the following cases\n" + "\n" + "* `ROWS BETWEEN expr FOLLOWING AND CURRENT ROW`\n" + "* `ROWS BETWEEN expr FOLLOWING AND UNBOUNDED PRECEDING`\n" + "* `ROWS BETWEEN expr FOLLOWING AND expr PRECEDING`\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND CURRENT ROW) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.12.1') + num="3.5.3.12.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_ExprFollowing_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.ExprFollowing.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.ExprFollowing.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `ROWS BETWEEN expr FOLLOWING AND expr FOLLOWING`\n' - 'is specified and the end of the frame specified by the `expr FOLLOWING` is a row that is before the row \n' - 'specified by the frame start.\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND 0 FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `ROWS BETWEEN expr FOLLOWING AND expr FOLLOWING`\n" + "is specified and the end of the frame specified by the `expr FOLLOWING` is a row that is before the row \n" + "specified by the frame start.\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND 0 FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.12.2') + num="3.5.3.12.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_UnboundedFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.UnboundedFollowing', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.UnboundedFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all the rows from and including current row plus `expr` rows following it \n' - 'until and including the last row in the window partition\n' - 'when `ROWS BETWEEN expr FOLLOWING AND UNBOUNDED FOLLOWING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) 
OVER (ROWS BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING)─┐\n' - '│ 1 │ 5 │\n' - '│ 2 │ 3 │\n' - '│ 3 │ 0 │\n' - '└────────┴─────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include all the rows from and including current row plus `expr` rows following it \n" + "until and including the last row in the window partition\n" + "when `ROWS BETWEEN expr FOLLOWING AND UNBOUNDED FOLLOWING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING)─┐\n" + "│ 1 │ 5 │\n" + "│ 2 │ 3 │\n" + "│ 3 │ 0 │\n" + "└────────┴─────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.12.3') + num="3.5.3.12.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_ExprFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.ExprFollowing', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.ExprFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include the rows from and including current row plus `expr` following it \n' - 'until and including the row specified by the frame end when the frame end \n' - 'is the current row plus `expr` following it is right at or after the start of the frame\n' - 'when `ROWS BETWEEN expr FOLLOWING AND expr FOLLOWING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING)─┐\n' - '│ 1 │ 5 │\n' - '│ 2 │ 3 │\n' - '│ 3 │ 0 │\n' - '└────────┴─────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include the rows from and including current row plus `expr` following it \n" + "until and including the row specified by the frame end when the frame end \n" + "is the current row plus `expr` following it is right at or after the start of the frame\n" + "when `ROWS BETWEEN expr FOLLOWING AND expr FOLLOWING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING)─┐\n" + "│ 1 │ 5 │\n" + "│ 2 │ 3 │\n" + "│ 3 │ 0 │\n" + "└────────┴─────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.12.4') + num="3.5.3.12.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_CurrentRow = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.CurrentRow', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.CurrentRow", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include the rows from and including current row minus `expr` rows\n' - 'preceding it until and 
including the current row in the window frame\n' - 'when `ROWS BETWEEN expr PRECEDING AND CURRENT ROW` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)─┐\n' - '│ 1 │ 1 │\n' - '│ 2 │ 3 │\n' - '│ 3 │ 5 │\n' - '└────────┴─────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include the rows from and including current row minus `expr` rows\n" + "preceding it until and including the current row in the window frame\n" + "when `ROWS BETWEEN expr PRECEDING AND CURRENT ROW` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)─┐\n" + "│ 1 │ 1 │\n" + "│ 2 │ 3 │\n" + "│ 3 │ 5 │\n" + "└────────┴─────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.13.1') + num="3.5.3.13.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_UnboundedPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.UnboundedPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.UnboundedPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error\n' - 'when `ROWS BETWEEN expr PRECEDING AND UNBOUNDED PRECEDING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND UNBOUNDED PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error\n" + "when `ROWS BETWEEN expr PRECEDING AND UNBOUNDED PRECEDING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND UNBOUNDED PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.13.2') + num="3.5.3.13.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_UnboundedFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.UnboundedFollowing', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.UnboundedFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include the rows from and including current row minus `expr` rows\n' - 'preceding it until and including the last row in the window partition\n' - 'when `ROWS BETWEEN expr PRECEDING AND UNBOUNDED FOLLOWING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING)─┐\n' - '│ 1 │ 6 │\n' - '│ 2 │ 6 │\n' - '│ 3 │ 5 │\n' - '└────────┴─────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include the rows from and including current row minus `expr` rows\n" + "preceding it until and including the last row in the window 
partition\n" + "when `ROWS BETWEEN expr PRECEDING AND UNBOUNDED FOLLOWING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING)─┐\n" + "│ 1 │ 6 │\n" + "│ 2 │ 6 │\n" + "│ 3 │ 5 │\n" + "└────────┴─────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.13.3') + num="3.5.3.13.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when the frame end specified by the `expr PRECEDING`\n' - 'evaluates to a row that is before the row specified by the frame start in the window partition\n' - 'when `ROWS BETWEEN expr PRECEDING AND expr PRECEDING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 2 PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when the frame end specified by the `expr PRECEDING`\n" + "evaluates to a row that is before the row specified by the frame start in the window partition\n" + "when `ROWS BETWEEN expr PRECEDING AND expr PRECEDING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 2 PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.13.4') + num="3.5.3.13.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprPreceding = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprPreceding', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprPreceding", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include the rows from and including current row minus `expr` rows preceding it\n' - 'until and including the current row minus `expr` rows preceding it if the end\n' - 'of the frame is after the frame start in the window partition \n' - 'when `ROWS BETWEEN expr PRECEDING AND expr PRECEDING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 0 PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 0 PRECEDING)─┐\n' - '│ 1 │ 1 │\n' - '│ 2 │ 3 │\n' - '│ 3 │ 5 │\n' - '└────────┴─────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include the rows from and including current row minus `expr` rows preceding it\n" + "until and including the current row minus `expr` rows preceding it if the end\n" + "of the frame is after the frame start in the window partition \n" + "when `ROWS BETWEEN expr PRECEDING AND expr PRECEDING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 0 PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" 
+ "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 0 PRECEDING)─┐\n" + "│ 1 │ 1 │\n" + "│ 2 │ 3 │\n" + "│ 3 │ 5 │\n" + "└────────┴─────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.13.5') + num="3.5.3.13.5", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprFollowing', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include the rows from and including current row minus `expr` rows preceding it\n' - 'until and including the current row plus `expr` rows following it in the window partition\n' - 'when `ROWS BETWEEN expr PRECEDING AND expr FOLLOWING` frame is specified.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)─┐\n' - '│ 1 │ 3 │\n' - '│ 2 │ 6 │\n' - '│ 3 │ 5 │\n' - '└────────┴─────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include the rows from and including current row minus `expr` rows preceding it\n" + "until and including the current row plus `expr` rows following it in the window partition\n" + "when `ROWS BETWEEN expr PRECEDING AND expr FOLLOWING` frame is specified.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)─┐\n" + "│ 1 │ 3 │\n" + "│ 2 │ 6 │\n" + "│ 3 │ 5 │\n" + "└────────┴─────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.3.13.6') + num="3.5.3.13.6", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `RANGE` frame to define rows within a value range.\n' - 'Offsets SHALL be differences in row values from the current row value.\n' - '\n' - '```sql\n' - 'RANGE frame_extent\n' - '```\n' - '\n' - 'See [frame_extent] definition.\n' - '\n' - ), + "[ClickHouse] SHALL support `RANGE` frame to define rows within a value range.\n" + "Offsets SHALL be differences in row values from the current row value.\n" + "\n" + "```sql\n" + "RANGE frame_extent\n" + "```\n" + "\n" + "See [frame_extent] definition.\n" + "\n" + ), link=None, level=4, - num='3.5.4.1') + num="3.5.4.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_DataTypes_DateAndDateTime = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.DataTypes.DateAndDateTime', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.DataTypes.DateAndDateTime", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `RANGE` frame over columns with `Date` and `DateTime`\n' - 'data types.\n' - '\n' - ), + "[ClickHouse] SHALL support `RANGE` frame over columns with `Date` and 
`DateTime`\n" + "data types.\n" + "\n" + ), link=None, level=4, - num='3.5.4.2') + num="3.5.4.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_DataTypes_IntAndUInt = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.DataTypes.IntAndUInt', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.DataTypes.IntAndUInt", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `RANGE` frame over columns with numerical data types\n' - 'such `IntX` and `UIntX`.\n' - '\n' - ), + "[ClickHouse] SHALL support `RANGE` frame over columns with numerical data types\n" + "such `IntX` and `UIntX`.\n" + "\n" + ), link=None, level=4, - num='3.5.4.3') + num="3.5.4.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_MultipleColumnsInOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.MultipleColumnsInOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.MultipleColumnsInOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `RANGE` frame definition is used with `ORDER BY`\n' - 'that uses multiple columns.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `RANGE` frame definition is used with `ORDER BY`\n" + "that uses multiple columns.\n" + "\n" + ), link=None, level=4, - num='3.5.4.4') + num="3.5.4.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_MissingFrameExtent_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.MissingFrameExtent.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.MissingFrameExtent.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `RANGE` frame definition is missing [frame_extent].\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `RANGE` frame definition is missing [frame_extent].\n" + "\n" + ), link=None, level=4, - num='3.5.4.5') + num="3.5.4.5", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_InvalidFrameExtent_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.InvalidFrameExtent.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.InvalidFrameExtent.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `RANGE` frame definition has invalid [frame_extent].\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `RANGE` frame definition has invalid [frame_extent].\n" + "\n" + ), link=None, level=4, - num='3.5.4.6') + num="3.5.4.6", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_CurrentRow_Peers = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.CurrentRow.Peers', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.CurrentRow.Peers", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] for the `RANGE` frame SHALL define the `peers` of the `CURRENT ROW` to be all\n' - 'the rows that are inside the same order bucket.\n' - '\n' - ), + "[ClickHouse] for the `RANGE` frame SHALL define the `peers` of the `CURRENT ROW` to be all\n" + "the rows that are inside the same order bucket.\n" + "\n" + ), link=None, level=4, - num='3.5.4.8') + num="3.5.4.8", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_CurrentRow_WithoutOrderBy = Requirement( - 
name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.CurrentRow.WithoutOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.CurrentRow.WithoutOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows in the window partition\n' - 'when `RANGE CURRENT ROW` frame is specified without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE CURRENT ROW) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (RANGE BETWEEN CURRENT ROW AND CURRENT ROW)─┐\n' - '│ 1 │ 6 │\n' - '│ 2 │ 6 │\n' - '│ 3 │ 6 │\n' - '└────────┴──────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include all rows in the window partition\n" + "when `RANGE CURRENT ROW` frame is specified without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE CURRENT ROW) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (RANGE BETWEEN CURRENT ROW AND CURRENT ROW)─┐\n" + "│ 1 │ 6 │\n" + "│ 2 │ 6 │\n" + "│ 3 │ 6 │\n" + "└────────┴──────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.9.1') + num="3.5.4.9.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_CurrentRow_WithOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.CurrentRow.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.CurrentRow.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows that are [current row peers] in the window partition\n' - 'when `RANGE CURRENT ROW` frame is specified with the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows that are [current row peers] in the window partition\n" + "when `RANGE CURRENT ROW` frame is specified with the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN CURRENT ROW AND CURRENT ROW)─┐\n' - '│ 1 │ 2 │\n' - '│ 1 │ 2 │\n' - '│ 2 │ 2 │\n' - '│ 3 │ 3 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN CURRENT ROW AND CURRENT ROW)─┐\n" + "│ 1 │ 2 │\n" + "│ 1 │ 2 │\n" + "│ 2 │ 2 │\n" + "│ 3 │ 3 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.9.2') + num="3.5.4.9.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_UnboundedFollowing_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedFollowing.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedFollowing.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE UNBOUNDED FOLLOWING` frame is specified with or without order by\n' - 'as `UNBOUNDED FOLLOWING` SHALL not be supported as 
[frame_start].\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE UNBOUNDED FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE UNBOUNDED FOLLOWING` frame is specified with or without order by\n" + "as `UNBOUNDED FOLLOWING` SHALL not be supported as [frame_start].\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE UNBOUNDED FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.10.1') + num="3.5.4.10.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_UnboundedPreceding_WithoutOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedPreceding.WithoutOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedPreceding.WithoutOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows in the window partition\n' - 'when `RANGE UNBOUNDED PRECEDING` frame is specified without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE UNBOUNDED PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n' - '│ 1 │ 6 │\n' - '│ 2 │ 6 │\n' - '│ 3 │ 6 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include all rows in the window partition\n" + "when `RANGE UNBOUNDED PRECEDING` frame is specified without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE UNBOUNDED PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n" + "│ 1 │ 6 │\n" + "│ 2 │ 6 │\n" + "│ 3 │ 6 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.11.1') + num="3.5.4.11.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_UnboundedPreceding_WithOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedPreceding.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedPreceding.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include rows with values from and including the first row \n' - 'until and including all [current row peers] in the window partition\n' - 'when `RANGE UNBOUNDED PRECEDING` frame is specified with the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ORDER BY number RANGE UNBOUNDED PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n' - '│ 1 │ 1 │\n' - '│ 2 │ 3 │\n' - '│ 3 │ 6 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include rows with values from and including the first row \n" + "until and including all [current row peers] in the window partition\n" + "when `RANGE UNBOUNDED PRECEDING` frame is specified with the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT 
number,sum(number) OVER (ORDER BY number RANGE UNBOUNDED PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n" + "│ 1 │ 1 │\n" + "│ 2 │ 3 │\n" + "│ 3 │ 6 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.11.2') + num="3.5.4.11.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprPreceding_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE expr PRECEDING` frame is specified without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE 1 PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE expr PRECEDING` frame is specified without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE 1 PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.12.1') + num="3.5.4.12.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprPreceding_OrderByNonNumericalColumn_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.OrderByNonNumericalColumn.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.OrderByNonNumericalColumn.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE expr PRECEDING` is used with `ORDER BY` clause\n' - 'over a non-numerical column.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE expr PRECEDING` is used with `ORDER BY` clause\n" + "over a non-numerical column.\n" + "\n" + ), link=None, level=5, - num='3.5.4.12.2') + num="3.5.4.12.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprPreceding_WithOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include rows with values from and including current row value minus `expr`\n' - 'until and including the value for the current row \n' - 'when `RANGE expr PRECEDING` frame is specified with the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ORDER BY number RANGE 1 PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)─┐\n' - '│ 1 │ 1 │\n' - '│ 2 │ 3 │\n' - '│ 3 │ 5 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include rows with values from and including current row value minus `expr`\n" + "until and including the value for the current row \n" + "when `RANGE expr PRECEDING` frame is specified with the `ORDER BY` clause.\n" + "\n" + "For 
example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ORDER BY number RANGE 1 PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)─┐\n" + "│ 1 │ 1 │\n" + "│ 2 │ 3 │\n" + "│ 3 │ 5 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.12.3') + num="3.5.4.12.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprFollowing_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprFollowing.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprFollowing.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE expr FOLLOWING` frame is specified without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE 1 FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE expr FOLLOWING` frame is specified without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE 1 FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.13.1') + num="3.5.4.13.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprFollowing_WithOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprFollowing.WithOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprFollowing.WithOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE expr FOLLOWING` frame is specified wit the `ORDER BY` clause \n' - 'as the value for the frame start cannot be larger than the value for the frame end.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ORDER BY number RANGE 1 FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE expr FOLLOWING` frame is specified wit the `ORDER BY` clause \n" + "as the value for the frame start cannot be larger than the value for the frame end.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ORDER BY number RANGE 1 FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.13.2') + num="3.5.4.13.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_CurrentRow = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.CurrentRow', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.CurrentRow", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all [current row peers] in the window partition \n' - 'when `RANGE BETWEEN CURRENT ROW AND CURRENT ROW` frame is specified with or without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '**Without `ORDER BY`** \n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND CURRENT ROW) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (RANGE BETWEEN CURRENT ROW AND CURRENT ROW)─┐\n' - '│ 1 │ 6 │\n' - '│ 2 │ 6 │\n' - '│ 3 │ 
6 │\n' - '└────────┴──────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - '**With `ORDER BY`** \n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN CURRENT ROW AND CURRENT ROW) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN CURRENT ROW AND CURRENT ROW)─┐\n' - '│ 1 │ 1 │\n' - '│ 2 │ 2 │\n' - '│ 3 │ 3 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include all [current row peers] in the window partition \n" + "when `RANGE BETWEEN CURRENT ROW AND CURRENT ROW` frame is specified with or without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "**Without `ORDER BY`** \n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND CURRENT ROW) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (RANGE BETWEEN CURRENT ROW AND CURRENT ROW)─┐\n" + "│ 1 │ 6 │\n" + "│ 2 │ 6 │\n" + "│ 3 │ 6 │\n" + "└────────┴──────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + "**With `ORDER BY`** \n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN CURRENT ROW AND CURRENT ROW) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN CURRENT ROW AND CURRENT ROW)─┐\n" + "│ 1 │ 1 │\n" + "│ 2 │ 2 │\n" + "│ 3 │ 3 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.14.1') + num="3.5.4.14.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_UnboundedPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.UnboundedPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.UnboundedPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING` frame is specified\n' - 'with or without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '**Without `ORDER BY`**\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - '**With `ORDER BY`**\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING` frame is specified\n" + "with or without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "**Without `ORDER BY`**\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + "**With `ORDER BY`**\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.14.2') + num="3.5.4.14.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_UnboundedFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.UnboundedFollowing', - version='1.0', + 
name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.UnboundedFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with values from and including [current row peers] until and including\n' - 'the last row in the window partition when `RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING` frame is specified\n' - 'with or without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '**Without `ORDER BY`**\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)─┐\n' - '│ 1 │ 6 │\n' - '│ 2 │ 6 │\n' - '│ 3 │ 6 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - '**With `ORDER BY`**\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)─┐\n' - '│ 1 │ 6 │\n' - '│ 2 │ 5 │\n' - '│ 3 │ 3 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL include all rows with values from and including [current row peers] until and including\n" + "the last row in the window partition when `RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING` frame is specified\n" + "with or without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "**Without `ORDER BY`**\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)─┐\n" + "│ 1 │ 6 │\n" + "│ 2 │ 6 │\n" + "│ 3 │ 6 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + "**With `ORDER BY`**\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)─┐\n" + "│ 1 │ 6 │\n" + "│ 2 │ 5 │\n" + "│ 3 │ 3 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.14.3') + num="3.5.4.14.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_ExprFollowing_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprFollowing.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprFollowing.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN CURRENT ROW AND expr FOLLOWING` frame is specified\n' - 'without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND 1 FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN CURRENT ROW AND expr FOLLOWING` frame is specified\n" + 
"without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND 1 FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.14.4') + num="3.5.4.14.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_ExprFollowing_WithOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprFollowing.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprFollowing.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with values from and including [current row peers] until and including\n' - 'current row value plus `expr` when `RANGE BETWEEN CURRENT ROW AND expr FOLLOWING` frame is specified\n' - 'with the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows with values from and including [current row peers] until and including\n" + "current row value plus `expr` when `RANGE BETWEEN CURRENT ROW AND expr FOLLOWING` frame is specified\n" + "with the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN CURRENT ROW AND 1 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN CURRENT ROW AND 1 FOLLOWING)─┐\n' - '│ 1 │ 4 │\n' - '│ 1 │ 4 │\n' - '│ 2 │ 5 │\n' - '│ 3 │ 3 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN CURRENT ROW AND 1 FOLLOWING)─┐\n" + "│ 1 │ 4 │\n" + "│ 1 │ 4 │\n" + "│ 2 │ 5 │\n" + "│ 3 │ 3 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.14.5') + num="3.5.4.14.5", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_ExprPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN CURRENT ROW AND expr PRECEDING` frame is specified\n' - 'with or without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '**Without `ORDER BY`**\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND 1 PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - '**With `ORDER BY`**\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN CURRENT ROW AND 1 PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN CURRENT ROW AND expr PRECEDING` frame is specified\n" + "with or without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "**Without `ORDER BY`**\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND 1 PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + "**With `ORDER BY`**\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN CURRENT ROW AND 1 PRECEDING) FROM 
numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.14.6') + num="3.5.4.14.6", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_CurrentRow = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.CurrentRow', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.CurrentRow", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with values from and including the first row until and including\n' - '[current row peers] in the window partition when `RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW` frame is specified\n' - 'with and without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '**Without `ORDER BY`**\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows with values from and including the first row until and including\n" + "[current row peers] in the window partition when `RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW` frame is specified\n" + "with and without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "**Without `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n' - '│ 1 │ 7 │\n' - '│ 1 │ 7 │\n' - '│ 2 │ 7 │\n' - '│ 3 │ 7 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - '**With `ORDER BY`**\n' - '\n' - '```sql\n' + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n" + "│ 1 │ 7 │\n" + "│ 1 │ 7 │\n" + "│ 2 │ 7 │\n" + "│ 3 │ 7 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + "**With `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n' - '│ 1 │ 2 │\n' - '│ 1 │ 2 │\n' - '│ 2 │ 4 │\n' - '│ 3 │ 7 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)─┐\n" + "│ 1 │ 2 │\n" + "│ 1 │ 2 │\n" + "│ 2 │ 4 │\n" + "│ 3 │ 7 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.15.1') + num="3.5.4.15.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_UnboundedPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.UnboundedPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.UnboundedPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return and error when `RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING` frame is specified\n' - 'with and without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '**Without `ORDER 
BY`**\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return and error when `RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING` frame is specified\n" + "with and without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "**Without `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '**With `ORDER BY`**\n' - '\n' - '```sql\n' + "```\n" + "\n" + "**With `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.15.2') + num="3.5.4.15.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_UnboundedFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.UnboundedFollowing', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.UnboundedFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows in the window partition when `RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING` frame is specified\n' - 'with and without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '**Without `ORDER BY`**\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows in the window partition when `RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING` frame is specified\n" + "with and without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "**Without `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)─┐\n' - '│ 1 │ 7 │\n' - '│ 1 │ 7 │\n' - '│ 2 │ 7 │\n' - '│ 3 │ 7 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - '**With `ORDER BY`**\n' - '\n' - '```sql\n' + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)─┐\n" + "│ 1 │ 7 │\n" + "│ 1 │ 7 │\n" + "│ 2 │ 7 │\n" + "│ 3 │ 7 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + "**With `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)─┐\n' - '│ 1 │ 7 │\n' - '│ 1 │ 7 │\n' - '│ 2 │ 7 │\n' - '│ 3 │ 7 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)─┐\n" + "│ 1 │ 7 │\n" + "│ 1 │ 7 │\n" + "│ 2 │ 7 │\n" + "│ 3 │ 7 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.15.3') + num="3.5.4.15.3", +) 
RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprPreceding_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprPreceding.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprPreceding.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED PRECEDING AND expr PRECEDING` frame is specified\n' - 'without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED PRECEDING AND expr PRECEDING` frame is specified\n" + "without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.15.4') + num="3.5.4.15.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprPreceding_WithOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprPreceding.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprPreceding.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with values from and including the first row until and including\n' - 'the value of the current row minus `expr` in the window partition\n' - 'when `RANGE BETWEEN UNBOUNDED PRECEDING AND expr PRECEDING` frame is specified with the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows with values from and including the first row until and including\n" + "the value of the current row minus `expr` in the window partition\n" + "when `RANGE BETWEEN UNBOUNDED PRECEDING AND expr PRECEDING` frame is specified with the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING)─┐\n' - '│ 1 │ 0 │\n' - '│ 1 │ 0 │\n' - '│ 2 │ 2 │\n' - '│ 3 │ 4 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING)─┐\n" + "│ 1 │ 0 │\n" + "│ 1 │ 0 │\n" + "│ 2 │ 2 │\n" + "│ 3 │ 4 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.15.5') + num="3.5.4.15.5", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprFollowing_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprFollowing.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprFollowing.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, 
description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED PRECEDING AND expr FOLLOWING` frame is specified\n' - 'without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED PRECEDING AND expr FOLLOWING` frame is specified\n" + "without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.15.6') + num="3.5.4.15.6", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprFollowing_WithOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprFollowing.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprFollowing.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with values from and including the first row until and including\n' - 'the value of the current row plus `expr` in the window partition\n' - 'when `RANGE BETWEEN UNBOUNDED PRECEDING AND expr FOLLOWING` frame is specified with the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows with values from and including the first row until and including\n" + "the value of the current row plus `expr` in the window partition\n" + "when `RANGE BETWEEN UNBOUNDED PRECEDING AND expr FOLLOWING` frame is specified with the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING)─┐\n' - '│ 1 │ 4 │\n' - '│ 1 │ 4 │\n' - '│ 2 │ 7 │\n' - '│ 3 │ 7 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING)─┐\n" + "│ 1 │ 4 │\n" + "│ 1 │ 4 │\n" + "│ 2 │ 7 │\n" + "│ 3 │ 7 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.15.7') + num="3.5.4.15.7", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_CurrentRow_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.CurrentRow.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.CurrentRow.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED FOLLOWING AND CURRENT ROW` frame is specified \n' - 'with or without the `ORDER BY` clause.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED FOLLOWING AND CURRENT ROW` frame is specified \n" + "with or without the `ORDER BY` clause.\n" + "\n" + ), link=None, level=5, - num='3.5.4.16.1') + num="3.5.4.16.1", +) 
RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_UnboundedFollowing_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.UnboundedFollowing.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.UnboundedFollowing.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING` frame is specified \n' - 'with or without the `ORDER BY` clause.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING` frame is specified \n" + "with or without the `ORDER BY` clause.\n" + "\n" + ), link=None, level=5, - num='3.5.4.16.2') + num="3.5.4.16.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_UnboundedPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.UnboundedPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.UnboundedPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING` frame is specified \n' - 'with or without the `ORDER BY` clause.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING` frame is specified \n" + "with or without the `ORDER BY` clause.\n" + "\n" + ), link=None, level=5, - num='3.5.4.16.3') + num="3.5.4.16.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_ExprPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.ExprPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.ExprPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED FOLLOWING AND expr PRECEDING` frame is specified \n' - 'with or without the `ORDER BY` clause.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED FOLLOWING AND expr PRECEDING` frame is specified \n" + "with or without the `ORDER BY` clause.\n" + "\n" + ), link=None, level=5, - num='3.5.4.16.4') + num="3.5.4.16.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_ExprFollowing_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.ExprFollowing.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.ExprFollowing.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED FOLLOWING AND expr FOLLOWING` frame is specified \n' - 'with or without the `ORDER BY` clause.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN UNBOUNDED FOLLOWING AND expr FOLLOWING` frame is specified \n" + "with or without the `ORDER BY` clause.\n" + "\n" + ), link=None, level=5, - num='3.5.4.16.5') + num="3.5.4.16.5", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_CurrentRow_WithOrderBy = Requirement( - 
name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.CurrentRow.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.CurrentRow.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with values from and including current row minus `expr` \n' - 'until and including [current row peers] in the window partition\n' - 'when `RANGE BETWEEN expr PRECEDING AND CURRENT ROW` frame is specified with the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows with values from and including current row minus `expr` \n" + "until and including [current row peers] in the window partition\n" + "when `RANGE BETWEEN expr PRECEDING AND CURRENT ROW` frame is specified with the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 PRECEDING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)─┐\n' - '│ 1 │ 2 │\n' - '│ 1 │ 2 │\n' - '│ 2 │ 4 │\n' - '│ 3 │ 5 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)─┐\n" + "│ 1 │ 2 │\n" + "│ 1 │ 2 │\n" + "│ 2 │ 4 │\n" + "│ 3 │ 5 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.17.1') + num="3.5.4.17.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_CurrentRow_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.CurrentRow.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.CurrentRow.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr PRECEDING AND CURRENT ROW` frame is specified\n' - 'without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr PRECEDING AND CURRENT ROW` frame is specified\n" + "without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.17.2') + num="3.5.4.17.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_UnboundedPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr PRECEDING AND UNBOUNDED PRECEDING` frame is specified \n' - 'with or without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '**Without `ORDER BY`**\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return an error when `RANGE 
BETWEEN expr PRECEDING AND UNBOUNDED PRECEDING` frame is specified \n" + "with or without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "**Without `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '**With `ORDER BY`**\n' - '\n' - '```sql\n' + "```\n" + "\n" + "**With `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 PRECEDING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.17.3') + num="3.5.4.17.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_UnboundedFollowing_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedFollowing.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedFollowing.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr PRECEDING AND UNBOUNDED FOLLOWING` frame is specified \n' - 'without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr PRECEDING AND UNBOUNDED FOLLOWING` frame is specified \n" + "without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.17.4') + num="3.5.4.17.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_UnboundedFollowing_WithOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedFollowing.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedFollowing.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with values from and including current row minus `expr` \n' - 'until and including the last row in the window partition when `RANGE BETWEEN expr PRECEDING AND UNBOUNDED FOLLOWING` frame\n' - 'is specified with the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows with values from and including current row minus `expr` \n" + "until and including the last row in the window partition when `RANGE BETWEEN expr PRECEDING AND UNBOUNDED FOLLOWING` frame\n" + "is specified with the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING)─┐\n' - '│ 1 │ 7 │\n' - '│ 1 │ 7 │\n' - '│ 2 │ 7 │\n' - '│ 3 │ 5 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE 
BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING)─┐\n" + "│ 1 │ 7 │\n" + "│ 1 │ 7 │\n" + "│ 2 │ 7 │\n" + "│ 3 │ 5 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.17.5') + num="3.5.4.17.5", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprFollowing_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprFollowing.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprFollowing.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr PRECEDING AND expr FOLLOWING` frame is specified \n' - 'without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr PRECEDING AND expr FOLLOWING` frame is specified \n" + "without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.17.6') + num="3.5.4.17.6", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprFollowing_WithOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprFollowing.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprFollowing.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with values from and including current row minus preceding `expr` \n' - 'until and including current row plus following `expr` in the window partition \n' - 'when `RANGE BETWEEN expr PRECEDING AND expr FOLLOWING` frame is specified with the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows with values from and including current row minus preceding `expr` \n" + "until and including current row plus following `expr` in the window partition \n" + "when `RANGE BETWEEN expr PRECEDING AND expr FOLLOWING` frame is specified with the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING)─┐\n' - '│ 1 │ 4 │\n' - '│ 1 │ 4 │\n' - '│ 2 │ 7 │\n' - '│ 3 │ 5 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING)─┐\n" + "│ 1 │ 4 │\n" + "│ 1 │ 4 │\n" + "│ 2 │ 7 │\n" + "│ 3 │ 5 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.17.7') + num="3.5.4.17.7", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprPreceding_WithoutOrderBy_Error = Requirement( - 
name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr PRECEDING AND expr PRECEDING` frame is specified \n' - 'without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND 0 PRECEDING) FROM numbers(1,3)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr PRECEDING AND expr PRECEDING` frame is specified \n" + "without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND 0 PRECEDING) FROM numbers(1,3)\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.17.8') + num="3.5.4.17.8", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprPreceding_WithOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when the value of the [frame_end] specified by the \n' - 'current row minus preceding `expr` is greater than the value of the [frame_start] in the window partition\n' - 'when `RANGE BETWEEN expr PRECEDING AND expr PRECEDING` frame is specified with the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return an error when the value of the [frame_end] specified by the \n" + "current row minus preceding `expr` is greater than the value of the [frame_start] in the window partition\n" + "when `RANGE BETWEEN expr PRECEDING AND expr PRECEDING` frame is specified with the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND 2 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.17.9') + num="3.5.4.17.9", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprPreceding_WithOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with values from and including current row minus preceding `expr` for the [frame_start]\n' - 'until and including current row minus following `expr` for the [frame_end] in the window partition \n' - 'when `RANGE BETWEEN expr PRECEDING AND expr PRECEDING` frame is specified with the `ORDER BY` clause\n' - 'if an only if the [frame_end] value is equal or greater than [frame_start] value.\n' - '\n' - 'For example,\n' - '\n' - '**Greater Than**\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows with values from and including current row minus preceding `expr` for the [frame_start]\n" + "until and including current row minus following `expr` for the [frame_end] 
in the window partition \n" + "when `RANGE BETWEEN expr PRECEDING AND expr PRECEDING` frame is specified with the `ORDER BY` clause\n" + "if an only if the [frame_end] value is equal or greater than [frame_start] value.\n" + "\n" + "For example,\n" + "\n" + "**Greater Than**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 PRECEDING AND 0 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 PRECEDING AND 0 PRECEDING)─┐\n' - '│ 1 │ 2 │\n' - '│ 1 │ 2 │\n' - '│ 2 │ 4 │\n' - '│ 3 │ 5 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - 'or **Equal**\n' - '\n' - '```sql\n' + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 PRECEDING AND 0 PRECEDING)─┐\n" + "│ 1 │ 2 │\n" + "│ 1 │ 2 │\n" + "│ 2 │ 4 │\n" + "│ 3 │ 5 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + "or **Equal**\n" + "\n" + "```sql\n" " SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 PRECEDING AND 1 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 PRECEDING AND 1 PRECEDING)─┐\n' - '│ 1 │ 0 │\n' - '│ 1 │ 0 │\n' - '│ 2 │ 2 │\n' - '│ 3 │ 2 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 PRECEDING AND 1 PRECEDING)─┐\n" + "│ 1 │ 0 │\n" + "│ 1 │ 0 │\n" + "│ 2 │ 2 │\n" + "│ 3 │ 2 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.17.10') + num="3.5.4.17.10", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_CurrentRow_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND CURRENT ROW` frame is specified \n' - 'without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND CURRENT ROW` frame is specified \n" + "without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.1') + num="3.5.4.18.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_CurrentRow_WithOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.WithOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.WithOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND 
CURRENT ROW` frame is specified \n' - 'with the `ORDER BY` clause and `expr` is greater than `0`.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND CURRENT ROW` frame is specified \n" + "with the `ORDER BY` clause and `expr` is greater than `0`.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN 1 FOLLOWING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.2') + num="3.5.4.18.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_CurrentRow_ZeroSpecialCase = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.ZeroSpecialCase', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.ZeroSpecialCase", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all [current row peers] in the window partition\n' - 'when `RANGE BETWEEN expr FOLLOWING AND CURRENT ROW` frame is specified \n' - 'with the `ORDER BY` clause if and only if the `expr` equals to `0`.\n' - '\n' - 'For example,\n' - '\n' - '**Without `ORDER BY`**\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all [current row peers] in the window partition\n" + "when `RANGE BETWEEN expr FOLLOWING AND CURRENT ROW` frame is specified \n" + "with the `ORDER BY` clause if and only if the `expr` equals to `0`.\n" + "\n" + "For example,\n" + "\n" + "**Without `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW)─┐\n' - '│ 1 │ 7 │\n' - '│ 1 │ 7 │\n' - '│ 2 │ 7 │\n' - '│ 3 │ 7 │\n' - '└────────┴──────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - '**With `ORDER BY`**\n' - '\n' - '```sql\n' + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW)─┐\n" + "│ 1 │ 7 │\n" + "│ 1 │ 7 │\n" + "│ 2 │ 7 │\n" + "│ 3 │ 7 │\n" + "└────────┴──────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + "**With `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW)─┐\n' - '│ 1 │ 2 │\n' - '│ 1 │ 2 │\n' - '│ 2 │ 2 │\n' - '│ 3 │ 3 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW)─┐\n" + "│ 1 │ 2 │\n" + "│ 1 │ 2 │\n" + "│ 2 │ 2 │\n" + "│ 3 │ 3 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.3') + num="3.5.4.18.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_UnboundedFollowing_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedFollowing.WithoutOrderBy.Error', - version='1.0', + 
name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedFollowing.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND UNBOUNDED FOLLOWING` frame is specified \n' - 'without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND UNBOUNDED FOLLOWING` frame is specified \n" + "without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.4') + num="3.5.4.18.4", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_UnboundedFollowing_WithOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedFollowing.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedFollowing.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with values from and including current row plus `expr`\n' - 'until and including the last row in the window partition \n' - 'when `RANGE BETWEEN expr FOLLOWING AND UNBOUNDED FOLLOWING` frame is specified with the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows with values from and including current row plus `expr`\n" + "until and including the last row in the window partition \n" + "when `RANGE BETWEEN expr FOLLOWING AND UNBOUNDED FOLLOWING` frame is specified with the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING)─┐\n' - '│ 1 │ 5 │\n' - '│ 1 │ 5 │\n' - '│ 2 │ 3 │\n' - '│ 3 │ 0 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING)─┐\n" + "│ 1 │ 5 │\n" + "│ 1 │ 5 │\n" + "│ 2 │ 3 │\n" + "│ 3 │ 0 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.5') + num="3.5.4.18.5", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_UnboundedPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND UNBOUNDED PRECEDING` frame is specified \n' - 'with or without the `ORDER BY` clause.\n' - '\n' - 'For example,\n' - '\n' - '**Without `ORDER BY`**\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND UNBOUNDED 
PRECEDING` frame is specified \n" + "with or without the `ORDER BY` clause.\n" + "\n" + "For example,\n" + "\n" + "**Without `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '**With `ORDER BY`**\n' - '\n' - '```sql\n' + "```\n" + "\n" + "**With `ORDER BY`**\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.6') + num="3.5.4.18.6", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprPreceding_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND expr PRECEDING` frame is specified \n' - 'without the `ORDER BY`.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND expr PRECEDING` frame is specified \n" + "without the `ORDER BY`.\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.7') + num="3.5.4.18.7", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprPreceding_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND expr PRECEDING` frame is specified \n' - 'with the `ORDER BY` clause if the value of both `expr` is not `0`.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND expr PRECEDING` frame is specified \n" + "with the `ORDER BY` clause if the value of both `expr` is not `0`.\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.8') + num="3.5.4.18.8", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprPreceding_WithOrderBy_ZeroSpecialCase = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.WithOrderBy.ZeroSpecialCase', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.WithOrderBy.ZeroSpecialCase", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with value equal to [current row peers] in the window partition\n' - 'when `RANGE BETWEEN expr FOLLOWING AND expr PRECEDING` frame is specified \n' + "[ClickHouse] SHALL include all rows with value equal to [current row peers] in the window partition\n" + "when `RANGE BETWEEN expr FOLLOWING AND expr PRECEDING` frame is specified \n" "with the `ORDER BY` clause if and only if both `expr`'s are `0`.\n" - '\n' - 'For example,\n' - '\n' - '```sql\n' + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 0 FOLLOWING AND 0 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - 
'┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 0 FOLLOWING AND 0 PRECEDING ─┐\n' - '│ 1 │ 2 │\n' - '│ 1 │ 2 │\n' - '│ 2 │ 2 │\n' - '│ 3 │ 3 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 0 FOLLOWING AND 0 PRECEDING ─┐\n" + "│ 1 │ 2 │\n" + "│ 1 │ 2 │\n" + "│ 2 │ 2 │\n" + "│ 3 │ 3 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.9') + num="3.5.4.18.9", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprFollowing_WithoutOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithoutOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithoutOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND expr FOLLOWING` frame is specified \n' - 'without the `ORDER BY` clause.\n' - '\n' - ), + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND expr FOLLOWING` frame is specified \n" + "without the `ORDER BY` clause.\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.10') + num="3.5.4.18.10", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprFollowing_WithOrderBy_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithOrderBy.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithOrderBy.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND expr FOLLOWING` frame is specified \n' - 'with the `ORDER BY` clause but the `expr` for the [frame_end] is less than the `expr` for the [frame_start].\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL return an error when `RANGE BETWEEN expr FOLLOWING AND expr FOLLOWING` frame is specified \n" + "with the `ORDER BY` clause but the `expr` for the [frame_end] is less than the `expr` for the [frame_start].\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 FOLLOWING AND 0 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - ), + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.11') + num="3.5.4.18.11", +) RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprFollowing_WithOrderBy = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithOrderBy', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithOrderBy", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL include all rows with value from and including current row plus `expr` for the [frame_start]\n' - 'until and including current row plus `expr` for the [frame_end] in the window partition\n' - 'when `RANGE BETWEEN expr FOLLOWING AND expr FOLLOWING` frame is specified \n' - 'with the `ORDER BY` clause if and only if the `expr` for the [frame_end] is greater 
than or equal than the \n' - '`expr` for the [frame_start].\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' + "[ClickHouse] SHALL include all rows with value from and including current row plus `expr` for the [frame_start]\n" + "until and including current row plus `expr` for the [frame_end] in the window partition\n" + "when `RANGE BETWEEN expr FOLLOWING AND expr FOLLOWING` frame is specified \n" + "with the `ORDER BY` clause if and only if the `expr` for the [frame_end] is greater than or equal than the \n" + "`expr` for the [frame_start].\n" + "\n" + "For example,\n" + "\n" + "```sql\n" "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))\n" - '```\n' - '\n' - '```bash\n' - '┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING)─┐\n' - '│ 1 │ 5 │\n' - '│ 1 │ 5 │\n' - '│ 2 │ 3 │\n' - '│ 3 │ 0 │\n' - '└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n' - '```\n' - '\n' - ), + "```\n" + "\n" + "```bash\n" + "┌─number─┬─sum(number) OVER (ORDER BY number ASC RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING)─┐\n" + "│ 1 │ 5 │\n" + "│ 1 │ 5 │\n" + "│ 2 │ 3 │\n" + "│ 3 │ 0 │\n" + "└────────┴──────────────────────────────────────────────────────────────────────────────────┘\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.5.4.18.12') + num="3.5.4.18.12", +) RQ_SRS_019_ClickHouse_WindowFunctions_Frame_Extent = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Extent', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Extent", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [frame_extent] defined as\n' - '\n' - '```\n' - 'frame_extent:\n' - ' {frame_start | frame_between}\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support [frame_extent] defined as\n" + "\n" + "```\n" + "frame_extent:\n" + " {frame_start | frame_between}\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.5.5.1') + num="3.5.5.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_Frame_Start = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Start', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Start", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [frame_start] defined as\n' - '\n' - '```\n' - 'frame_start: {\n' - ' CURRENT ROW\n' - ' | UNBOUNDED PRECEDING\n' - ' | UNBOUNDED FOLLOWING\n' - ' | expr PRECEDING\n' - ' | expr FOLLOWING\n' - '}\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support [frame_start] defined as\n" + "\n" + "```\n" + "frame_start: {\n" + " CURRENT ROW\n" + " | UNBOUNDED PRECEDING\n" + " | UNBOUNDED FOLLOWING\n" + " | expr PRECEDING\n" + " | expr FOLLOWING\n" + "}\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.5.6.1') + num="3.5.6.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_Frame_Between = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Between', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Between", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [frame_between] defined as\n' - '\n' - '```\n' - 'frame_between:\n' - ' BETWEEN frame_start AND frame_end\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support [frame_between] defined as\n" + "\n" + "```\n" + "frame_between:\n" + " BETWEEN frame_start AND frame_end\n" + "```\n" + "\n" + ), link=None, 
level=4, - num='3.5.7.1') + num="3.5.7.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_Frame_End = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.Frame.End', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.Frame.End", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support [frame_end] defined as\n' - '\n' - '```\n' - 'frame_end: {\n' - ' CURRENT ROW\n' - ' | UNBOUNDED PRECEDING\n' - ' | UNBOUNDED FOLLOWING\n' - ' | expr PRECEDING\n' - ' | expr FOLLOWING\n' - '}\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support [frame_end] defined as\n" + "\n" + "```\n" + "frame_end: {\n" + " CURRENT ROW\n" + " | UNBOUNDED PRECEDING\n" + " | UNBOUNDED FOLLOWING\n" + " | expr PRECEDING\n" + " | expr FOLLOWING\n" + "}\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.5.8.1') + num="3.5.8.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_CurrentRow = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.CurrentRow', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.CurrentRow", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `CURRENT ROW` as `frame_start` or `frame_end` value.\n' - '\n' - '* For `ROWS` SHALL define the bound to be the current row\n' - '* For `RANGE` SHALL define the bound to be the peers of the current row\n' - '\n' - ), + "[ClickHouse] SHALL support `CURRENT ROW` as `frame_start` or `frame_end` value.\n" + "\n" + "* For `ROWS` SHALL define the bound to be the current row\n" + "* For `RANGE` SHALL define the bound to be the peers of the current row\n" + "\n" + ), link=None, level=4, - num='3.5.9.1') + num="3.5.9.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_UnboundedPreceding = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.UnboundedPreceding', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.UnboundedPreceding", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `UNBOUNDED PRECEDING` as `frame_start` or `frame_end` value\n' - 'and it SHALL define that the bound is the first partition row.\n' - '\n' - ), + "[ClickHouse] SHALL support `UNBOUNDED PRECEDING` as `frame_start` or `frame_end` value\n" + "and it SHALL define that the bound is the first partition row.\n" + "\n" + ), link=None, level=4, - num='3.5.10.1') + num="3.5.10.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_UnboundedFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.UnboundedFollowing', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.UnboundedFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `UNBOUNDED FOLLOWING` as `frame_start` or `frame_end` value\n' - 'and it SHALL define that the bound is the last partition row.\n' - '\n' - ), + "[ClickHouse] SHALL support `UNBOUNDED FOLLOWING` as `frame_start` or `frame_end` value\n" + "and it SHALL define that the bound is the last partition row.\n" + "\n" + ), link=None, level=4, - num='3.5.11.1') + num="3.5.11.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_ExprPreceding = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.ExprPreceding', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.ExprPreceding", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `expr PRECEDING` as `frame_start` or `frame_end` value\n' - '\n' - '* For `ROWS` it SHALL define the bound to be the 
`expr` rows before the current row\n' - '* For `RANGE` it SHALL define the bound to be the rows with values equal to the current row value minus the `expr`.\n' - '\n' - ), + "[ClickHouse] SHALL support `expr PRECEDING` as `frame_start` or `frame_end` value\n" + "\n" + "* For `ROWS` it SHALL define the bound to be the `expr` rows before the current row\n" + "* For `RANGE` it SHALL define the bound to be the rows with values equal to the current row value minus the `expr`.\n" + "\n" + ), link=None, level=4, - num='3.5.12.1') + num="3.5.12.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_ExprPreceding_ExprValue = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.ExprPreceding.ExprValue', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.ExprPreceding.ExprValue", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support only non-negative numeric literal as the value for the `expr` in the `expr PRECEDING` frame boundary.\n' - '\n' - 'For example,\n' - '\n' - '```\n' - '5 PRECEDING\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support only non-negative numeric literal as the value for the `expr` in the `expr PRECEDING` frame boundary.\n" + "\n" + "For example,\n" + "\n" + "```\n" + "5 PRECEDING\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.5.12.2') + num="3.5.12.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_ExprFollowing = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.ExprFollowing', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.ExprFollowing", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `expr FOLLOWING` as `frame_start` or `frame_end` value\n' - '\n' - '* For `ROWS` it SHALL define the bound to be the `expr` rows after the current row\n' - '* For `RANGE` it SHALL define the bound to be the rows with values equal to the current row value plus `expr`\n' - '\n' - ), + "[ClickHouse] SHALL support `expr FOLLOWING` as `frame_start` or `frame_end` value\n" + "\n" + "* For `ROWS` it SHALL define the bound to be the `expr` rows after the current row\n" + "* For `RANGE` it SHALL define the bound to be the rows with values equal to the current row value plus `expr`\n" + "\n" + ), link=None, level=4, - num='3.5.13.1') + num="3.5.13.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_ExprFollowing_ExprValue = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.ExprFollowing.ExprValue', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.ExprFollowing.ExprValue", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support only non-negative numeric literal as the value for the `expr` in the `expr FOLLOWING` frame boundary.\n' - '\n' - 'For example,\n' - '\n' - '```\n' - '5 FOLLOWING\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support only non-negative numeric literal as the value for the `expr` in the `expr FOLLOWING` frame boundary.\n" + "\n" + "For example,\n" + "\n" + "```\n" + "5 FOLLOWING\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.5.13.2') + num="3.5.13.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `WINDOW` clause to define one or more windows.\n' - '\n' - '```sql\n' - 'WINDOW window_name AS 
(window_spec)\n' - ' [, window_name AS (window_spec)] ..\n' - '```\n' - '\n' - 'The `window_name` SHALL be the name of a window defined by a `WINDOW` clause.\n' - '\n' - 'The [window_spec] SHALL specify the window.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT ... FROM table WINDOW w AS (partiton by id))\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `WINDOW` clause to define one or more windows.\n" + "\n" + "```sql\n" + "WINDOW window_name AS (window_spec)\n" + " [, window_name AS (window_spec)] ..\n" + "```\n" + "\n" + "The `window_name` SHALL be the name of a window defined by a `WINDOW` clause.\n" + "\n" + "The [window_spec] SHALL specify the window.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT ... FROM table WINDOW w AS (partiton by id))\n" + "```\n" + "\n" + ), link=None, level=3, - num='3.6.1') + num="3.6.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause_MultipleWindows = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause.MultipleWindows', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause.MultipleWindows", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `WINDOW` clause that defines multiple windows.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT ... FROM table WINDOW w1 AS (partition by id), w2 AS (partition by customer)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `WINDOW` clause that defines multiple windows.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT ... FROM table WINDOW w1 AS (partition by id), w2 AS (partition by customer)\n" + "```\n" + "\n" + ), link=None, level=3, - num='3.6.2') + num="3.6.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause_MissingWindowSpec_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause.MissingWindowSpec.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause.MissingWindowSpec.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `WINDOW` clause definition is missing [window_spec].\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `WINDOW` clause definition is missing [window_spec].\n" + "\n" + ), link=None, level=3, - num='3.6.3') + num="3.6.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_OverClause = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `OVER` clause to either use named window defined using `WINDOW` clause\n' - 'or adhoc window defined inplace.\n' - '\n' - '\n' - '```\n' - 'OVER ()|(window_spec)|named_window \n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `OVER` clause to either use named window defined using `WINDOW` clause\n" + "or adhoc window defined inplace.\n" + "\n" + "\n" + "```\n" + "OVER ()|(window_spec)|named_window \n" + "```\n" + "\n" + ), link=None, level=3, - num='3.7.1') + num="3.7.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_EmptyOverClause = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.EmptyOverClause', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.EmptyOverClause", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL treat the entire set of query rows as a single 
partition when `OVER` clause is empty.\n' - 'For example,\n' - '\n' - '```\n' - 'SELECT sum(x) OVER () FROM table\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL treat the entire set of query rows as a single partition when `OVER` clause is empty.\n" + "For example,\n" + "\n" + "```\n" + "SELECT sum(x) OVER () FROM table\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.7.2.1') + num="3.7.2.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_AdHocWindow = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.AdHocWindow', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.AdHocWindow", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support ad hoc window specification in the `OVER` clause.\n' - '\n' - '```\n' - 'OVER [window_spec]\n' - '```\n' - '\n' - 'See [window_spec] definition.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - '(count(*) OVER (partition by id order by time desc))\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support ad hoc window specification in the `OVER` clause.\n" + "\n" + "```\n" + "OVER [window_spec]\n" + "```\n" + "\n" + "See [window_spec] definition.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "(count(*) OVER (partition by id order by time desc))\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.7.3.1') + num="3.7.3.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_AdHocWindow_MissingWindowSpec_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.AdHocWindow.MissingWindowSpec.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.AdHocWindow.MissingWindowSpec.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `OVER` clause has missing [window_spec].\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `OVER` clause has missing [window_spec].\n" + "\n" + ), link=None, level=4, - num='3.7.3.2') + num="3.7.3.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using a previously defined named window in the `OVER` clause.\n' - '\n' - '```\n' - 'OVER [window_name]\n' - '```\n' - '\n' - 'See [window_name] definition.\n' - '\n' - 'For example,\n' - '\n' - '```sql\n' - 'SELECT count(*) OVER w FROM table WINDOW w AS (partition by id)\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support using a previously defined named window in the `OVER` clause.\n" + "\n" + "```\n" + "OVER [window_name]\n" + "```\n" + "\n" + "See [window_name] definition.\n" + "\n" + "For example,\n" + "\n" + "```sql\n" + "SELECT count(*) OVER w FROM table WINDOW w AS (partition by id)\n" + "```\n" + "\n" + ), link=None, level=4, - num='3.7.4.1') + num="3.7.4.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow_InvalidName_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow.InvalidName.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow.InvalidName.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `OVER` clause reference invalid window name.\n' - '\n' - ), + "[ClickHouse] SHALL 
return an error if the `OVER` clause reference invalid window name.\n" + "\n" + ), link=None, level=4, - num='3.7.4.2') + num="3.7.4.2", +) RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow_MultipleWindows_Error = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow.MultipleWindows.Error', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow.MultipleWindows.Error", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL return an error if the `OVER` clause references more than one window name.\n' - '\n' - ), + "[ClickHouse] SHALL return an error if the `OVER` clause references more than one window name.\n" + "\n" + ), link=None, level=4, - num='3.7.4.3') + num="3.7.4.3", +) RQ_SRS_019_ClickHouse_WindowFunctions_FirstValue = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.FirstValue', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.FirstValue", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `first_value` window function that\n' - 'SHALL be synonum for the `any(value)` function\n' - 'that SHALL return the value of `expr` from first row in the window frame.\n' - '\n' - '```\n' - 'first_value(expr) OVER ...\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `first_value` window function that\n" + "SHALL be synonum for the `any(value)` function\n" + "that SHALL return the value of `expr` from first row in the window frame.\n" + "\n" + "```\n" + "first_value(expr) OVER ...\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.8.1.1.1') + num="3.8.1.1.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_LastValue = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.LastValue', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.LastValue", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `last_value` window function that\n' - 'SHALL be synonym for the `anyLast(value)` function\n' - 'that SHALL return the value of `expr` from the last row in the window frame.\n' - '\n' - '```\n' - 'last_value(expr) OVER ...\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `last_value` window function that\n" + "SHALL be synonym for the `anyLast(value)` function\n" + "that SHALL return the value of `expr` from the last row in the window frame.\n" + "\n" + "```\n" + "last_value(expr) OVER ...\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.8.1.2.1') + num="3.8.1.2.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_Lag_Workaround = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.Lag.Workaround', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.Lag.Workaround", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support a workaround for the `lag(value, offset)` function as\n' - '\n' - '```\n' - 'any(value) OVER (.... ROWS BETWEEN PRECEDING AND PRECEDING)\n' - '```\n' - '\n' - 'The function SHALL returns the value from the row that lags (precedes) the current row\n' - 'by the `N` rows within its partition. Where `N` is the `value` passed to the `any` function.\n' - '\n' - 'If there is no such row, the return value SHALL be default.\n' - '\n' - 'For example, if `N` is 3, the return value is default for the first two rows.\n' - 'If N or default are missing, the defaults are 1 and NULL, respectively.\n' - '\n' - '`N` SHALL be a literal non-negative integer. 
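For example (an illustrative sketch only, over a hypothetical `employees` table with `id` and `salary` columns), the workaround above emulates `lag(salary, 1)` like this:

```sql
-- Take salary from the row exactly 1 position back in the ordered partition;
-- the frame ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING contains only that row.
SELECT
    id,
    salary,
    any(salary) OVER (ORDER BY id ASC ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS prev_salary
FROM employees;
```

For the first row there is no preceding row, so `prev_salary` falls back to a default value (for example `0` for a non-Nullable numeric column).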
If N is 0, the value SHALL be\n' - 'returned for the current row.\n' - '\n' - ), + "[ClickHouse] SHALL support a workaround for the `lag(value, offset)` function as\n" + "\n" + "```\n" + "any(value) OVER (.... ROWS BETWEEN PRECEDING AND PRECEDING)\n" + "```\n" + "\n" + "The function SHALL returns the value from the row that lags (precedes) the current row\n" + "by the `N` rows within its partition. Where `N` is the `value` passed to the `any` function.\n" + "\n" + "If there is no such row, the return value SHALL be default.\n" + "\n" + "For example, if `N` is 3, the return value is default for the first two rows.\n" + "If N or default are missing, the defaults are 1 and NULL, respectively.\n" + "\n" + "`N` SHALL be a literal non-negative integer. If N is 0, the value SHALL be\n" + "returned for the current row.\n" + "\n" + ), link=None, level=5, - num='3.8.1.3.1') + num="3.8.1.3.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_Lead_Workaround = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.Lead.Workaround', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.Lead.Workaround", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support a workaround for the `lead(value, offset)` function as\n' - '\n' - '```\n' - 'any(value) OVER (.... ROWS BETWEEN FOLLOWING AND FOLLOWING)\n' - '```\n' - '\n' - 'The function SHALL returns the value from the row that leads (follows) the current row by\n' - 'the `N` rows within its partition. Where `N` is the `value` passed to the `any` function.\n' - '\n' - 'If there is no such row, the return value SHALL be default.\n' - '\n' - 'For example, if `N` is 3, the return value is default for the last two rows.\n' - 'If `N` or default are missing, the defaults are 1 and NULL, respectively.\n' - '\n' - '`N` SHALL be a literal non-negative integer. If `N` is 0, the value SHALL be\n' - 'returned for the current row.\n' - '\n' - ), + "[ClickHouse] SHALL support a workaround for the `lead(value, offset)` function as\n" + "\n" + "```\n" + "any(value) OVER (.... ROWS BETWEEN FOLLOWING AND FOLLOWING)\n" + "```\n" + "\n" + "The function SHALL returns the value from the row that leads (follows) the current row by\n" + "the `N` rows within its partition. Where `N` is the `value` passed to the `any` function.\n" + "\n" + "If there is no such row, the return value SHALL be default.\n" + "\n" + "For example, if `N` is 3, the return value is default for the last two rows.\n" + "If `N` or default are missing, the defaults are 1 and NULL, respectively.\n" + "\n" + "`N` SHALL be a literal non-negative integer. If `N` is 0, the value SHALL be\n" + "returned for the current row.\n" + "\n" + ), link=None, level=5, - num='3.8.1.4.1') + num="3.8.1.4.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_LeadInFrame = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.LeadInFrame', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.LeadInFrame", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `leadInFrame(expr[, offset, [default]])` function.\n' - '\n' - 'For example,\n' - '```\n' - 'leadInFrame(column) OVER (...)\n' - '```\n' - '\n' - 'The function SHALL return the value from the row that leads (follows) the current row\n' - 'by the `offset` rows within the current frame. If there is no such row,\n' - 'the return value SHALL be the `default` value. 
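For example (an illustrative sketch, assuming a hypothetical table `t` with numeric columns `id` and `value`), because `leadInFrame` only looks inside the current frame, an explicit frame spanning the whole partition is typically supplied to get `lead`-like behaviour:

```sql
-- leadInFrame(value, 1): value from the next row inside the frame;
-- the column type's default when there is no such row.
SELECT
    id,
    value,
    leadInFrame(value, 1) OVER (
        ORDER BY id ASC
        ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
    ) AS next_value
FROM t;
```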
If the `default` value is not specified \n' - 'then the default value for the corresponding column data type SHALL be returned.\n' - '\n' - 'The `offset` SHALL be a literal non-negative integer. If the `offset` is set to `0`, then\n' - 'the value SHALL be returned for the current row. If the `offset` is not specified, the default\n' - 'value SHALL be `1`.\n' - '\n' - ), + "[ClickHouse] SHALL support the `leadInFrame(expr[, offset, [default]])` function.\n" + "\n" + "For example,\n" + "```\n" + "leadInFrame(column) OVER (...)\n" + "```\n" + "\n" + "The function SHALL return the value from the row that leads (follows) the current row\n" + "by the `offset` rows within the current frame. If there is no such row,\n" + "the return value SHALL be the `default` value. If the `default` value is not specified \n" + "then the default value for the corresponding column data type SHALL be returned.\n" + "\n" + "The `offset` SHALL be a literal non-negative integer. If the `offset` is set to `0`, then\n" + "the value SHALL be returned for the current row. If the `offset` is not specified, the default\n" + "value SHALL be `1`.\n" + "\n" + ), link=None, level=5, - num='3.8.1.5.1') + num="3.8.1.5.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_LagInFrame = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.LagInFrame', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.LagInFrame", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support the `lagInFrame(expr[, offset, [default]])` function.\n' - '\n' - 'For example,\n' - '```\n' - 'lagInFrame(column) OVER (...)\n' - '```\n' - '\n' - 'The function SHALL return the value from the row that lags (preceds) the current row\n' - 'by the `offset` rows within the current frame. If there is no such row,\n' - 'the return value SHALL be the `default` value. If the `default` value is not specified \n' - 'then the default value for the corresponding column data type SHALL be returned.\n' - '\n' - 'The `offset` SHALL be a literal non-negative integer. If the `offset` is set to `0`, then\n' - 'the value SHALL be returned for the current row. If the `offset` is not specified, the default\n' - 'value SHALL be `1`.\n' - '\n' - ), + "[ClickHouse] SHALL support the `lagInFrame(expr[, offset, [default]])` function.\n" + "\n" + "For example,\n" + "```\n" + "lagInFrame(column) OVER (...)\n" + "```\n" + "\n" + "The function SHALL return the value from the row that lags (preceds) the current row\n" + "by the `offset` rows within the current frame. If there is no such row,\n" + "the return value SHALL be the `default` value. If the `default` value is not specified \n" + "then the default value for the corresponding column data type SHALL be returned.\n" + "\n" + "The `offset` SHALL be a literal non-negative integer. If the `offset` is set to `0`, then\n" + "the value SHALL be returned for the current row. 
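For example (again an illustrative sketch over a hypothetical `t(grp, id, value)` table), `lagInFrame` behaves symmetrically, looking backwards within the frame of each partition:

```sql
-- lagInFrame(value): value from the previous row in the frame,
-- or the column type's default when no such row exists.
SELECT
    grp,
    id,
    value,
    lagInFrame(value) OVER (
        PARTITION BY grp
        ORDER BY id ASC
        ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
    ) AS prev_value
FROM t;
```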
If the `offset` is not specified, the default\n" + "value SHALL be `1`.\n" + "\n" + ), link=None, level=5, - num='3.8.1.6.1') + num="3.8.1.6.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_Rank = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.Rank', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.Rank", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `rank` window function that SHALL\n' - 'return the rank of the current row within its partition with gaps.\n' - '\n' - 'Peers SHALL be considered ties and receive the same rank.\n' - 'The function SHALL not assign consecutive ranks to peer groups if groups of size greater than one exist\n' - 'and the result is noncontiguous rank numbers.\n' - '\n' - 'If the function is used without `ORDER BY` to sort partition rows into the desired order\n' - 'then all rows SHALL be peers.\n' - '\n' - '```\n' - 'rank() OVER ...\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `rank` window function that SHALL\n" + "return the rank of the current row within its partition with gaps.\n" + "\n" + "Peers SHALL be considered ties and receive the same rank.\n" + "The function SHALL not assign consecutive ranks to peer groups if groups of size greater than one exist\n" + "and the result is noncontiguous rank numbers.\n" + "\n" + "If the function is used without `ORDER BY` to sort partition rows into the desired order\n" + "then all rows SHALL be peers.\n" + "\n" + "```\n" + "rank() OVER ...\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.8.1.7.1') + num="3.8.1.7.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_DenseRank = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.DenseRank', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.DenseRank", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `dense_rank` function over a window that SHALL\n' - 'return the rank of the current row within its partition without gaps.\n' - '\n' - 'Peers SHALL be considered ties and receive the same rank.\n' - 'The function SHALL assign consecutive ranks to peer groups and\n' - 'the result is that groups of size greater than one do not produce noncontiguous rank numbers.\n' - '\n' - 'If the function is used without `ORDER BY` to sort partition rows into the desired order\n' - 'then all rows SHALL be peers.\n' - '\n' - '```\n' - 'dense_rank() OVER ...\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `dense_rank` function over a window that SHALL\n" + "return the rank of the current row within its partition without gaps.\n" + "\n" + "Peers SHALL be considered ties and receive the same rank.\n" + "The function SHALL assign consecutive ranks to peer groups and\n" + "the result is that groups of size greater than one do not produce noncontiguous rank numbers.\n" + "\n" + "If the function is used without `ORDER BY` to sort partition rows into the desired order\n" + "then all rows SHALL be peers.\n" + "\n" + "```\n" + "dense_rank() OVER ...\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.8.1.8.1') + num="3.8.1.8.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_RowNumber = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.RowNumber', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowNumber", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support `row_number` function over a window that SHALL\n' - 'returns the number of the current row within 
its partition.\n' - '\n' - 'Rows numbers SHALL range from 1 to the number of partition rows.\n' - '\n' - 'The `ORDER BY` affects the order in which rows are numbered.\n' - 'Without `ORDER BY`, row numbering MAY be nondeterministic.\n' - '\n' - '```\n' - 'row_number() OVER ...\n' - '```\n' - '\n' - ), + "[ClickHouse] SHALL support `row_number` function over a window that SHALL\n" + "returns the number of the current row within its partition.\n" + "\n" + "Rows numbers SHALL range from 1 to the number of partition rows.\n" + "\n" + "The `ORDER BY` affects the order in which rows are numbered.\n" + "Without `ORDER BY`, row numbering MAY be nondeterministic.\n" + "\n" + "```\n" + "row_number() OVER ...\n" + "```\n" + "\n" + ), link=None, level=5, - num='3.8.1.9.1') + num="3.8.1.9.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_AggregateFunctions = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support using aggregate functions over windows.\n' - '\n' - '* [count](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/count/)\n' - '* [min](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/min/)\n' - '* [max](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/max/)\n' - '* [sum](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/sum/)\n' - '* [avg](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/avg/)\n' - '* [any](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/any/)\n' - '* [stddevPop](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/stddevpop/)\n' - '* [stddevSamp](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/stddevsamp/)\n' - '* [varPop(x)](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/varpop/)\n' - '* [varSamp](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/varsamp/)\n' - '* [covarPop](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/covarpop/)\n' - '* [covarSamp](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/covarsamp/)\n' - '* [anyHeavy](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/anyheavy/)\n' - '* [anyLast](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/anylast/)\n' - '* [argMin](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/argmin/)\n' - '* [argMax](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/argmax/)\n' - '* [avgWeighted](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/avgweighted/)\n' - '* [corr](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/corr/)\n' - '* [topK](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/topk/)\n' - '* [topKWeighted](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/topkweighted/)\n' - '* [groupArray](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/grouparray/)\n' - '* [groupUniqArray](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray/)\n' - '* 
[groupArrayInsertAt](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat/)\n' - '* [groupArrayMovingSum](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum/)\n' - '* [groupArrayMovingAvg](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg/)\n' - '* [groupArraySample](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/grouparraysample/)\n' - '* [groupBitAnd](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitand/)\n' - '* [groupBitOr](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitor/)\n' - '* [groupBitXor](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitxor/)\n' - '* [groupBitmap](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitmap/)\n' - '* [groupBitmapAnd](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand/)\n' - '* [groupBitmapOr](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor/)\n' - '* [groupBitmapXor](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor/)\n' - '* [sumWithOverflow](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow/)\n' - '* [deltaSum](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/deltasum/)\n' - '* [sumMap](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/summap/)\n' - '* [minMap](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/minmap/)\n' - '* [maxMap](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/maxmap/)\n' - '* [initializeAggregation](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/initializeAggregation/)\n' - '* [skewPop](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/skewpop/)\n' - '* [skewSamp](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/skewsamp/)\n' - '* [kurtPop](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/kurtpop/)\n' - '* [kurtSamp](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/kurtsamp/)\n' - '* [uniq](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/uniq/)\n' - '* [uniqExact](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/uniqexact/)\n' - '* [uniqCombined](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/uniqcombined/)\n' - '* [uniqCombined64](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64/)\n' - '* [uniqHLL12](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/uniqhll12/)\n' - '* [quantile](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantile/)\n' - '* [quantiles](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiles/)\n' - '* [quantileExact](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantileexact/)\n' - '* [quantileExactWeighted](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted/)\n' - '* [quantileTiming](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiletiming/)\n' - '* 
[quantileTimingWeighted](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted/)\n' - '* [quantileDeterministic](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic/)\n' - '* [quantileTDigest](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest/)\n' - '* [quantileTDigestWeighted](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted/)\n' - '* [simpleLinearRegression](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression/)\n' - '* [stochasticLinearRegression](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression/)\n' - '* [stochasticLogisticRegression](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression/)\n' - '* [categoricalInformationValue](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression/)\n' - '* [studentTTest](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/studentttest/)\n' - '* [welchTTest](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/welchttest/)\n' - '* [mannWhitneyUTest](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest/)\n' - '* [median](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/median/)\n' - '* [rankCorr](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/rankCorr/)\n' - '\n' - ), + "[ClickHouse] SHALL support using aggregate functions over windows.\n" + "\n" + "* [count](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/count/)\n" + "* [min](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/min/)\n" + "* [max](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/max/)\n" + "* [sum](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/sum/)\n" + "* [avg](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/avg/)\n" + "* [any](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/any/)\n" + "* [stddevPop](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/stddevpop/)\n" + "* [stddevSamp](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/stddevsamp/)\n" + "* [varPop(x)](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/varpop/)\n" + "* [varSamp](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/varsamp/)\n" + "* [covarPop](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/covarpop/)\n" + "* [covarSamp](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/covarsamp/)\n" + "* [anyHeavy](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/anyheavy/)\n" + "* [anyLast](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/anylast/)\n" + "* [argMin](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/argmin/)\n" + "* [argMax](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/argmax/)\n" + "* [avgWeighted](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/avgweighted/)\n" + "* [corr](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/corr/)\n" + "* 
[topK](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/topk/)\n" + "* [topKWeighted](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/topkweighted/)\n" + "* [groupArray](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/grouparray/)\n" + "* [groupUniqArray](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray/)\n" + "* [groupArrayInsertAt](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat/)\n" + "* [groupArrayMovingSum](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum/)\n" + "* [groupArrayMovingAvg](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg/)\n" + "* [groupArraySample](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/grouparraysample/)\n" + "* [groupBitAnd](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitand/)\n" + "* [groupBitOr](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitor/)\n" + "* [groupBitXor](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitxor/)\n" + "* [groupBitmap](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitmap/)\n" + "* [groupBitmapAnd](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand/)\n" + "* [groupBitmapOr](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor/)\n" + "* [groupBitmapXor](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor/)\n" + "* [sumWithOverflow](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow/)\n" + "* [deltaSum](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/deltasum/)\n" + "* [sumMap](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/summap/)\n" + "* [minMap](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/minmap/)\n" + "* [maxMap](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/maxmap/)\n" + "* [initializeAggregation](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/initializeAggregation/)\n" + "* [skewPop](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/skewpop/)\n" + "* [skewSamp](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/skewsamp/)\n" + "* [kurtPop](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/kurtpop/)\n" + "* [kurtSamp](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/kurtsamp/)\n" + "* [uniq](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/uniq/)\n" + "* [uniqExact](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/uniqexact/)\n" + "* [uniqCombined](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/uniqcombined/)\n" + "* [uniqCombined64](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64/)\n" + "* [uniqHLL12](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/uniqhll12/)\n" + "* [quantile](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantile/)\n" + "* [quantiles](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiles/)\n" + "* 
[quantileExact](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantileexact/)\n" + "* [quantileExactWeighted](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted/)\n" + "* [quantileTiming](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiletiming/)\n" + "* [quantileTimingWeighted](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted/)\n" + "* [quantileDeterministic](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic/)\n" + "* [quantileTDigest](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest/)\n" + "* [quantileTDigestWeighted](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted/)\n" + "* [simpleLinearRegression](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression/)\n" + "* [stochasticLinearRegression](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression/)\n" + "* [stochasticLogisticRegression](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression/)\n" + "* [categoricalInformationValue](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression/)\n" + "* [studentTTest](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/studentttest/)\n" + "* [welchTTest](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/welchttest/)\n" + "* [mannWhitneyUTest](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest/)\n" + "* [median](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/median/)\n" + "* [rankCorr](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/rankCorr/)\n" + "\n" + ), link=None, level=4, - num='3.8.2.1') + num="3.8.2.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_AggregateFunctions_Combinators = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions.Combinators', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions.Combinators", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support aggregate functions with combinator prefixes over windows.\n' - '\n' - '* [-If](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-if)\n' - '* [-Array](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-array)\n' - '* [-SimpleState](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-simplestate)\n' - '* [-State](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-state)\n' - '* [-Merge](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#aggregate_functions_combinators-merge)\n' - '* [-MergeState](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#aggregate_functions_combinators-mergestate)\n' - '* [-ForEach](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-foreach)\n' - '* [-Distinct](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-distinct)\n' - '* 
[-OrDefault](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-ordefault)\n' - '* [-OrNull](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-ornull)\n' - '* [-Resample](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-resample)\n' - '\n' - ), + "[ClickHouse] SHALL support aggregate functions with combinator prefixes over windows.\n" + "\n" + "* [-If](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-if)\n" + "* [-Array](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-array)\n" + "* [-SimpleState](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-simplestate)\n" + "* [-State](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-state)\n" + "* [-Merge](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#aggregate_functions_combinators-merge)\n" + "* [-MergeState](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#aggregate_functions_combinators-mergestate)\n" + "* [-ForEach](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-foreach)\n" + "* [-Distinct](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-distinct)\n" + "* [-OrDefault](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-ordefault)\n" + "* [-OrNull](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-ornull)\n" + "* [-Resample](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/combinators/#agg-functions-combinator-resample)\n" + "\n" + ), link=None, level=5, - num='3.8.2.2.1') + num="3.8.2.2.1", +) RQ_SRS_019_ClickHouse_WindowFunctions_AggregateFunctions_Parametric = Requirement( - name='RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions.Parametric', - version='1.0', + name="RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions.Parametric", + version="1.0", priority=None, group=None, type=None, uid=None, description=( - '[ClickHouse] SHALL support parametric aggregate functions over windows.\n' - '\n' - '* [histogram](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#histogram)\n' - '* [sequenceMatch(pattern)(timestamp, cond1, cond2, ...)](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#function-sequencematch)\n' - '* [sequenceCount(pattern)(time, cond1, cond2, ...)](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#function-sequencecount)\n' - '* [windowFunnel](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#windowfunnel)\n' - '* [retention](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#retention)\n' - '* [uniqUpTo(N)(x)](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#uniquptonx)\n' - '* [sumMapFiltered(keys_to_keep)(keys, values)](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#summapfilteredkeys-to-keepkeys-values)\n' - '\n' - ), + "[ClickHouse] SHALL support parametric aggregate functions over windows.\n" + "\n" + 
"* [histogram](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#histogram)\n" + "* [sequenceMatch(pattern)(timestamp, cond1, cond2, ...)](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#function-sequencematch)\n" + "* [sequenceCount(pattern)(time, cond1, cond2, ...)](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#function-sequencecount)\n" + "* [windowFunnel](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#windowfunnel)\n" + "* [retention](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#retention)\n" + "* [uniqUpTo(N)(x)](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#uniquptonx)\n" + "* [sumMapFiltered(keys_to_keep)(keys, values)](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/parametric-functions/#summapfilteredkeys-to-keepkeys-values)\n" + "\n" + ), link=None, level=5, - num='3.8.2.3.1') + num="3.8.2.3.1", +) SRS019_ClickHouse_Window_Functions = Specification( - name='SRS019 ClickHouse Window Functions', + name="SRS019 ClickHouse Window Functions", description=None, author=None, - date=None, - status=None, + date=None, + status=None, approved_by=None, approved_date=None, approved_version=None, @@ -3348,201 +3481,735 @@ SRS019_ClickHouse_Window_Functions = Specification( parent=None, children=None, headings=( - Heading(name='Revision History', level=1, num='1'), - Heading(name='Introduction', level=1, num='2'), - Heading(name='Requirements', level=1, num='3'), - Heading(name='General', level=2, num='3.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions', level=3, num='3.1.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.NonDistributedTables', level=3, num='3.1.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.DistributedTables', level=3, num='3.1.3'), - Heading(name='Window Specification', level=2, num='3.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.WindowSpec', level=3, num='3.2.1'), - Heading(name='PARTITION Clause', level=2, num='3.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause', level=3, num='3.3.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.MultipleExpr', level=3, num='3.3.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.MissingExpr.Error', level=3, num='3.3.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.InvalidExpr.Error', level=3, num='3.3.4'), - Heading(name='ORDER Clause', level=2, num='3.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause', level=3, num='3.4.1'), - Heading(name='order_clause', level=4, num='3.4.1.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.MultipleExprs', level=3, num='3.4.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.MissingExpr.Error', level=3, num='3.4.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.InvalidExpr.Error', level=3, num='3.4.4'), - Heading(name='FRAME Clause', level=2, num='3.5'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.FrameClause', level=3, num='3.5.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.FrameClause.DefaultFrame', level=3, num='3.5.2'), - Heading(name='ROWS', level=3, num='3.5.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame', level=4, num='3.5.3.1'), - 
Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.MissingFrameExtent.Error', level=4, num='3.5.3.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.InvalidFrameExtent.Error', level=4, num='3.5.3.3'), - Heading(name='ROWS CURRENT ROW', level=4, num='3.5.3.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.CurrentRow', level=5, num='3.5.3.4.1'), - Heading(name='ROWS UNBOUNDED PRECEDING', level=4, num='3.5.3.5'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.UnboundedPreceding', level=5, num='3.5.3.5.1'), - Heading(name='ROWS `expr` PRECEDING', level=4, num='3.5.3.6'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.ExprPreceding', level=5, num='3.5.3.6.1'), - Heading(name='ROWS UNBOUNDED FOLLOWING', level=4, num='3.5.3.7'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.UnboundedFollowing.Error', level=5, num='3.5.3.7.1'), - Heading(name='ROWS `expr` FOLLOWING', level=4, num='3.5.3.8'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.ExprFollowing.Error', level=5, num='3.5.3.8.1'), - Heading(name='ROWS BETWEEN CURRENT ROW', level=4, num='3.5.3.9'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.CurrentRow', level=5, num='3.5.3.9.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.UnboundedPreceding.Error', level=5, num='3.5.3.9.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.ExprPreceding.Error', level=5, num='3.5.3.9.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.UnboundedFollowing', level=5, num='3.5.3.9.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.ExprFollowing', level=5, num='3.5.3.9.5'), - Heading(name='ROWS BETWEEN UNBOUNDED PRECEDING', level=4, num='3.5.3.10'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.CurrentRow', level=5, num='3.5.3.10.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.UnboundedPreceding.Error', level=5, num='3.5.3.10.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.ExprPreceding', level=5, num='3.5.3.10.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.UnboundedFollowing', level=5, num='3.5.3.10.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.ExprFollowing', level=5, num='3.5.3.10.5'), - Heading(name='ROWS BETWEEN UNBOUNDED FOLLOWING', level=4, num='3.5.3.11'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedFollowing.Error', level=5, num='3.5.3.11.1'), - Heading(name='ROWS BETWEEN `expr` FOLLOWING', level=4, num='3.5.3.12'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.Error', level=5, num='3.5.3.12.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.ExprFollowing.Error', level=5, num='3.5.3.12.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.UnboundedFollowing', level=5, num='3.5.3.12.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.ExprFollowing', level=5, num='3.5.3.12.4'), - Heading(name='ROWS BETWEEN `expr` PRECEDING', level=4, num='3.5.3.13'), - 
Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.CurrentRow', level=5, num='3.5.3.13.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.UnboundedPreceding.Error', level=5, num='3.5.3.13.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.UnboundedFollowing', level=5, num='3.5.3.13.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprPreceding.Error', level=5, num='3.5.3.13.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprPreceding', level=5, num='3.5.3.13.5'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprFollowing', level=5, num='3.5.3.13.6'), - Heading(name='RANGE', level=3, num='3.5.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame', level=4, num='3.5.4.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.DataTypes.DateAndDateTime', level=4, num='3.5.4.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.DataTypes.IntAndUInt', level=4, num='3.5.4.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.MultipleColumnsInOrderBy.Error', level=4, num='3.5.4.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.MissingFrameExtent.Error', level=4, num='3.5.4.5'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.InvalidFrameExtent.Error', level=4, num='3.5.4.6'), - Heading(name='`CURRENT ROW` Peers', level=4, num='3.5.4.7'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.CurrentRow.Peers', level=4, num='3.5.4.8'), - Heading(name='RANGE CURRENT ROW', level=4, num='3.5.4.9'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.CurrentRow.WithoutOrderBy', level=5, num='3.5.4.9.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.CurrentRow.WithOrderBy', level=5, num='3.5.4.9.2'), - Heading(name='RANGE UNBOUNDED FOLLOWING', level=4, num='3.5.4.10'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedFollowing.Error', level=5, num='3.5.4.10.1'), - Heading(name='RANGE UNBOUNDED PRECEDING', level=4, num='3.5.4.11'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedPreceding.WithoutOrderBy', level=5, num='3.5.4.11.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedPreceding.WithOrderBy', level=5, num='3.5.4.11.2'), - Heading(name='RANGE `expr` PRECEDING', level=4, num='3.5.4.12'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.WithoutOrderBy.Error', level=5, num='3.5.4.12.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.OrderByNonNumericalColumn.Error', level=5, num='3.5.4.12.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.WithOrderBy', level=5, num='3.5.4.12.3'), - Heading(name='RANGE `expr` FOLLOWING', level=4, num='3.5.4.13'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprFollowing.WithoutOrderBy.Error', level=5, num='3.5.4.13.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprFollowing.WithOrderBy.Error', level=5, num='3.5.4.13.2'), - Heading(name='RANGE BETWEEN CURRENT ROW', level=4, num='3.5.4.14'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.CurrentRow', level=5, num='3.5.4.14.1'), - 
Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.UnboundedPreceding.Error', level=5, num='3.5.4.14.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.UnboundedFollowing', level=5, num='3.5.4.14.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprFollowing.WithoutOrderBy.Error', level=5, num='3.5.4.14.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprFollowing.WithOrderBy', level=5, num='3.5.4.14.5'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprPreceding.Error', level=5, num='3.5.4.14.6'), - Heading(name='RANGE BETWEEN UNBOUNDED PRECEDING', level=4, num='3.5.4.15'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.CurrentRow', level=5, num='3.5.4.15.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.UnboundedPreceding.Error', level=5, num='3.5.4.15.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.UnboundedFollowing', level=5, num='3.5.4.15.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprPreceding.WithoutOrderBy.Error', level=5, num='3.5.4.15.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprPreceding.WithOrderBy', level=5, num='3.5.4.15.5'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprFollowing.WithoutOrderBy.Error', level=5, num='3.5.4.15.6'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprFollowing.WithOrderBy', level=5, num='3.5.4.15.7'), - Heading(name='RANGE BETWEEN UNBOUNDED FOLLOWING', level=4, num='3.5.4.16'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.CurrentRow.Error', level=5, num='3.5.4.16.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.UnboundedFollowing.Error', level=5, num='3.5.4.16.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.UnboundedPreceding.Error', level=5, num='3.5.4.16.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.ExprPreceding.Error', level=5, num='3.5.4.16.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.ExprFollowing.Error', level=5, num='3.5.4.16.5'), - Heading(name='RANGE BETWEEN expr PRECEDING', level=4, num='3.5.4.17'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.CurrentRow.WithOrderBy', level=5, num='3.5.4.17.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.CurrentRow.WithoutOrderBy.Error', level=5, num='3.5.4.17.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedPreceding.Error', level=5, num='3.5.4.17.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedFollowing.WithoutOrderBy.Error', level=5, num='3.5.4.17.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedFollowing.WithOrderBy', level=5, num='3.5.4.17.5'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprFollowing.WithoutOrderBy.Error', level=5, 
num='3.5.4.17.6'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprFollowing.WithOrderBy', level=5, num='3.5.4.17.7'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithoutOrderBy.Error', level=5, num='3.5.4.17.8'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithOrderBy.Error', level=5, num='3.5.4.17.9'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithOrderBy', level=5, num='3.5.4.17.10'), - Heading(name='RANGE BETWEEN expr FOLLOWING', level=4, num='3.5.4.18'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.WithoutOrderBy.Error', level=5, num='3.5.4.18.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.WithOrderBy.Error', level=5, num='3.5.4.18.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.ZeroSpecialCase', level=5, num='3.5.4.18.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedFollowing.WithoutOrderBy.Error', level=5, num='3.5.4.18.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedFollowing.WithOrderBy', level=5, num='3.5.4.18.5'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedPreceding.Error', level=5, num='3.5.4.18.6'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.WithoutOrderBy.Error', level=5, num='3.5.4.18.7'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.Error', level=5, num='3.5.4.18.8'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.WithOrderBy.ZeroSpecialCase', level=5, num='3.5.4.18.9'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithoutOrderBy.Error', level=5, num='3.5.4.18.10'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithOrderBy.Error', level=5, num='3.5.4.18.11'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithOrderBy', level=5, num='3.5.4.18.12'), - Heading(name='Frame Extent', level=3, num='3.5.5'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Extent', level=4, num='3.5.5.1'), - Heading(name='Frame Start', level=3, num='3.5.6'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Start', level=4, num='3.5.6.1'), - Heading(name='Frame Between', level=3, num='3.5.7'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Between', level=4, num='3.5.7.1'), - Heading(name='Frame End', level=3, num='3.5.8'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.Frame.End', level=4, num='3.5.8.1'), - Heading(name='`CURRENT ROW`', level=3, num='3.5.9'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.CurrentRow', level=4, num='3.5.9.1'), - Heading(name='`UNBOUNDED PRECEDING`', level=3, num='3.5.10'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.UnboundedPreceding', level=4, num='3.5.10.1'), - Heading(name='`UNBOUNDED FOLLOWING`', level=3, num='3.5.11'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.UnboundedFollowing', level=4, num='3.5.11.1'), - Heading(name='`expr 
PRECEDING`', level=3, num='3.5.12'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.ExprPreceding', level=4, num='3.5.12.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.ExprPreceding.ExprValue', level=4, num='3.5.12.2'), - Heading(name='`expr FOLLOWING`', level=3, num='3.5.13'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.ExprFollowing', level=4, num='3.5.13.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.ExprFollowing.ExprValue', level=4, num='3.5.13.2'), - Heading(name='WINDOW Clause', level=2, num='3.6'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause', level=3, num='3.6.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause.MultipleWindows', level=3, num='3.6.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause.MissingWindowSpec.Error', level=3, num='3.6.3'), - Heading(name='`OVER` Clause', level=2, num='3.7'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause', level=3, num='3.7.1'), - Heading(name='Empty Clause', level=3, num='3.7.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.EmptyOverClause', level=4, num='3.7.2.1'), - Heading(name='Ad-Hoc Window', level=3, num='3.7.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.AdHocWindow', level=4, num='3.7.3.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.AdHocWindow.MissingWindowSpec.Error', level=4, num='3.7.3.2'), - Heading(name='Named Window', level=3, num='3.7.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow', level=4, num='3.7.4.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow.InvalidName.Error', level=4, num='3.7.4.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow.MultipleWindows.Error', level=4, num='3.7.4.3'), - Heading(name='Window Functions', level=2, num='3.8'), - Heading(name='Nonaggregate Functions', level=3, num='3.8.1'), - Heading(name='The `first_value(expr)` Function', level=4, num='3.8.1.1'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.FirstValue', level=5, num='3.8.1.1.1'), - Heading(name='The `last_value(expr)` Function', level=4, num='3.8.1.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.LastValue', level=5, num='3.8.1.2.1'), - Heading(name='The `lag(value, offset)` Function Workaround', level=4, num='3.8.1.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.Lag.Workaround', level=5, num='3.8.1.3.1'), - Heading(name='The `lead(value, offset)` Function Workaround', level=4, num='3.8.1.4'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.Lead.Workaround', level=5, num='3.8.1.4.1'), - Heading(name='The `leadInFrame(expr[, offset, [default]])`', level=4, num='3.8.1.5'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.LeadInFrame', level=5, num='3.8.1.5.1'), - Heading(name='The `lagInFrame(expr[, offset, [default]])`', level=4, num='3.8.1.6'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.LagInFrame', level=5, num='3.8.1.6.1'), - Heading(name='The `rank()` Function', level=4, num='3.8.1.7'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.Rank', level=5, num='3.8.1.7.1'), - Heading(name='The `dense_rank()` Function', level=4, num='3.8.1.8'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.DenseRank', level=5, num='3.8.1.8.1'), - Heading(name='The `row_number()` Function', level=4, num='3.8.1.9'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.RowNumber', level=5, num='3.8.1.9.1'), - 
Heading(name='Aggregate Functions', level=3, num='3.8.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions', level=4, num='3.8.2.1'), - Heading(name='Combinators', level=4, num='3.8.2.2'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions.Combinators', level=5, num='3.8.2.2.1'), - Heading(name='Parametric', level=4, num='3.8.2.3'), - Heading(name='RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions.Parametric', level=5, num='3.8.2.3.1'), - Heading(name='References', level=1, num='4'), + Heading(name="Revision History", level=1, num="1"), + Heading(name="Introduction", level=1, num="2"), + Heading(name="Requirements", level=1, num="3"), + Heading(name="General", level=2, num="3.1"), + Heading(name="RQ.SRS-019.ClickHouse.WindowFunctions", level=3, num="3.1.1"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.NonDistributedTables", + level=3, + num="3.1.2", ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.DistributedTables", + level=3, + num="3.1.3", + ), + Heading(name="Window Specification", level=2, num="3.2"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.WindowSpec", + level=3, + num="3.2.1", + ), + Heading(name="PARTITION Clause", level=2, num="3.3"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause", + level=3, + num="3.3.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.MultipleExpr", + level=3, + num="3.3.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.MissingExpr.Error", + level=3, + num="3.3.3", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.PartitionClause.InvalidExpr.Error", + level=3, + num="3.3.4", + ), + Heading(name="ORDER Clause", level=2, num="3.4"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause", + level=3, + num="3.4.1", + ), + Heading(name="order_clause", level=4, num="3.4.1.1"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.MultipleExprs", + level=3, + num="3.4.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.MissingExpr.Error", + level=3, + num="3.4.3", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.OrderClause.InvalidExpr.Error", + level=3, + num="3.4.4", + ), + Heading(name="FRAME Clause", level=2, num="3.5"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.FrameClause", + level=3, + num="3.5.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.FrameClause.DefaultFrame", + level=3, + num="3.5.2", + ), + Heading(name="ROWS", level=3, num="3.5.3"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame", + level=4, + num="3.5.3.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.MissingFrameExtent.Error", + level=4, + num="3.5.3.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.InvalidFrameExtent.Error", + level=4, + num="3.5.3.3", + ), + Heading(name="ROWS CURRENT ROW", level=4, num="3.5.3.4"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.CurrentRow", + level=5, + num="3.5.3.4.1", + ), + Heading(name="ROWS UNBOUNDED PRECEDING", level=4, num="3.5.3.5"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.UnboundedPreceding", + level=5, + num="3.5.3.5.1", + ), + Heading(name="ROWS `expr` PRECEDING", level=4, num="3.5.3.6"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.ExprPreceding", + level=5, + num="3.5.3.6.1", + ), + Heading(name="ROWS 
UNBOUNDED FOLLOWING", level=4, num="3.5.3.7"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.UnboundedFollowing.Error", + level=5, + num="3.5.3.7.1", + ), + Heading(name="ROWS `expr` FOLLOWING", level=4, num="3.5.3.8"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Start.ExprFollowing.Error", + level=5, + num="3.5.3.8.1", + ), + Heading(name="ROWS BETWEEN CURRENT ROW", level=4, num="3.5.3.9"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.CurrentRow", + level=5, + num="3.5.3.9.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.UnboundedPreceding.Error", + level=5, + num="3.5.3.9.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.ExprPreceding.Error", + level=5, + num="3.5.3.9.3", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.UnboundedFollowing", + level=5, + num="3.5.3.9.4", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.CurrentRow.ExprFollowing", + level=5, + num="3.5.3.9.5", + ), + Heading(name="ROWS BETWEEN UNBOUNDED PRECEDING", level=4, num="3.5.3.10"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.CurrentRow", + level=5, + num="3.5.3.10.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.UnboundedPreceding.Error", + level=5, + num="3.5.3.10.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.ExprPreceding", + level=5, + num="3.5.3.10.3", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.UnboundedFollowing", + level=5, + num="3.5.3.10.4", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedPreceding.ExprFollowing", + level=5, + num="3.5.3.10.5", + ), + Heading(name="ROWS BETWEEN UNBOUNDED FOLLOWING", level=4, num="3.5.3.11"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.UnboundedFollowing.Error", + level=5, + num="3.5.3.11.1", + ), + Heading(name="ROWS BETWEEN `expr` FOLLOWING", level=4, num="3.5.3.12"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.Error", + level=5, + num="3.5.3.12.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.ExprFollowing.Error", + level=5, + num="3.5.3.12.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.UnboundedFollowing", + level=5, + num="3.5.3.12.3", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprFollowing.ExprFollowing", + level=5, + num="3.5.3.12.4", + ), + Heading(name="ROWS BETWEEN `expr` PRECEDING", level=4, num="3.5.3.13"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.CurrentRow", + level=5, + num="3.5.3.13.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.UnboundedPreceding.Error", + level=5, + num="3.5.3.13.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.UnboundedFollowing", + level=5, + num="3.5.3.13.3", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprPreceding.Error", + level=5, + num="3.5.3.13.4", + ), + Heading( + 
name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprPreceding", + level=5, + num="3.5.3.13.5", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowsFrame.Between.ExprPreceding.ExprFollowing", + level=5, + num="3.5.3.13.6", + ), + Heading(name="RANGE", level=3, num="3.5.4"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame", + level=4, + num="3.5.4.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.DataTypes.DateAndDateTime", + level=4, + num="3.5.4.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.DataTypes.IntAndUInt", + level=4, + num="3.5.4.3", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.MultipleColumnsInOrderBy.Error", + level=4, + num="3.5.4.4", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.MissingFrameExtent.Error", + level=4, + num="3.5.4.5", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.InvalidFrameExtent.Error", + level=4, + num="3.5.4.6", + ), + Heading(name="`CURRENT ROW` Peers", level=4, num="3.5.4.7"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.CurrentRow.Peers", + level=4, + num="3.5.4.8", + ), + Heading(name="RANGE CURRENT ROW", level=4, num="3.5.4.9"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.CurrentRow.WithoutOrderBy", + level=5, + num="3.5.4.9.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.CurrentRow.WithOrderBy", + level=5, + num="3.5.4.9.2", + ), + Heading(name="RANGE UNBOUNDED FOLLOWING", level=4, num="3.5.4.10"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedFollowing.Error", + level=5, + num="3.5.4.10.1", + ), + Heading(name="RANGE UNBOUNDED PRECEDING", level=4, num="3.5.4.11"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedPreceding.WithoutOrderBy", + level=5, + num="3.5.4.11.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.UnboundedPreceding.WithOrderBy", + level=5, + num="3.5.4.11.2", + ), + Heading(name="RANGE `expr` PRECEDING", level=4, num="3.5.4.12"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.WithoutOrderBy.Error", + level=5, + num="3.5.4.12.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.OrderByNonNumericalColumn.Error", + level=5, + num="3.5.4.12.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprPreceding.WithOrderBy", + level=5, + num="3.5.4.12.3", + ), + Heading(name="RANGE `expr` FOLLOWING", level=4, num="3.5.4.13"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprFollowing.WithoutOrderBy.Error", + level=5, + num="3.5.4.13.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Start.ExprFollowing.WithOrderBy.Error", + level=5, + num="3.5.4.13.2", + ), + Heading(name="RANGE BETWEEN CURRENT ROW", level=4, num="3.5.4.14"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.CurrentRow", + level=5, + num="3.5.4.14.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.UnboundedPreceding.Error", + level=5, + num="3.5.4.14.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.UnboundedFollowing", + level=5, + num="3.5.4.14.3", + ), + Heading( + 
name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprFollowing.WithoutOrderBy.Error", + level=5, + num="3.5.4.14.4", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprFollowing.WithOrderBy", + level=5, + num="3.5.4.14.5", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.CurrentRow.ExprPreceding.Error", + level=5, + num="3.5.4.14.6", + ), + Heading(name="RANGE BETWEEN UNBOUNDED PRECEDING", level=4, num="3.5.4.15"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.CurrentRow", + level=5, + num="3.5.4.15.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.UnboundedPreceding.Error", + level=5, + num="3.5.4.15.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.UnboundedFollowing", + level=5, + num="3.5.4.15.3", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprPreceding.WithoutOrderBy.Error", + level=5, + num="3.5.4.15.4", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprPreceding.WithOrderBy", + level=5, + num="3.5.4.15.5", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprFollowing.WithoutOrderBy.Error", + level=5, + num="3.5.4.15.6", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedPreceding.ExprFollowing.WithOrderBy", + level=5, + num="3.5.4.15.7", + ), + Heading(name="RANGE BETWEEN UNBOUNDED FOLLOWING", level=4, num="3.5.4.16"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.CurrentRow.Error", + level=5, + num="3.5.4.16.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.UnboundedFollowing.Error", + level=5, + num="3.5.4.16.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.UnboundedPreceding.Error", + level=5, + num="3.5.4.16.3", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.ExprPreceding.Error", + level=5, + num="3.5.4.16.4", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.UnboundedFollowing.ExprFollowing.Error", + level=5, + num="3.5.4.16.5", + ), + Heading(name="RANGE BETWEEN expr PRECEDING", level=4, num="3.5.4.17"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.CurrentRow.WithOrderBy", + level=5, + num="3.5.4.17.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.CurrentRow.WithoutOrderBy.Error", + level=5, + num="3.5.4.17.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedPreceding.Error", + level=5, + num="3.5.4.17.3", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedFollowing.WithoutOrderBy.Error", + level=5, + num="3.5.4.17.4", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.UnboundedFollowing.WithOrderBy", + level=5, + num="3.5.4.17.5", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprFollowing.WithoutOrderBy.Error", + level=5, + num="3.5.4.17.6", + ), + Heading( + 
name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprFollowing.WithOrderBy", + level=5, + num="3.5.4.17.7", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithoutOrderBy.Error", + level=5, + num="3.5.4.17.8", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithOrderBy.Error", + level=5, + num="3.5.4.17.9", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprPreceding.ExprPreceding.WithOrderBy", + level=5, + num="3.5.4.17.10", + ), + Heading(name="RANGE BETWEEN expr FOLLOWING", level=4, num="3.5.4.18"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.WithoutOrderBy.Error", + level=5, + num="3.5.4.18.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.WithOrderBy.Error", + level=5, + num="3.5.4.18.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.CurrentRow.ZeroSpecialCase", + level=5, + num="3.5.4.18.3", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedFollowing.WithoutOrderBy.Error", + level=5, + num="3.5.4.18.4", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedFollowing.WithOrderBy", + level=5, + num="3.5.4.18.5", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.UnboundedPreceding.Error", + level=5, + num="3.5.4.18.6", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.WithoutOrderBy.Error", + level=5, + num="3.5.4.18.7", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.Error", + level=5, + num="3.5.4.18.8", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprPreceding.WithOrderBy.ZeroSpecialCase", + level=5, + num="3.5.4.18.9", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithoutOrderBy.Error", + level=5, + num="3.5.4.18.10", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithOrderBy.Error", + level=5, + num="3.5.4.18.11", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RangeFrame.Between.ExprFollowing.ExprFollowing.WithOrderBy", + level=5, + num="3.5.4.18.12", + ), + Heading(name="Frame Extent", level=3, num="3.5.5"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Extent", + level=4, + num="3.5.5.1", + ), + Heading(name="Frame Start", level=3, num="3.5.6"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Start", + level=4, + num="3.5.6.1", + ), + Heading(name="Frame Between", level=3, num="3.5.7"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.Frame.Between", + level=4, + num="3.5.7.1", + ), + Heading(name="Frame End", level=3, num="3.5.8"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.Frame.End", + level=4, + num="3.5.8.1", + ), + Heading(name="`CURRENT ROW`", level=3, num="3.5.9"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.CurrentRow", + level=4, + num="3.5.9.1", + ), + Heading(name="`UNBOUNDED PRECEDING`", level=3, num="3.5.10"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.UnboundedPreceding", + level=4, + 
num="3.5.10.1", + ), + Heading(name="`UNBOUNDED FOLLOWING`", level=3, num="3.5.11"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.UnboundedFollowing", + level=4, + num="3.5.11.1", + ), + Heading(name="`expr PRECEDING`", level=3, num="3.5.12"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.ExprPreceding", + level=4, + num="3.5.12.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.ExprPreceding.ExprValue", + level=4, + num="3.5.12.2", + ), + Heading(name="`expr FOLLOWING`", level=3, num="3.5.13"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.ExprFollowing", + level=4, + num="3.5.13.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.ExprFollowing.ExprValue", + level=4, + num="3.5.13.2", + ), + Heading(name="WINDOW Clause", level=2, num="3.6"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause", + level=3, + num="3.6.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause.MultipleWindows", + level=3, + num="3.6.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.WindowClause.MissingWindowSpec.Error", + level=3, + num="3.6.3", + ), + Heading(name="`OVER` Clause", level=2, num="3.7"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause", + level=3, + num="3.7.1", + ), + Heading(name="Empty Clause", level=3, num="3.7.2"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.EmptyOverClause", + level=4, + num="3.7.2.1", + ), + Heading(name="Ad-Hoc Window", level=3, num="3.7.3"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.AdHocWindow", + level=4, + num="3.7.3.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.AdHocWindow.MissingWindowSpec.Error", + level=4, + num="3.7.3.2", + ), + Heading(name="Named Window", level=3, num="3.7.4"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow", + level=4, + num="3.7.4.1", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow.InvalidName.Error", + level=4, + num="3.7.4.2", + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.OverClause.NamedWindow.MultipleWindows.Error", + level=4, + num="3.7.4.3", + ), + Heading(name="Window Functions", level=2, num="3.8"), + Heading(name="Nonaggregate Functions", level=3, num="3.8.1"), + Heading(name="The `first_value(expr)` Function", level=4, num="3.8.1.1"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.FirstValue", + level=5, + num="3.8.1.1.1", + ), + Heading(name="The `last_value(expr)` Function", level=4, num="3.8.1.2"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.LastValue", + level=5, + num="3.8.1.2.1", + ), + Heading( + name="The `lag(value, offset)` Function Workaround", level=4, num="3.8.1.3" + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.Lag.Workaround", + level=5, + num="3.8.1.3.1", + ), + Heading( + name="The `lead(value, offset)` Function Workaround", level=4, num="3.8.1.4" + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.Lead.Workaround", + level=5, + num="3.8.1.4.1", + ), + Heading( + name="The `leadInFrame(expr[, offset, [default]])`", level=4, num="3.8.1.5" + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.LeadInFrame", + level=5, + num="3.8.1.5.1", + ), + Heading( + name="The `lagInFrame(expr[, offset, [default]])`", level=4, num="3.8.1.6" + ), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.LagInFrame", + level=5, + num="3.8.1.6.1", + ), + Heading(name="The `rank()` 
Function", level=4, num="3.8.1.7"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.Rank", level=5, num="3.8.1.7.1" + ), + Heading(name="The `dense_rank()` Function", level=4, num="3.8.1.8"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.DenseRank", + level=5, + num="3.8.1.8.1", + ), + Heading(name="The `row_number()` Function", level=4, num="3.8.1.9"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.RowNumber", + level=5, + num="3.8.1.9.1", + ), + Heading(name="Aggregate Functions", level=3, num="3.8.2"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions", + level=4, + num="3.8.2.1", + ), + Heading(name="Combinators", level=4, num="3.8.2.2"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions.Combinators", + level=5, + num="3.8.2.2.1", + ), + Heading(name="Parametric", level=4, num="3.8.2.3"), + Heading( + name="RQ.SRS-019.ClickHouse.WindowFunctions.AggregateFunctions.Parametric", + level=5, + num="3.8.2.3.1", + ), + Heading(name="References", level=1, num="4"), + ), requirements=( RQ_SRS_019_ClickHouse_WindowFunctions, RQ_SRS_019_ClickHouse_WindowFunctions_NonDistributedTables, @@ -3677,8 +4344,8 @@ SRS019_ClickHouse_Window_Functions = Specification( RQ_SRS_019_ClickHouse_WindowFunctions_AggregateFunctions, RQ_SRS_019_ClickHouse_WindowFunctions_AggregateFunctions_Combinators, RQ_SRS_019_ClickHouse_WindowFunctions_AggregateFunctions_Parametric, - ), - content=''' + ), + content=""" # SRS019 ClickHouse Window Functions # Software Requirements Specification @@ -6037,4 +6704,5 @@ version: 1.0 [GitHub]: https://github.com [PostreSQL]: https://www.postgresql.org/docs/9.2/tutorial-window.html [MySQL]: https://dev.mysql.com/doc/refman/8.0/en/window-functions.html -''') +""", +) diff --git a/tests/testflows/window_functions/tests/aggregate_funcs.py b/tests/testflows/window_functions/tests/aggregate_funcs.py index 67a5f2cfb4f..faac8a84c49 100644 --- a/tests/testflows/window_functions/tests/aggregate_funcs.py +++ b/tests/testflows/window_functions/tests/aggregate_funcs.py @@ -4,109 +4,117 @@ from testflows.asserts import values, error, snapshot from window_functions.requirements import * from window_functions.tests.common import * + @TestOutline(Scenario) -@Examples("func", [ - ("count(salary)",), - ("min(salary)",), - ("max(salary)",), - ("sum(salary)",), - ("avg(salary)",), - ("any(salary)",), - ("stddevPop(salary)",), - ("stddevSamp(salary)",), - ("varPop(salary)",), - ("varSamp(salary)",), - ("covarPop(salary, 2000)",), - ("covarSamp(salary, 2000)",), - ("anyHeavy(salary)",), - ("anyLast(salary)",), - ("argMin(salary, 5000)",), - ("argMax(salary, 5000)",), - ("avgWeighted(salary, 1)",), - ("corr(salary, 0.5)",), - ("topK(salary)",), - ("topKWeighted(salary, 1)",), - ("groupArray(salary)",), - ("groupUniqArray(salary)",), - ("groupArrayInsertAt(salary, 0)",), - ("groupArrayMovingSum(salary)",), - ("groupArrayMovingAvg(salary)",), - ("groupArraySample(3, 1234)(salary)",), - ("groupBitAnd(toUInt8(salary))",), - ("groupBitOr(toUInt8(salary))",), - ("groupBitXor(toUInt8(salary))",), - ("groupBitmap(toUInt8(salary))",), - # #("groupBitmapAnd",), - # #("groupBitmapOr",), - # #("groupBitmapXor",), - ("sumWithOverflow(salary)",), - ("deltaSum(salary)",), - ("sumMap([5000], [salary])",), - ("minMap([5000], [salary])",), - ("maxMap([5000], [salary])",), - # #("initializeAggregation",), - ("skewPop(salary)",), - ("skewSamp(salary)",), - ("kurtPop(salary)",), - ("kurtSamp(salary)",), - ("uniq(salary)",), - 
("uniqExact(salary)",), - ("uniqCombined(salary)",), - ("uniqCombined64(salary)",), - ("uniqHLL12(salary)",), - ("quantile(salary)",), - ("quantiles(0.5)(salary)",), - ("quantileExact(salary)",), - ("quantileExactWeighted(salary, 1)",), - ("quantileTiming(salary)",), - ("quantileTimingWeighted(salary, 1)",), - ("quantileDeterministic(salary, 1234)",), - ("quantileTDigest(salary)",), - ("quantileTDigestWeighted(salary, 1)",), - ("simpleLinearRegression(salary, empno)",), - ("stochasticLinearRegression(salary, 1)",), - ("stochasticLogisticRegression(salary, 1)",), - #("categoricalInformationValue(salary, 0)",), - ("studentTTest(salary, 1)",), - ("welchTTest(salary, 1)",), - ("mannWhitneyUTest(salary, 1)",), - ("median(salary)",), - ("rankCorr(salary, 0.5)",), -]) +@Examples( + "func", + [ + ("count(salary)",), + ("min(salary)",), + ("max(salary)",), + ("sum(salary)",), + ("avg(salary)",), + ("any(salary)",), + ("stddevPop(salary)",), + ("stddevSamp(salary)",), + ("varPop(salary)",), + ("varSamp(salary)",), + ("covarPop(salary, 2000)",), + ("covarSamp(salary, 2000)",), + ("anyHeavy(salary)",), + ("anyLast(salary)",), + ("argMin(salary, 5000)",), + ("argMax(salary, 5000)",), + ("avgWeighted(salary, 1)",), + ("corr(salary, 0.5)",), + ("topK(salary)",), + ("topKWeighted(salary, 1)",), + ("groupArray(salary)",), + ("groupUniqArray(salary)",), + ("groupArrayInsertAt(salary, 0)",), + ("groupArrayMovingSum(salary)",), + ("groupArrayMovingAvg(salary)",), + ("groupArraySample(3, 1234)(salary)",), + ("groupBitAnd(toUInt8(salary))",), + ("groupBitOr(toUInt8(salary))",), + ("groupBitXor(toUInt8(salary))",), + ("groupBitmap(toUInt8(salary))",), + # #("groupBitmapAnd",), + # #("groupBitmapOr",), + # #("groupBitmapXor",), + ("sumWithOverflow(salary)",), + ("deltaSum(salary)",), + ("sumMap([5000], [salary])",), + ("minMap([5000], [salary])",), + ("maxMap([5000], [salary])",), + # #("initializeAggregation",), + ("skewPop(salary)",), + ("skewSamp(salary)",), + ("kurtPop(salary)",), + ("kurtSamp(salary)",), + ("uniq(salary)",), + ("uniqExact(salary)",), + ("uniqCombined(salary)",), + ("uniqCombined64(salary)",), + ("uniqHLL12(salary)",), + ("quantile(salary)",), + ("quantiles(0.5)(salary)",), + ("quantileExact(salary)",), + ("quantileExactWeighted(salary, 1)",), + ("quantileTiming(salary)",), + ("quantileTimingWeighted(salary, 1)",), + ("quantileDeterministic(salary, 1234)",), + ("quantileTDigest(salary)",), + ("quantileTDigestWeighted(salary, 1)",), + ("simpleLinearRegression(salary, empno)",), + ("stochasticLinearRegression(salary, 1)",), + ("stochasticLogisticRegression(salary, 1)",), + # ("categoricalInformationValue(salary, 0)",), + ("studentTTest(salary, 1)",), + ("welchTTest(salary, 1)",), + ("mannWhitneyUTest(salary, 1)",), + ("median(salary)",), + ("rankCorr(salary, 0.5)",), + ], +) def aggregate_funcs_over_rows_frame(self, func): - """Checking aggregate funcs over rows frame. - """ - execute_query(f""" + """Checking aggregate funcs over rows frame.""" + execute_query( + f""" SELECT {func} OVER (ORDER BY salary, empno ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS func FROM empsalary """ ) + @TestScenario def avg_with_nulls(self): - """Check `avg` aggregate function using a window that contains NULLs. 
- """ - expected = convert_output(""" + """Check `avg` aggregate function using a window that contains NULLs.""" + expected = convert_output( + """ i | avg ---+-------------------- 1 | 1.5 2 | 2 3 | \\N 4 | \\N - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT i, avg(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS avg FROM values('i Int32, v Nullable(Int32)', (1,1),(2,2),(3,NULL),(4,NULL)) """, - expected=expected + expected=expected, ) + @TestScenario def var_pop(self): - """Check `var_pop` aggregate function ove a window. - """ - expected = convert_output(""" + """Check `var_pop` aggregate function ove a window.""" + expected = convert_output( + """ var_pop ----------------------- 21704 @@ -114,20 +122,23 @@ def var_pop(self): 11266.666666666666 4225 0 - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT VAR_POP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS var_pop FROM values('i Int8, n Int32', (1,600),(2,470),(3,170),(4,430),(5,300)) """, - expected=expected + expected=expected, ) + @TestScenario def var_samp(self): - """Check `var_samp` aggregate function ove a window. - """ - expected = convert_output(""" + """Check `var_samp` aggregate function ove a window.""" + expected = convert_output( + """ var_samp ----------------------- 27130 @@ -135,20 +146,23 @@ def var_samp(self): 16900 8450 nan - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT VAR_SAMP(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS var_samp FROM VALUES('i Int8, n Int16',(1,600),(2,470),(3,170),(4,430),(5,300)) """, - expected=expected + expected=expected, ) + @TestScenario def stddevpop(self): - """Check `stddevPop` aggregate function ove a window. - """ - expected = convert_output(""" + """Check `stddevPop` aggregate function ove a window.""" + expected = convert_output( + """ stddev_pop --------------------- 147.32277488562318 @@ -157,20 +171,23 @@ def stddevpop(self): 106.14455552060438 65 0 - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT stddevPop(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS stddev_pop FROM VALUES('i Int8, n Nullable(Int16)',(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) """, - expected=expected + expected=expected, ) + @TestScenario def stddevsamp(self): - """Check `stddevSamp` aggregate function ove a window. - """ - expected = convert_output(""" + """Check `stddevSamp` aggregate function ove a window.""" + expected = convert_output( + """ stddev_samp --------------------- 164.7118696390761 @@ -179,20 +196,23 @@ def stddevsamp(self): 130 91.92388155425118 nan - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT stddevSamp(n) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS stddev_samp FROM VALUES('i Int8, n Nullable(Int16)',(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) """, - expected=expected + expected=expected, ) + @TestScenario def aggregate_function_recovers_from_nan(self): - """Check that aggregate function can recover from `nan` value inside a window. 
- """ - expected = convert_output(""" + """Check that aggregate function can recover from `nan` value inside a window.""" + expected = convert_output( + """ a | b | sum ---+-----+----- 1 | 1 | 1 @@ -200,21 +220,24 @@ def aggregate_function_recovers_from_nan(self): 3 | nan | nan 4 | 3 | nan 5 | 4 | 7 - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT a, b, SUM(b) OVER(ORDER BY a ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS sum FROM VALUES('a Int8, b Float64',(1,1),(2,2),(3,nan),(4,3),(5,4)) """, - expected=expected + expected=expected, ) + @TestScenario def bit_functions(self): - """Check trying to use bitwise functions over a window. - """ - expected = convert_output(""" + """Check trying to use bitwise functions over a window.""" + expected = convert_output( + """ i | b | bool_and | bool_or ---+---+----------+--------- 1 | 1 | 1 | 1 @@ -222,21 +245,24 @@ def bit_functions(self): 3 | 0 | 0 | 0 4 | 0 | 0 | 1 5 | 1 | 1 | 1 - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT i, b, groupBitAnd(b) OVER w AS bool_and, groupBitOr(b) OVER w AS bool_or FROM VALUES('i Int8, b UInt8', (1,1), (2,1), (3,0), (4,0), (5,1)) WINDOW w AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING) """, - expected=expected + expected=expected, ) + @TestScenario def sum(self): - """Check calculation of sum over a window. - """ - expected = convert_output(""" + """Check calculation of sum over a window.""" + expected = convert_output( + """ sum_1 | ten | four -------+-----+------ 0 | 0 | 0 @@ -249,18 +275,20 @@ def sum(self): 0 | 4 | 0 1 | 7 | 1 1 | 9 | 1 - """) + """ + ) execute_query( "SELECT sum(four) OVER (PARTITION BY ten ORDER BY unique2) AS sum_1, ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) + @TestScenario def nested_aggregates(self): - """Check using nested aggregates over a window. - """ - expected = convert_output(""" + """Check using nested aggregates over a window.""" + expected = convert_output( + """ ten | two | gsum | wsum -----+-----+-------+-------- 0 | 0 | 45000 | 45000 @@ -273,18 +301,20 @@ def nested_aggregates(self): 5 | 1 | 50000 | 144000 7 | 1 | 52000 | 196000 9 | 1 | 54000 | 250000 - """) + """ + ) execute_query( "SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum FROM tenk1 GROUP BY ten, two", - expected=expected + expected=expected, ) + @TestScenario def aggregate_and_window_function_in_the_same_window(self): - """Check using aggregate and window function in the same window. - """ - expected = convert_output(""" + """Check using aggregate and window function in the same window.""" + expected = convert_output( + """ sum | rank -------+------ 6000 | 1 @@ -297,35 +327,36 @@ def aggregate_and_window_function_in_the_same_window(self): 5000 | 1 14600 | 2 14600 | 2 - """) + """ + ) execute_query( "SELECT sum(salary) OVER w AS sum, rank() OVER w AS rank FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary DESC)", - expected=expected + expected=expected, ) + @TestScenario def ungrouped_aggregate_over_empty_row_set(self): - """Check using window function with ungrouped aggregate over an empty row set. 
- """ - expected = convert_output(""" + """Check using window function with ungrouped aggregate over an empty row set.""" + expected = convert_output( + """ sum ----- 0 - """) + """ + ) execute_query( "SELECT SUM(COUNT(number)) OVER () AS sum FROM numbers(10) WHERE number=42", - expected=expected + expected=expected, ) + @TestFeature @Name("aggregate funcs") -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_AggregateFunctions("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_AggregateFunctions("1.0")) def feature(self): - """Check using aggregate functions over windows. - """ + """Check using aggregate functions over windows.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/common.py b/tests/testflows/window_functions/tests/common.py index 4f8b8081bf9..b0bca328e4d 100644 --- a/tests/testflows/window_functions/tests/common.py +++ b/tests/testflows/window_functions/tests/common.py @@ -8,52 +8,77 @@ from testflows.core.name import basename, parentname from testflows._core.testtype import TestSubType from testflows.asserts import values, error, snapshot + def window_frame_error(): return (36, "Exception: Window frame") + def frame_start_error(): return (36, "Exception: Frame start") + def frame_end_error(): return (36, "Exception: Frame end") + def frame_offset_nonnegative_error(): return syntax_error() + def frame_end_unbounded_preceding_error(): return (36, "Exception: Frame end cannot be UNBOUNDED PRECEDING") + def frame_range_offset_error(): return (48, "Exception: The RANGE OFFSET frame") + def frame_requires_order_by_error(): - return (36, "Exception: The RANGE OFFSET window frame requires exactly one ORDER BY column, 0 given") + return ( + 36, + "Exception: The RANGE OFFSET window frame requires exactly one ORDER BY column, 0 given", + ) + def syntax_error(): return (62, "Exception: Syntax error") + def groups_frame_error(): return (48, "Exception: Window frame 'Groups' is not implemented") + def getuid(): if current().subtype == TestSubType.Example: - testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}" + testname = ( + f"{basename(parentname(current().name)).replace(' ', '_').replace(',','')}" + ) else: testname = f"{basename(current().name).replace(' ', '_').replace(',','')}" - return testname + "_" + str(uuid.uuid1()).replace('-', '_') + return testname + "_" + str(uuid.uuid1()).replace("-", "_") + def convert_output(s): - """Convert expected output to TSV format. - """ - return '\n'.join([l.strip() for i, l in enumerate(re.sub('\s+\|\s+', '\t', s).strip().splitlines()) if i != 1]) + """Convert expected output to TSV format.""" + return "\n".join( + [ + l.strip() + for i, l in enumerate(re.sub("\s+\|\s+", "\t", s).strip().splitlines()) + if i != 1 + ] + ) -def execute_query(sql, expected=None, exitcode=None, message=None, format="TabSeparatedWithNames"): - """Execute SQL query and compare the output to the snapshot. 
- """ + +def execute_query( + sql, expected=None, exitcode=None, message=None, format="TabSeparatedWithNames" +): + """Execute SQL query and compare the output to the snapshot.""" name = basename(current().name) with When("I execute query", description=sql): - r = current().context.node.query(sql + " FORMAT " + format, exitcode=exitcode, message=message) + r = current().context.node.query( + sql + " FORMAT " + format, exitcode=exitcode, message=message + ) if message is None: if expected is not None: @@ -62,18 +87,21 @@ def execute_query(sql, expected=None, exitcode=None, message=None, format="TabSe else: with Then("I check output against snapshot"): with values() as that: - assert that(snapshot("\n" + r.output.strip() + "\n", "tests", name=name, encoder=str)), error() + assert that( + snapshot( + "\n" + r.output.strip() + "\n", + "tests", + name=name, + encoder=str, + ) + ), error() + @TestStep(Given) def t1_table(self, name="t1", distributed=False): - """Create t1 table. - """ + """Create t1 table.""" table = None - data = [ - "(1, 1)", - "(1, 2)", - "(2, 2)" - ] + data = ["(1, 1)", "(1, 2)", "(2, 2)"] if not distributed: with By("creating table"): @@ -97,10 +125,18 @@ def t1_table(self, name="t1", distributed=False): f2 Int8 ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') ORDER BY tuple() """ - create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster") + create_table( + name=name + "_source", statement=sql, on_cluster="sharded_cluster" + ) with And("a distributed table"): - sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, f1 % toUInt8(getMacro('shard')))" + sql = ( + "CREATE TABLE {name} AS " + + name + + "_source" + + " ENGINE = Distributed(sharded_cluster, default, " + + f"{name + '_source'}, f1 % toUInt8(getMacro('shard')))" + ) table = create_table(name=name, statement=sql) with And("populating table with data"): @@ -110,10 +146,10 @@ def t1_table(self, name="t1", distributed=False): return table + @TestStep(Given) def datetimes_table(self, name="datetimes", distributed=False): - """Create datetimes table. 
- """ + """Create datetimes table.""" table = None data = [ "(1, '2000-10-19 10:23:54', '2000-10-19 10:23:54')", @@ -125,7 +161,7 @@ def datetimes_table(self, name="datetimes", distributed=False): "(7, '2005-10-19 10:23:54', '2005-10-19 10:23:54')", "(8, '2006-10-19 10:23:54', '2006-10-19 10:23:54')", "(9, '2007-10-19 10:23:54', '2007-10-19 10:23:54')", - "(10, '2008-10-19 10:23:54', '2008-10-19 10:23:54')" + "(10, '2008-10-19 10:23:54', '2008-10-19 10:23:54')", ] if not distributed: @@ -152,10 +188,18 @@ def datetimes_table(self, name="datetimes", distributed=False): f_timestamp DateTime ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') ORDER BY tuple() """ - create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster") + create_table( + name=name + "_source", statement=sql, on_cluster="sharded_cluster" + ) with And("a distributed table"): - sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, id % toUInt8(getMacro('shard')))" + sql = ( + "CREATE TABLE {name} AS " + + name + + "_source" + + " ENGINE = Distributed(sharded_cluster, default, " + + f"{name + '_source'}, id % toUInt8(getMacro('shard')))" + ) table = create_table(name=name, statement=sql) with And("populating table with data"): @@ -165,10 +209,10 @@ def datetimes_table(self, name="datetimes", distributed=False): return table + @TestStep(Given) def numerics_table(self, name="numerics", distributed=False): - """Create numerics tables. - """ + """Create numerics tables.""" table = None data = [ @@ -181,7 +225,7 @@ def numerics_table(self, name="numerics", distributed=False): "(6, 2, 2, 2)", "(7, 100, 100, 100)", "(8, 'infinity', 'infinity', toDecimal64(1000,15))", - "(9, 'NaN', 'NaN', 0)" + "(9, 'NaN', 'NaN', 0)", ] if not distributed: @@ -210,10 +254,18 @@ def numerics_table(self, name="numerics", distributed=False): f_numeric Decimal64(15) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') ORDER BY tuple(); """ - create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster") + create_table( + name=name + "_source", statement=sql, on_cluster="sharded_cluster" + ) with And("a distributed table"): - sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, id % toUInt8(getMacro('shard')))" + sql = ( + "CREATE TABLE {name} AS " + + name + + "_source" + + " ENGINE = Distributed(sharded_cluster, default, " + + f"{name + '_source'}, id % toUInt8(getMacro('shard')))" + ) table = create_table(name=name, statement=sql) with And("populating table with data"): @@ -223,10 +275,10 @@ def numerics_table(self, name="numerics", distributed=False): return table + @TestStep(Given) def tenk1_table(self, name="tenk1", distributed=False): - """Create tenk1 table. 
- """ + """Create tenk1 table.""" table = None if not distributed: @@ -256,7 +308,11 @@ def tenk1_table(self, name="tenk1", distributed=False): with And("populating table with data"): datafile = os.path.join(current_dir(), "tenk.data") debug(datafile) - self.context.cluster.command(None, f"cat \"{datafile}\" | {self.context.node.cluster.docker_compose} exec -T {self.context.node.name} clickhouse client -q \"INSERT INTO {name} FORMAT TSV\"", exitcode=0) + self.context.cluster.command( + None, + f'cat "{datafile}" | {self.context.node.cluster.docker_compose} exec -T {self.context.node.name} clickhouse client -q "INSERT INTO {name} FORMAT TSV"', + exitcode=0, + ) else: with By("creating a table"): sql = """ @@ -279,10 +335,18 @@ def tenk1_table(self, name="tenk1", distributed=False): string4 String ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') ORDER BY tuple() """ - create_table(name=name + '_source', statement=sql, on_cluster="sharded_cluster") + create_table( + name=name + "_source", statement=sql, on_cluster="sharded_cluster" + ) with And("a distributed table"): - sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, unique1 % toUInt8(getMacro('shard')))" + sql = ( + "CREATE TABLE {name} AS " + + name + + "_source" + + " ENGINE = Distributed(sharded_cluster, default, " + + f"{name + '_source'}, unique1 % toUInt8(getMacro('shard')))" + ) table = create_table(name=name, statement=sql) with And("populating table with data"): @@ -291,22 +355,24 @@ def tenk1_table(self, name="tenk1", distributed=False): with open(datafile, "r") as file: lines = file.readlines() - chunks = [lines[i:i + 1000] for i in range(0, len(lines), 1000)] + chunks = [lines[i : i + 1000] for i in range(0, len(lines), 1000)] for chunk in chunks: with tempfile.NamedTemporaryFile() as file: - file.write(''.join(chunk).encode("utf-8")) + file.write("".join(chunk).encode("utf-8")) file.flush() - self.context.cluster.command(None, - f"cat \"{file.name}\" | {self.context.node.cluster.docker_compose} exec -T {self.context.node.name} clickhouse client -q \"INSERT INTO {table} FORMAT TSV\"", - exitcode=0) + self.context.cluster.command( + None, + f'cat "{file.name}" | {self.context.node.cluster.docker_compose} exec -T {self.context.node.name} clickhouse client -q "INSERT INTO {table} FORMAT TSV"', + exitcode=0, + ) return table + @TestStep(Given) def empsalary_table(self, name="empsalary", distributed=False): - """Create employee salary reference table. 
- """ + """Create employee salary reference table.""" table = None data = [ @@ -319,7 +385,7 @@ def empsalary_table(self, name="empsalary", distributed=False): "('develop', 9, 4500, '2008-01-01')", "('sales', 3, 4800, '2007-08-01')", "('develop', 8, 6000, '2006-10-01')", - "('develop', 11, 5200, '2007-08-15')" + "('develop', 11, 5200, '2007-08-15')", ] if not distributed: @@ -341,7 +407,7 @@ def empsalary_table(self, name="empsalary", distributed=False): else: with By("creating replicated source tables"): - sql = """ + sql = """ CREATE TABLE {name} ON CLUSTER sharded_cluster ( depname LowCardinality(String), empno UInt64, @@ -350,27 +416,40 @@ def empsalary_table(self, name="empsalary", distributed=False): ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') ORDER BY enroll_date """ - create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster") + create_table( + name=name + "_source", statement=sql, on_cluster="sharded_cluster" + ) with And("a distributed table"): - sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, empno % toUInt8(getMacro('shard')))" + sql = ( + "CREATE TABLE {name} AS " + + name + + "_source" + + " ENGINE = Distributed(sharded_cluster, default, " + + f"{name + '_source'}, empno % toUInt8(getMacro('shard')))" + ) table = create_table(name=name, statement=sql) with And("populating distributed table with data"): - with By("inserting one data row at a time", description="so that data is sharded between nodes"): + with By( + "inserting one data row at a time", + description="so that data is sharded between nodes", + ): for row in data: - self.context.node.query(f"INSERT INTO {table} VALUES {row}", - settings=[("insert_distributed_sync", "1")]) + self.context.node.query( + f"INSERT INTO {table} VALUES {row}", + settings=[("insert_distributed_sync", "1")], + ) with And("dumping all the data in the table"): self.context.node.query(f"SELECT * FROM {table}") return table + @TestStep(Given) def create_table(self, name, statement, on_cluster=False): - """Create table. - """ + """Create table.""" node = current().context.node try: with Given(f"I have a {name} table"): diff --git a/tests/testflows/window_functions/tests/errors.py b/tests/testflows/window_functions/tests/errors.py index d7b80ed7cd8..ee9452eecba 100644 --- a/tests/testflows/window_functions/tests/errors.py +++ b/tests/testflows/window_functions/tests/errors.py @@ -3,6 +3,7 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario def error_using_non_window_function(self): """Check that trying to use non window or aggregate function over a window @@ -11,87 +12,87 @@ def error_using_non_window_function(self): exitcode = 63 message = "DB::Exception: Unknown aggregate function numbers" - sql = ("SELECT numbers(1, 100) OVER () FROM empsalary") + sql = "SELECT numbers(1, 100) OVER () FROM empsalary" with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_order_by_another_window_function(self): - """Check that trying to order by another window function returns an error. 
- """ + """Check that trying to order by another window function returns an error.""" exitcode = 184 message = "DB::Exception: Window function rank() OVER (ORDER BY random() ASC) is found inside window definition in query" - sql = ("SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random()))") + sql = "SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random()))" with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_window_function_in_where(self): - """Check that trying to use window function in `WHERE` returns an error. - """ + """Check that trying to use window function in `WHERE` returns an error.""" exitcode = 184 message = "DB::Exception: Window function row_number() OVER (ORDER BY salary ASC) is found in WHERE in query" - sql = ("SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10") + sql = "SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10" with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_window_function_in_join(self): - """Check that trying to use window function in `JOIN` returns an error. - """ + """Check that trying to use window function in `JOIN` returns an error.""" exitcode = 147 message = "DB::Exception: Cannot get JOIN keys from JOIN ON section: row_number() OVER (ORDER BY salary ASC) < 10" - sql = ("SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10") + sql = "SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10" with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_window_function_in_group_by(self): - """Check that trying to use window function in `GROUP BY` returns an error. - """ + """Check that trying to use window function in `GROUP BY` returns an error.""" exitcode = 47 message = "DB::Exception: Unknown identifier: row_number() OVER (ORDER BY salary ASC); there are columns" - sql = ("SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY row_number() OVER (ORDER BY salary) < 10") + sql = "SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY row_number() OVER (ORDER BY salary) < 10" with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_window_function_in_having(self): - """Check that trying to use window function in `HAVING` returns an error. - """ + """Check that trying to use window function in `HAVING` returns an error.""" exitcode = 184 message = "DB::Exception: Window function row_number() OVER (ORDER BY salary ASC) is found in HAVING in query" - sql = ("SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY salary HAVING row_number() OVER (ORDER BY salary) < 10") + sql = "SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY salary HAVING row_number() OVER (ORDER BY salary) < 10" with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_select_from_window(self): - """Check that trying to use window function in `FROM` returns an error. 
- """ + """Check that trying to use window function in `FROM` returns an error.""" exitcode = 46 message = "DB::Exception: Unknown table function rank" - sql = ("SELECT * FROM rank() OVER (ORDER BY random())") + sql = "SELECT * FROM rank() OVER (ORDER BY random())" with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_window_function_in_alter_delete_where(self): - """Check that trying to use window function in `ALTER DELETE`'s `WHERE` clause returns an error. - """ + """Check that trying to use window function in `ALTER DELETE`'s `WHERE` clause returns an error.""" if self.context.distributed: exitcode = 48 message = "Exception: Table engine Distributed doesn't support mutations" @@ -99,39 +100,39 @@ def error_window_function_in_alter_delete_where(self): exitcode = 184 message = "DB::Exception: Window function rank() OVER (ORDER BY random() ASC) is found in WHERE in query" - sql = ("ALTER TABLE empsalary DELETE WHERE (rank() OVER (ORDER BY random())) > 10") + sql = "ALTER TABLE empsalary DELETE WHERE (rank() OVER (ORDER BY random())) > 10" with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_named_window_defined_twice(self): - """Check that trying to define named window twice. - """ + """Check that trying to define named window twice.""" exitcode = 36 message = "DB::Exception: Window 'w' is defined twice in the WINDOW clause" - sql = ("SELECT count(*) OVER w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY unique1)") + sql = "SELECT count(*) OVER w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY unique1)" with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_coma_between_partition_by_and_order_by_clause(self): - """Check that trying to use a coma between partition by and order by clause. - """ + """Check that trying to use a coma between partition by and order by clause.""" exitcode = 62 message = "DB::Exception: Syntax error" - sql = ("SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1") + sql = "SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1" with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestFeature @Name("errors") def feature(self): - """Check different error conditions. 
- """ + """Check different error conditions.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/feature.py b/tests/testflows/window_functions/tests/feature.py index f6c565d116b..c1454a419e9 100755 --- a/tests/testflows/window_functions/tests/feature.py +++ b/tests/testflows/window_functions/tests/feature.py @@ -6,10 +6,25 @@ from window_functions.requirements import * @TestOutline(Feature) @Name("tests") -@Examples("distributed", [ - (False, Name("non distributed"),Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_NonDistributedTables("1.0"))), - (True, Name("distributed"), Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_DistributedTables("1.0"))) -]) +@Examples( + "distributed", + [ + ( + False, + Name("non distributed"), + Requirements( + RQ_SRS_019_ClickHouse_WindowFunctions_NonDistributedTables("1.0") + ), + ), + ( + True, + Name("distributed"), + Requirements( + RQ_SRS_019_ClickHouse_WindowFunctions_DistributedTables("1.0") + ), + ), + ], +) def feature(self, distributed, node="clickhouse1"): """Check window functions behavior using non-distributed or distributed tables. diff --git a/tests/testflows/window_functions/tests/frame_clause.py b/tests/testflows/window_functions/tests/frame_clause.py index 9c15ace286d..e2ae136ac20 100644 --- a/tests/testflows/window_functions/tests/frame_clause.py +++ b/tests/testflows/window_functions/tests/frame_clause.py @@ -3,15 +3,14 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestFeature -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_FrameClause_DefaultFrame("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_FrameClause_DefaultFrame("1.0")) def default_frame(self): - """Check default frame. - """ + """Check default frame.""" with Scenario("with order by"): - expected = convert_output(""" + expected = convert_output( + """ number | sum ---------+------ 1 | 2 @@ -19,15 +18,17 @@ def default_frame(self): 2 | 4 3 | 10 3 | 10 - """) + """ + ) execute_query( "select number, sum(number) OVER (ORDER BY number) AS sum FROM values('number Int8', (1),(1),(2),(3),(3))", - expected=expected + expected=expected, ) with Scenario("without order by"): - expected = convert_output(""" + expected = convert_output( + """ number | sum ---------+------ 1 | 10 @@ -35,13 +36,15 @@ def default_frame(self): 2 | 10 3 | 10 3 | 10 - """) + """ + ) execute_query( "select number, sum(number) OVER () AS sum FROM values('number Int8', (1),(1),(2),(3),(3))", - expected=expected + expected=expected, ) + @TestFeature @Name("frame clause") @Requirements( @@ -56,11 +59,10 @@ def default_frame(self): RQ_SRS_019_ClickHouse_WindowFunctions_ExprPreceding("1.0"), RQ_SRS_019_ClickHouse_WindowFunctions_ExprFollowing("1.0"), RQ_SRS_019_ClickHouse_WindowFunctions_ExprPreceding_ExprValue("1.0"), - RQ_SRS_019_ClickHouse_WindowFunctions_ExprFollowing_ExprValue("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_ExprFollowing_ExprValue("1.0"), ) def feature(self): - """Check defining frame clause. 
- """ + """Check defining frame clause.""" Feature(run=default_frame, flags=TE) Feature(run=load("window_functions.tests.rows_frame", "feature"), flags=TE) Feature(run=load("window_functions.tests.range_frame", "feature"), flags=TE) diff --git a/tests/testflows/window_functions/tests/funcs.py b/tests/testflows/window_functions/tests/funcs.py index 4526e6c9c4a..7060aed9e51 100644 --- a/tests/testflows/window_functions/tests/funcs.py +++ b/tests/testflows/window_functions/tests/funcs.py @@ -3,14 +3,13 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_FirstValue("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_FirstValue("1.0")) def first_value(self): - """Check `first_value` function. - """ - expected = convert_output(""" + """Check `first_value` function.""" + expected = convert_output( + """ first_value | ten | four -------------+-----+------ 0 | 0 | 0 @@ -23,31 +22,34 @@ def first_value(self): 0 | 0 | 2 1 | 1 | 3 1 | 3 | 3 - """) + """ + ) with Example("using first_value"): execute_query( "SELECT first_value(ten) OVER (PARTITION BY four ORDER BY ten) AS first_value, ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) with Example("using any equivalent"): execute_query( "SELECT any(ten) OVER (PARTITION BY four ORDER BY ten) AS first_value, ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_LastValue("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_LastValue("1.0")) def last_value(self): - """Check `last_value` function. - """ - with Example("order by window", description=""" + """Check `last_value` function.""" + with Example( + "order by window", + description=""" Check that last_value returns the last row of the frame that is CURRENT ROW in ORDER BY window - """): - expected = convert_output(""" + """, + ): + expected = convert_output( + """ last_value | ten | four ------------+-----+------ 0 | 0 | 0 @@ -60,24 +62,29 @@ def last_value(self): 0 | 4 | 0 1 | 7 | 1 1 | 9 | 1 - """) + """ + ) with Check("using last_value"): execute_query( "SELECT last_value(four) OVER (ORDER BY ten, four) AS last_value, ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) with Check("using anyLast() equivalent"): execute_query( "SELECT anyLast(four) OVER (ORDER BY ten, four) AS last_value, ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) - with Example("partition by window", description=""" + with Example( + "partition by window", + description=""" Check that last_value returns the last row of the frame that is CURRENT ROW in ORDER BY window - """): - expected = convert_output(""" + """, + ): + expected = convert_output( + """ last_value | ten | four ------------+-----+------ 4 | 0 | 0 @@ -90,14 +97,15 @@ def last_value(self): 0 | 0 | 2 3 | 1 | 3 3 | 3 | 3 - """) + """ + ) with Check("using last_value"): execute_query( """SELECT last_value(ten) OVER (PARTITION BY four) AS last_value, ten, four FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) ORDER BY four, ten""", - expected=expected + expected=expected, ) with Check("using anyLast() equivalent"): @@ -105,18 +113,17 @@ def last_value(self): """SELECT anyLast(ten) OVER (PARTITION BY four) AS last_value, ten, four FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, 
ten) ORDER BY four, ten""", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_Lag_Workaround("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_Lag_Workaround("1.0")) def lag(self): - """Check `lag` function workaround. - """ + """Check `lag` function workaround.""" with Example("anyOrNull"): - expected = convert_output(""" + expected = convert_output( + """ lag | ten | four -----+-----+------ \\N | 0 | 0 @@ -129,15 +136,17 @@ def lag(self): \\N | 0 | 2 \\N | 1 | 3 1 | 3 | 3 - """) + """ + ) execute_query( "SELECT anyOrNull(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS lag , ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) with Example("any"): - expected = convert_output(""" + expected = convert_output( + """ lag | ten | four -----+-----+------ 0 | 0 | 0 @@ -150,15 +159,17 @@ def lag(self): 0 | 0 | 2 0 | 1 | 3 1 | 3 | 3 - """) + """ + ) execute_query( "SELECT any(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS lag , ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) with Example("anyOrNull with column value as offset"): - expected = convert_output(""" + expected = convert_output( + """ lag | ten | four -----+-----+------ 0 | 0 | 0 @@ -171,22 +182,22 @@ def lag(self): \\N | 0 | 2 \\N | 1 | 3 \\N | 3 | 3 - """) + """ + ) execute_query( "SELECT any(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN four PRECEDING AND four PRECEDING) AS lag , ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_Lead_Workaround("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_Lead_Workaround("1.0")) def lead(self): - """Check `lead` function workaround. 
- """ + """Check `lead` function workaround.""" with Example("anyOrNull"): - expected = convert_output(""" + expected = convert_output( + """ lead | ten | four ------+-----+------ 0 | 0 | 0 @@ -199,15 +210,17 @@ def lead(self): \\N | 0 | 2 3 | 1 | 3 \\N | 3 | 3 - """) + """ + ) execute_query( "SELECT anyOrNull(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING) AS lead, ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) with Example("any"): - expected = convert_output(""" + expected = convert_output( + """ lead | ten | four ------+-----+------ 0 | 0 | 0 @@ -220,15 +233,17 @@ def lead(self): 0 | 0 | 2 3 | 1 | 3 0 | 3 | 3 - """) + """ + ) execute_query( "SELECT any(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING) AS lead, ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) with Example("any with arithmetic expr"): - expected = convert_output(""" + expected = convert_output( + """ lead | ten | four ------+-----+------ 0 | 0 | 0 @@ -241,15 +256,17 @@ def lead(self): 0 | 0 | 2 6 | 1 | 3 0 | 3 | 3 - """) + """ + ) execute_query( "SELECT any(ten * 2) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN 1 FOLLOWING AND 1 FOLLOWING) AS lead, ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) with Example("subquery as offset"): - expected = convert_output(""" + expected = convert_output( + """ lead ------ 0 @@ -262,22 +279,22 @@ def lead(self): 0 3 \\N - """) + """ + ) execute_query( "SELECT anyNull(ten) OVER (PARTITION BY four ORDER BY ten ROWS BETWEEN (SELECT two FROM tenk1 WHERE unique2 = unique2) FOLLOWING AND (SELECT two FROM tenk1 WHERE unique2 = unique2) FOLLOWING) AS lead " "FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowNumber("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_RowNumber("1.0")) def row_number(self): - """Check `row_number` function. - """ - expected = convert_output(""" + """Check `row_number` function.""" + expected = convert_output( + """ row_number ------------ 1 @@ -290,21 +307,21 @@ def row_number(self): 8 9 10 - """) + """ + ) execute_query( "SELECT row_number() OVER (ORDER BY unique2) AS row_number FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_Rank("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_Rank("1.0")) def rank(self): - """Check `rank` function. - """ - expected = convert_output(""" + """Check `rank` function.""" + expected = convert_output( + """ rank_1 | ten | four --------+-----+------ 1 | 0 | 0 @@ -317,21 +334,21 @@ def rank(self): 1 | 0 | 2 1 | 1 | 3 2 | 3 | 3 - """) + """ + ) execute_query( "SELECT rank() OVER (PARTITION BY four ORDER BY ten) AS rank_1, ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_DenseRank("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_DenseRank("1.0")) def dense_rank(self): - """Check `dense_rank` function. 
- """ - expected = convert_output(""" + """Check `dense_rank` function.""" + expected = convert_output( + """ dense_rank | ten | four ------------+-----+------ 1 | 0 | 0 @@ -344,18 +361,20 @@ def dense_rank(self): 1 | 0 | 2 1 | 1 | 3 2 | 3 | 3 - """) + """ + ) execute_query( "SELECT dense_rank() OVER (PARTITION BY four ORDER BY ten) AS dense_rank, ten, four FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) + @TestScenario def last_value_with_no_frame(self): - """Check last_value function with no frame. - """ - expected = convert_output(""" + """Check last_value function with no frame.""" + expected = convert_output( + """ four | ten | sum | last_value ------+-----+-----+------------ 0 | 0 | 0 | 0 @@ -378,24 +397,26 @@ def last_value_with_no_frame(self): 3 | 5 | 9 | 5 3 | 7 | 16 | 7 3 | 9 | 25 | 9 - """) + """ + ) execute_query( "SELECT four, ten, sum(ten) over (partition by four order by ten) AS sum, " "last_value(ten) over (partition by four order by ten) AS last_value " "FROM (select distinct ten, four from tenk1)", - expected=expected + expected=expected, ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_LastValue("1.0"), RQ_SRS_019_ClickHouse_WindowFunctions_Lag_Workaround("1.0"), ) def last_value_with_lag_workaround(self): - """Check last value with lag workaround. - """ - expected = convert_output(""" + """Check last value with lag workaround.""" + expected = convert_output( + """ last_value | lag | salary ------------+------+-------- 4500 | 0 | 3500 @@ -408,24 +429,26 @@ def last_value_with_lag_workaround(self): 6000 | 5000 | 5200 6000 | 5200 | 5200 6000 | 5200 | 6000 - """) + """ + ) execute_query( "select last_value(salary) over(order by salary range between 1000 preceding and 1000 following) AS last_value, " "any(salary) over(order by salary rows between 1 preceding and 1 preceding) AS lag, " "salary from empsalary", - expected=expected + expected=expected, ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_FirstValue("1.0"), - RQ_SRS_019_ClickHouse_WindowFunctions_Lead_Workaround("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_Lead_Workaround("1.0"), ) def first_value_with_lead_workaround(self): - """Check first value with lead workaround. - """ - expected = convert_output(""" + """Check first value with lead workaround.""" + expected = convert_output( + """ first_value | lead | salary -------------+------+-------- 3500 | 3900 | 3500 @@ -438,24 +461,24 @@ def first_value_with_lead_workaround(self): 4200 | 5200 | 5200 4200 | 6000 | 5200 5000 | 0 | 6000 - """) + """ + ) execute_query( "select first_value(salary) over(order by salary range between 1000 preceding and 1000 following) AS first_value, " "any(salary) over(order by salary rows between 1 following and 1 following) AS lead," "salary from empsalary", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_LeadInFrame("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_LeadInFrame("1.0")) def leadInFrame(self): - """Check `leadInFrame` function. 
- """ + """Check `leadInFrame` function.""" with Example("non default offset"): - expected = convert_output(""" + expected = convert_output( + """ empno | salary | lead --------+--------+------- 1 | 5000 | 5000 @@ -468,15 +491,17 @@ def leadInFrame(self): 9 | 4500 | 4500 10 | 5200 | 5200 11 | 5200 | 5200 - """) + """ + ) execute_query( "select empno, salary, leadInFrame(salary,0) OVER (ORDER BY salary) AS lead FROM empsalary ORDER BY empno", - expected=expected + expected=expected, ) with Example("default offset"): - expected = convert_output(""" + expected = convert_output( + """ empno | salary | lead --------+--------+------- 1 | 5000 | 0 @@ -489,15 +514,17 @@ def leadInFrame(self): 9 | 4500 | 0 10 | 5200 | 5200 11 | 5200 | 0 - """) + """ + ) execute_query( "select empno, salary, leadInFrame(salary) OVER (ORDER BY salary) AS lead FROM (SELECT * FROM empsalary ORDER BY empno)", - expected=expected + expected=expected, ) with Example("explicit default value"): - expected = convert_output(""" + expected = convert_output( + """ empno | salary | lead --------+--------+------- 1 | 5000 | 8 @@ -510,15 +537,17 @@ def leadInFrame(self): 9 | 4500 | 8 10 | 5200 | 5200 11 | 5200 | 8 - """) + """ + ) execute_query( "select empno, salary, leadInFrame(salary,1,8) OVER (ORDER BY salary) AS lead FROM empsalary ORDER BY empno", - expected=expected + expected=expected, ) with Example("without order by"): - expected = convert_output(""" + expected = convert_output( + """ empno | salary | lead --------+--------+------- 1 | 5000 | 3900 @@ -531,15 +560,17 @@ def leadInFrame(self): 9 | 4500 | 5200 10 | 5200 | 5200 11 | 5200 | 0 - """) + """ + ) execute_query( "select empno, salary, leadInFrame(salary) OVER () AS lead FROM (SELECT * FROM empsalary ORDER BY empno)", - expected=expected + expected=expected, ) with Example("with nulls"): - expected = convert_output(""" + expected = convert_output( + """ number | lead --------+----- 1 | 1 @@ -547,22 +578,22 @@ def leadInFrame(self): 2 | 3 3 | 0 \\N | 0 - """) + """ + ) execute_query( "select number, leadInFrame(number,1,0) OVER () AS lead FROM values('number Nullable(Int8)', (1),(1),(2),(3),(NULL))", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_LagInFrame("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_LagInFrame("1.0")) def lagInFrame(self): - """Check `lagInFrame` function. 
- """ + """Check `lagInFrame` function.""" with Example("non default offset"): - expected = convert_output(""" + expected = convert_output( + """ empno | salary | lag --------+--------+------- 1 | 5000 | 5000 @@ -575,15 +606,17 @@ def lagInFrame(self): 9 | 4500 | 4500 10 | 5200 | 5200 11 | 5200 | 5200 - """) + """ + ) execute_query( "select empno, salary, lagInFrame(salary,0) OVER (ORDER BY salary) AS lag FROM empsalary ORDER BY empno", - expected=expected + expected=expected, ) with Example("default offset"): - expected = convert_output(""" + expected = convert_output( + """ empno | salary | lag --------+--------+------- 5 | 3500 | 0 @@ -596,15 +629,17 @@ def lagInFrame(self): 10 | 5200 | 5000 11 | 5200 | 5200 8 | 6000 | 5200 - """) + """ + ) execute_query( "select empno, salary, lagInFrame(salary) OVER (ORDER BY salary) AS lag FROM (SELECT * FROM empsalary ORDER BY empno)", - expected=expected + expected=expected, ) with Example("explicit default value"): - expected = convert_output(""" + expected = convert_output( + """ empno | salary | lag --------+--------+------- 1 | 5000 | 4800 @@ -617,15 +652,17 @@ def lagInFrame(self): 9 | 4500 | 4200 10 | 5200 | 5000 11 | 5200 | 5200 - """) + """ + ) execute_query( "select empno, salary, lagInFrame(salary,1,8) OVER (ORDER BY salary) AS lag FROM empsalary ORDER BY empno", - expected=expected + expected=expected, ) with Example("without order by"): - expected = convert_output(""" + expected = convert_output( + """ empno | salary | lag --------+--------+------- 1 | 5000 | 0 @@ -638,15 +675,17 @@ def lagInFrame(self): 9 | 4500 | 6000 10 | 5200 | 4500 11 | 5200 | 5200 - """) + """ + ) execute_query( "select empno, salary, lagInFrame(salary) OVER () AS lag FROM (SELECT * FROM empsalary ORDER BY empno)", - expected=expected + expected=expected, ) with Example("with nulls"): - expected = convert_output(""" + expected = convert_output( + """ number | lag --------+----- 1 | 0 @@ -654,17 +693,18 @@ def lagInFrame(self): 2 | 1 3 | 2 \\N | 3 - """) + """ + ) execute_query( "select number, lagInFrame(number,1,0) OVER () AS lag FROM values('number Nullable(Int8)', (1),(1),(2),(3),(NULL))", - expected=expected + expected=expected, ) + @TestFeature @Name("funcs") def feature(self): - """Check true window functions. - """ + """Check true window functions.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/misc.py b/tests/testflows/window_functions/tests/misc.py index aca24edfe9c..5cb579c3954 100644 --- a/tests/testflows/window_functions/tests/misc.py +++ b/tests/testflows/window_functions/tests/misc.py @@ -3,11 +3,12 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario def subquery_expr_preceding(self): - """Check using subquery expr in preceding. - """ - expected = convert_output(""" + """Check using subquery expr in preceding.""" + expected = convert_output( + """ sum | unique1 -----+--------- 0 | 0 @@ -20,21 +21,23 @@ def subquery_expr_preceding(self): 13 | 7 15 | 8 17 | 9 - """) + """ + ) execute_query( "SELECT sum(unique1) over " "(order by unique1 rows (SELECT unique1 FROM tenk1 ORDER BY unique1 LIMIT 1) + 1 PRECEDING) AS sum, " "unique1 " "FROM tenk1 WHERE unique1 < 10", - expected=expected + expected=expected, ) + @TestScenario def window_functions_in_select_expression(self): - """Check using multiple window functions in an expression. 
- """ - expected = convert_output(""" + """Check using multiple window functions in an expression.""" + expected = convert_output( + """ cntsum -------- 22 @@ -47,23 +50,26 @@ def window_functions_in_select_expression(self): 51 92 136 - """) + """ + ) execute_query( "SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) + " "sum(hundred) OVER (PARTITION BY four ORDER BY ten)) AS cntsum " "FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) + @TestScenario def window_functions_in_subquery(self): - """Check using window functions in a subquery. - """ - expected = convert_output(""" + """Check using window functions in a subquery.""" + expected = convert_output( + """ total | fourcount | twosum -------+-----------+-------- - """) + """ + ) execute_query( "SELECT * FROM (" @@ -73,14 +79,15 @@ def window_functions_in_subquery(self): " sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum " " FROM tenk1 " ") WHERE total <> fourcount + twosum", - expected=expected + expected=expected, ) + @TestScenario def group_by_and_one_window(self): - """Check running window function with group by and one window. - """ - expected = convert_output(""" + """Check running window function with group by and one window.""" + expected = convert_output( + """ four | ten | sum | avg ------+-----+------+------------------------ 0 | 0 | 0 | 0 @@ -103,24 +110,27 @@ def group_by_and_one_window(self): 3 | 5 | 7500 | 5 3 | 7 | 7500 | 7 3 | 9 | 7500 | 9 - """) + """ + ) execute_query( "SELECT four, ten, SUM(SUM(four)) OVER (PARTITION BY four) AS sum, AVG(ten) AS avg FROM tenk1 GROUP BY four, ten ORDER BY four, ten", expected=expected, ) + @TestScenario def group_by_and_multiple_windows(self): - """Check running window function with group by and multiple windows. - """ - expected = convert_output(""" + """Check running window function with group by and multiple windows.""" + expected = convert_output( + """ sum1 | row_number | sum2 -------+------------+------- 25100 | 1 | 47100 7400 | 2 | 22000 14600 | 3 | 14600 - """) + """ + ) execute_query( "SELECT sum(salary) AS sum1, row_number() OVER (ORDER BY depname) AS row_number, " @@ -129,11 +139,12 @@ def group_by_and_multiple_windows(self): expected=expected, ) + @TestScenario def query_with_order_by_and_one_window(self): - """Check using a window function in the query that has `ORDER BY` clause. - """ - expected = convert_output(""" + """Check using a window function in the query that has `ORDER BY` clause.""" + expected = convert_output( + """ depname | empno | salary | rank ----------+----------+--------+--------- sales | 3 | 4800 | 1 @@ -146,45 +157,51 @@ def query_with_order_by_and_one_window(self): develop | 10 | 5200 | 3 develop | 11 | 5200 | 4 develop | 8 | 6000 | 5 - """) + """ + ) execute_query( "SELECT depname, empno, salary, rank() OVER w AS rank FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary, empno) ORDER BY rank() OVER w, empno", - expected=expected + expected=expected, ) + @TestScenario def with_union_all(self): - """Check using window over rows obtained with `UNION ALL`. - """ - expected = convert_output(""" + """Check using window over rows obtained with `UNION ALL`.""" + expected = convert_output( + """ count ------- - """) + """ + ) execute_query( "SELECT count(*) OVER (PARTITION BY four) AS count FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk1) LIMIT 0", - expected=expected + expected=expected, ) + @TestScenario def empty_table(self): - """Check using an empty table with a window function. 
- """ - expected = convert_output(""" + """Check using an empty table with a window function.""" + expected = convert_output( + """ count ------- - """) + """ + ) execute_query( "SELECT count(*) OVER (PARTITION BY four) AS count FROM (SELECT * FROM tenk1 WHERE 0)", - expected=expected + expected=expected, ) + @TestScenario def from_subquery(self): - """Check using a window function over data from subquery. - """ - expected = convert_output(""" + """Check using a window function over data from subquery.""" + expected = convert_output( + """ count | four -------+------ 4 | 1 @@ -193,20 +210,22 @@ def from_subquery(self): 4 | 1 2 | 3 2 | 3 - """) + """ + ) execute_query( "SELECT count(*) OVER (PARTITION BY four) AS count, four FROM (SELECT * FROM tenk1 WHERE two = 1) WHERE unique2 < 10", - expected=expected + expected=expected, ) + @TestScenario def groups_frame(self): - """Check using `GROUPS` frame. - """ + """Check using `GROUPS` frame.""" exitcode, message = groups_frame_error() - expected = convert_output(""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 12 | 0 | 0 @@ -219,16 +238,20 @@ def groups_frame(self): 35 | 2 | 2 45 | 3 | 3 45 | 7 | 3 - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT sum(unique1) over (order by four groups between unbounded preceding and current row), unique1, four FROM tenk1 WHERE unique1 < 10 """, - exitcode=exitcode, message=message + exitcode=exitcode, + message=message, ) + @TestScenario def count_with_empty_over_clause_without_start(self): """Check that we can use `count()` window function without passing @@ -237,7 +260,7 @@ def count_with_empty_over_clause_without_start(self): exitcode = 0 message = "1" - sql = ("SELECT count() OVER () FROM tenk1 LIMIT 1") + sql = "SELECT count() OVER () FROM tenk1 LIMIT 1" with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) @@ -245,17 +268,19 @@ def count_with_empty_over_clause_without_start(self): @TestScenario def subquery_multiple_window_functions(self): - """Check using multiple window functions is a subquery. - """ - expected = convert_output(""" + """Check using multiple window functions is a subquery.""" + expected = convert_output( + """ depname | depsalary | depminsalary --------+-------------+-------------- sales | 5000 | 5000 sales | 9800 | 4800 sales | 14600 | 4800 - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT * FROM (SELECT depname, sum(salary) OVER (PARTITION BY depname order by empno) AS depsalary, @@ -263,15 +288,17 @@ def subquery_multiple_window_functions(self): FROM empsalary) WHERE depname = 'sales' """, - expected=expected + expected=expected, ) + @TestScenario def windows_with_same_partitioning_but_different_ordering(self): """Check using using two windows that use the same partitioning but different ordering. """ - expected = convert_output(""" + expected = convert_output( + """ first | last ------+----- 7 | 7 @@ -284,22 +311,25 @@ def windows_with_same_partitioning_but_different_ordering(self): 3 | 3 3 | 4 3 | 1 - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT any(empno) OVER (PARTITION BY depname ORDER BY salary, enroll_date) AS first, anyLast(empno) OVER (PARTITION BY depname ORDER BY salary,enroll_date,empno) AS last FROM empsalary """, - expected=expected + expected=expected, ) + @TestScenario def subquery_with_multiple_windows_filtering(self): - """Check filtering rows from a subquery that uses multiple window functions. 
- """ - expected = convert_output(""" + """Check filtering rows from a subquery that uses multiple window functions.""" + expected = convert_output( + """ depname | empno | salary | enroll_date | first_emp | last_emp ----------+-------+----------+--------------+-------------+---------- develop | 8 | 6000 | 2006-10-01 | 1 | 5 @@ -308,9 +338,11 @@ def subquery_with_multiple_windows_filtering(self): personnel | 5 | 3500 | 2007-12-10 | 2 | 1 sales | 1 | 5000 | 2006-10-01 | 1 | 3 sales | 4 | 4800 | 2007-08-08 | 3 | 1 - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT * FROM (SELECT depname, empno, @@ -321,16 +353,17 @@ def subquery_with_multiple_windows_filtering(self): FROM empsalary) emp WHERE first_emp = 1 OR last_emp = 1 """, - expected=expected + expected=expected, ) + @TestScenario def exclude_clause(self): - """Check if exclude clause is supported. - """ + """Check if exclude clause is supported.""" exitcode, message = syntax_error() - expected = convert_output(""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 7 | 4 | 0 @@ -343,19 +376,21 @@ def exclude_clause(self): 23 | 3 | 3 15 | 7 | 3 10 | 0 | 0 - """) + """ + ) execute_query( "SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude no others) AS sum," "unique1, four " "FROM tenk1 WHERE unique1 < 10", - exitcode=exitcode, message=message + exitcode=exitcode, + message=message, ) + @TestScenario def in_view(self): - """Check using a window function in a view. - """ + """Check using a window function in a view.""" with Given("I create a view"): sql = """ CREATE VIEW v_window AS @@ -364,7 +399,8 @@ def in_view(self): """ create_table(name="v_window", statement=sql) - expected = convert_output(""" + expected = convert_output( + """ number | sum_rows ---------+---------- 1 | 3 @@ -377,20 +413,16 @@ def in_view(self): 8 | 24 9 | 27 10 | 19 - """) - - execute_query( - "SELECT * FROM v_window", - expected=expected + """ ) + execute_query("SELECT * FROM v_window", expected=expected) + + @TestFeature @Name("misc") -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_FrameClause("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_FrameClause("1.0")) def feature(self): - """Check misc cases for frame clause. - """ + """Check misc cases for frame clause.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/order_clause.py b/tests/testflows/window_functions/tests/order_clause.py index 2dafe5dafc9..ce8bc3cbd8f 100644 --- a/tests/testflows/window_functions/tests/order_clause.py +++ b/tests/testflows/window_functions/tests/order_clause.py @@ -2,129 +2,142 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario def single_expr_asc(self): - """Check defining of order clause with single expr ASC. - """ - expected = convert_output(""" + """Check defining of order clause with single expr ASC.""" + expected = convert_output( + """ x | s | sum ----+---+----- 1 | a | 2 1 | b | 2 2 | b | 4 - """) + """ + ) execute_query( "SELECT x,s, sum(x) OVER (ORDER BY x ASC) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))", - expected=expected + expected=expected, ) + @TestScenario def single_expr_desc(self): - """Check defining of order clause with single expr DESC. 
- """ - expected = convert_output(""" + """Check defining of order clause with single expr DESC.""" + expected = convert_output( + """ x | s | sum ----+---+----- 2 | b | 2 1 | a | 4 1 | b | 4 - """) + """ + ) execute_query( "SELECT x,s, sum(x) OVER (ORDER BY x DESC) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MultipleExprs("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MultipleExprs("1.0")) def multiple_expr_desc_desc(self): - """Check defining of order clause with multiple exprs. - """ - expected = convert_output(""" + """Check defining of order clause with multiple exprs.""" + expected = convert_output( + """ x | s | sum --+---+---- 2 | b | 2 1 | b | 3 1 | a | 4 - """) + """ + ) execute_query( "SELECT x,s, sum(x) OVER (ORDER BY x DESC, s DESC) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MultipleExprs("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MultipleExprs("1.0")) def multiple_expr_asc_asc(self): - """Check defining of order clause with multiple exprs. - """ - expected = convert_output(""" + """Check defining of order clause with multiple exprs.""" + expected = convert_output( + """ x | s | sum ----+---+------ 1 | a | 1 1 | b | 2 2 | b | 4 - """) + """ + ) execute_query( "SELECT x,s, sum(x) OVER (ORDER BY x ASC, s ASC) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MultipleExprs("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MultipleExprs("1.0")) def multiple_expr_asc_desc(self): - """Check defining of order clause with multiple exprs. - """ - expected = convert_output(""" + """Check defining of order clause with multiple exprs.""" + expected = convert_output( + """ x | s | sum ----+---+------ 1 | b | 1 1 | a | 2 2 | b | 4 - """) + """ + ) execute_query( "SELECT x,s, sum(x) OVER (ORDER BY x ASC, s DESC) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))", - expected=expected + expected=expected, ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_MissingExpr_Error("1.0") ) def missing_expr_error(self): - """Check that defining of order clause with missing expr returns an error. - """ + """Check that defining of order clause with missing expr returns an error.""" exitcode = 62 message = "Exception: Syntax error: failed at position" - self.context.node.query("SELECT sum(number) OVER (ORDER BY) FROM numbers(1,3)", exitcode=exitcode, message=message) + self.context.node.query( + "SELECT sum(number) OVER (ORDER BY) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause_InvalidExpr_Error("1.0") ) def invalid_expr_error(self): - """Check that defining of order clause with invalid expr returns an error. 
- """ + """Check that defining of order clause with invalid expr returns an error.""" exitcode = 47 message = "Exception: Missing columns: 'foo'" - self.context.node.query("SELECT sum(number) OVER (ORDER BY foo) FROM numbers(1,3)", exitcode=exitcode, message=message) + self.context.node.query( + "SELECT sum(number) OVER (ORDER BY foo) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario def by_column(self): - """Check order by using a single column. - """ - expected = convert_output(""" + """Check order by using a single column.""" + expected = convert_output( + """ depname | empno | salary | rank -----------+-------+--------+------ develop | 7 | 4200 | 1 @@ -137,18 +150,20 @@ def by_column(self): sales | 1 | 5000 | 1 sales | 3 | 4800 | 1 sales | 4 | 4800 | 1 - """) + """ + ) execute_query( "SELECT depname, empno, salary, rank() OVER (PARTITION BY depname, empno ORDER BY salary) AS rank FROM empsalary", expected=expected, ) + @TestScenario def by_expr(self): - """Check order by with expression. - """ - expected = convert_output(""" + """Check order by with expression.""" + expected = convert_output( + """ avg ------------------------ 0 @@ -161,16 +176,19 @@ def by_expr(self): 2 3 3 - """) + """ + ) execute_query( "SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) AS avg FROM tenk1 WHERE unique2 < 10", expected=expected, ) + @TestScenario def by_expr_with_aggregates(self): - expected = convert_output(""" + expected = convert_output( + """ ten | res | rank -----+----------+------ 0 | 9976146 | 4 @@ -183,7 +201,8 @@ def by_expr_with_aggregates(self): 7 | 10120309 | 10 8 | 9991305 | 6 9 | 10040184 | 7 - """) + """ + ) execute_query( "select ten, sum(unique1) + sum(unique2) as res, rank() over (order by sum(unique1) + sum(unique2)) as rank " @@ -191,28 +210,27 @@ def by_expr_with_aggregates(self): expected=expected, ) + @TestScenario def by_a_non_integer_constant(self): - """Check if it is allowed to use a window with ordering by a non integer constant. - """ - expected = convert_output(""" + """Check if it is allowed to use a window with ordering by a non integer constant.""" + expected = convert_output( + """ rank ------ 1 - """) + """ + ) execute_query( - "SELECT rank() OVER (ORDER BY length('abc')) AS rank", - expected=expected + "SELECT rank() OVER (ORDER BY length('abc')) AS rank", expected=expected ) + @TestFeature @Name("order clause") -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_OrderClause("1.0")) def feature(self): - """Check defining order clause. - """ + """Check defining order clause.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/over_clause.py b/tests/testflows/window_functions/tests/over_clause.py index d02ddcee656..87fbd7fec0b 100644 --- a/tests/testflows/window_functions/tests/over_clause.py +++ b/tests/testflows/window_functions/tests/over_clause.py @@ -3,14 +3,13 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_EmptyOverClause("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_EmptyOverClause("1.0")) def empty(self): - """Check using empty over clause. 
- """ - expected = convert_output(""" + """Check using empty over clause.""" + expected = convert_output( + """ count ------- 10 @@ -23,22 +22,24 @@ def empty(self): 10 10 10 - """) + """ + ) execute_query( "SELECT COUNT(*) OVER () AS count FROM tenk1 WHERE unique2 < 10", - expected=expected + expected=expected, ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_EmptyOverClause("1.0"), - RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow("1.0"), ) def empty_named_window(self): - """Check using over clause with empty window. - """ - expected = convert_output(""" + """Check using over clause with empty window.""" + expected = convert_output( + """ count ------- 10 @@ -51,21 +52,23 @@ def empty_named_window(self): 10 10 10 - """) + """ + ) execute_query( "SELECT COUNT(*) OVER w AS count FROM tenk1 WHERE unique2 < 10 WINDOW w AS ()", - expected=expected + expected=expected, ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_AdHocWindow("1.0"), ) def adhoc_window(self): - """Check running aggregating `sum` function over an adhoc window. - """ - expected = convert_output(""" + """Check running aggregating `sum` function over an adhoc window.""" + expected = convert_output( + """ depname | empno | salary | sum -----------+-------+--------+------- develop | 7 | 4200 | 25100 @@ -78,60 +81,73 @@ def adhoc_window(self): sales | 3 | 4800 | 14600 sales | 4 | 4800 | 14600 sales | 1 | 5000 | 14600 - """) + """ + ) execute_query( "SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname) AS sum FROM empsalary ORDER BY depname, salary, empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_AdHocWindow_MissingWindowSpec_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_AdHocWindow_MissingWindowSpec_Error( + "1.0" + ) ) def missing_window_spec(self): - """Check missing window spec in over clause. - """ + """Check missing window spec in over clause.""" exitcode = 62 message = "Exception: Syntax error" - self.context.node.query("SELECT number,sum(number) OVER FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow_InvalidName_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow_InvalidName_Error( + "1.0" + ) ) def invalid_window_name(self): - """Check invalid window name. - """ + """Check invalid window name.""" exitcode = 47 message = "Exception: Window 'w3' is not defined" - self.context.node.query("SELECT number,sum(number) OVER w3 FROM values('number Int8', (1),(1),(2),(3)) WINDOW w1 AS ()", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER w3 FROM values('number Int8', (1),(1),(2),(3)) WINDOW w1 AS ()", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow_MultipleWindows_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_OverClause_NamedWindow_MultipleWindows_Error( + "1.0" + ) ) def invalid_multiple_windows(self): - """Check invalid multiple window names. 
- """ + """Check invalid multiple window names.""" exitcode = 47 message = "Exception: Missing columns" - self.context.node.query("SELECT number,sum(number) OVER w1, w2 FROM values('number Int8', (1),(1),(2),(3)) WINDOW w1 AS (), w2 AS (PARTITION BY number)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER w1, w2 FROM values('number Int8', (1),(1),(2),(3)) WINDOW w1 AS (), w2 AS (PARTITION BY number)", + exitcode=exitcode, + message=message, + ) @TestFeature @Name("over clause") -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_OverClause("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_OverClause("1.0")) def feature(self): - """Check defining frame clause. - """ + """Check defining frame clause.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/partition_clause.py b/tests/testflows/window_functions/tests/partition_clause.py index 3e9ebefe2ba..e8da74d0603 100644 --- a/tests/testflows/window_functions/tests/partition_clause.py +++ b/tests/testflows/window_functions/tests/partition_clause.py @@ -3,75 +3,82 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario def single_expr(self): - """Check defining of partition clause with single expr. - """ - expected = convert_output(""" + """Check defining of partition clause with single expr.""" + expected = convert_output( + """ x | s | sum ----+---+------ 1 | a | 2 1 | b | 2 2 | b | 2 - """) + """ + ) execute_query( "SELECT x,s, sum(x) OVER (PARTITION BY x) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause_MultipleExpr("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause_MultipleExpr("1.0")) def multiple_expr(self): - """Check defining of partition clause with multiple exprs. - """ - expected = convert_output(""" + """Check defining of partition clause with multiple exprs.""" + expected = convert_output( + """ x | s | sum --+---+---- 1 | a | 1 1 | b | 1 2 | b | 2 - """) + """ + ) execute_query( "SELECT x,s, sum(x) OVER (PARTITION BY x,s) AS sum FROM values('x Int8, s String', (1,'a'),(1,'b'),(2,'b'))", - expected=expected + expected=expected, ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause_MissingExpr_Error("1.0") ) def missing_expr_error(self): - """Check that defining of partition clause with missing expr returns an error. - """ + """Check that defining of partition clause with missing expr returns an error.""" exitcode = 62 message = "Exception: Syntax error: failed at position" - self.context.node.query("SELECT sum(number) OVER (PARTITION BY) FROM numbers(1,3)", exitcode=exitcode, message=message) + self.context.node.query( + "SELECT sum(number) OVER (PARTITION BY) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause_InvalidExpr_Error("1.0") ) def invalid_expr_error(self): - """Check that defining of partition clause with invalid expr returns an error. 
- """ + """Check that defining of partition clause with invalid expr returns an error.""" exitcode = 47 message = "Exception: Missing columns: 'foo'" - self.context.node.query("SELECT sum(number) OVER (PARTITION BY foo) FROM numbers(1,3)", exitcode=exitcode, message=message) + self.context.node.query( + "SELECT sum(number) OVER (PARTITION BY foo) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) @TestFeature @Name("partition clause") -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_PartitionClause("1.0")) def feature(self): - """Check defining partition clause. - """ + """Check defining partition clause.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/range_datetime.py b/tests/testflows/window_functions/tests/range_datetime.py index 0b34fdf43d4..8d335d41345 100644 --- a/tests/testflows/window_functions/tests/range_datetime.py +++ b/tests/testflows/window_functions/tests/range_datetime.py @@ -3,12 +3,14 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario def order_by_asc_range_between_days_preceding_and_days_following(self): """Check range between days preceding and days following with ascending order by. """ - expected = convert_output(""" + expected = convert_output( + """ sum | salary | enroll_date -------+--------+------------- 34900 | 5000 | 2006-10-01 @@ -21,19 +23,22 @@ def order_by_asc_range_between_days_preceding_and_days_following(self): 32200 | 4500 | 2008-01-01 47100 | 5200 | 2007-08-01 47100 | 5200 | 2007-08-15 - """) + """ + ) execute_query( "select sum(salary) over (order by enroll_date range between 365 preceding and 365 following) AS sum, " "salary, enroll_date from empsalary order by empno", - expected=expected + expected=expected, ) + @TestScenario def order_by_desc_range_between_days_preceding_and_days_following(self): """Check range between days preceding and days following with descending order by.""" - expected = convert_output(""" + expected = convert_output( + """ sum | salary | enroll_date -------+--------+------------- 34900 | 5000 | 2006-10-01 @@ -46,20 +51,23 @@ def order_by_desc_range_between_days_preceding_and_days_following(self): 32200 | 4500 | 2008-01-01 47100 | 5200 | 2007-08-01 47100 | 5200 | 2007-08-15 - """) + """ + ) execute_query( "select sum(salary) over (order by enroll_date desc range between 365 preceding and 365 following) AS sum, " "salary, enroll_date from empsalary order by empno", - expected=expected + expected=expected, ) + @TestScenario def order_by_desc_range_between_days_following_and_days_following(self): """Check range between days following and days following with descending order by. 
""" - expected = convert_output(""" + expected = convert_output( + """ sum | salary | enroll_date -------+--------+------------- 0 | 5000 | 2006-10-01 @@ -72,20 +80,23 @@ def order_by_desc_range_between_days_following_and_days_following(self): 0 | 4500 | 2008-01-01 0 | 5200 | 2007-08-01 0 | 5200 | 2007-08-15 - """) + """ + ) execute_query( "select sum(salary) over (order by enroll_date desc range between 365 following and 365 following) AS sum, " "salary, enroll_date from empsalary order by empno", - expected=expected + expected=expected, ) + @TestScenario def order_by_desc_range_between_days_preceding_and_days_preceding(self): """Check range between days preceding and days preceding with descending order by. """ - expected = convert_output(""" + expected = convert_output( + """ sum | salary | enroll_date -------+--------+------------- 0 | 5000 | 2006-10-01 @@ -98,20 +109,23 @@ def order_by_desc_range_between_days_preceding_and_days_preceding(self): 0 | 4500 | 2008-01-01 0 | 5200 | 2007-08-01 0 | 5200 | 2007-08-15 - """) + """ + ) execute_query( "select sum(salary) over (order by enroll_date desc range between 365 preceding and 365 preceding) AS sum, " "salary, enroll_date from empsalary order by empno", - expected=expected + expected=expected, ) + @TestScenario def datetime_with_timezone_order_by_asc_range_between_n_preceding_and_n_following(self): """Check range between preceding and following with DateTime column that has timezone using ascending order by. """ - expected = convert_output(""" + expected = convert_output( + """ id | f_timestamptz | first_value | last_value ----+------------------------------+-------------+------------ 1 | 2000-10-19 10:23:54 | 1 | 3 @@ -124,7 +138,8 @@ def datetime_with_timezone_order_by_asc_range_between_n_preceding_and_n_followin 8 | 2006-10-19 10:23:54 | 7 | 9 9 | 2007-10-19 10:23:54 | 8 | 10 10 | 2008-10-19 10:23:54 | 9 | 10 - """) + """ + ) execute_query( """ @@ -133,15 +148,19 @@ def datetime_with_timezone_order_by_asc_range_between_n_preceding_and_n_followin window w as (order by f_timestamptz range between 31622400 preceding and 31622400 following) order by id """, - expected=expected + expected=expected, ) + @TestScenario -def datetime_with_timezone_order_by_desc_range_between_n_preceding_and_n_following(self): +def datetime_with_timezone_order_by_desc_range_between_n_preceding_and_n_following( + self, +): """Check range between preceding and following with DateTime column that has timezone using descending order by. """ - expected = convert_output(""" + expected = convert_output( + """ id | f_timestamptz | first_value | last_value ----+------------------------------+-------------+------------ 10 | 2008-10-19 10:23:54 | 10 | 9 @@ -154,7 +173,8 @@ def datetime_with_timezone_order_by_desc_range_between_n_preceding_and_n_followi 3 | 2001-10-19 10:23:54 | 4 | 1 2 | 2001-10-19 10:23:54 | 4 | 1 1 | 2000-10-19 10:23:54 | 2 | 1 - """) + """ + ) execute_query( """ @@ -163,15 +183,17 @@ def datetime_with_timezone_order_by_desc_range_between_n_preceding_and_n_followi window w as (order by f_timestamptz desc range between 31622400 preceding and 31622400 following) order by id desc """, - expected=expected + expected=expected, ) + @TestScenario def datetime_order_by_asc_range_between_n_preceding_and_n_following(self): """Check range between preceding and following with DateTime column and ascending order by. 
""" - expected = convert_output(""" + expected = convert_output( + """ id | f_timestamp | first_value | last_value ----+------------------------------+-------------+------------ 1 | 2000-10-19 10:23:54 | 1 | 3 @@ -184,7 +206,8 @@ def datetime_order_by_asc_range_between_n_preceding_and_n_following(self): 8 | 2006-10-19 10:23:54 | 7 | 9 9 | 2007-10-19 10:23:54 | 8 | 10 10 | 2008-10-19 10:23:54 | 9 | 10 - """) + """ + ) execute_query( """ @@ -193,15 +216,17 @@ def datetime_order_by_asc_range_between_n_preceding_and_n_following(self): window w as (order by f_timestamp range between 31622400 preceding and 31622400 following) ORDER BY id """, - expected=expected + expected=expected, ) + @TestScenario def datetime_order_by_desc_range_between_n_preceding_and_n_following(self): """Check range between preceding and following with DateTime column and descending order by. """ - expected = convert_output(""" + expected = convert_output( + """ id | f_timestamp | first_value | last_value ----+------------------------------+-------------+------------ 10 | 2008-10-19 10:23:54 | 10 | 9 @@ -214,7 +239,8 @@ def datetime_order_by_desc_range_between_n_preceding_and_n_following(self): 2 | 2001-10-19 10:23:54 | 4 | 1 3 | 2001-10-19 10:23:54 | 4 | 1 1 | 2000-10-19 10:23:54 | 2 | 1 - """) + """ + ) execute_query( """ @@ -223,16 +249,16 @@ def datetime_order_by_desc_range_between_n_preceding_and_n_following(self): window w as (order by f_timestamp desc range between 31622400 preceding and 31622400 following) """, - expected=expected + expected=expected, ) + @TestFeature @Name("range datetime") @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_DataTypes_DateAndDateTime("1.0") ) def feature(self): - """Check `Date` and `DateTime` data time with range frames. - """ + """Check `Date` and `DateTime` data time with range frames.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/range_errors.py b/tests/testflows/window_functions/tests/range_errors.py index 67a9cfb14c9..958a4412b4f 100644 --- a/tests/testflows/window_functions/tests/range_errors.py +++ b/tests/testflows/window_functions/tests/range_errors.py @@ -3,9 +3,12 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_MultipleColumnsInOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_MultipleColumnsInOrderBy_Error( + "1.0" + ) ) def error_more_than_one_order_by_column(self): """Check that using more than one column in order by with range frame @@ -14,25 +17,30 @@ def error_more_than_one_order_by_column(self): exitcode = 36 message = "DB::Exception: Received from localhost:9000. DB::Exception: The RANGE OFFSET window frame requires exactly one ORDER BY column, 2 given" - sql = ("select sum(salary) over (order by enroll_date, salary range between 1 preceding and 2 following) AS sum, " - "salary, enroll_date from empsalary") + sql = ( + "select sum(salary) over (order by enroll_date, salary range between 1 preceding and 2 following) AS sum, " + "salary, enroll_date from empsalary" + ) with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_missing_order_by(self): - """Check that using range frame with offsets without order by returns an error. 
- """ + """Check that using range frame with offsets without order by returns an error.""" exitcode = 36 message = "DB::Exception: The RANGE OFFSET window frame requires exactly one ORDER BY column, 0 given" - sql = ("select sum(salary) over (range between 1 preceding and 2 following) AS sum, " - "salary, enroll_date from empsalary") + sql = ( + "select sum(salary) over (range between 1 preceding and 2 following) AS sum, " + "salary, enroll_date from empsalary" + ) with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_missing_order_by_with_partition_by_clause(self): """Check that range frame with offsets used with partition by but @@ -41,64 +49,66 @@ def error_missing_order_by_with_partition_by_clause(self): exitcode = 36 message = "DB::Exception: The RANGE OFFSET window frame requires exactly one ORDER BY column, 0 given" - sql = ("select f1, sum(f1) over (partition by f1 range between 1 preceding and 1 following) AS sum " - "from t1 where f1 = f2") + sql = ( + "select f1, sum(f1) over (partition by f1 range between 1 preceding and 1 following) AS sum " + "from t1 where f1 = f2" + ) with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario def error_range_over_non_numerical_column(self): - """Check that range over non numerical column returns an error. - """ + """Check that range over non numerical column returns an error.""" exitcode = 48 message = "DB::Exception: The RANGE OFFSET frame for 'DB::ColumnLowCardinality' ORDER BY column is not implemented" - sql = ("select sum(salary) over (order by depname range between 1 preceding and 2 following) as sum, " - "salary, enroll_date from empsalary") + sql = ( + "select sum(salary) over (order by depname range between 1 preceding and 2 following) as sum, " + "salary, enroll_date from empsalary" + ) with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_ExprPreceding_ExprValue("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_ExprPreceding_ExprValue("1.0")) def error_negative_preceding_offset(self): - """Check that non-positive value of preceding offset returns an error. - """ + """Check that non-positive value of preceding offset returns an error.""" exitcode = 36 message = "DB::Exception: Frame start offset must be greater than zero, -1 given" - sql = ("select max(enroll_date) over (order by salary range between -1 preceding and 2 following) AS max, " - "salary, enroll_date from empsalary") + sql = ( + "select max(enroll_date) over (order by salary range between -1 preceding and 2 following) AS max, " + "salary, enroll_date from empsalary" + ) with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_ExprFollowing_ExprValue("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_ExprFollowing_ExprValue("1.0")) def error_negative_following_offset(self): - """Check that non-positive value of following offset returns an error. 
- """ + """Check that non-positive value of following offset returns an error.""" exitcode = 36 message = "DB::Exception: Frame end offset must be greater than zero, -2 given" - sql = ("select max(enroll_date) over (order by salary range between 1 preceding and -2 following) AS max, " - "salary, enroll_date from empsalary") + sql = ( + "select max(enroll_date) over (order by salary range between 1 preceding and -2 following) AS max, " + "salary, enroll_date from empsalary" + ) with When("I execute query", description=sql): r = current().context.node.query(sql, exitcode=exitcode, message=message) + @TestFeature @Name("range errors") -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame("1.0")) def feature(self): - """Check different error conditions when usign range frame. - """ + """Check different error conditions when usign range frame.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/range_frame.py b/tests/testflows/window_functions/tests/range_frame.py index 71f00965547..186ca154068 100644 --- a/tests/testflows/window_functions/tests/range_frame.py +++ b/tests/testflows/window_functions/tests/range_frame.py @@ -3,41 +3,51 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_MissingFrameExtent_Error("1.0") ) def missing_frame_extent(self): - """Check that when range frame has missing frame extent then an error is returned. - """ + """Check that when range frame has missing frame extent then an error is returned.""" exitcode, message = syntax_error() - self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number RANGE) FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ORDER BY number RANGE) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_InvalidFrameExtent_Error("1.0") ) def invalid_frame_extent(self): - """Check that when range frame has invalid frame extent then an error is returned. - """ + """Check that when range frame has invalid frame extent then an error is returned.""" exitcode, message = syntax_error() - self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number RANGE '1') FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ORDER BY number RANGE '1') FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_CurrentRow_Peers("1.0"), - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_CurrentRow_WithoutOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_CurrentRow_WithoutOrderBy( + "1.0" + ), ) def start_current_row_without_order_by(self): """Check range current row frame without order by and that the peers of the current row are rows that have values in the same order bucket. In this case without order by clause all rows are the peers of the current row. 
""" - expected = convert_output(""" + expected = convert_output( + """ empno | salary | sum --------+--------+-------- 1 | 5000 | 47100 @@ -50,23 +60,28 @@ def start_current_row_without_order_by(self): 9 | 4500 | 47100 10 | 5200 | 47100 11 | 5200 | 47100 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, salary, sum(salary) OVER (RANGE CURRENT ROW) AS sum FROM empsalary) ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_CurrentRow_Peers("1.0"), - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_CurrentRow_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_CurrentRow_WithOrderBy( + "1.0" + ), ) def start_current_row_with_order_by(self): """Check range current row frame with order by and that the peers of the current row are rows that have values in the same order bucket. """ - expected = convert_output(""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 1 | sales | 5000 | 14600 @@ -79,38 +94,50 @@ def start_current_row_with_order_by(self): 9 | develop | 4500 | 25100 10 | develop | 5200 | 25100 11 | develop | 5200 | 25100 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (ORDER BY depname RANGE CURRENT ROW) AS sum FROM empsalary) ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_UnboundedFollowing_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_UnboundedFollowing_Error( + "1.0" + ) ) def start_unbounded_following_error(self): - """Check range current row frame with or without order by returns an error. - """ + """Check range current row frame with or without order by returns an error.""" exitcode, message = frame_start_error() with Example("without order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (RANGE UNBOUNDED FOLLOWING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (RANGE UNBOUNDED FOLLOWING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) with Example("with order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE UNBOUNDED FOLLOWING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE UNBOUNDED FOLLOWING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_UnboundedPreceding_WithoutOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_UnboundedPreceding_WithoutOrderBy( + "1.0" + ) ) def start_unbounded_preceding_without_order_by(self): - """Check range unbounded preceding frame without order by. 
- """ - expected = convert_output(""" + """Check range unbounded preceding frame without order by.""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 7 | develop | 4200 | 25100 @@ -118,21 +145,25 @@ def start_unbounded_preceding_without_order_by(self): 9 | develop | 4500 | 25100 10 | develop | 5200 | 25100 11 | develop | 5200 | 25100 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (RANGE UNBOUNDED PRECEDING) AS sum FROM empsalary WHERE depname = 'develop') ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_UnboundedPreceding_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_UnboundedPreceding_WithOrderBy( + "1.0" + ) ) def start_unbounded_preceding_with_order_by(self): - """Check range unbounded preceding frame with order by. - """ - expected = convert_output(""" + """Check range unbounded preceding frame with order by.""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 1 | sales | 5000 | 47100 @@ -145,45 +176,59 @@ def start_unbounded_preceding_with_order_by(self): 9 | develop | 4500 | 25100 10 | develop | 5200 | 25100 11 | develop | 5200 | 25100 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (ORDER BY depname RANGE UNBOUNDED PRECEDING) AS sum FROM empsalary) ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprFollowing_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprFollowing_WithoutOrderBy_Error( + "1.0" + ) ) def start_expr_following_without_order_by_error(self): - """Check range expr following frame without order by returns an error. - """ + """Check range expr following frame without order by returns an error.""" exitcode, message = window_frame_error() - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (RANGE 1 FOLLOWING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (RANGE 1 FOLLOWING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprFollowing_WithOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprFollowing_WithOrderBy_Error( + "1.0" + ) ) def start_expr_following_with_order_by_error(self): - """Check range expr following frame with order by returns an error. - """ + """Check range expr following frame with order by returns an error.""" exitcode, message = window_frame_error() - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE 1 FOLLOWING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE 1 FOLLOWING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprPreceding_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprPreceding_WithOrderBy( + "1.0" + ) ) def start_expr_preceding_with_order_by(self): - """Check range expr preceding frame with order by. 
- """ - expected = convert_output(""" + """Check range expr preceding frame with order by.""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 1 | sales | 5000 | 5000 @@ -196,46 +241,60 @@ def start_expr_preceding_with_order_by(self): 9 | develop | 4500 | 4500 10 | develop | 5200 | 10400 11 | develop | 5200 | 10400 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE 1 PRECEDING) AS sum FROM empsalary) ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprPreceding_OrderByNonNumericalColumn_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprPreceding_OrderByNonNumericalColumn_Error( + "1.0" + ) ) def start_expr_preceding_order_by_non_numerical_column_error(self): - """Check range expr preceding frame with order by non-numerical column returns an error. - """ + """Check range expr preceding frame with order by non-numerical column returns an error.""" exitcode, message = frame_range_offset_error() - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (ORDER BY depname RANGE 1 PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY depname RANGE 1 PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprPreceding_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Start_ExprPreceding_WithoutOrderBy_Error( + "1.0" + ) ) def start_expr_preceding_without_order_by_error(self): - """Check range expr preceding frame without order by returns an error. - """ + """Check range expr preceding frame without order by returns an error.""" exitcode, message = frame_requires_order_by_error() - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (RANGE 1 PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (RANGE 1 PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_CurrentRow("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_CurrentRow( + "1.0" + ) ) def between_current_row_and_current_row(self): - """Check range between current row and current row frame with or without order by. 
- """ + """Check range between current row and current row frame with or without order by.""" with Example("without order by"): - expected = convert_output(""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 7 | develop | 4200 | 25100 @@ -243,15 +302,17 @@ def between_current_row_and_current_row(self): 9 | develop | 4500 | 25100 10 | develop | 5200 | 25100 11 | develop | 5200 | 25100 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN CURRENT ROW AND CURRENT ROW) AS sum FROM empsalary WHERE depname = 'develop') ORDER BY empno", - expected=expected + expected=expected, ) with Example("with order by"): - expected = convert_output(""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+------ 7 | develop | 4200 | 4200 @@ -259,39 +320,51 @@ def between_current_row_and_current_row(self): 9 | develop | 4500 | 4500 10 | develop | 5200 | 5200 11 | develop | 5200 | 5200 - """) + """ + ) execute_query( "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY empno RANGE BETWEEN CURRENT ROW AND CURRENT ROW) AS sum FROM empsalary WHERE depname = 'develop'", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_UnboundedPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_UnboundedPreceding_Error( + "1.0" + ) ) def between_current_row_and_unbounded_preceding_error(self): - """Check range between current row and unbounded preceding frame with or without order by returns an error. - """ + """Check range between current row and unbounded preceding frame with or without order by returns an error.""" exitcode, message = frame_end_error() with Example("without order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) with Example("with order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_UnboundedFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_UnboundedFollowing( + "1.0" + ) ) def between_current_row_and_unbounded_following(self): - """Check range between current row and unbounded following frame with or without order by. 
- """ + """Check range between current row and unbounded following frame with or without order by.""" with Example("without order by"): - expected = convert_output(""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 7 | develop | 4200 | 25100 @@ -299,15 +372,17 @@ def between_current_row_and_unbounded_following(self): 9 | develop | 4500 | 25100 10 | develop | 5200 | 25100 11 | develop | 5200 | 25100 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS sum FROM empsalary WHERE depname = 'develop') ORDER BY empno", - expected=expected + expected=expected, ) with Example("with order by"): - expected = convert_output(""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 7 | develop | 4200 | 25100 @@ -315,15 +390,17 @@ def between_current_row_and_unbounded_following(self): 9 | develop | 4500 | 14900 10 | develop | 5200 | 10400 11 | develop | 5200 | 5200 - """) + """ + ) execute_query( "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY empno RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS sum FROM empsalary WHERE depname = 'develop'", - expected=expected + expected=expected, ) with Example("with order by from tenk1"): - expected = convert_output(""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 45 | 0 | 0 @@ -336,35 +413,44 @@ def between_current_row_and_unbounded_following(self): 10 | 7 | 3 45 | 8 | 0 33 | 9 | 1 - """) + """ + ) execute_query( "SELECT * FROM (SELECT sum(unique1) over (order by four range between current row and unbounded following) AS sum," "unique1, four " "FROM tenk1 WHERE unique1 < 10) ORDER BY unique1", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_ExprFollowing_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_ExprFollowing_WithoutOrderBy_Error( + "1.0" + ) ) def between_current_row_and_expr_following_without_order_by_error(self): - """Check range between current row and expr following frame without order by returns an error. - """ + """Check range between current row and expr following frame without order by returns an error.""" exitcode, message = frame_requires_order_by_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND 1 FOLLOWING) FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN CURRENT ROW AND 1 FOLLOWING) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_ExprFollowing_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_ExprFollowing_WithOrderBy( + "1.0" + ) ) def between_current_row_and_expr_following_with_order_by(self): - """Check range between current row and expr following frame with order by. 
- """ - expected = convert_output(""" + """Check range between current row and expr following frame with order by.""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 1 | sales | 5000 | 8900 @@ -377,39 +463,51 @@ def between_current_row_and_expr_following_with_order_by(self): 9 | develop | 4500 | 9700 10 | develop | 5200 | 10400 11 | develop | 5200 | 5200 - """) + """ + ) execute_query( "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY empno RANGE BETWEEN CURRENT ROW AND 1 FOLLOWING) AS sum FROM empsalary", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_ExprPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_CurrentRow_ExprPreceding_Error( + "1.0" + ) ) def between_current_row_and_expr_preceding_error(self): - """Check range between current row and expr preceding frame with or without order by returns an error. - """ + """Check range between current row and expr preceding frame with or without order by returns an error.""" exitcode, message = window_frame_error() with Example("without order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN CURRENT ROW AND 1 PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN CURRENT ROW AND 1 PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) with Example("with order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN CURRENT ROW AND 1 PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN CURRENT ROW AND 1 PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_CurrentRow("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_CurrentRow( + "1.0" + ) ) def between_unbounded_preceding_and_current_row(self): - """Check range between unbounded preceding and current row frame with and without order by. 
- """ + """Check range between unbounded preceding and current row frame with and without order by.""" with Example("with order by"): - expected = convert_output(""" + expected = convert_output( + """ four | ten | sum | last_value ------+-----+-----+------------ 0 | 0 | 0 | 0 @@ -432,18 +530,20 @@ def between_unbounded_preceding_and_current_row(self): 3 | 5 | 9 | 5 3 | 7 | 16 | 7 3 | 9 | 25 | 9 - """) + """ + ) execute_query( "SELECT four, ten," "sum(ten) over (partition by four order by ten range between unbounded preceding and current row) AS sum," "last_value(ten) over (partition by four order by ten range between unbounded preceding and current row) AS last_value " "FROM (select distinct ten, four from tenk1)", - expected=expected + expected=expected, ) with Example("without order by"): - expected = convert_output(""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 7 | develop | 4200 | 25100 @@ -451,39 +551,51 @@ def between_unbounded_preceding_and_current_row(self): 9 | develop | 4500 | 25100 10 | develop | 5200 | 25100 11 | develop | 5200 | 25100 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS sum FROM empsalary WHERE depname = 'develop') ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_UnboundedPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_UnboundedPreceding_Error( + "1.0" + ) ) def between_unbounded_preceding_and_unbounded_preceding_error(self): - """Check range between unbounded preceding and unbounded preceding frame with or without order by returns an error. - """ + """Check range between unbounded preceding and unbounded preceding frame with or without order by returns an error.""" exitcode, message = frame_end_error() with Example("without order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) with Example("with order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_UnboundedFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_UnboundedFollowing( + "1.0" + ) ) def between_unbounded_preceding_and_unbounded_following(self): - """Check range between unbounded preceding and unbounded following range with and without order by. 
- """ + """Check range between unbounded preceding and unbounded following range with and without order by.""" with Example("with order by"): - expected = convert_output(""" + expected = convert_output( + """ four | ten | sum | last_value ------+-----+-----+------------ 0 | 0 | 20 | 8 @@ -506,18 +618,20 @@ def between_unbounded_preceding_and_unbounded_following(self): 3 | 5 | 25 | 9 3 | 7 | 25 | 9 3 | 9 | 25 | 9 - """) + """ + ) execute_query( "SELECT four, ten, " "sum(ten) over (partition by four order by ten range between unbounded preceding and unbounded following) AS sum, " "last_value(ten) over (partition by four order by ten range between unbounded preceding and unbounded following) AS last_value " "FROM (select distinct ten, four from tenk1)", - expected=expected + expected=expected, ) with Example("without order by"): - expected = convert_output(""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 1 | sales | 5000 | 47100 @@ -530,45 +644,59 @@ def between_unbounded_preceding_and_unbounded_following(self): 9 | develop | 4500 | 47100 10 | develop | 5200 | 47100 11 | develop | 5200 | 47100 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary) ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprFollowing_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprFollowing_WithoutOrderBy_Error( + "1.0" + ) ) def between_unbounded_preceding_and_expr_following_without_order_by_error(self): - """Check range between unbounded preceding and expr following frame without order by returns an error. - """ + """Check range between unbounded preceding and expr following frame without order by returns an error.""" exitcode, message = frame_requires_order_by_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprPreceding_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprPreceding_WithoutOrderBy_Error( + "1.0" + ) ) def between_unbounded_preceding_and_expr_preceding_without_order_by_error(self): - """Check range between unbounded preceding and expr preceding frame without order by returns an error. 
- """ + """Check range between unbounded preceding and expr preceding frame without order by returns an error.""" exitcode, message = frame_requires_order_by_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprFollowing_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprFollowing_WithOrderBy( + "1.0" + ) ) def between_unbounded_preceding_and_expr_following_with_order_by(self): - """Check range between unbounded preceding and expr following frame with order by. - """ - expected = convert_output(""" + """Check range between unbounded preceding and expr following frame with order by.""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 1 | sales | 5000 | 41100 @@ -581,21 +709,25 @@ def between_unbounded_preceding_and_expr_following_with_order_by(self): 9 | develop | 4500 | 30700 10 | develop | 5200 | 41100 11 | develop | 5200 | 41100 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED PRECEDING AND 500 FOLLOWING) AS sum FROM empsalary) ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprPreceding_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedPreceding_ExprPreceding_WithOrderBy( + "1.0" + ) ) def between_unbounded_preceding_and_expr_preceding_with_order_by(self): - """Check range between unbounded preceding and expr preceding frame with order by. - """ - expected = convert_output(""" + """Check range between unbounded preceding and expr preceding frame with order by.""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 1 | sales | 5000 | 16100 @@ -608,171 +740,243 @@ def between_unbounded_preceding_and_expr_preceding_with_order_by(self): 9 | develop | 4500 | 7400 10 | develop | 5200 | 16100 11 | develop | 5200 | 16100 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED PRECEDING AND 500 PRECEDING) AS sum FROM empsalary) ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_CurrentRow_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_CurrentRow_Error( + "1.0" + ) ) def between_unbounded_following_and_current_row_error(self): - """Check range between unbounded following and current row frame with or without order by returns an error. 
- """ + """Check range between unbounded following and current row frame with or without order by returns an error.""" exitcode, message = frame_start_error() with Example("without order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED FOLLOWING AND CURRENT ROW) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED FOLLOWING AND CURRENT ROW) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) with Example("with order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED FOLLOWING AND CURRENT ROW) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED FOLLOWING AND CURRENT ROW) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_UnboundedFollowing_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_UnboundedFollowing_Error( + "1.0" + ) ) def between_unbounded_following_and_unbounded_following_error(self): - """Check range between unbounded following and unbounded following frame with or without order by returns an error. - """ + """Check range between unbounded following and unbounded following frame with or without order by returns an error.""" exitcode, message = frame_start_error() with Example("without order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) with Example("with order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_UnboundedPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_UnboundedPreceding_Error( + "1.0" + ) ) def between_unbounded_following_and_unbounded_preceding_error(self): - """Check range between unbounded following and unbounded preceding frame with or without order by returns an error. 
- """ + """Check range between unbounded following and unbounded preceding frame with or without order by returns an error.""" exitcode, message = frame_start_error() with Example("without order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) with Example("with order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_ExprPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_ExprPreceding_Error( + "1.0" + ) ) def between_unbounded_following_and_expr_preceding_error(self): - """Check range between unbounded following and expr preceding frame with or without order by returns an error. - """ + """Check range between unbounded following and expr preceding frame with or without order by returns an error.""" exitcode, message = frame_start_error() with Example("without order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED FOLLOWING AND 1 PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED FOLLOWING AND 1 PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) with Example("with order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED FOLLOWING AND 1 PRECEDING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED FOLLOWING AND 1 PRECEDING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_ExprFollowing_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_UnboundedFollowing_ExprFollowing_Error( + "1.0" + ) ) def between_unbounded_following_and_expr_following_error(self): - """Check range between unbounded following and expr following frame with or without order by returns an error. 
- """ + """Check range between unbounded following and expr following frame with or without order by returns an error.""" exitcode, message = frame_start_error() with Example("without order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED FOLLOWING AND 1 FOLLOWING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (RANGE BETWEEN UNBOUNDED FOLLOWING AND 1 FOLLOWING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) with Example("with order by"): - self.context.node.query("SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED FOLLOWING AND 1 FOLLOWING) AS sum FROM empsalary", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN UNBOUNDED FOLLOWING AND 1 FOLLOWING) AS sum FROM empsalary", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_CurrentRow_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_CurrentRow_WithoutOrderBy_Error( + "1.0" + ) ) def between_expr_preceding_and_current_row_without_order_by_error(self): - """Check range between expr preceding and current row frame without order by returns an error. - """ + """Check range between expr preceding and current row frame without order by returns an error.""" exitcode, message = frame_requires_order_by_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_UnboundedFollowing_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_UnboundedFollowing_WithoutOrderBy_Error( + "1.0" + ) ) def between_expr_preceding_and_unbounded_following_without_order_by_error(self): - """Check range between expr preceding and unbounded following frame without order by returns an error. - """ + """Check range between expr preceding and unbounded following frame without order by returns an error.""" exitcode, message = frame_requires_order_by_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprFollowing_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprFollowing_WithoutOrderBy_Error( + "1.0" + ) ) def between_expr_preceding_and_expr_following_without_order_by_error(self): - """Check range between expr preceding and expr following frame without order by returns an error. 
- """ + """Check range between expr preceding and expr following frame without order by returns an error.""" exitcode, message = frame_requires_order_by_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprPreceding_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprPreceding_WithoutOrderBy_Error( + "1.0" + ) ) def between_expr_preceding_and_expr_preceding_without_order_by_error(self): - """Check range between expr preceding and expr preceding frame without order by returns an error. - """ + """Check range between expr preceding and expr preceding frame without order by returns an error.""" exitcode, message = frame_requires_order_by_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND 0 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND 0 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_UnboundedPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_UnboundedPreceding_Error( + "1.0" + ) ) def between_expr_preceding_and_unbounded_preceding_error(self): - """Check range between expr preceding and unbounded preceding frame with or without order by returns an error. - """ + """Check range between expr preceding and unbounded preceding frame with or without order by returns an error.""" exitcode, message = frame_end_unbounded_preceding_error() with Example("without order by"): - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) with Example("with order by"): - self.context.node.query("SELECT number,sum(number) OVER (ORDER BY salary RANGE BETWEEN 1 PRECEDING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ORDER BY salary RANGE BETWEEN 1 PRECEDING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_CurrentRow_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_CurrentRow_WithOrderBy( + "1.0" + ) ) def between_expr_preceding_and_current_row_with_order_by(self): - """Check range between expr preceding and current row frame with order by. 
- """ - expected = convert_output(""" + """Check range between expr preceding and current row frame with order by.""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 1 | sales | 5000 | 5000 @@ -785,21 +989,25 @@ def between_expr_preceding_and_current_row_with_order_by(self): 9 | develop | 4500 | 36700 10 | develop | 5200 | 41900 11 | develop | 5200 | 47100 - """) + """ + ) execute_query( "SELECT empno, depname, salary, sum(salary) OVER (ORDER BY empno RANGE BETWEEN 500 PRECEDING AND CURRENT ROW) AS sum FROM empsalary", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_UnboundedFollowing_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_UnboundedFollowing_WithOrderBy( + "1.0" + ) ) def between_expr_preceding_and_unbounded_following_with_order_by(self): - """Check range between expr preceding and unbounded following frame with order by. - """ - expected = convert_output(""" + """Check range between expr preceding and unbounded following frame with order by.""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 1 | sales | 5000 | 35500 @@ -812,22 +1020,26 @@ def between_expr_preceding_and_unbounded_following_with_order_by(self): 9 | develop | 4500 | 39700 10 | develop | 5200 | 31000 11 | develop | 5200 | 31000 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN 500 PRECEDING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary) ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprFollowing_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprFollowing_WithOrderBy( + "1.0" + ) ) def between_expr_preceding_and_expr_following_with_order_by(self): - """Check range between expr preceding and expr following frame with order by. 
- """ + """Check range between expr preceding and expr following frame with order by.""" with Example("empsalary"): - expected = convert_output(""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 1 | sales | 5000 | 29500 @@ -840,15 +1052,17 @@ def between_expr_preceding_and_expr_following_with_order_by(self): 9 | develop | 4500 | 23300 10 | develop | 5200 | 25000 11 | develop | 5200 | 25000 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN 500 PRECEDING AND 500 FOLLOWING) AS sum FROM empsalary) ORDER BY empno", - expected=expected + expected=expected, ) with Example("tenk1"): - expected = convert_output(""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 4 | 0 | 0 @@ -861,24 +1075,28 @@ def between_expr_preceding_and_expr_following_with_order_by(self): 8 | 6 | 2 10 | 3 | 3 10 | 7 | 3 - """) + """ + ) execute_query( "SELECT sum(unique1) over (partition by four order by unique1 range between 5 preceding and 6 following) AS sum, " "unique1, four " "FROM tenk1 WHERE unique1 < 10", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprPreceding_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprPreceding_WithOrderBy( + "1.0" + ) ) def between_expr_preceding_and_expr_preceding_with_order_by(self): - """Check range between expr preceding and expr preceding range with order by. - """ + """Check range between expr preceding and expr preceding range with order by.""" with Example("order by asc"): - expected = convert_output(""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 0 | 0 | 0 @@ -891,17 +1109,19 @@ def between_expr_preceding_and_expr_preceding_with_order_by(self): 27 | 6 | 2 23 | 3 | 3 23 | 7 | 3 - """) + """ + ) execute_query( "SELECT * FROM (SELECT sum(unique1) over (order by four range between 2 preceding and 1 preceding) AS sum, " - "unique1, four " + "unique1, four " "FROM tenk1 WHERE unique1 < 10) ORDER BY four, unique1", - expected=expected + expected=expected, ) with Example("order by desc"): - expected = convert_output(""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 23 | 0 | 0 @@ -914,18 +1134,22 @@ def between_expr_preceding_and_expr_preceding_with_order_by(self): 10 | 6 | 2 0 | 3 | 3 0 | 7 | 3 - """) + """ + ) execute_query( "SELECT * FROM (SELECT sum(unique1) over (order by four desc range between 2 preceding and 1 preceding) AS sum, " "unique1, four " "FROM tenk1 WHERE unique1 < 10) ORDER BY four, unique1", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprPreceding_WithOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprPreceding_ExprPreceding_WithOrderBy_Error( + "1.0" + ) ) def between_expr_preceding_and_expr_preceding_with_order_by_error(self): """Check range between expr preceding and expr preceding range with order by returns error @@ -933,77 +1157,111 @@ def between_expr_preceding_and_expr_preceding_with_order_by_error(self): """ exitcode, message = frame_start_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND 2 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + 
self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 PRECEDING AND 2 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_CurrentRow_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_CurrentRow_WithoutOrderBy_Error( + "1.0" + ) ) def between_expr_following_and_current_row_without_order_by_error(self): - """Check range between expr following and current row frame without order by returns an error. - """ + """Check range between expr following and current row frame without order by returns an error.""" exitcode, message = window_frame_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_UnboundedFollowing_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_UnboundedFollowing_WithoutOrderBy_Error( + "1.0" + ) ) def between_expr_following_and_unbounded_following_without_order_by_error(self): - """Check range between expr following and unbounded following frame without order by returns an error. - """ + """Check range between expr following and unbounded following frame without order by returns an error.""" exitcode, message = frame_requires_order_by_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprFollowing_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprFollowing_WithoutOrderBy_Error( + "1.0" + ) ) def between_expr_following_and_expr_following_without_order_by_error(self): - """Check range between expr following and expr following frame without order by returns an error. 
- """ + """Check range between expr following and expr following frame without order by returns an error.""" exitcode, message = window_frame_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 1 FOLLOWING AND 1 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 FOLLOWING AND 1 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprPreceding_WithoutOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprPreceding_WithoutOrderBy_Error( + "1.0" + ) ) def between_expr_following_and_expr_preceding_without_order_by_error(self): - """Check range between expr following and expr preceding frame without order by returns an error. - """ + """Check range between expr following and expr preceding frame without order by returns an error.""" exitcode, message = window_frame_error() - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND 0 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND 0 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_UnboundedPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_UnboundedPreceding_Error( + "1.0" + ) ) def between_expr_following_and_unbounded_preceding_error(self): - """Check range between expr following and unbounded preceding frame with or without order by returns an error. 
- """ + """Check range between expr following and unbounded preceding frame with or without order by returns an error.""" exitcode, message = frame_end_unbounded_preceding_error() with Example("without order by"): - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) with Example("with order by"): - self.context.node.query("SELECT number,sum(number) OVER (ORDER BY salary RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ORDER BY salary RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_CurrentRow_WithOrderBy_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_CurrentRow_WithOrderBy_Error( + "1.0" + ) ) def between_expr_following_and_current_row_with_order_by_error(self): """Check range between expr following and current row frame with order by returns an error @@ -1011,12 +1269,18 @@ def between_expr_following_and_current_row_with_order_by_error(self): """ exitcode, message = window_frame_error() - self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 FOLLOWING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 FOLLOWING AND CURRENT ROW) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprPreceding_Error( + "1.0" + ) ) def between_expr_following_and_expr_preceding_error(self): """Check range between expr following and expr preceding frame with order by returns an error @@ -1025,16 +1289,25 @@ def between_expr_following_and_expr_preceding_error(self): exitcode, message = frame_start_error() with Example("1 following 0 preceding"): - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 1 FOLLOWING AND 0 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 1 FOLLOWING AND 0 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) with Example("1 following 0 preceding"): - self.context.node.query("SELECT number,sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND 1 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND 1 PRECEDING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprFollowing_WithOrderBy_Error("1.0") + 
RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprFollowing_WithOrderBy_Error( + "1.0" + ) ) def between_expr_following_and_expr_following_with_order_by_error(self): """Check range between expr following and expr following frame with order by returns an error @@ -1042,98 +1315,122 @@ def between_expr_following_and_expr_following_with_order_by_error(self): """ exitcode, message = frame_start_error() - self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 FOLLOWING AND 0 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 FOLLOWING AND 0 FOLLOWING) FROM values('number Int8', (1),(1),(2),(3))", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_CurrentRow_ZeroSpecialCase("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_CurrentRow_ZeroSpecialCase( + "1.0" + ) ) def between_expr_following_and_current_row_zero_special_case(self): """Check range between expr following and current row frame for special case when exp is 0. It is expected to work. """ with When("I use it with order by"): - expected = convert_output(""" + expected = convert_output( + """ number | sum ---------+------ 1 | 2 1 | 2 2 | 2 3 | 3 - """) + """ + ) - execute_query("SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW) AS sum FROM values('number Int8', (1),(1),(2),(3))", - expected=expected + execute_query( + "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW) AS sum FROM values('number Int8', (1),(1),(2),(3))", + expected=expected, ) with And("I use it without order by"): - expected = convert_output(""" + expected = convert_output( + """ number | sum ---------+------ 1 | 7 1 | 7 2 | 7 3 | 7 - """) + """ + ) execute_query( "SELECT number,sum(number) OVER (RANGE BETWEEN 0 FOLLOWING AND CURRENT ROW) AS sum FROM values('number Int8', (1),(1),(2),(3))", - expected=expected - ) + expected=expected, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_UnboundedFollowing_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_UnboundedFollowing_WithOrderBy( + "1.0" + ) ) def between_expr_following_and_unbounded_following_with_order_by(self): - """Check range between expr following and unbounded following range with order by. - """ - expected = convert_output(""" + """Check range between expr following and unbounded following range with order by.""" + expected = convert_output( + """ number | sum ---------+------ 1 | 5 1 | 5 2 | 3 3 | 0 - """) - + """ + ) execute_query( "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING) AS sum FROM values('number Int8', (1),(1),(2),(3))", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprPreceding_WithOrderBy_ZeroSpecialCase("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprPreceding_WithOrderBy_ZeroSpecialCase( + "1.0" + ) ) def between_expr_following_and_expr_preceding_with_order_by_zero_special_case(self): """Check range between expr following and expr preceding frame for special case when exp is 0. It is expected to work. 
""" - expected = convert_output(""" + expected = convert_output( + """ number | sum ---------+------ 1 | 2 1 | 2 2 | 2 3 | 3 - """) - - execute_query("SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 0 FOLLOWING AND 0 PRECEDING) AS sum FROM values('number Int8', (1),(1),(2),(3))", - expected=expected + """ ) + execute_query( + "SELECT number,sum(number) OVER (ORDER BY number RANGE BETWEEN 0 FOLLOWING AND 0 PRECEDING) AS sum FROM values('number Int8', (1),(1),(2),(3))", + expected=expected, + ) + + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprFollowing_WithOrderBy("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_Between_ExprFollowing_ExprFollowing_WithOrderBy( + "1.0" + ) ) def between_expr_following_and_expr_following_with_order_by(self): """Check range between expr following and expr following frame with order by when frame start is before frame end. """ - expected = convert_output(""" + expected = convert_output( + """ empno | depname | salary | sum --------+-----------+--------+--------- 1 | sales | 5000 | 6000 @@ -1146,19 +1443,24 @@ def between_expr_following_and_expr_following_with_order_by(self): 9 | develop | 4500 | 15400 10 | develop | 5200 | 6000 11 | develop | 5200 | 6000 - """) + """ + ) execute_query( "SELECT * FROM (SELECT empno, depname, salary, sum(salary) OVER (ORDER BY salary RANGE BETWEEN 500 FOLLOWING AND 1000 FOLLOWING) AS sum FROM empsalary) ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario -def between_unbounded_preceding_and_current_row_with_expressions_in_order_by_and_aggregate(self): +def between_unbounded_preceding_and_current_row_with_expressions_in_order_by_and_aggregate( + self, +): """Check range between unbounded prceding and current row with expression used in the order by clause and aggregate functions. """ - expected = convert_output(""" + expected = convert_output( + """ four | two | sum | last_value ------+-----+-----+------------ 0 | 0 | 0 | 0 @@ -1181,22 +1483,25 @@ def between_unbounded_preceding_and_current_row_with_expressions_in_order_by_and 3 | 1 | 2 | 1 3 | 1 | 2 | 1 3 | 2 | 4 | 2 - """) + """ + ) execute_query( "SELECT four, toInt8(ten/4) as two, " "sum(toInt8(ten/4)) over (partition by four order by toInt8(ten/4) range between unbounded preceding and current row) AS sum, " "last_value(toInt8(ten/4)) over (partition by four order by toInt8(ten/4) range between unbounded preceding and current row) AS last_value " "FROM (select distinct ten, four from tenk1)", - expected=expected + expected=expected, ) + @TestScenario def between_current_row_and_unbounded_following_modifying_named_window(self): """Check range between current row and unbounded following when modifying named window. """ - expected = convert_output(""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 45 | 0 | 0 @@ -1209,20 +1514,22 @@ def between_current_row_and_unbounded_following_modifying_named_window(self): 18 | 2 | 2 10 | 3 | 3 10 | 7 | 3 - """) + """ + ) execute_query( "SELECT * FROM (SELECT sum(unique1) over (w range between current row and unbounded following) AS sum," "unique1, four " "FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four)) ORDER BY unique1", - expected=expected + expected=expected, ) + @TestScenario def between_current_row_and_unbounded_following_in_named_window(self): - """Check range between current row and unbounded following in named window. 
- """ - expected = convert_output(""" + """Check range between current row and unbounded following in named window.""" + expected = convert_output( + """ first_value | last_value | unique1 | four -------------+------------+---------+------ 0 | 9 | 0 | 0 @@ -1235,27 +1542,31 @@ def between_current_row_and_unbounded_following_in_named_window(self): 7 | 9 | 7 | 3 8 | 9 | 8 | 0 9 | 9 | 9 | 1 - """) + """ + ) execute_query( "SELECT first_value(unique1) over w AS first_value, " "last_value(unique1) over w AS last_value, unique1, four " "FROM tenk1 WHERE unique1 < 10 " "WINDOW w AS (order by unique1 range between current row and unbounded following)", - expected=expected + expected=expected, ) + @TestScenario def between_expr_preceding_and_expr_following_with_partition_by_two_columns(self): """Check range between n preceding and n following frame with partition by two int value columns. """ - expected = convert_output(""" + expected = convert_output( + """ f1 | sum ----+----- 1 | 0 2 | 0 - """) + """ + ) execute_query( """ @@ -1263,20 +1574,23 @@ def between_expr_preceding_and_expr_following_with_partition_by_two_columns(self range between 1 following and 2 following) AS sum from t1 where f1 = f2 """, - expected=expected + expected=expected, ) + @TestScenario def between_expr_preceding_and_expr_following_with_partition_by_same_column_twice(self): """Check range between n preceding and n folowing with partition by the same column twice. """ - expected = convert_output(""" + expected = convert_output( + """ f1 | sum ----+----- 1 | 0 2 | 0 - """) + """ + ) execute_query( """ @@ -1284,20 +1598,23 @@ def between_expr_preceding_and_expr_following_with_partition_by_same_column_twic range between 2 preceding and 1 preceding) AS sum from t1 where f1 = f2) order by f1, sum """, - expected=expected + expected=expected, ) + @TestScenario def between_expr_preceding_and_expr_following_with_partition_and_order_by(self): """Check range between expr preceding and expr following frame used with partition by and order by clauses. """ - expected = convert_output(""" + expected = convert_output( + """ f1 | sum ----+----- 1 | 1 2 | 2 - """) + """ + ) execute_query( """ @@ -1305,14 +1622,15 @@ def between_expr_preceding_and_expr_following_with_partition_and_order_by(self): range between 1 preceding and 1 following) AS sum from t1 where f1 = f2 """, - expected=expected + expected=expected, ) + @TestScenario def order_by_decimal(self): - """Check using range with order by decimal column. - """ - expected = convert_output(""" + """Check using range with order by decimal column.""" + expected = convert_output( + """ id | f_numeric | first_value | last_value ----+-----------+-------------+------------ 0 | -1000 | 0 | 0 @@ -1325,7 +1643,8 @@ def order_by_decimal(self): 7 | 100 | 7 | 7 8 | 1000 | 8 | 8 9 | 0 | 9 | 9 - """) + """ + ) execute_query( """ @@ -1334,14 +1653,15 @@ def order_by_decimal(self): window w as (order by f_numeric range between 1 preceding and 1 following) """, - expected=expected + expected=expected, ) + @TestScenario def order_by_float(self): - """Check using range with order by float column. 
- """ - expected = convert_output(""" + """Check using range with order by float column.""" + expected = convert_output( + """ id | f_float4 | first_value | last_value ----+-----------+-------------+------------ 0 | -inf | 0 | 0 @@ -1354,7 +1674,8 @@ def order_by_float(self): 7 | 100 | 7 | 7 8 | inf | 8 | 8 9 | nan | 8 | 8 - """) + """ + ) execute_query( """ @@ -1363,14 +1684,15 @@ def order_by_float(self): window w as (order by f_float4 range between 1 preceding and 1 following) """, - expected=expected + expected=expected, ) + @TestScenario def with_nulls(self): - """Check using range frame over window with nulls. - """ - expected = convert_output(""" + """Check using range frame over window with nulls.""" + expected = convert_output( + """ x | y | first_value | last_value ---+----+-------------+------------ \\N | 42 | 42 | 43 @@ -1380,7 +1702,8 @@ def with_nulls(self): 3 | 3 | 1 | 5 4 | 4 | 2 | 5 5 | 5 | 3 | 5 - """) + """ + ) execute_query( """ @@ -1394,17 +1717,17 @@ def with_nulls(self): window w as (order by x asc nulls first range between 2 preceding and 2 following) """, - expected=expected + expected=expected, ) + @TestFeature @Name("range frame") @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame("1.0"), - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_DataTypes_IntAndUInt("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame_DataTypes_IntAndUInt("1.0"), ) def feature(self): - """Check defining range frame. - """ + """Check defining range frame.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/range_overflow.py b/tests/testflows/window_functions/tests/range_overflow.py index 0c66e54c8ee..34a9a9592e5 100644 --- a/tests/testflows/window_functions/tests/range_overflow.py +++ b/tests/testflows/window_functions/tests/range_overflow.py @@ -3,133 +3,143 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario def positive_overflow_with_Int16(self): - """Check positive overflow with Int16. - """ - expected = convert_output(""" + """Check positive overflow with Int16.""" + expected = convert_output( + """ x | last_value -------+------------ 32764 | 0 32765 | 0 32766 | 0 - """) + """ + ) execute_query( """ select number as x, last_value(x) over (order by toInt16(x) range between current row and 2147450884 following) AS last_value from numbers(32764, 3) """, - expected=expected + expected=expected, ) + @TestScenario def negative_overflow_with_Int16(self): - """Check negative overflow with Int16. - """ - expected = convert_output(""" + """Check negative overflow with Int16.""" + expected = convert_output( + """ x | last_value --------+------------ -32764 | 0 -32765 | 0 -32766 | 0 - """) + """ + ) execute_query( """ select number as x, last_value(x) over (order by toInt16(x) desc range between current row and 2147450885 following) as last_value from (SELECT -number - 32763 AS number FROM numbers(1, 3)) """, - expected=expected + expected=expected, ) + @TestScenario def positive_overflow_for_Int32(self): - """Check positive overflow for Int32. 
- """ - expected = convert_output(""" + """Check positive overflow for Int32.""" + expected = convert_output( + """ x | last_value ------------+------------ 2147483644 | 2147483646 2147483645 | 2147483646 2147483646 | 2147483646 - """) + """ + ) execute_query( """ select number as x, last_value(x) over (order by x range between current row and 4 following) as last_value from numbers(2147483644, 3) """, - expected=expected + expected=expected, ) + @TestScenario def negative_overflow_for_Int32(self): - """Check negative overflow for Int32. - """ - expected = convert_output(""" + """Check negative overflow for Int32.""" + expected = convert_output( + """ x | last_value -------------+------------- -2147483644 | -2147483646 -2147483645 | -2147483646 -2147483646 | -2147483646 - """) + """ + ) execute_query( """ select number as x, last_value(x) over (order by x desc range between current row and 5 following) as last_value from (select -number-2147483643 AS number FROM numbers(1,3)) """, - expected=expected + expected=expected, ) + @TestScenario def positive_overflow_for_Int64(self): - """Check positive overflow for Int64. - """ - expected = convert_output(""" + """Check positive overflow for Int64.""" + expected = convert_output( + """ x | last_value ---------------------+--------------------- 9223372036854775804 | 9223372036854775806 9223372036854775805 | 9223372036854775806 9223372036854775806 | 9223372036854775806 - """) + """ + ) execute_query( """ select number as x, last_value(x) over (order by x range between current row and 4 following) as last_value from numbers(9223372036854775804, 3) """, - expected=expected + expected=expected, ) + @TestScenario def negative_overflow_for_Int64(self): - """Check negative overflow for Int64. - """ - expected = convert_output(""" + """Check negative overflow for Int64.""" + expected = convert_output( + """ x | last_value ----------------------+---------------------- -9223372036854775804 | -9223372036854775806 -9223372036854775805 | -9223372036854775806 -9223372036854775806 | -9223372036854775806 - """) + """ + ) execute_query( """ select number as x, last_value(x) over (order by x desc range between current row and 5 following) as last_value from (select -number-9223372036854775803 AS number from numbers(1,3)) """, - expected=expected + expected=expected, ) + @TestFeature @Name("range overflow") -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_RangeFrame("1.0")) def feature(self): - """Check using range frame with overflows. - """ + """Check using range frame with overflows.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/rows_frame.py b/tests/testflows/window_functions/tests/rows_frame.py index 07533e8d1ab..f1aed00a9b6 100644 --- a/tests/testflows/window_functions/tests/rows_frame.py +++ b/tests/testflows/window_functions/tests/rows_frame.py @@ -3,38 +3,43 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_MissingFrameExtent_Error("1.0") ) def missing_frame_extent(self): - """Check that when rows frame has missing frame extent then an error is returned. 
- """ + """Check that when rows frame has missing frame extent then an error is returned.""" exitcode, message = syntax_error() - self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number ROWS) FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ORDER BY number ROWS) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_InvalidFrameExtent_Error("1.0") ) def invalid_frame_extent(self): - """Check that when rows frame has invalid frame extent then an error is returned. - """ + """Check that when rows frame has invalid frame extent then an error is returned.""" exitcode, message = frame_offset_nonnegative_error() - self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number ROWS -1) FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ORDER BY number ROWS -1) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_CurrentRow("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_CurrentRow("1.0")) def start_current_row(self): - """Check rows current row frame. - """ - expected = convert_output(""" + """Check rows current row frame.""" + expected = convert_output( + """ empno | salary | sum --------+--------+------- 1 | 5000 | 5000 @@ -47,21 +52,23 @@ def start_current_row(self): 9 | 4500 | 4500 10 | 5200 | 5200 11 | 5200 | 5200 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS CURRENT ROW) AS sum FROM empsalary ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_UnboundedPreceding("1.0") ) def start_unbounded_preceding(self): - """Check rows unbounded preceding frame. - """ - expected = convert_output(""" + """Check rows unbounded preceding frame.""" + expected = convert_output( + """ empno | salary | sum --------+--------+------- 1 | 5000 | 5000 @@ -74,21 +81,23 @@ def start_unbounded_preceding(self): 9 | 4500 | 36700 10 | 5200 | 41900 11 | 5200 | 47100 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS UNBOUNDED PRECEDING) AS sum FROM empsalary ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_ExprPreceding("1.0") ) def start_expr_preceding(self): - """Check rows expr preceding frame. - """ - expected = convert_output(""" + """Check rows expr preceding frame.""" + expected = convert_output( + """ empno | salary | sum --------+--------+-------- 1 | 5000 | 5000 @@ -101,47 +110,55 @@ def start_expr_preceding(self): 9 | 4500 | 10500 10 | 5200 | 9700 11 | 5200 | 10400 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS 1 PRECEDING) AS sum FROM empsalary ORDER BY empno", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_UnboundedFollowing_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_UnboundedFollowing_Error( + "1.0" + ) ) def start_unbounded_following_error(self): - """Check rows unbounded following frame returns an error. 
- """ + """Check rows unbounded following frame returns an error.""" exitcode, message = frame_start_error() self.context.node.query( "SELECT empno, salary, sum(salary) OVER (ROWS UNBOUNDED FOLLOWING) AS sum FROM empsalary ORDER BY empno", - exitcode=exitcode, message=message) + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Start_ExprFollowing_Error("1.0") ) def start_expr_following_error(self): - """Check rows expr following frame returns an error. - """ + """Check rows expr following frame returns an error.""" exitcode, message = window_frame_error() self.context.node.query( "SELECT empno, salary, sum(salary) OVER (ROWS 1 FOLLOWING) AS sum FROM empsalary ORDER BY empno", - exitcode=exitcode, message=message) + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_CurrentRow("1.0") ) def between_current_row_and_current_row(self): - """Check rows between current row and current row frame. - """ - expected = convert_output(""" + """Check rows between current row and current row frame.""" + expected = convert_output( + """ empno | salary | sum --------+--------+-------- 1 | 5000 | 5000 @@ -154,45 +171,59 @@ def between_current_row_and_current_row(self): 9 | 4500 | 4500 10 | 5200 | 5200 11 | 5200 | 5200 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN CURRENT ROW AND CURRENT ROW) AS sum FROM empsalary", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_ExprPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_ExprPreceding_Error( + "1.0" + ) ) def between_current_row_and_expr_preceding_error(self): - """Check rows between current row and expr preceding returns an error. - """ + """Check rows between current row and expr preceding returns an error.""" exitcode, message = window_frame_error() - self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number ROWS BETWEEN CURRENT ROW AND 1 PRECEDING) FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ORDER BY number ROWS BETWEEN CURRENT ROW AND 1 PRECEDING) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_UnboundedPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_UnboundedPreceding_Error( + "1.0" + ) ) def between_current_row_and_unbounded_preceding_error(self): - """Check rows between current row and unbounded preceding returns an error. 
- """ + """Check rows between current row and unbounded preceding returns an error.""" exitcode, message = frame_end_unbounded_preceding_error() - self.context.node.query("SELECT number,sum(number) OVER (ORDER BY number ROWS BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ORDER BY number ROWS BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_UnboundedFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_UnboundedFollowing( + "1.0" + ) ) def between_current_row_and_unbounded_following(self): - """Check rows between current row and unbounded following. - """ - expected = convert_output(""" + """Check rows between current row and unbounded following.""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 45 | 0 | 0 @@ -205,23 +236,27 @@ def between_current_row_and_unbounded_following(self): 24 | 7 | 3 17 | 8 | 0 9 | 9 | 1 - """) + """ + ) execute_query( "SELECT sum(unique1) over (order by unique1 rows between current row and unbounded following) AS sum," "unique1, four " "FROM tenk1 WHERE unique1 < 10", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_ExprFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_CurrentRow_ExprFollowing( + "1.0" + ) ) def between_current_row_and_expr_following(self): - """Check rows between current row and expr following. - """ - expected = convert_output(""" + """Check rows between current row and expr following.""" + expected = convert_output( + """ i | b | bool_and | bool_or ---+---+----------+--------- 1 | 1 | 1 | 1 @@ -229,24 +264,29 @@ def between_current_row_and_expr_following(self): 3 | 0 | 0 | 0 4 | 0 | 0 | 1 5 | 1 | 1 | 1 - """) + """ + ) - execute_query(""" + execute_query( + """ SELECT i, b, groupBitAnd(b) OVER w AS bool_and, groupBitOr(b) OVER w AS bool_or FROM VALUES('i Int8, b UInt8', (1,1), (2,1), (3,0), (4,0), (5,1)) WINDOW w AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING) """, - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_CurrentRow("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_CurrentRow( + "1.0" + ) ) def between_unbounded_preceding_and_current_row(self): - """Check rows between unbounded preceding and current row. 
- """ - expected = convert_output(""" + """Check rows between unbounded preceding and current row.""" + expected = convert_output( + """ four | two | sum | last_value ------+-----+-----+------------ 0 | 0 | 0 | 0 @@ -269,36 +309,45 @@ def between_unbounded_preceding_and_current_row(self): 3 | 1 | 1 | 1 3 | 1 | 2 | 1 3 | 2 | 4 | 2 - """) + """ + ) execute_query( "SELECT four, toInt8(ten/4) as two," "sum(toInt8(ten/4)) over (partition by four order by toInt8(ten/4) rows between unbounded preceding and current row) AS sum," "last_value(toInt8(ten/4)) over (partition by four order by toInt8(ten/4) rows between unbounded preceding and current row) AS last_value " "FROM (select distinct ten, four from tenk1)", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_UnboundedPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_UnboundedPreceding_Error( + "1.0" + ) ) def between_unbounded_preceding_and_unbounded_preceding_error(self): - """Check rows between unbounded preceding and unbounded preceding returns an error. - """ + """Check rows between unbounded preceding and unbounded preceding returns an error.""" exitcode, message = frame_end_unbounded_preceding_error() - self.context.node.query("SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING) FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED PRECEDING) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_ExprPreceding("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_ExprPreceding( + "1.0" + ) ) def between_unbounded_preceding_and_expr_preceding(self): - """Check rows between unbounded preceding and expr preceding frame. - """ - expected = convert_output(""" + """Check rows between unbounded preceding and expr preceding frame.""" + expected = convert_output( + """ empno | salary | sum --------+--------+-------- 1 | 5000 | 0 @@ -311,21 +360,25 @@ def between_unbounded_preceding_and_expr_preceding(self): 9 | 4500 | 32200 10 | 5200 | 36700 11 | 5200 | 41900 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) AS sum FROM empsalary", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_UnboundedFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_UnboundedFollowing( + "1.0" + ) ) def between_unbounded_preceding_and_unbounded_following(self): - """Check rows between unbounded preceding and unbounded following frame. 
- """ - expected = convert_output(""" + """Check rows between unbounded preceding and unbounded following frame.""" + expected = convert_output( + """ empno | salary | sum --------+--------+-------- 1 | 5000 | 47100 @@ -338,21 +391,25 @@ def between_unbounded_preceding_and_unbounded_following(self): 9 | 4500 | 47100 10 | 5200 | 47100 11 | 5200 | 47100 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_ExprFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedPreceding_ExprFollowing( + "1.0" + ) ) def between_unbounded_preceding_and_expr_following(self): - """Check rows between unbounded preceding and expr following. - """ - expected = convert_output(""" + """Check rows between unbounded preceding and expr following.""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 1 | 0 | 0 @@ -365,52 +422,70 @@ def between_unbounded_preceding_and_expr_following(self): 36 | 7 | 3 45 | 8 | 0 45 | 9 | 1 - """) + """ + ) execute_query( "SELECT sum(unique1) over (order by unique1 rows between unbounded preceding and 1 following) AS sum," "unique1, four " "FROM tenk1 WHERE unique1 < 10", - expected=expected + expected=expected, ) + @TestOutline(Scenario) @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedFollowing_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_UnboundedFollowing_Error( + "1.0" + ) +) +@Examples( + "range", + [ + ("UNBOUNDED FOLLOWING AND CURRENT ROW",), + ("UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING",), + ("UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING",), + ("UNBOUNDED FOLLOWING AND 1 PRECEDING",), + ("UNBOUNDED FOLLOWING AND 1 FOLLOWING",), + ], ) -@Examples("range", [ - ("UNBOUNDED FOLLOWING AND CURRENT ROW",), - ("UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING",), - ("UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING",), - ("UNBOUNDED FOLLOWING AND 1 PRECEDING",), - ("UNBOUNDED FOLLOWING AND 1 FOLLOWING",), -]) def between_unbounded_following_error(self, range): - """Check rows between unbounded following and any end frame returns an error. - """ + """Check rows between unbounded following and any end frame returns an error.""" exitcode, message = frame_start_error() - self.context.node.query(f"SELECT number,sum(number) OVER (ROWS BETWEEN {range}) FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + f"SELECT number,sum(number) OVER (ROWS BETWEEN {range}) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestOutline(Scenario) @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_Error("1.0") ) -@Examples("range exitcode message", [ - ("1 FOLLOWING AND CURRENT ROW", *window_frame_error()), - ("1 FOLLOWING AND UNBOUNDED PRECEDING", *frame_end_unbounded_preceding_error()), - ("1 FOLLOWING AND 1 PRECEDING", *frame_start_error()) -]) +@Examples( + "range exitcode message", + [ + ("1 FOLLOWING AND CURRENT ROW", *window_frame_error()), + ("1 FOLLOWING AND UNBOUNDED PRECEDING", *frame_end_unbounded_preceding_error()), + ("1 FOLLOWING AND 1 PRECEDING", *frame_start_error()), + ], +) def between_expr_following_error(self, range, exitcode, message): - """Check cases when rows between expr following returns an error. 
- """ - self.context.node.query(f"SELECT number,sum(number) OVER (ROWS BETWEEN {range}) FROM numbers(1,3)", - exitcode=exitcode, message=message) + """Check cases when rows between expr following returns an error.""" + self.context.node.query( + f"SELECT number,sum(number) OVER (ROWS BETWEEN {range}) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_ExprFollowing_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_ExprFollowing_Error( + "1.0" + ) ) def between_expr_following_and_expr_following_error(self): """Check rows between expr following and expr following returns an error when frame end index is less @@ -418,17 +493,23 @@ def between_expr_following_and_expr_following_error(self): """ exitcode, message = frame_start_error() - self.context.node.query("SELECT number,sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND 0 FOLLOWING) FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 FOLLOWING AND 0 FOLLOWING) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_UnboundedFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_UnboundedFollowing( + "1.0" + ) ) def between_expr_following_and_unbounded_following(self): - """Check rows between exp following and unbounded following frame. - """ - expected = convert_output(""" + """Check rows between exp following and unbounded following frame.""" + expected = convert_output( + """ empno | salary | sum --------+--------+-------- 1 | 5000 | 28600 @@ -441,22 +522,27 @@ def between_expr_following_and_unbounded_following(self): 9 | 4500 | 0 10 | 5200 | 0 11 | 5200 | 0 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 4 FOLLOWING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_ExprFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_ExprFollowing( + "1.0" + ) ) def between_expr_following_and_expr_following(self): """Check rows between exp following and expr following frame when end of the frame is greater than the start of the frame. """ - expected = convert_output(""" + expected = convert_output( + """ empno | salary | sum --------+--------+-------- 1 | 5000 | 17000 @@ -469,68 +555,85 @@ def between_expr_following_and_expr_following(self): 9 | 4500 | 10400 10 | 5200 | 5200 11 | 5200 | 0 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 1 FOLLOWING AND 4 FOLLOWING) AS sum FROM empsalary", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_CurrentRow("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_CurrentRow( + "1.0" + ) ) def between_expr_preceding_and_current_row(self): - """Check rows between exp preceding and current row frame. 
- """ - expected = convert_output(""" + """Check rows between exp preceding and current row frame.""" + expected = convert_output( + """ empno | salary | sum --------+--------+-------- 8 | 6000 | 6000 10 | 5200 | 11200 11 | 5200 | 10400 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS sum FROM empsalary WHERE salary > 5000", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_UnboundedPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_UnboundedPreceding_Error( + "1.0" + ) ) def between_expr_preceding_and_unbounded_preceding_error(self): - """Check rows between expr preceding and unbounded preceding returns an error. - """ + """Check rows between expr preceding and unbounded preceding returns an error.""" exitcode, message = frame_end_error() - self.context.node.query("SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND UNBOUNDED PRECEDING) FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND UNBOUNDED PRECEDING) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_UnboundedFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_UnboundedFollowing( + "1.0" + ) ) def between_expr_preceding_and_unbounded_following(self): - """Check rows between exp preceding and unbounded following frame. - """ - expected = convert_output(""" + """Check rows between exp preceding and unbounded following frame.""" + expected = convert_output( + """ empno | salary | sum --------+--------+-------- 8 | 6000 | 16400 10 | 5200 | 16400 11 | 5200 | 10400 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS sum FROM empsalary WHERE salary > 5000", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprPreceding_Error("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprPreceding_Error( + "1.0" + ) ) def between_expr_preceding_and_expr_preceding_error(self): """Check rows between expr preceding and expr preceding returns an error when frame end is @@ -538,17 +641,23 @@ def between_expr_preceding_and_expr_preceding_error(self): """ exitcode, message = frame_start_error() - self.context.node.query("SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 2 PRECEDING) FROM numbers(1,3)", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER (ROWS BETWEEN 1 PRECEDING AND 2 PRECEDING) FROM numbers(1,3)", + exitcode=exitcode, + message=message, + ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprPreceding("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprPreceding( + "1.0" + ) ) def between_expr_preceding_and_expr_preceding(self): - """Check rows between expr preceding and expr preceding frame when frame end is after or at frame start. 
- """ - expected = convert_output(""" + """Check rows between expr preceding and expr preceding frame when frame end is after or at frame start.""" + expected = convert_output( + """ empno | salary | sum --------+--------+-------- 1 | 5000 | 5000 @@ -561,42 +670,49 @@ def between_expr_preceding_and_expr_preceding(self): 9 | 4500 | 10500 10 | 5200 | 9700 11 | 5200 | 10400 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 1 PRECEDING AND 0 PRECEDING) AS sum FROM empsalary", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprFollowing( + "1.0" + ) ) def between_expr_preceding_and_expr_following(self): - """Check rows between expr preceding and expr following frame. - """ - expected = convert_output(""" + """Check rows between expr preceding and expr following frame.""" + expected = convert_output( + """ empno | salary | sum --------+--------+-------- 8 | 6000 | 11200 10 | 5200 | 16400 11 | 5200 | 10400 - """) + """ + ) execute_query( "SELECT empno, salary, sum(salary) OVER (ORDER BY empno ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum FROM empsalary WHERE salary > 5000", - expected=expected + expected=expected, ) @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_ExprFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprFollowing_ExprFollowing( + "1.0" + ) ) def between_expr_following_and_expr_following_ref(self): - """Check reference result for rows between expr following and expr following range. - """ - expected = convert_output(""" + """Check reference result for rows between expr following and expr following range.""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 6 | 0 | 0 @@ -609,23 +725,27 @@ def between_expr_following_and_expr_following_ref(self): 17 | 7 | 3 9 | 8 | 0 0 | 9 | 1 - """) + """ + ) execute_query( "SELECT sum(unique1) over (order by unique1 rows between 1 following and 3 following) AS sum," "unique1, four " "FROM tenk1 WHERE unique1 < 10", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprPreceding("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprPreceding( + "1.0" + ) ) def between_expr_preceding_and_expr_preceding_ref(self): - """Check reference result for rows between expr preceding and expr preceding frame. 
- """ - expected = convert_output(""" + """Check reference result for rows between expr preceding and expr preceding frame.""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 0 | 0 | 0 @@ -638,23 +758,27 @@ def between_expr_preceding_and_expr_preceding_ref(self): 11 | 7 | 3 13 | 8 | 0 15 | 9 | 1 - """) + """ + ) execute_query( "SELECT sum(unique1) over (order by unique1 rows between 2 preceding and 1 preceding) AS sum," "unique1, four " "FROM tenk1 WHERE unique1 < 10", - expected=expected + expected=expected, ) + @TestScenario @Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprFollowing("1.0") + RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame_Between_ExprPreceding_ExprFollowing( + "1.0" + ) ) def between_expr_preceding_and_expr_following_ref(self): - """Check reference result for rows between expr preceding and expr following frame. - """ - expected = convert_output(""" + """Check reference result for rows between expr preceding and expr following frame.""" + expected = convert_output( + """ sum | unique1 | four -----+---------+------ 3 | 0 | 0 @@ -667,22 +791,21 @@ def between_expr_preceding_and_expr_following_ref(self): 35 | 7 | 3 30 | 8 | 0 24 | 9 | 1 - """) + """ + ) execute_query( "SELECT sum(unique1) over (order by unique1 rows between 2 preceding and 2 following) AS sum, " "unique1, four " "FROM tenk1 WHERE unique1 < 10", - expected=expected + expected=expected, ) + @TestFeature @Name("rows frame") -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_RowsFrame("1.0")) def feature(self): - """Check defining rows frame. - """ + """Check defining rows frame.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/window_clause.py b/tests/testflows/window_functions/tests/window_clause.py index 714fce89895..17ff5a7ddab 100644 --- a/tests/testflows/window_functions/tests/window_clause.py +++ b/tests/testflows/window_functions/tests/window_clause.py @@ -3,11 +3,12 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario def single_window(self): - """Check defining a single named window using window clause. - """ - expected = convert_output(""" + """Check defining a single named window using window clause.""" + expected = convert_output( + """ depname | empno | salary | sum -----------+-------+--------+------- develop | 7 | 4200 | 4200 @@ -20,35 +21,37 @@ def single_window(self): sales | 1 | 5000 | 5000 sales | 3 | 4800 | 9800 sales | 4 | 4800 | 14600 - """) + """ + ) execute_query( "SELECT depname, empno, salary, sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY empno)", - expected=expected + expected=expected, ) + @TestScenario def unused_window(self): - """Check unused window. - """ - expected = convert_output(""" + """Check unused window.""" + expected = convert_output( + """ four ------- - """) + """ + ) execute_query( "SELECT four FROM tenk1 WHERE 0 WINDOW w AS (PARTITION BY ten)", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause_MultipleWindows("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause_MultipleWindows("1.0")) def multiple_identical_windows(self): - """Check defining multiple windows using window clause. 
- """ - expected = convert_output(""" + """Check defining multiple windows using window clause.""" + expected = convert_output( + """ sum | count -------+------- 3500 | 1 @@ -61,22 +64,22 @@ def multiple_identical_windows(self): 41100 | 9 41100 | 9 47100 | 10 - """) + """ + ) execute_query( "SELECT sum(salary) OVER w1 AS sum, count(*) OVER w2 AS count " "FROM empsalary WINDOW w1 AS (ORDER BY salary), w2 AS (ORDER BY salary)", - expected=expected + expected=expected, ) + @TestScenario -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause_MultipleWindows("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause_MultipleWindows("1.0")) def multiple_windows(self): - """Check defining multiple windows using window clause. - """ - expected = convert_output(""" + """Check defining multiple windows using window clause.""" + expected = convert_output( + """ empno | depname | salary | sum1 | sum2 --------+-----------+--------+-------+-------- 1 | sales | 5000 | 5000 | 5000 @@ -89,33 +92,36 @@ def multiple_windows(self): 9 | develop | 4500 | 14700 | 10500 10 | develop | 5200 | 19900 | 9700 11 | develop | 5200 | 25100 | 10400 - """) - - execute_query("SELECT empno, depname, salary, sum(salary) OVER w1 AS sum1, sum(salary) OVER w2 AS sum2 " - "FROM empsalary WINDOW w1 AS (PARTITION BY depname ORDER BY empno), w2 AS (ORDER BY empno ROWS 1 PRECEDING)", - expected=expected + """ ) + execute_query( + "SELECT empno, depname, salary, sum(salary) OVER w1 AS sum1, sum(salary) OVER w2 AS sum2 " + "FROM empsalary WINDOW w1 AS (PARTITION BY depname ORDER BY empno), w2 AS (ORDER BY empno ROWS 1 PRECEDING)", + expected=expected, + ) + + @TestScenario @Requirements( RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause_MissingWindowSpec_Error("1.0") ) def missing_window_spec(self): - """Check missing window spec in window clause. - """ + """Check missing window spec in window clause.""" exitcode = 62 message = "Exception: Syntax error" - self.context.node.query("SELECT number,sum(number) OVER w1 FROM values('number Int8', (1),(1),(2),(3)) WINDOW w1", - exitcode=exitcode, message=message) + self.context.node.query( + "SELECT number,sum(number) OVER w1 FROM values('number Int8', (1),(1),(2),(3)) WINDOW w1", + exitcode=exitcode, + message=message, + ) + @TestFeature @Name("window clause") -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_WindowClause("1.0")) def feature(self): - """Check defining frame clause. - """ + """Check defining frame clause.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/tests/testflows/window_functions/tests/window_spec.py b/tests/testflows/window_functions/tests/window_spec.py index aacbc192200..82e3cf0ef47 100644 --- a/tests/testflows/window_functions/tests/window_spec.py +++ b/tests/testflows/window_functions/tests/window_spec.py @@ -2,11 +2,12 @@ from testflows.core import * from window_functions.requirements import * from window_functions.tests.common import * + @TestScenario def partition_clause(self): - """Check window specification that only contains partition clause. 
- """ - expected = convert_output(""" + """Check window specification that only contains partition clause.""" + expected = convert_output( + """ sum ------- 25100 @@ -19,18 +20,20 @@ def partition_clause(self): 14600 14600 14600 - """) + """ + ) execute_query( "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (PARTITION BY depname)", - expected=expected + expected=expected, ) + @TestScenario def orderby_clause(self): - """Check window specification that only contains order by clause. - """ - expected = convert_output(""" + """Check window specification that only contains order by clause.""" + expected = convert_output( + """ sum ------- 25100 @@ -43,18 +46,20 @@ def orderby_clause(self): 47100 47100 47100 - """) + """ + ) execute_query( "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (ORDER BY depname)", - expected=expected + expected=expected, ) + @TestScenario def frame_clause(self): - """Check window specification that only contains frame clause. - """ - expected = convert_output(""" + """Check window specification that only contains frame clause.""" + expected = convert_output( + """ sum ------- 5000 @@ -67,18 +72,20 @@ def frame_clause(self): 4500 5200 5200 - """) + """ + ) execute_query( "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (ORDER BY empno ROWS CURRENT ROW)", - expected=expected + expected=expected, ) + @TestScenario def partition_with_order_by(self): - """Check window specification that contains partition and order by clauses. - """ - expected = convert_output(""" + """Check window specification that contains partition and order by clauses.""" + expected = convert_output( + """ sum ------- 4200 @@ -91,18 +98,20 @@ def partition_with_order_by(self): 9600 9600 14600 - """) + """ + ) execute_query( "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary)", - expected=expected + expected=expected, ) + @TestScenario def partition_with_frame(self): - """Check window specification that contains partition and frame clauses. - """ - expected = convert_output(""" + """Check window specification that contains partition and frame clauses.""" + expected = convert_output( + """ sum ------- 4200 @@ -115,18 +124,20 @@ def partition_with_frame(self): 5000 4800 4800 - """) + """ + ) execute_query( "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (PARTITION BY depname, empno ROWS 1 PRECEDING)", - expected=expected + expected=expected, ) + @TestScenario def order_by_with_frame(self): - """Check window specification that contains order by and frame clauses. - """ - expected = convert_output(""" + """Check window specification that contains order by and frame clauses.""" + expected = convert_output( + """ sum ------- 4200 @@ -139,18 +150,20 @@ def order_by_with_frame(self): 8500 9800 9600 - """) + """ + ) execute_query( "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (ORDER BY depname, empno ROWS 1 PRECEDING)", - expected=expected + expected=expected, ) + @TestScenario def partition_with_order_by_and_frame(self): - """Check window specification that contains all clauses. 
- """ - expected = convert_output(""" + """Check window specification that contains all clauses.""" + expected = convert_output( + """ sum ------- 4200 @@ -163,18 +176,20 @@ def partition_with_order_by_and_frame(self): 4800 9600 9800 - """) + """ + ) execute_query( "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary ROWS 1 PRECEDING)", - expected=expected + expected=expected, ) + @TestScenario def empty(self): - """Check defining an empty window specification. - """ - expected = convert_output(""" + """Check defining an empty window specification.""" + expected = convert_output( + """ sum ------- 47100 @@ -187,20 +202,19 @@ def empty(self): 47100 47100 47100 - """) + """ + ) execute_query( "SELECT sum(salary) OVER w AS sum FROM empsalary WINDOW w AS ()", - expected=expected + expected=expected, ) + @TestFeature @Name("window spec") -@Requirements( - RQ_SRS_019_ClickHouse_WindowFunctions_WindowSpec("1.0") -) +@Requirements(RQ_SRS_019_ClickHouse_WindowFunctions_WindowSpec("1.0")) def feature(self): - """Check defining window specifications. - """ + """Check defining window specifications.""" for scenario in loads(current_module(), Scenario): Scenario(run=scenario, flags=TE) diff --git a/utils/changelog/format-changelog.py b/utils/changelog/format-changelog.py index 56fe973eb6f..ef1340d48dd 100755 --- a/utils/changelog/format-changelog.py +++ b/utils/changelog/format-changelog.py @@ -9,25 +9,37 @@ import os import re import sys -parser = argparse.ArgumentParser(description='Format changelog for given PRs.') -parser.add_argument('file', metavar='FILE', type=argparse.FileType('r', encoding='utf-8'), nargs='?', default=sys.stdin, help='File with PR numbers, one per line.') +parser = argparse.ArgumentParser(description="Format changelog for given PRs.") +parser.add_argument( + "file", + metavar="FILE", + type=argparse.FileType("r", encoding="utf-8"), + nargs="?", + default=sys.stdin, + help="File with PR numbers, one per line.", +) args = parser.parse_args() # This function mirrors the PR description checks in ClickhousePullRequestTrigger. # Returns False if the PR should not be mentioned changelog. def parse_one_pull_request(item): - description = item['body'] + description = item["body"] # Don't skip empty lines because they delimit parts of description - lines = [line for line in [x.strip() for x in (description.split('\n') if description else [])]] - lines = [re.sub(r'\s+', ' ', l) for l in lines] + lines = [ + line + for line in [ + x.strip() for x in (description.split("\n") if description else []) + ] + ] + lines = [re.sub(r"\s+", " ", l) for l in lines] - category = '' - entry = '' + category = "" + entry = "" if lines: i = 0 while i < len(lines): - if re.match(r'(?i)^[>*_ ]*change\s*log\s*category', lines[i]): + if re.match(r"(?i)^[>*_ ]*change\s*log\s*category", lines[i]): i += 1 if i >= len(lines): break @@ -36,9 +48,11 @@ def parse_one_pull_request(item): i += 1 if i >= len(lines): break - category = re.sub(r'^[-*\s]*', '', lines[i]) + category = re.sub(r"^[-*\s]*", "", lines[i]) i += 1 - elif re.match(r'(?i)^[>*_ ]*(short\s*description|change\s*log\s*entry)', lines[i]): + elif re.match( + r"(?i)^[>*_ ]*(short\s*description|change\s*log\s*entry)", lines[i] + ): i += 1 # Can have one empty line between header and the entry itself. Filter it out. 
if i < len(lines) and not lines[i]: @@ -48,7 +62,7 @@ def parse_one_pull_request(item): while i < len(lines) and lines[i]: entry_lines.append(lines[i]) i += 1 - entry = ' '.join(entry_lines) + entry = " ".join(entry_lines) else: i += 1 @@ -58,48 +72,59 @@ def parse_one_pull_request(item): category = "NO CL CATEGORY" # Filter out the PR categories that are not for changelog. - if re.match(r'(?i)doc|((non|in|not|un)[-\s]*significant)|(not[ ]*for[ ]*changelog)', category): + if re.match( + r"(?i)doc|((non|in|not|un)[-\s]*significant)|(not[ ]*for[ ]*changelog)", + category, + ): return False if not entry: # Shouldn't happen, because description check in CI should catch such PRs. category = "NO CL ENTRY" - entry = "NO CL ENTRY: '" + item['title'] + "'" + entry = "NO CL ENTRY: '" + item["title"] + "'" entry = entry.strip() - if entry[-1] != '.': - entry += '.' + if entry[-1] != ".": + entry += "." - item['entry'] = entry - item['category'] = category + item["entry"] = entry + item["category"] = category return True + # This array gives the preferred category order, and is also used to # normalize category names. -categories_preferred_order = ['Backward Incompatible Change', - 'New Feature', 'Performance Improvement', 'Improvement', 'Bug Fix', - 'Build/Testing/Packaging Improvement', 'Other'] +categories_preferred_order = [ + "Backward Incompatible Change", + "New Feature", + "Performance Improvement", + "Improvement", + "Bug Fix", + "Build/Testing/Packaging Improvement", + "Other", +] category_to_pr = collections.defaultdict(lambda: []) users = {} for line in args.file: - pr = json.loads(open(f'pr{line.strip()}.json').read()) - assert(pr['number']) + pr = json.loads(open(f"pr{line.strip()}.json").read()) + assert pr["number"] if not parse_one_pull_request(pr): continue - assert(pr['category']) + assert pr["category"] # Normalize category name for c in categories_preferred_order: - if fuzzywuzzy.fuzz.ratio(pr['category'].lower(), c.lower()) >= 90: - pr['category'] = c + if fuzzywuzzy.fuzz.ratio(pr["category"].lower(), c.lower()) >= 90: + pr["category"] = c break - category_to_pr[pr['category']].append(pr) - user_id = pr['user']['id'] - users[user_id] = json.loads(open(f'user{user_id}.json').read()) + category_to_pr[pr["category"]].append(pr) + user_id = pr["user"]["id"] + users[user_id] = json.loads(open(f"user{user_id}.json").read()) + def print_category(category): print(("#### " + category)) @@ -110,14 +135,25 @@ def print_category(category): # Substitute issue links. # 1) issue number w/o markdown link - pr["entry"] = re.sub(r'([^[])#([0-9]{4,})', r'\1[#\2](https://github.com/ClickHouse/ClickHouse/issues/\2)', pr["entry"]) + pr["entry"] = re.sub( + r"([^[])#([0-9]{4,})", + r"\1[#\2](https://github.com/ClickHouse/ClickHouse/issues/\2)", + pr["entry"], + ) # 2) issue URL w/o markdown link - pr["entry"] = re.sub(r'([^(])https://github.com/ClickHouse/ClickHouse/issues/([0-9]{4,})', r'\1[#\2](https://github.com/ClickHouse/ClickHouse/issues/\2)', pr["entry"]) + pr["entry"] = re.sub( + r"([^(])https://github.com/ClickHouse/ClickHouse/issues/([0-9]{4,})", + r"\1[#\2](https://github.com/ClickHouse/ClickHouse/issues/\2)", + pr["entry"], + ) - print(f'* {pr["entry"]} [#{pr["number"]}]({pr["html_url"]}) ([{user_name}]({user["html_url"]})).') + print( + f'* {pr["entry"]} [#{pr["number"]}]({pr["html_url"]}) ([{user_name}]({user["html_url"]})).' 
+ ) print() + # Print categories in preferred order for category in categories_preferred_order: if category in category_to_pr: diff --git a/utils/check-style/check-black b/utils/check-style/check-black new file mode 100755 index 00000000000..45e7820469b --- /dev/null +++ b/utils/check-style/check-black @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -e + +# We check only our code, that's why we skip contrib +GIT_ROOT=$(git rev-parse --show-cdup) +GIT_ROOT=${GIT_ROOT:-.} +tmp=$(mktemp) +if ! find "$GIT_ROOT" -name '*.py' -not -path "$GIT_ROOT/contrib/*" -exec black --check --diff {} + 1>"$tmp" 2>&1; then + # Show the result only if some files need formatting + cat "$tmp" +fi +rm "$tmp" diff --git a/utils/check-style/check-workflows b/utils/check-style/check-workflows index c0399829c28..6e9cb87ed36 100755 --- a/utils/check-style/check-workflows +++ b/utils/check-style/check-workflows @@ -1,6 +1,9 @@ #!/usr/bin/env bash +set -e + GIT_ROOT=$(git rev-parse --show-cdup) +GIT_ROOT=${GIT_ROOT:-.} act --list --directory="$GIT_ROOT" 1>/dev/null 2>&1 || act --list --directory="$GIT_ROOT" 2>&1 -actionlint +actionlint || : diff --git a/utils/github/backport.py b/utils/github/backport.py index 9227dbf4108..615c0d19ffa 100644 --- a/utils/github/backport.py +++ b/utils/github/backport.py @@ -17,7 +17,9 @@ import sys class Backport: def __init__(self, token, owner, name, team): - self._gh = RemoteRepo(token, owner=owner, name=name, team=team, max_page_size=30, min_page_size=7) + self._gh = RemoteRepo( + token, owner=owner, name=name, team=team, max_page_size=30, min_page_size=7 + ) self._token = token self.default_branch_name = self._gh.default_branch self.ssh_url = self._gh.ssh_url @@ -28,7 +30,7 @@ class Backport: def getBranchesWithRelease(self): branches = set() for pull_request in self._gh.find_pull_requests("release"): - branches.add(pull_request['headRefName']) + branches.add(pull_request["headRefName"]) return branches def execute(self, repo, upstream, until_commit, run_cherrypick): @@ -44,11 +46,11 @@ class Backport: branches.append(branch) if not branches: - logging.info('No release branches found!') + logging.info("No release branches found!") return for branch in branches: - logging.info('Found release branch: %s', branch[0]) + logging.info("Found release branch: %s", branch[0]) if not until_commit: until_commit = branches[0][1] @@ -56,73 +58,128 @@ class Backport: backport_map = {} - RE_MUST_BACKPORT = re.compile(r'^v(\d+\.\d+)-must-backport$') - RE_NO_BACKPORT = re.compile(r'^v(\d+\.\d+)-no-backport$') - RE_BACKPORTED = re.compile(r'^v(\d+\.\d+)-backported$') + RE_MUST_BACKPORT = re.compile(r"^v(\d+\.\d+)-must-backport$") + RE_NO_BACKPORT = re.compile(r"^v(\d+\.\d+)-no-backport$") + RE_BACKPORTED = re.compile(r"^v(\d+\.\d+)-backported$") # pull-requests are sorted by ancestry from the most recent. for pr in pull_requests: - while repo.comparator(branches[-1][1]) >= repo.comparator(pr['mergeCommit']['oid']): - logging.info("PR #{} is already inside {}. Dropping this branch for further PRs".format(pr['number'], branches[-1][0])) + while repo.comparator(branches[-1][1]) >= repo.comparator( + pr["mergeCommit"]["oid"] + ): + logging.info( + "PR #{} is already inside {}. Dropping this branch for further PRs".format( + pr["number"], branches[-1][0] + ) + ) branches.pop() - logging.info("Processing PR #{}".format(pr['number'])) + logging.info("Processing PR #{}".format(pr["number"])) assert len(branches) branch_set = set([branch[0] for branch in branches]) # First pass. 
Find all must-backports - for label in pr['labels']['nodes']: - if label['name'] == 'pr-must-backport': - backport_map[pr['number']] = branch_set.copy() + for label in pr["labels"]["nodes"]: + if label["name"] == "pr-must-backport": + backport_map[pr["number"]] = branch_set.copy() continue - matched = RE_MUST_BACKPORT.match(label['name']) + matched = RE_MUST_BACKPORT.match(label["name"]) if matched: - if pr['number'] not in backport_map: - backport_map[pr['number']] = set() - backport_map[pr['number']].add(matched.group(1)) + if pr["number"] not in backport_map: + backport_map[pr["number"]] = set() + backport_map[pr["number"]].add(matched.group(1)) # Second pass. Find all no-backports - for label in pr['labels']['nodes']: - if label['name'] == 'pr-no-backport' and pr['number'] in backport_map: - del backport_map[pr['number']] + for label in pr["labels"]["nodes"]: + if label["name"] == "pr-no-backport" and pr["number"] in backport_map: + del backport_map[pr["number"]] break - matched_no_backport = RE_NO_BACKPORT.match(label['name']) - matched_backported = RE_BACKPORTED.match(label['name']) - if matched_no_backport and pr['number'] in backport_map and matched_no_backport.group(1) in backport_map[pr['number']]: - backport_map[pr['number']].remove(matched_no_backport.group(1)) - logging.info('\tskipping %s because of forced no-backport', matched_no_backport.group(1)) - elif matched_backported and pr['number'] in backport_map and matched_backported.group(1) in backport_map[pr['number']]: - backport_map[pr['number']].remove(matched_backported.group(1)) - logging.info('\tskipping %s because it\'s already backported manually', matched_backported.group(1)) + matched_no_backport = RE_NO_BACKPORT.match(label["name"]) + matched_backported = RE_BACKPORTED.match(label["name"]) + if ( + matched_no_backport + and pr["number"] in backport_map + and matched_no_backport.group(1) in backport_map[pr["number"]] + ): + backport_map[pr["number"]].remove(matched_no_backport.group(1)) + logging.info( + "\tskipping %s because of forced no-backport", + matched_no_backport.group(1), + ) + elif ( + matched_backported + and pr["number"] in backport_map + and matched_backported.group(1) in backport_map[pr["number"]] + ): + backport_map[pr["number"]].remove(matched_backported.group(1)) + logging.info( + "\tskipping %s because it's already backported manually", + matched_backported.group(1), + ) for pr, branches in list(backport_map.items()): - logging.info('PR #%s needs to be backported to:', pr) + logging.info("PR #%s needs to be backported to:", pr) for branch in branches: - logging.info('\t%s, and the status is: %s', branch, run_cherrypick(self._token, pr, branch)) + logging.info( + "\t%s, and the status is: %s", + branch, + run_cherrypick(self._token, pr, branch), + ) # print API costs - logging.info('\nGitHub API total costs per query:') + logging.info("\nGitHub API total costs per query:") for name, value in list(self._gh.api_costs.items()): - logging.info('%s : %s', name, value) + logging.info("%s : %s", name, value) if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--token', type=str, required=True, help='token for Github access') - parser.add_argument('--repo', type=str, required=True, help='path to full repository', metavar='PATH') - parser.add_argument('--til', type=str, help='check PRs from HEAD til this commit', metavar='COMMIT') - parser.add_argument('--dry-run', action='store_true', help='do not create or merge any PRs', default=False) - parser.add_argument('--verbose', 
'-v', action='store_true', help='more verbose output', default=False) - parser.add_argument('--upstream', '-u', type=str, help='remote name of upstream in repository', default='origin') + parser.add_argument( + "--token", type=str, required=True, help="token for Github access" + ) + parser.add_argument( + "--repo", + type=str, + required=True, + help="path to full repository", + metavar="PATH", + ) + parser.add_argument( + "--til", type=str, help="check PRs from HEAD til this commit", metavar="COMMIT" + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="do not create or merge any PRs", + default=False, + ) + parser.add_argument( + "--verbose", + "-v", + action="store_true", + help="more verbose output", + default=False, + ) + parser.add_argument( + "--upstream", + "-u", + type=str, + help="remote name of upstream in repository", + default="origin", + ) args = parser.parse_args() if args.verbose: - logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.DEBUG) + logging.basicConfig( + format="%(message)s", stream=sys.stdout, level=logging.DEBUG + ) else: - logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO) + logging.basicConfig(format="%(message)s", stream=sys.stdout, level=logging.INFO) - cherrypick_run = lambda token, pr, branch: CherryPick(token, 'ClickHouse', 'ClickHouse', 'core', pr, branch).execute(args.repo, args.dry_run) - bp = Backport(args.token, 'ClickHouse', 'ClickHouse', 'core') + cherrypick_run = lambda token, pr, branch: CherryPick( + token, "ClickHouse", "ClickHouse", "core", pr, branch + ).execute(args.repo, args.dry_run) + bp = Backport(args.token, "ClickHouse", "ClickHouse", "core") bp.execute(args.repo, args.upstream, args.til, cherrypick_run) diff --git a/utils/github/cherrypick.py b/utils/github/cherrypick.py index 8bedf54fefa..c6469fa62a9 100644 --- a/utils/github/cherrypick.py +++ b/utils/github/cherrypick.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -''' +""" Backports changes from PR to release branch. Requires multiple separate runs as part of the implementation. @@ -12,7 +12,7 @@ First run should do the following: Second run checks PR from previous run to be merged or at least being mergeable. If it's not merged then try to merge it. Third run creates PR from backport branch (with merged previous PR) to release branch. -''' +""" try: from clickhouse.utils.github.query import Query as RemoteRepo @@ -29,13 +29,13 @@ import sys class CherryPick: class Status(Enum): - DISCARDED = 'discarded' - NOT_INITIATED = 'not started' - FIRST_MERGEABLE = 'waiting for 1st stage' - FIRST_CONFLICTS = 'conflicts on 1st stage' - SECOND_MERGEABLE = 'waiting for 2nd stage' - SECOND_CONFLICTS = 'conflicts on 2nd stage' - MERGED = 'backported' + DISCARDED = "discarded" + NOT_INITIATED = "not started" + FIRST_MERGEABLE = "waiting for 1st stage" + FIRST_CONFLICTS = "conflicts on 1st stage" + SECOND_MERGEABLE = "waiting for 2nd stage" + SECOND_CONFLICTS = "conflicts on 2nd stage" + MERGED = "backported" def _run(self, args): out = subprocess.check_output(args).rstrip() @@ -50,51 +50,90 @@ class CherryPick: # TODO: check if pull-request is merged. 
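
For context on how `cherrypick.py` shells out to git after this reformatting: the `_run` helper above wraps `subprocess.check_output`, and every git invocation is built by concatenating a fixed `git_prefix` list with per-call arguments. The snippet below is a minimal standalone sketch of that pattern, not part of the diff; the repository path and the example `checkout` call are placeholders.

```python
import subprocess
from typing import List


def run(args: List[str]) -> bytes:
    # Mirrors the _run() helper: run a command and return its stripped stdout.
    return subprocess.check_output(args).rstrip()


# Fixed prefix so every git call targets the right checkout and commits as the
# robot user (same idea as git_prefix in the methods below).
# "/path/to/ClickHouse" is a placeholder; the real script receives it via --repo.
git_prefix = [
    "git",
    "-C", "/path/to/ClickHouse",
    "-c", "user.email=robot-clickhouse@clickhouse.com",
    "-c", "user.name=robot-clickhouse",
]

# Hypothetical usage: force-checkout a release branch before creating backport branches.
run(git_prefix + ["checkout", "-f", "22.3"])
```
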
- self.merge_commit_oid = self._pr['mergeCommit']['oid'] + self.merge_commit_oid = self._pr["mergeCommit"]["oid"] self.target_branch = target_branch - self.backport_branch = 'backport/{branch}/{pr}'.format(branch=target_branch, pr=pr_number) - self.cherrypick_branch = 'cherrypick/{branch}/{oid}'.format(branch=target_branch, oid=self.merge_commit_oid) + self.backport_branch = "backport/{branch}/{pr}".format( + branch=target_branch, pr=pr_number + ) + self.cherrypick_branch = "cherrypick/{branch}/{oid}".format( + branch=target_branch, oid=self.merge_commit_oid + ) def getCherryPickPullRequest(self): - return self._gh.find_pull_request(base=self.backport_branch, head=self.cherrypick_branch) + return self._gh.find_pull_request( + base=self.backport_branch, head=self.cherrypick_branch + ) def createCherryPickPullRequest(self, repo_path): DESCRIPTION = ( - 'This pull-request is a first step of an automated backporting.\n' - 'It contains changes like after calling a local command `git cherry-pick`.\n' - 'If you intend to continue backporting this changes, then resolve all conflicts if any.\n' - 'Otherwise, if you do not want to backport them, then just close this pull-request.\n' - '\n' - 'The check results does not matter at this step - you can safely ignore them.\n' - 'Also this pull-request will be merged automatically as it reaches the mergeable state, but you always can merge it manually.\n' + "This pull-request is a first step of an automated backporting.\n" + "It contains changes like after calling a local command `git cherry-pick`.\n" + "If you intend to continue backporting this changes, then resolve all conflicts if any.\n" + "Otherwise, if you do not want to backport them, then just close this pull-request.\n" + "\n" + "The check results does not matter at this step - you can safely ignore them.\n" + "Also this pull-request will be merged automatically as it reaches the mergeable state, but you always can merge it manually.\n" ) # FIXME: replace with something better than os.system() - git_prefix = ['git', '-C', repo_path, '-c', 'user.email=robot-clickhouse@yandex-team.ru', '-c', 'user.name=robot-clickhouse'] - base_commit_oid = self._pr['mergeCommit']['parents']['nodes'][0]['oid'] + git_prefix = [ + "git", + "-C", + repo_path, + "-c", + "user.email=robot-clickhouse@yandex-team.ru", + "-c", + "user.name=robot-clickhouse", + ] + base_commit_oid = self._pr["mergeCommit"]["parents"]["nodes"][0]["oid"] # Create separate branch for backporting, and make it look like real cherry-pick. - self._run(git_prefix + ['checkout', '-f', self.target_branch]) - self._run(git_prefix + ['checkout', '-B', self.backport_branch]) - self._run(git_prefix + ['merge', '-s', 'ours', '--no-edit', base_commit_oid]) + self._run(git_prefix + ["checkout", "-f", self.target_branch]) + self._run(git_prefix + ["checkout", "-B", self.backport_branch]) + self._run(git_prefix + ["merge", "-s", "ours", "--no-edit", base_commit_oid]) # Create secondary branch to allow pull request with cherry-picked commit. 
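
To make the first-stage mechanics easier to follow, here is a hedged sketch of the branch layout that `createCherryPickPullRequest` builds; the git steps mirror the ones in the surrounding hunk, while the branch names, PR number, and commit OIDs are placeholders.

```python
# Sketch only (not part of the diff): the two branches behind the first backport stage.
#
#   backport/<release>/<pr>    - starts at the release branch; `merge -s ours` records
#                                the PR's parent commit as an ancestor without changing
#                                the tree.
#   cherrypick/<release>/<oid> - points at the PR's merge commit.
#
# A pull request from the cherrypick branch into the backport branch then diffs as
# exactly the original change, "like after a local git cherry-pick".
target_branch = "22.3"          # placeholder release branch
pr_number = 12345               # placeholder PR number
merge_commit_oid = "deadbeef"   # placeholder: the PR's merge commit
base_commit_oid = "cafebabe"    # placeholder: first parent of the merge commit

backport_branch = "backport/{}/{}".format(target_branch, pr_number)
cherrypick_branch = "cherrypick/{}/{}".format(target_branch, merge_commit_oid)

steps = [
    ["checkout", "-f", target_branch],
    ["checkout", "-B", backport_branch],
    ["merge", "-s", "ours", "--no-edit", base_commit_oid],
    ["branch", "-f", cherrypick_branch, merge_commit_oid],
    ["push", "-f", "origin", "{0}:{0}".format(backport_branch)],
    ["push", "-f", "origin", "{0}:{0}".format(cherrypick_branch)],
]
for step in steps:
    # The real script runs these via _run(git_prefix + step); here we only print them.
    print("git " + " ".join(step))
```
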
- self._run(git_prefix + ['branch', '-f', self.cherrypick_branch, self.merge_commit_oid]) + self._run( + git_prefix + ["branch", "-f", self.cherrypick_branch, self.merge_commit_oid] + ) - self._run(git_prefix + ['push', '-f', 'origin', '{branch}:{branch}'.format(branch=self.backport_branch)]) - self._run(git_prefix + ['push', '-f', 'origin', '{branch}:{branch}'.format(branch=self.cherrypick_branch)]) + self._run( + git_prefix + + [ + "push", + "-f", + "origin", + "{branch}:{branch}".format(branch=self.backport_branch), + ] + ) + self._run( + git_prefix + + [ + "push", + "-f", + "origin", + "{branch}:{branch}".format(branch=self.cherrypick_branch), + ] + ) # Create pull-request like a local cherry-pick - pr = self._gh.create_pull_request(source=self.cherrypick_branch, target=self.backport_branch, - title='Cherry pick #{number} to {target}: {title}'.format( - number=self._pr['number'], target=self.target_branch, - title=self._pr['title'].replace('"', '\\"')), - description='Original pull-request #{}\n\n{}'.format(self._pr['number'], DESCRIPTION)) + pr = self._gh.create_pull_request( + source=self.cherrypick_branch, + target=self.backport_branch, + title="Cherry pick #{number} to {target}: {title}".format( + number=self._pr["number"], + target=self.target_branch, + title=self._pr["title"].replace('"', '\\"'), + ), + description="Original pull-request #{}\n\n{}".format( + self._pr["number"], DESCRIPTION + ), + ) # FIXME: use `team` to leave a single eligible assignee. - self._gh.add_assignee(pr, self._pr['author']) - self._gh.add_assignee(pr, self._pr['mergedBy']) + self._gh.add_assignee(pr, self._pr["author"]) + self._gh.add_assignee(pr, self._pr["mergedBy"]) self._gh.set_label(pr, "do not test") self._gh.set_label(pr, "pr-cherrypick") @@ -102,36 +141,76 @@ class CherryPick: return pr def mergeCherryPickPullRequest(self, cherrypick_pr): - return self._gh.merge_pull_request(cherrypick_pr['id']) + return self._gh.merge_pull_request(cherrypick_pr["id"]) def getBackportPullRequest(self): - return self._gh.find_pull_request(base=self.target_branch, head=self.backport_branch) + return self._gh.find_pull_request( + base=self.target_branch, head=self.backport_branch + ) def createBackportPullRequest(self, cherrypick_pr, repo_path): DESCRIPTION = ( - 'This pull-request is a last step of an automated backporting.\n' - 'Treat it as a standard pull-request: look at the checks and resolve conflicts.\n' - 'Merge it only if you intend to backport changes to the target branch, otherwise just close it.\n' + "This pull-request is a last step of an automated backporting.\n" + "Treat it as a standard pull-request: look at the checks and resolve conflicts.\n" + "Merge it only if you intend to backport changes to the target branch, otherwise just close it.\n" ) - git_prefix = ['git', '-C', repo_path, '-c', 'user.email=robot-clickhouse@clickhouse.com', '-c', 'user.name=robot-clickhouse'] + git_prefix = [ + "git", + "-C", + repo_path, + "-c", + "user.email=robot-clickhouse@clickhouse.com", + "-c", + "user.name=robot-clickhouse", + ] - pr_title = 'Backport #{number} to {target}: {title}'.format( - number=self._pr['number'], target=self.target_branch, - title=self._pr['title'].replace('"', '\\"')) + pr_title = "Backport #{number} to {target}: {title}".format( + number=self._pr["number"], + target=self.target_branch, + title=self._pr["title"].replace('"', '\\"'), + ) - self._run(git_prefix + ['checkout', '-f', self.backport_branch]) - self._run(git_prefix + ['pull', '--ff-only', 'origin', self.backport_branch]) - 
self._run(git_prefix + ['reset', '--soft', self._run(git_prefix + ['merge-base', 'origin/' + self.target_branch, self.backport_branch])]) - self._run(git_prefix + ['commit', '-a', '--allow-empty', '-m', pr_title]) - self._run(git_prefix + ['push', '-f', 'origin', '{branch}:{branch}'.format(branch=self.backport_branch)]) + self._run(git_prefix + ["checkout", "-f", self.backport_branch]) + self._run(git_prefix + ["pull", "--ff-only", "origin", self.backport_branch]) + self._run( + git_prefix + + [ + "reset", + "--soft", + self._run( + git_prefix + + [ + "merge-base", + "origin/" + self.target_branch, + self.backport_branch, + ] + ), + ] + ) + self._run(git_prefix + ["commit", "-a", "--allow-empty", "-m", pr_title]) + self._run( + git_prefix + + [ + "push", + "-f", + "origin", + "{branch}:{branch}".format(branch=self.backport_branch), + ] + ) - pr = self._gh.create_pull_request(source=self.backport_branch, target=self.target_branch, title=pr_title, - description='Original pull-request #{}\nCherry-pick pull-request #{}\n\n{}'.format(self._pr['number'], cherrypick_pr['number'], DESCRIPTION)) + pr = self._gh.create_pull_request( + source=self.backport_branch, + target=self.target_branch, + title=pr_title, + description="Original pull-request #{}\nCherry-pick pull-request #{}\n\n{}".format( + self._pr["number"], cherrypick_pr["number"], DESCRIPTION + ), + ) # FIXME: use `team` to leave a single eligible assignee. - self._gh.add_assignee(pr, self._pr['author']) - self._gh.add_assignee(pr, self._pr['mergedBy']) + self._gh.add_assignee(pr, self._pr["author"]) + self._gh.add_assignee(pr, self._pr["mergedBy"]) self._gh.set_label(pr, "pr-backport") @@ -142,23 +221,43 @@ class CherryPick: if not pr1: if not dry_run: pr1 = self.createCherryPickPullRequest(repo_path) - logging.debug('Created PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url']) + logging.debug( + "Created PR with cherry-pick of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr1["url"], + ) else: return CherryPick.Status.NOT_INITIATED else: - logging.debug('Found PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url']) + logging.debug( + "Found PR with cherry-pick of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr1["url"], + ) - if not pr1['merged'] and pr1['mergeable'] == 'MERGEABLE' and not pr1['closed']: + if not pr1["merged"] and pr1["mergeable"] == "MERGEABLE" and not pr1["closed"]: if not dry_run: pr1 = self.mergeCherryPickPullRequest(pr1) - logging.debug('Merged PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url']) + logging.debug( + "Merged PR with cherry-pick of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr1["url"], + ) - if not pr1['merged']: - logging.debug('Waiting for PR with cherry-pick of %s to %s: %s', self._pr['number'], self.target_branch, pr1['url']) + if not pr1["merged"]: + logging.debug( + "Waiting for PR with cherry-pick of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr1["url"], + ) - if pr1['closed']: + if pr1["closed"]: return CherryPick.Status.DISCARDED - elif pr1['mergeable'] == 'CONFLICTING': + elif pr1["mergeable"] == "CONFLICTING": return CherryPick.Status.FIRST_CONFLICTS else: return CherryPick.Status.FIRST_MERGEABLE @@ -167,31 +266,58 @@ class CherryPick: if not pr2: if not dry_run: pr2 = self.createBackportPullRequest(pr1, repo_path) - logging.debug('Created PR with backport of %s to %s: %s', self._pr['number'], self.target_branch, 
pr2['url']) + logging.debug( + "Created PR with backport of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr2["url"], + ) else: return CherryPick.Status.FIRST_MERGEABLE else: - logging.debug('Found PR with backport of %s to %s: %s', self._pr['number'], self.target_branch, pr2['url']) + logging.debug( + "Found PR with backport of %s to %s: %s", + self._pr["number"], + self.target_branch, + pr2["url"], + ) - if pr2['merged']: + if pr2["merged"]: return CherryPick.Status.MERGED - elif pr2['closed']: + elif pr2["closed"]: return CherryPick.Status.DISCARDED - elif pr2['mergeable'] == 'CONFLICTING': + elif pr2["mergeable"] == "CONFLICTING": return CherryPick.Status.SECOND_CONFLICTS else: return CherryPick.Status.SECOND_MERGEABLE if __name__ == "__main__": - logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.DEBUG) + logging.basicConfig(format="%(message)s", stream=sys.stdout, level=logging.DEBUG) parser = argparse.ArgumentParser() - parser.add_argument('--token', '-t', type=str, required=True, help='token for Github access') - parser.add_argument('--pr', type=str, required=True, help='PR# to cherry-pick') - parser.add_argument('--branch', '-b', type=str, required=True, help='target branch name for cherry-pick') - parser.add_argument('--repo', '-r', type=str, required=True, help='path to full repository', metavar='PATH') + parser.add_argument( + "--token", "-t", type=str, required=True, help="token for Github access" + ) + parser.add_argument("--pr", type=str, required=True, help="PR# to cherry-pick") + parser.add_argument( + "--branch", + "-b", + type=str, + required=True, + help="target branch name for cherry-pick", + ) + parser.add_argument( + "--repo", + "-r", + type=str, + required=True, + help="path to full repository", + metavar="PATH", + ) args = parser.parse_args() - cp = CherryPick(args.token, 'ClickHouse', 'ClickHouse', 'core', args.pr, args.branch) + cp = CherryPick( + args.token, "ClickHouse", "ClickHouse", "core", args.pr, args.branch + ) cp.execute(args.repo) diff --git a/utils/github/local.py b/utils/github/local.py index 2ad8d4b8b71..571c9102ba0 100644 --- a/utils/github/local.py +++ b/utils/github/local.py @@ -20,13 +20,14 @@ class RepositoryBase: return -1 else: return 1 + self.comparator = functools.cmp_to_key(cmp) def get_head_commit(self): return self._repo.commit(self._default) def iterate(self, begin, end): - rev_range = '{}...{}'.format(begin, end) + rev_range = "{}...{}".format(begin, end) for commit in self._repo.iter_commits(rev_range, first_parent=True): yield commit @@ -39,27 +40,35 @@ class Repository(RepositoryBase): self._default = self._remote.refs[default_branch_name] def get_release_branches(self): - ''' + """ Returns sorted list of tuples: * remote branch (git.refs.remote.RemoteReference), * base commit (git.Commit), * head (git.Commit)). List is sorted by commits in ascending order. - ''' + """ release_branches = [] - RE_RELEASE_BRANCH_REF = re.compile(r'^refs/remotes/.+/\d+\.\d+$') + RE_RELEASE_BRANCH_REF = re.compile(r"^refs/remotes/.+/\d+\.\d+$") - for branch in [r for r in self._remote.refs if RE_RELEASE_BRANCH_REF.match(r.path)]: + for branch in [ + r for r in self._remote.refs if RE_RELEASE_BRANCH_REF.match(r.path) + ]: base = self._repo.merge_base(self._default, self._repo.commit(branch)) if not base: - logging.info('Branch %s is not based on branch %s. Ignoring.', branch.path, self._default) + logging.info( + "Branch %s is not based on branch %s. 
Ignoring.", + branch.path, + self._default, + ) elif len(base) > 1: - logging.info('Branch %s has more than one base commit. Ignoring.', branch.path) + logging.info( + "Branch %s has more than one base commit. Ignoring.", branch.path + ) else: release_branches.append((os.path.basename(branch.name), base[0])) - return sorted(release_branches, key=lambda x : self.comparator(x[1])) + return sorted(release_branches, key=lambda x: self.comparator(x[1])) class BareRepository(RepositoryBase): @@ -68,24 +77,32 @@ class BareRepository(RepositoryBase): self._default = self._repo.branches[default_branch_name] def get_release_branches(self): - ''' + """ Returns sorted list of tuples: * branch (git.refs.head?), * base commit (git.Commit), * head (git.Commit)). List is sorted by commits in ascending order. - ''' + """ release_branches = [] - RE_RELEASE_BRANCH_REF = re.compile(r'^refs/heads/\d+\.\d+$') + RE_RELEASE_BRANCH_REF = re.compile(r"^refs/heads/\d+\.\d+$") - for branch in [r for r in self._repo.branches if RE_RELEASE_BRANCH_REF.match(r.path)]: + for branch in [ + r for r in self._repo.branches if RE_RELEASE_BRANCH_REF.match(r.path) + ]: base = self._repo.merge_base(self._default, self._repo.commit(branch)) if not base: - logging.info('Branch %s is not based on branch %s. Ignoring.', branch.path, self._default) + logging.info( + "Branch %s is not based on branch %s. Ignoring.", + branch.path, + self._default, + ) elif len(base) > 1: - logging.info('Branch %s has more than one base commit. Ignoring.', branch.path) + logging.info( + "Branch %s has more than one base commit. Ignoring.", branch.path + ) else: release_branches.append((os.path.basename(branch.name), base[0])) - return sorted(release_branches, key=lambda x : self.comparator(x[1])) + return sorted(release_branches, key=lambda x: self.comparator(x[1])) diff --git a/utils/github/parser.py b/utils/github/parser.py index 570410ba23d..d8348e6d964 100644 --- a/utils/github/parser.py +++ b/utils/github/parser.py @@ -1,19 +1,20 @@ # -*- coding: utf-8 -*- + class Description: - '''Parsed description representation - ''' + """Parsed description representation""" + MAP_CATEGORY_TO_LABEL = { - 'New Feature': 'pr-feature', - 'Bug Fix': 'pr-bugfix', - 'Improvement': 'pr-improvement', - 'Performance Improvement': 'pr-performance', + "New Feature": "pr-feature", + "Bug Fix": "pr-bugfix", + "Improvement": "pr-improvement", + "Performance Improvement": "pr-performance", # 'Backward Incompatible Change': doesn't match anything - 'Build/Testing/Packaging Improvement': 'pr-build', - 'Non-significant (changelog entry is not needed)': 'pr-non-significant', - 'Non-significant (changelog entry is not required)': 'pr-non-significant', - 'Non-significant': 'pr-non-significant', - 'Documentation (changelog entry is not required)': 'pr-documentation', + "Build/Testing/Packaging Improvement": "pr-build", + "Non-significant (changelog entry is not needed)": "pr-non-significant", + "Non-significant (changelog entry is not required)": "pr-non-significant", + "Non-significant": "pr-non-significant", + "Documentation (changelog entry is not required)": "pr-documentation", # 'Other': doesn't match anything } @@ -21,7 +22,7 @@ class Description: self.label_name = str() self.legal = False - self._parse(pull_request['bodyText']) + self._parse(pull_request["bodyText"]) def _parse(self, text): lines = text.splitlines() @@ -38,14 +39,17 @@ class Description: category = stripped next_category = False - if stripped == 'I hereby agree to the terms of the CLA available at: 
https://yandex.ru/legal/cla/?lang=en': + if ( + stripped + == "I hereby agree to the terms of the CLA available at: https://yandex.ru/legal/cla/?lang=en" + ): self.legal = True category_headers = ( - 'Category (leave one):', - 'Changelog category (leave one):', - 'Changelog category:', - 'Category:' + "Category (leave one):", + "Changelog category (leave one):", + "Changelog category:", + "Category:", ) if stripped in category_headers: @@ -55,6 +59,6 @@ class Description: self.label_name = Description.MAP_CATEGORY_TO_LABEL[category] else: if not category: - print('Cannot find category in pr description') + print("Cannot find category in pr description") else: - print(('Unknown category: ' + category)) + print(("Unknown category: " + category)) diff --git a/utils/github/query.py b/utils/github/query.py index 39b1d0ce003..7afbc57781c 100644 --- a/utils/github/query.py +++ b/utils/github/query.py @@ -4,11 +4,11 @@ import requests class Query: - ''' + """ Implements queries to the Github API using GraphQL - ''' + """ - _PULL_REQUEST = ''' + _PULL_REQUEST = """ author {{ ... on User {{ id @@ -46,7 +46,7 @@ class Query: number title url - ''' + """ def __init__(self, token, owner, name, team, max_page_size=100, min_page_size=10): self._PULL_REQUEST = Query._PULL_REQUEST.format(min_page_size=min_page_size) @@ -62,14 +62,14 @@ class Query: self.api_costs = {} repo = self.get_repository() - self._id = repo['id'] - self.ssh_url = repo['sshUrl'] - self.default_branch = repo['defaultBranchRef']['name'] + self._id = repo["id"] + self.ssh_url = repo["sshUrl"] + self.default_branch = repo["defaultBranchRef"]["name"] self.members = set(self.get_members()) def get_repository(self): - _QUERY = ''' + _QUERY = """ repository(owner: "{owner}" name: "{name}") {{ defaultBranchRef {{ name @@ -77,19 +77,19 @@ class Query: id sshUrl }} - ''' + """ query = _QUERY.format(owner=self._owner, name=self._name) - return self._run(query)['repository'] + return self._run(query)["repository"] def get_members(self): - '''Get all team members for organization + """Get all team members for organization Returns: members: a map of members' logins to ids - ''' + """ - _QUERY = ''' + _QUERY = """ organization(login: "{organization}") {{ team(slug: "{team}") {{ members(first: {max_page_size} {next}) {{ @@ -104,43 +104,54 @@ class Query: }} }} }} - ''' + """ members = {} not_end = True - query = _QUERY.format(organization=self._owner, team=self._team, - max_page_size=self._max_page_size, - next='') + query = _QUERY.format( + organization=self._owner, + team=self._team, + max_page_size=self._max_page_size, + next="", + ) while not_end: - result = self._run(query)['organization']['team'] + result = self._run(query)["organization"]["team"] if result is None: break - result = result['members'] - not_end = result['pageInfo']['hasNextPage'] - query = _QUERY.format(organization=self._owner, team=self._team, - max_page_size=self._max_page_size, - next='after: "{}"'.format(result["pageInfo"]["endCursor"])) + result = result["members"] + not_end = result["pageInfo"]["hasNextPage"] + query = _QUERY.format( + organization=self._owner, + team=self._team, + max_page_size=self._max_page_size, + next='after: "{}"'.format(result["pageInfo"]["endCursor"]), + ) - members += dict([(node['login'], node['id']) for node in result['nodes']]) + members += dict([(node["login"], node["id"]) for node in result["nodes"]]) return members def get_pull_request(self, number): - _QUERY = ''' + _QUERY = """ repository(owner: "{owner}" name: "{name}") {{ 
pullRequest(number: {number}) {{ {pull_request_data} }} }} - ''' + """ - query = _QUERY.format(owner=self._owner, name=self._name, number=number, - pull_request_data=self._PULL_REQUEST, min_page_size=self._min_page_size) - return self._run(query)['repository']['pullRequest'] + query = _QUERY.format( + owner=self._owner, + name=self._name, + number=number, + pull_request_data=self._PULL_REQUEST, + min_page_size=self._min_page_size, + ) + return self._run(query)["repository"]["pullRequest"] def find_pull_request(self, base, head): - _QUERY = ''' + _QUERY = """ repository(owner: "{owner}" name: "{name}") {{ pullRequests(first: {min_page_size} baseRefName: "{base}" headRefName: "{head}") {{ nodes {{ @@ -149,21 +160,27 @@ class Query: totalCount }} }} - ''' + """ - query = _QUERY.format(owner=self._owner, name=self._name, base=base, head=head, - pull_request_data=self._PULL_REQUEST, min_page_size=self._min_page_size) - result = self._run(query)['repository']['pullRequests'] - if result['totalCount'] > 0: - return result['nodes'][0] + query = _QUERY.format( + owner=self._owner, + name=self._name, + base=base, + head=head, + pull_request_data=self._PULL_REQUEST, + min_page_size=self._min_page_size, + ) + result = self._run(query)["repository"]["pullRequests"] + if result["totalCount"] > 0: + return result["nodes"][0] else: return {} def find_pull_requests(self, label_name): - ''' + """ Get all pull-requests filtered by label name - ''' - _QUERY = ''' + """ + _QUERY = """ repository(owner: "{owner}" name: "{name}") {{ pullRequests(first: {min_page_size} labels: "{label_name}" states: OPEN) {{ nodes {{ @@ -171,18 +188,23 @@ class Query: }} }} }} - ''' + """ - query = _QUERY.format(owner=self._owner, name=self._name, label_name=label_name, - pull_request_data=self._PULL_REQUEST, min_page_size=self._min_page_size) - return self._run(query)['repository']['pullRequests']['nodes'] + query = _QUERY.format( + owner=self._owner, + name=self._name, + label_name=label_name, + pull_request_data=self._PULL_REQUEST, + min_page_size=self._min_page_size, + ) + return self._run(query)["repository"]["pullRequests"]["nodes"] def get_pull_requests(self, before_commit): - ''' + """ Get all merged pull-requests from the HEAD of default branch to the last commit (excluding) - ''' + """ - _QUERY = ''' + _QUERY = """ repository(owner: "{owner}" name: "{name}") {{ defaultBranchRef {{ target {{ @@ -220,44 +242,60 @@ class Query: }} }} }} - ''' + """ pull_requests = [] not_end = True - query = _QUERY.format(owner=self._owner, name=self._name, - max_page_size=self._max_page_size, - min_page_size=self._min_page_size, - pull_request_data=self._PULL_REQUEST, - next='') + query = _QUERY.format( + owner=self._owner, + name=self._name, + max_page_size=self._max_page_size, + min_page_size=self._min_page_size, + pull_request_data=self._PULL_REQUEST, + next="", + ) while not_end: - result = self._run(query)['repository']['defaultBranchRef']['target']['history'] - not_end = result['pageInfo']['hasNextPage'] - query = _QUERY.format(owner=self._owner, name=self._name, - max_page_size=self._max_page_size, - min_page_size=self._min_page_size, - pull_request_data=self._PULL_REQUEST, - next='after: "{}"'.format(result["pageInfo"]["endCursor"])) + result = self._run(query)["repository"]["defaultBranchRef"]["target"][ + "history" + ] + not_end = result["pageInfo"]["hasNextPage"] + query = _QUERY.format( + owner=self._owner, + name=self._name, + max_page_size=self._max_page_size, + min_page_size=self._min_page_size, + 
pull_request_data=self._PULL_REQUEST, + next='after: "{}"'.format(result["pageInfo"]["endCursor"]), + ) - for commit in result['nodes']: + for commit in result["nodes"]: # FIXME: maybe include `before_commit`? - if str(commit['oid']) == str(before_commit): + if str(commit["oid"]) == str(before_commit): not_end = False break # TODO: fetch all pull-requests that were merged in a single commit. - assert commit['associatedPullRequests']['totalCount'] <= self._min_page_size + assert ( + commit["associatedPullRequests"]["totalCount"] + <= self._min_page_size + ) - for pull_request in commit['associatedPullRequests']['nodes']: - if(pull_request['baseRepository']['nameWithOwner'] == '{}/{}'.format(self._owner, self._name) and - pull_request['baseRefName'] == self.default_branch and - pull_request['mergeCommit']['oid'] == commit['oid']): + for pull_request in commit["associatedPullRequests"]["nodes"]: + if ( + pull_request["baseRepository"]["nameWithOwner"] + == "{}/{}".format(self._owner, self._name) + and pull_request["baseRefName"] == self.default_branch + and pull_request["mergeCommit"]["oid"] == commit["oid"] + ): pull_requests.append(pull_request) return pull_requests - def create_pull_request(self, source, target, title, description="", draft=False, can_modify=True): - _QUERY = ''' + def create_pull_request( + self, source, target, title, description="", draft=False, can_modify=True + ): + _QUERY = """ createPullRequest(input: {{ baseRefName: "{target}", headRefName: "{source}", @@ -271,15 +309,22 @@ class Query: {pull_request_data} }} }} - ''' + """ - query = _QUERY.format(target=target, source=source, id=self._id, title=title, body=description, - draft="true" if draft else "false", modify="true" if can_modify else "false", - pull_request_data=self._PULL_REQUEST) - return self._run(query, is_mutation=True)['createPullRequest']['pullRequest'] + query = _QUERY.format( + target=target, + source=source, + id=self._id, + title=title, + body=description, + draft="true" if draft else "false", + modify="true" if can_modify else "false", + pull_request_data=self._PULL_REQUEST, + ) + return self._run(query, is_mutation=True)["createPullRequest"]["pullRequest"] def merge_pull_request(self, id): - _QUERY = ''' + _QUERY = """ mergePullRequest(input: {{ pullRequestId: "{id}" }}) {{ @@ -287,35 +332,35 @@ class Query: {pull_request_data} }} }} - ''' + """ query = _QUERY.format(id=id, pull_request_data=self._PULL_REQUEST) - return self._run(query, is_mutation=True)['mergePullRequest']['pullRequest'] + return self._run(query, is_mutation=True)["mergePullRequest"]["pullRequest"] # FIXME: figure out how to add more assignees at once def add_assignee(self, pr, assignee): - _QUERY = ''' + _QUERY = """ addAssigneesToAssignable(input: {{ assignableId: "{id1}", assigneeIds: "{id2}" }}) {{ clientMutationId }} - ''' + """ - query = _QUERY.format(id1=pr['id'], id2=assignee['id']) + query = _QUERY.format(id1=pr["id"], id2=assignee["id"]) self._run(query, is_mutation=True) def set_label(self, pull_request, label_name): - ''' + """ Set label by name to the pull request Args: pull_request: JSON object returned by `get_pull_requests()` label_name (string): label name - ''' + """ - _GET_LABEL = ''' + _GET_LABEL = """ repository(owner: "{owner}" name: "{name}") {{ labels(first: {max_page_size} {next} query: "{label_name}") {{ pageInfo {{ @@ -329,36 +374,44 @@ class Query: }} }} }} - ''' + """ - _SET_LABEL = ''' + _SET_LABEL = """ addLabelsToLabelable(input: {{ labelableId: "{pr_id}", labelIds: "{label_id}" }}) {{ 
clientMutationId }} - ''' + """ labels = [] not_end = True - query = _GET_LABEL.format(owner=self._owner, name=self._name, label_name=label_name, - max_page_size=self._max_page_size, - next='') + query = _GET_LABEL.format( + owner=self._owner, + name=self._name, + label_name=label_name, + max_page_size=self._max_page_size, + next="", + ) while not_end: - result = self._run(query)['repository']['labels'] - not_end = result['pageInfo']['hasNextPage'] - query = _GET_LABEL.format(owner=self._owner, name=self._name, label_name=label_name, - max_page_size=self._max_page_size, - next='after: "{}"'.format(result["pageInfo"]["endCursor"])) + result = self._run(query)["repository"]["labels"] + not_end = result["pageInfo"]["hasNextPage"] + query = _GET_LABEL.format( + owner=self._owner, + name=self._name, + label_name=label_name, + max_page_size=self._max_page_size, + next='after: "{}"'.format(result["pageInfo"]["endCursor"]), + ) - labels += [label for label in result['nodes']] + labels += [label for label in result["nodes"]] if not labels: return - query = _SET_LABEL.format(pr_id=pull_request['id'], label_id=labels[0]['id']) + query = _SET_LABEL.format(pr_id=pull_request["id"], label_id=labels[0]["id"]) self._run(query, is_mutation=True) def _run(self, query, is_mutation=False): @@ -380,19 +433,21 @@ class Query: status_forcelist=status_forcelist, ) adapter = HTTPAdapter(max_retries=retry) - session.mount('http://', adapter) - session.mount('https://', adapter) + session.mount("http://", adapter) + session.mount("https://", adapter) return session - headers = {'Authorization': 'bearer {}'.format(self._token)} + headers = {"Authorization": "bearer {}".format(self._token)} if is_mutation: - query = ''' + query = """ mutation {{ {query} }} - '''.format(query=query) + """.format( + query=query + ) else: - query = ''' + query = """ query {{ {query} rateLimit {{ @@ -400,23 +455,38 @@ class Query: remaining }} }} - '''.format(query=query) + """.format( + query=query + ) while True: - request = requests_retry_session().post('https://api.github.com/graphql', json={'query': query}, headers=headers) + request = requests_retry_session().post( + "https://api.github.com/graphql", json={"query": query}, headers=headers + ) if request.status_code == 200: result = request.json() - if 'errors' in result: - raise Exception('Errors occurred: {}\nOriginal query: {}'.format(result["errors"], query)) + if "errors" in result: + raise Exception( + "Errors occurred: {}\nOriginal query: {}".format( + result["errors"], query + ) + ) if not is_mutation: import inspect + caller = inspect.getouterframes(inspect.currentframe(), 2)[1][3] if caller not in list(self.api_costs.keys()): self.api_costs[caller] = 0 - self.api_costs[caller] += result['data']['rateLimit']['cost'] + self.api_costs[caller] += result["data"]["rateLimit"]["cost"] - return result['data'] + return result["data"] else: import json - raise Exception('Query failed with code {code}:\n{json}'.format(code=request.status_code, json=json.dumps(request.json(), indent=4))) + + raise Exception( + "Query failed with code {code}:\n{json}".format( + code=request.status_code, + json=json.dumps(request.json(), indent=4), + ) + ) diff --git a/utils/grpc-client/clickhouse-grpc-client.py b/utils/grpc-client/clickhouse-grpc-client.py index dfaa7ed4e01..0caa9e6fdca 100755 --- a/utils/grpc-client/clickhouse-grpc-client.py +++ b/utils/grpc-client/clickhouse-grpc-client.py @@ -14,14 +14,14 @@ import grpc # pip3 install grpcio import grpc_tools # pip3 install grpcio-tools import 
argparse, cmd, os, signal, subprocess, sys, threading, time, uuid -DEFAULT_HOST = 'localhost' +DEFAULT_HOST = "localhost" DEFAULT_PORT = 9100 -DEFAULT_USER_NAME = 'default' -DEFAULT_OUTPUT_FORMAT_FOR_INTERACTIVE_MODE = 'PrettyCompact' -HISTORY_FILENAME = '~/.clickhouse_grpc_history' +DEFAULT_USER_NAME = "default" +DEFAULT_OUTPUT_FORMAT_FOR_INTERACTIVE_MODE = "PrettyCompact" +HISTORY_FILENAME = "~/.clickhouse_grpc_history" HISTORY_SIZE = 1000 STDIN_BUFFER_SIZE = 1048576 -DEFAULT_ENCODING = 'utf-8' +DEFAULT_ENCODING = "utf-8" class ClickHouseGRPCError(Exception): @@ -51,10 +51,20 @@ def error_print(*args, **kwargs): class ClickHouseGRPCClient(cmd.Cmd): - prompt="grpc :) " + prompt = "grpc :) " - def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, user_name=DEFAULT_USER_NAME, password='', - database='', output_format='', settings='', verbatim=False, show_debug_info=False): + def __init__( + self, + host=DEFAULT_HOST, + port=DEFAULT_PORT, + user_name=DEFAULT_USER_NAME, + password="", + database="", + output_format="", + settings="", + verbatim=False, + show_debug_info=False, + ): super(ClickHouseGRPCClient, self).__init__(completekey=None) self.host = host self.port = port @@ -80,11 +90,20 @@ class ClickHouseGRPCClient(cmd.Cmd): # Executes a simple query and returns its output. def get_simple_query_output(self, query_text): - result = self.stub.ExecuteQuery(clickhouse_grpc_pb2.QueryInfo(query=query_text, user_name=self.user_name, password=self.password, - database=self.database, output_format='TabSeparated', settings=self.settings, - session_id=self.session_id, query_id=str(uuid.uuid4()))) + result = self.stub.ExecuteQuery( + clickhouse_grpc_pb2.QueryInfo( + query=query_text, + user_name=self.user_name, + password=self.password, + database=self.database, + output_format="TabSeparated", + settings=self.settings, + session_id=self.session_id, + query_id=str(uuid.uuid4()), + ) + ) if self.show_debug_info: - print('\nresult={}'.format(result)) + print("\nresult={}".format(result)) ClickHouseGRPCClient.__check_no_errors(result) return result.output.decode(DEFAULT_ENCODING) @@ -110,11 +129,19 @@ class ClickHouseGRPCClient(cmd.Cmd): with KeyboardInterruptHandlerOverride(keyboard_interrupt_handler): try: + def send_query_info(): # send main query info - info = clickhouse_grpc_pb2.QueryInfo(query=query_text, user_name=self.user_name, password=self.password, - database=self.database, output_format=self.output_format, settings=self.settings, - session_id=self.session_id, query_id=str(uuid.uuid4())) + info = clickhouse_grpc_pb2.QueryInfo( + query=query_text, + user_name=self.user_name, + password=self.password, + database=self.database, + output_format=self.output_format, + settings=self.settings, + session_id=self.session_id, + query_id=str(uuid.uuid4()), + ) # send input data if not sys.stdin.isatty(): while True: @@ -130,10 +157,10 @@ class ClickHouseGRPCClient(cmd.Cmd): cancel_event.wait() if cancel_tries > 0: yield clickhouse_grpc_pb2.QueryInfo(cancel=True) - + for result in self.stub.ExecuteQueryWithStreamIO(send_query_info()): if self.show_debug_info: - print('\nresult={}'.format(result)) + print("\nresult={}".format(result)) ClickHouseGRPCClient.__check_no_errors(result) sys.stdout.buffer.write(result.output) sys.stdout.flush() @@ -144,7 +171,11 @@ class ClickHouseGRPCClient(cmd.Cmd): cancel_event.set() if not cancelled: execution_time = time.time() - start_time - self.verbatim_print('\nElapsed: {execution_time} sec.\n'.format(execution_time=execution_time)) + self.verbatim_print( + 
"\nElapsed: {execution_time} sec.\n".format( + execution_time=execution_time + ) + ) except Exception as e: if raise_exceptions: @@ -153,24 +184,38 @@ class ClickHouseGRPCClient(cmd.Cmd): # Establish connection. def __connect(self): - self.verbatim_print("Connecting to {host}:{port} as user {user_name}.".format(host=self.host, port=self.port, user_name=self.user_name)) + self.verbatim_print( + "Connecting to {host}:{port} as user {user_name}.".format( + host=self.host, port=self.port, user_name=self.user_name + ) + ) # Secure channels are supported by server but not supported by this client. start_time = time.time() - self.channel = grpc.insecure_channel(self.host + ':' + str(self.port)) + self.channel = grpc.insecure_channel(self.host + ":" + str(self.port)) connection_time = 0 - timeout=5 + timeout = 5 while True: try: grpc.channel_ready_future(self.channel).result(timeout=timeout) - break; + break except grpc.FutureTimeoutError: connection_time += timeout - self.verbatim_print("Couldn't connect to ClickHouse server in {connection_time} seconds.".format(connection_time=connection_time)) + self.verbatim_print( + "Couldn't connect to ClickHouse server in {connection_time} seconds.".format( + connection_time=connection_time + ) + ) self.stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(self.channel) connection_time = time.time() - start_time if self.verbatim: - version = self.get_simple_query_output("SELECT version() FORMAT TabSeparated").rstrip('\n') - self.verbatim_print("Connected to ClickHouse server version {version} via gRPC protocol in {connection_time}.".format(version=version, connection_time=connection_time)) + version = self.get_simple_query_output( + "SELECT version() FORMAT TabSeparated" + ).rstrip("\n") + self.verbatim_print( + "Connected to ClickHouse server version {version} via gRPC protocol in {connection_time}.".format( + version=version, connection_time=connection_time + ) + ) def __disconnect(self): if self.channel: @@ -181,32 +226,39 @@ class ClickHouseGRPCClient(cmd.Cmd): @staticmethod def __check_no_errors(result): - if result.HasField('exception'): + if result.HasField("exception"): raise ClickHouseGRPCError(result.exception.display_text) # Use grpcio-tools to generate *pb2.py files from *.proto. @staticmethod def __generate_pb2(): script_dir = os.path.dirname(os.path.realpath(__file__)) - proto_dir = os.path.join(script_dir, './protos') - gen_dir = os.path.join(script_dir, './_gen') - if os.path.exists(os.path.join(gen_dir, 'clickhouse_grpc_pb2_grpc.py')): + proto_dir = os.path.join(script_dir, "./protos") + gen_dir = os.path.join(script_dir, "./_gen") + if os.path.exists(os.path.join(gen_dir, "clickhouse_grpc_pb2_grpc.py")): return os.makedirs(gen_dir, exist_ok=True) - cmd = ['python3', '-m', 'grpc_tools.protoc', '-I'+proto_dir, '--python_out='+gen_dir, '--grpc_python_out='+gen_dir, - proto_dir+'/clickhouse_grpc.proto'] + cmd = [ + "python3", + "-m", + "grpc_tools.protoc", + "-I" + proto_dir, + "--python_out=" + gen_dir, + "--grpc_python_out=" + gen_dir, + proto_dir + "/clickhouse_grpc.proto", + ] p = subprocess.Popen(cmd, stderr=subprocess.PIPE) # We don't want to show grpc_tools warnings. - errors = p.stderr.read().decode().strip('\n').split('\n') - only_warnings = all(('Warning' in error) for error in errors) + errors = p.stderr.read().decode().strip("\n").split("\n") + only_warnings = all(("Warning" in error) for error in errors) if not only_warnings: - error_print('\n'.join(errors)) + error_print("\n".join(errors)) # Import the generated *pb2.py files. 
@staticmethod def __import_pb2(): script_dir = os.path.dirname(os.path.realpath(__file__)) - gen_dir = os.path.join(script_dir, './_gen') + gen_dir = os.path.join(script_dir, "./_gen") sys.path.append(gen_dir) global clickhouse_grpc_pb2, clickhouse_grpc_pb2_grpc import clickhouse_grpc_pb2, clickhouse_grpc_pb2_grpc @@ -231,9 +283,9 @@ class ClickHouseGRPCClient(cmd.Cmd): # Overrides Cmd.onecmd(). Runs single command. def onecmd(self, line): stripped = line.strip() - if stripped == 'exit' or stripped == 'quit': + if stripped == "exit" or stripped == "quit": return True - if stripped == '': + if stripped == "": return False self.run_query(line, raise_exceptions=False, allow_cancel=True) return False @@ -261,17 +313,61 @@ class ClickHouseGRPCClient(cmd.Cmd): # MAIN + def main(args): - parser = argparse.ArgumentParser(description='ClickHouse client accessing server through gRPC protocol.', add_help=False) - parser.add_argument('--help', help='Show this help message and exit', action='store_true') - parser.add_argument('--host', '-h', help='The server name, ‘localhost’ by default. You can use either the name or the IPv4 or IPv6 address.', default='localhost') - parser.add_argument('--port', help='The port to connect to. This port should be enabled on the ClickHouse server (see grpc_port in the config).', default=9100) - parser.add_argument('--user', '-u', dest='user_name', help='The username. Default value: ‘default’.', default='default') - parser.add_argument('--password', help='The password. Default value: empty string.', default='') - parser.add_argument('--query', '-q', help='The query to process when using non-interactive mode.', default='') - parser.add_argument('--database', '-d', help='Select the current default database. Default value: the current database from the server settings (‘default’ by default).', default='') - parser.add_argument('--format', '-f', dest='output_format', help='Use the specified default format to output the result.', default='') - parser.add_argument('--debug', dest='show_debug_info', help='Enables showing the debug information.', action='store_true') + parser = argparse.ArgumentParser( + description="ClickHouse client accessing server through gRPC protocol.", + add_help=False, + ) + parser.add_argument( + "--help", help="Show this help message and exit", action="store_true" + ) + parser.add_argument( + "--host", + "-h", + help="The server name, ‘localhost’ by default. You can use either the name or the IPv4 or IPv6 address.", + default="localhost", + ) + parser.add_argument( + "--port", + help="The port to connect to. This port should be enabled on the ClickHouse server (see grpc_port in the config).", + default=9100, + ) + parser.add_argument( + "--user", + "-u", + dest="user_name", + help="The username. Default value: ‘default’.", + default="default", + ) + parser.add_argument( + "--password", help="The password. Default value: empty string.", default="" + ) + parser.add_argument( + "--query", + "-q", + help="The query to process when using non-interactive mode.", + default="", + ) + parser.add_argument( + "--database", + "-d", + help="Select the current default database. 
Default value: the current database from the server settings (‘default’ by default).", + default="", + ) + parser.add_argument( + "--format", + "-f", + dest="output_format", + help="Use the specified default format to output the result.", + default="", + ) + parser.add_argument( + "--debug", + dest="show_debug_info", + help="Enables showing the debug information.", + action="store_true", + ) args = parser.parse_args(args) if args.help: @@ -284,11 +380,18 @@ def main(args): output_format = args.output_format if not output_format and interactive_mode: output_format = DEFAULT_OUTPUT_FORMAT_FOR_INTERACTIVE_MODE - + try: - with ClickHouseGRPCClient(host=args.host, port=args.port, user_name=args.user_name, password=args.password, - database=args.database, output_format=output_format, verbatim=verbatim, - show_debug_info=args.show_debug_info) as client: + with ClickHouseGRPCClient( + host=args.host, + port=args.port, + user_name=args.user_name, + password=args.password, + database=args.database, + output_format=output_format, + verbatim=verbatim, + show_debug_info=args.show_debug_info, + ) as client: if interactive_mode: client.cmdloop() else: @@ -301,5 +404,6 @@ def main(args): if verbatim: print("\nBye") -if __name__ == '__main__': + +if __name__ == "__main__": main(sys.argv[1:]) diff --git a/utils/kafka/consume.py b/utils/kafka/consume.py index c82901f9e0e..74542baf218 100755 --- a/utils/kafka/consume.py +++ b/utils/kafka/consume.py @@ -9,24 +9,40 @@ from pprint import pprint def main(): - parser = argparse.ArgumentParser(description='Kafka Producer client') - parser.add_argument('--server', type=str, metavar='HOST', default='localhost', - help='Kafka bootstrap-server address') - parser.add_argument('--port', type=int, metavar='PORT', default=9092, - help='Kafka bootstrap-server port') - parser.add_argument('--client', type=str, default='ch-kafka-python', - help='custom client id for this producer') - parser.add_argument('--topic', type=str, required=True, - help='name of Kafka topic to store in') - parser.add_argument('--group', type=str, required=True, - help='name of the consumer group') + parser = argparse.ArgumentParser(description="Kafka Producer client") + parser.add_argument( + "--server", + type=str, + metavar="HOST", + default="localhost", + help="Kafka bootstrap-server address", + ) + parser.add_argument( + "--port", + type=int, + metavar="PORT", + default=9092, + help="Kafka bootstrap-server port", + ) + parser.add_argument( + "--client", + type=str, + default="ch-kafka-python", + help="custom client id for this producer", + ) + parser.add_argument( + "--topic", type=str, required=True, help="name of Kafka topic to store in" + ) + parser.add_argument( + "--group", type=str, required=True, help="name of the consumer group" + ) args = parser.parse_args() config = { - 'bootstrap_servers': f'{args.server}:{args.port}', - 'client_id': args.client, - 'group_id': args.group, - 'auto_offset_reset': 'earliest', + "bootstrap_servers": f"{args.server}:{args.port}", + "client_id": args.client, + "group_id": args.group, + "auto_offset_reset": "earliest", } client = kafka.KafkaConsumer(**config) diff --git a/utils/kafka/manage.py b/utils/kafka/manage.py index 7458bdceb74..578a7df7310 100755 --- a/utils/kafka/manage.py +++ b/utils/kafka/manage.py @@ -8,24 +8,48 @@ import argparse def main(): - parser = argparse.ArgumentParser(description='Kafka Topic manager') - parser.add_argument('--server', type=str, metavar='HOST', default='localhost', - help='Kafka bootstrap-server address') - 
parser.add_argument('--port', type=int, metavar='PORT', default=9092, - help='Kafka bootstrap-server port') - parser.add_argument('--client', type=str, default='ch-kafka-python', - help='custom client id for this producer') + parser = argparse.ArgumentParser(description="Kafka Topic manager") + parser.add_argument( + "--server", + type=str, + metavar="HOST", + default="localhost", + help="Kafka bootstrap-server address", + ) + parser.add_argument( + "--port", + type=int, + metavar="PORT", + default=9092, + help="Kafka bootstrap-server port", + ) + parser.add_argument( + "--client", + type=str, + default="ch-kafka-python", + help="custom client id for this producer", + ) commands = parser.add_mutually_exclusive_group() - commands.add_argument('--create', type=str, metavar='TOPIC', nargs='+', - help='create new topic(s) in the cluster') - commands.add_argument('--delete', type=str, metavar='TOPIC', nargs='+', - help='delete existing topic(s) from the cluster') + commands.add_argument( + "--create", + type=str, + metavar="TOPIC", + nargs="+", + help="create new topic(s) in the cluster", + ) + commands.add_argument( + "--delete", + type=str, + metavar="TOPIC", + nargs="+", + help="delete existing topic(s) from the cluster", + ) args = parser.parse_args() config = { - 'bootstrap_servers': f'{args.server}:{args.port}', - 'client_id': args.client, + "bootstrap_servers": f"{args.server}:{args.port}", + "client_id": args.client, } client = kafka.KafkaAdminClient(**config) diff --git a/utils/kafka/produce.py b/utils/kafka/produce.py index 97e2e6b7705..f82e56d8478 100755 --- a/utils/kafka/produce.py +++ b/utils/kafka/produce.py @@ -13,50 +13,82 @@ import time class Sync(enum.Enum): - NONE = 'none' - LEAD = 'leader' - ALL = 'all' + NONE = "none" + LEAD = "leader" + ALL = "all" def __str__(self): return self.value def convert(self): values = { - str(Sync.NONE): '0', - str(Sync.LEAD): '1', - str(Sync.ALL): 'all', + str(Sync.NONE): "0", + str(Sync.LEAD): "1", + str(Sync.ALL): "all", } return values[self.value] def main(): - parser = argparse.ArgumentParser(description='Produce a single message taken from input') - parser.add_argument('--server', type=str, metavar='HOST', default='localhost', - help='Kafka bootstrap-server address') - parser.add_argument('--port', type=int, metavar='PORT', default=9092, - help='Kafka bootstrap-server port') - parser.add_argument('--client', type=str, default='ch-kafka-python', - help='custom client id for this producer') - parser.add_argument('--topic', type=str, required=True, - help='name of Kafka topic to store in') - parser.add_argument('--retries', type=int, default=0, - help='number of retries to send on failure') - parser.add_argument('--multiply', type=int, default=1, - help='multiplies incoming string many times') - parser.add_argument('--repeat', type=int, default=1, - help='send same (multiplied) message many times') + parser = argparse.ArgumentParser( + description="Produce a single message taken from input" + ) + parser.add_argument( + "--server", + type=str, + metavar="HOST", + default="localhost", + help="Kafka bootstrap-server address", + ) + parser.add_argument( + "--port", + type=int, + metavar="PORT", + default=9092, + help="Kafka bootstrap-server port", + ) + parser.add_argument( + "--client", + type=str, + default="ch-kafka-python", + help="custom client id for this producer", + ) + parser.add_argument( + "--topic", type=str, required=True, help="name of Kafka topic to store in" + ) + parser.add_argument( + "--retries", type=int, default=0, 
help="number of retries to send on failure" + ) + parser.add_argument( + "--multiply", type=int, default=1, help="multiplies incoming string many times" + ) + parser.add_argument( + "--repeat", + type=int, + default=1, + help="send same (multiplied) message many times", + ) mode_group = parser.add_mutually_exclusive_group() - mode_group.add_argument('--jobs', type=int, default=multiprocessing.cpu_count(), - help='number of concurrent jobs') - mode_group.add_argument('--delay', type=int, metavar='SECONDS', default=0, - help='delay before sending next message') + mode_group.add_argument( + "--jobs", + type=int, + default=multiprocessing.cpu_count(), + help="number of concurrent jobs", + ) + mode_group.add_argument( + "--delay", + type=int, + metavar="SECONDS", + default=0, + help="delay before sending next message", + ) args = parser.parse_args() config = { - 'bootstrap_servers': f'{args.server}:{args.port}', - 'client_id': args.client, - 'retries': args.retries, + "bootstrap_servers": f"{args.server}:{args.port}", + "client_id": args.client, + "retries": args.retries, } client = kafka.KafkaProducer(**config) @@ -66,7 +98,7 @@ def main(): if args.delay > 0: time.sleep(args.delay) client.send(topic=args.topic, value=message) - print(f'iteration {num}: sent a message multiplied {args.multiply} times') + print(f"iteration {num}: sent a message multiplied {args.multiply} times") if args.delay > 0: args.jobs = 1 diff --git a/utils/kafka/status.py b/utils/kafka/status.py index 28ba3c9c36f..12ea3d23bdf 100755 --- a/utils/kafka/status.py +++ b/utils/kafka/status.py @@ -8,18 +8,34 @@ import argparse def main(): - parser = argparse.ArgumentParser(description='Kafka client to get groups and topics status') - parser.add_argument('--server', type=str, metavar='HOST', default='localhost', - help='Kafka bootstrap-server address') - parser.add_argument('--port', type=int, metavar='PORT', default=9092, - help='Kafka bootstrap-server port') - parser.add_argument('--client', type=str, default='ch-kafka-python', - help='custom client id for this producer') + parser = argparse.ArgumentParser( + description="Kafka client to get groups and topics status" + ) + parser.add_argument( + "--server", + type=str, + metavar="HOST", + default="localhost", + help="Kafka bootstrap-server address", + ) + parser.add_argument( + "--port", + type=int, + metavar="PORT", + default=9092, + help="Kafka bootstrap-server port", + ) + parser.add_argument( + "--client", + type=str, + default="ch-kafka-python", + help="custom client id for this producer", + ) args = parser.parse_args() config = { - 'bootstrap_servers': f'{args.server}:{args.port}', - 'client_id': args.client, + "bootstrap_servers": f"{args.server}:{args.port}", + "client_id": args.client, } client = kafka.KafkaAdminClient(**config) @@ -28,10 +44,13 @@ def main(): topics = cluster.topics() for topic in topics: - print(f'Topic "{topic}":', end='') + print(f'Topic "{topic}":', end="") for partition in cluster.partitions_for_topic(topic): tp = kafka.TopicPartition(topic, partition) - print(f' {partition} (begin: {consumer.beginning_offsets([tp])[tp]}, end: {consumer.end_offsets([tp])[tp]})', end='') + print( + f" {partition} (begin: {consumer.beginning_offsets([tp])[tp]}, end: {consumer.end_offsets([tp])[tp]})", + end="", + ) print() groups = client.list_consumer_groups() @@ -41,7 +60,9 @@ def main(): consumer = kafka.KafkaConsumer(**config, group_id=group[0]) offsets = client.list_consumer_group_offsets(group[0]) for topic, offset in offsets.items(): - 
print(f'\t{topic.topic}[{topic.partition}]: {consumer.beginning_offsets([topic])[topic]}, {offset.offset}, {consumer.end_offsets([topic])[topic]}') + print( + f"\t{topic.topic}[{topic.partition}]: {consumer.beginning_offsets([topic])[topic]}, {offset.offset}, {consumer.end_offsets([topic])[topic]}" + ) consumer.close() client.close() diff --git a/utils/zero_copy/zero_copy_schema_converter.py b/utils/zero_copy/zero_copy_schema_converter.py index c5edef78cce..6103ac69c6e 100755 --- a/utils/zero_copy/zero_copy_schema_converter.py +++ b/utils/zero_copy/zero_copy_schema_converter.py @@ -10,56 +10,90 @@ def parse_args(): Parse command-line arguments. """ parser = argparse.ArgumentParser() - parser.add_argument('--hosts', default=socket.getfqdn() + ':2181', help='ZooKeeper hosts (host:port,host:port,...)') - parser.add_argument('-s', '--secure', default=False, action='store_true', help='Use secure connection') - parser.add_argument('--cert', default='', help='Client TLS certificate file') - parser.add_argument('--key', default='', help='Client TLS key file') - parser.add_argument('--ca', default='', help='Client TLS ca file') - parser.add_argument('-u', '--user', default='', help='ZooKeeper ACL user') - parser.add_argument('-p', '--password', default='', help='ZooKeeper ACL password') - parser.add_argument('-r', '--root', default='/clickhouse', help='ZooKeeper root path for ClickHouse') - parser.add_argument('-z', '--zcroot', default='clickhouse/zero_copy', help='ZooKeeper node for new zero-copy data') - parser.add_argument('--dryrun', default=False, action='store_true', help='Do not perform any actions') - parser.add_argument('--cleanup', default=False, action='store_true', help='Clean old nodes') - parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Verbose mode') + parser.add_argument( + "--hosts", + default=socket.getfqdn() + ":2181", + help="ZooKeeper hosts (host:port,host:port,...)", + ) + parser.add_argument( + "-s", + "--secure", + default=False, + action="store_true", + help="Use secure connection", + ) + parser.add_argument("--cert", default="", help="Client TLS certificate file") + parser.add_argument("--key", default="", help="Client TLS key file") + parser.add_argument("--ca", default="", help="Client TLS ca file") + parser.add_argument("-u", "--user", default="", help="ZooKeeper ACL user") + parser.add_argument("-p", "--password", default="", help="ZooKeeper ACL password") + parser.add_argument( + "-r", "--root", default="/clickhouse", help="ZooKeeper root path for ClickHouse" + ) + parser.add_argument( + "-z", + "--zcroot", + default="clickhouse/zero_copy", + help="ZooKeeper node for new zero-copy data", + ) + parser.add_argument( + "--dryrun", + default=False, + action="store_true", + help="Do not perform any actions", + ) + parser.add_argument( + "--cleanup", default=False, action="store_true", help="Clean old nodes" + ) + parser.add_argument( + "-v", "--verbose", action="store_true", default=False, help="Verbose mode" + ) return parser.parse_args() # Several folders to heuristic that zookeepr node is folder node # May be false positive when someone creates set of tables with same paths -table_nodes = ['alter_partition_version', 'block_numbers', 'blocks', 'columns', 'leader_election'] -zc_nodes = ['zero_copy_s3', 'zero_copy_hdfs'] +table_nodes = [ + "alter_partition_version", + "block_numbers", + "blocks", + "columns", + "leader_election", +] +zc_nodes = ["zero_copy_s3", "zero_copy_hdfs"] def convert_node(client, args, path, zc_node): - base_path = 
 def convert_node(client, args, path, zc_node):
-    base_path = f'{path}/{zc_node}/shared'
+    base_path = f"{path}/{zc_node}/shared"
     parts = client.get_children(base_path)
-    table_id_path = f'{path}/table_shared_id'
-    table_id = ''
+    table_id_path = f"{path}/table_shared_id"
+    table_id = ""
     if client.exists(table_id_path):
-        table_id = client.get(table_id_path)[0].decode('UTF-8')
+        table_id = client.get(table_id_path)[0].decode("UTF-8")
     else:
         table_id = str(uuid.uuid4())
         if args.verbose:
             print(f'Make table_id "{table_id_path}" = "{table_id}"')
         if not args.dryrun:
-            client.create(table_id_path, bytes(table_id, 'UTF-8'))
+            client.create(table_id_path, bytes(table_id, "UTF-8"))
 
     for part in parts:
-        part_path = f'{base_path}/{part}'
+        part_path = f"{base_path}/{part}"
         uniq_ids = client.get_children(part_path)
         for uniq_id in uniq_ids:
-            uniq_path = f'{part_path}/{uniq_id}'
+            uniq_path = f"{part_path}/{uniq_id}"
             replicas = client.get_children(uniq_path)
             for replica in replicas:
-                replica_path = f'{uniq_path}/{replica}'
-                new_path = f'{args.root}/{args.zcroot}/{zc_node}/{table_id}/{part}/{uniq_id}/{replica}'
+                replica_path = f"{uniq_path}/{replica}"
+                new_path = f"{args.root}/{args.zcroot}/{zc_node}/{table_id}/{part}/{uniq_id}/{replica}"
                 if not client.exists(new_path):
                     if args.verbose:
                         print(f'Make node "{new_path}"')
                     if not args.dryrun:
-                        client.ensure_path(f'{args.root}/{args.zcroot}/{zc_node}/{table_id}/{part}/{uniq_id}')
-                        client.create(new_path, value=b'lock')
+                        client.ensure_path(
+                            f"{args.root}/{args.zcroot}/{zc_node}/{table_id}/{part}/{uniq_id}"
+                        )
+                        client.create(new_path, value=b"lock")
                 if args.cleanup:
                     if args.verbose:
                         print(f'Remove node "{replica_path}"')
@@ -71,7 +105,7 @@ def convert_node(client, args, path, zc_node):
             client.delete(part_path)
     if args.cleanup and not args.dryrun:
         client.delete(base_path)
-        client.delete(f'{path}/{zc_node}')
+        client.delete(f"{path}/{zc_node}")
 
 
 def convert_table(client, args, path, nodes):
@@ -94,29 +128,30 @@ def scan_recursive(client, args, path):
         convert_table(client, args, path, nodes)
     else:
         for node in nodes:
-            scan_recursive(client, args, f'{path}/{node}')
+            scan_recursive(client, args, f"{path}/{node}")
 
 
 def scan(client, args):
     nodes = client.get_children(args.root)
     for node in nodes:
         if node != args.zcroot:
-            scan_recursive(client, args, f'{args.root}/{node}')
+            scan_recursive(client, args, f"{args.root}/{node}")
 
 
 def get_client(args):
-    client = KazooClient(connection_retry=3,
-                         command_retry=3,
-                         timeout=1,
-                         hosts=args.hosts,
-                         use_ssl=args.secure,
-                         certfile=args.cert,
-                         keyfile=args.key,
-                         ca=args.ca
-                         )
+    client = KazooClient(
+        connection_retry=3,
+        command_retry=3,
+        timeout=1,
+        hosts=args.hosts,
+        use_ssl=args.secure,
+        certfile=args.cert,
+        keyfile=args.key,
+        ca=args.ca,
+    )
     client.start()
-    if (args.user and args.password):
-        client.add_auth('digest', f'{args.user}:{args.password}')
+    if args.user and args.password:
+        client.add_auth("digest", f"{args.user}:{args.password}")
     return client
 
 
@@ -126,5 +161,5 @@ def main():
     scan(client, args)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
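Before the website changes below, a minimal read-only sketch (not part of the patch) of how the converter above talks to ZooKeeper, assuming the kazoo package is installed. It connects the same way get_client() does for the simple case (no TLS, no ACL) and lists the children that scan() would walk; the host and root path are placeholders and nothing is modified.

# Sketch only: connect to ZooKeeper with kazoo and list nodes under the root.
# Assumes kazoo; "localhost:2181" and "/clickhouse" are placeholders.
from kazoo.client import KazooClient

client = KazooClient(hosts="localhost:2181", timeout=1)
client.start()
try:
    for node in client.get_children("/clickhouse"):
        print(node)
finally:
    client.stop()
    client.close()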
diff --git a/website/js/base.js b/website/js/base.js
index 6704231c69d..9389028f1ef 100644
--- a/website/js/base.js
+++ b/website/js/base.js
@@ -70,15 +70,6 @@
     (function (d, w, c) {
         (w[c] = w[c] || []).push(function() {
             var is_single_page = $('html').attr('data-single-page') === 'true';
-            try {
-                w.yaCounter18343495 = new Ya.Metrika2({
-                    id: 18343495,
-                    clickmap: !is_single_page,
-                    trackLinks: !is_single_page,
-                    accurateTrackBounce: !is_single_page,
-                    webvisor: !is_single_page
-                });
-            } catch(e) { }
 
             if (!is_single_page) {
                 $('head').each(function(_, element) {
@@ -91,21 +82,7 @@
                 });
             }
         });
-
-        var n = d.getElementsByTagName("script")[0],
-            s = d.createElement("script"),
-            f = function () { n.parentNode.insertBefore(s, n); };
-        s.type = "text/javascript";
-        s.async = true;
-        s.src = "/js/metrika.js";
-        if (window.location.hostname.endsWith('clickhouse.com')) {
-            if (w.opera == "[object Opera]") {
-                d.addEventListener("DOMContentLoaded", f, false);
-            } else {
-                f();
-            }
-        }
-    })(document, window, "yandex_metrika_callbacks2");
+    })(document, window, "");
 
     var beforePrint = function() {
         var details = document.getElementsByTagName("details");